/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"


struct ccp_sha_result {
        struct completion completion;
        int err;
};

static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
{
        struct ccp_sha_result *result = req->data;

        if (err == -EINPROGRESS)
                return;

        result->err = err;
        complete(&result->completion);
}

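/*
 * Hash a scatterlist synchronously: issue an async digest request on
 * the supplied tfm and wait on a completion for the result. Used by
 * the HMAC paths to hash over-long keys and to compute the outer hash.
 */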
static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
                         struct scatterlist *sg, unsigned int len)
{
        struct ccp_sha_result result;
        struct ahash_request *req;
        int ret;

        init_completion(&result.completion);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   ccp_sync_hash_complete, &result);
        ahash_request_set_crypt(req, sg, buf, len);

        ret = crypto_ahash_digest(req);
        if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
                ret = wait_for_completion_interruptible(&result.completion);
                if (!ret)
                        ret = result.err;
        }

        ahash_request_free(req);

        return ret;
}

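/*
 * Finish an HMAC operation: hash the opad block followed by the inner
 * digest (already copied into req->result) to produce the final HMAC
 * value back into req->result.
 */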
static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct scatterlist sg[2];
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        sg_init_table(sg, ARRAY_SIZE(sg));
        sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
        sg_set_buf(&sg[1], req->result, digest_size);

        return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
                             block_size + digest_size);
}

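/*
 * Completion callback for a CCP SHA command: stash any unhashed
 * remainder for the next update, copy the intermediate/final digest
 * into the request, and run the HMAC outer hash on a final operation.
 */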
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.sha.src,
                                         rctx->hash_cnt, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else
                rctx->buf_count = 0;

        memcpy(req->result, rctx->ctx, digest_size);

        /* If we're doing an HMAC, we need to perform that on the final op */
        if (rctx->final && ctx->u.sha.key_len)
                ret = ccp_sha_finish_hmac(async_req);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

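/*
 * Common update routine behind update/final/finup/digest. Data is
 * submitted to the CCP in block-size multiples; any residue (or one
 * full block held back so a zero-length final is never issued) is
 * buffered in the request context until more data arrives.
 */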
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
                             unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int len, sg_count;
        int ret;

        if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

        len = rctx->buf_count + nbytes;

        rctx->final = final;
        rctx->hash_cnt = final ? len : len & ~(block_size - 1);
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        if (!final && (rctx->hash_cnt == len)) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

        /* Initialize the context scatterlist */
        sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

        /* Build the data scatterlist table - allocate enough entries for all
         * possible data pieces (hmac ipad, buffer, input data)
         */
        sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
        ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
        if (ret)
                return ret;

        sg = NULL;
        if (rctx->first && ctx->u.sha.key_len) {
                rctx->hash_cnt += block_size;

                sg_init_one(&rctx->pad_sg, ctx->u.sha.ipad, block_size);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
        }

        if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
        }

        if (nbytes)
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

        if (sg)
                sg_mark_end(sg);

        rctx->msg_bits += (rctx->hash_cnt << 3);        /* Total in bits */

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_SHA;
        rctx->cmd.u.sha.type = rctx->type;
        rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
        rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
        rctx->cmd.u.sha.src = (sg) ? rctx->data_sg.sgl : NULL;
        rctx->cmd.u.sha.src_len = rctx->hash_cnt;
        rctx->cmd.u.sha.final = rctx->final;
        rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

        rctx->first = 0;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;
}

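/* Reset the request context and load the algorithm's initial hash value */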
static int ccp_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));

        memset(rctx, 0, sizeof(*rctx));

        memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
        rctx->type = alg->type;
        rctx->first = 1;

        return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
        return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
        ccp_sha_init(req);

        return ccp_do_sha_update(req, req->nbytes, 1);
}

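/*
 * Set the HMAC key: hash keys longer than a block down to digest size,
 * zero-pad to the block size, then precompute the ipad/opad blocks
 * used by the inner and outer hashes.
 */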
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct scatterlist sg;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int digest_size = crypto_ahash_digestsize(tfm);
        int i, ret;

        /* Set to zero until complete */
        ctx->u.sha.key_len = 0;

        /* Clear key area to provide zero padding for keys smaller
         * than the block size
         */
        memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

        if (key_len > block_size) {
                /* Must hash the input key */
                sg_init_one(&sg, key, key_len);
                ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
                if (ret) {
                        crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                        return -EINVAL;
                }

                key_len = digest_size;
        } else
                memcpy(ctx->u.sha.key, key, key_len);

        for (i = 0; i < block_size; i++) {
                ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
                ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
        }

        ctx->u.sha.key_len = key_len;

        return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

        ctx->complete = ccp_sha_complete;
        ctx->u.sha.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

        return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

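/*
 * HMAC tfm init: allocate the underlying CCP SHA ahash tfm used for
 * the synchronous key and outer-hash operations, then perform the
 * common SHA init.
 */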
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
        struct crypto_ahash *hmac_tfm;

        hmac_tfm = crypto_alloc_ahash(alg->child_alg,
                                      CRYPTO_ALG_TYPE_AHASH, 0);
        if (IS_ERR(hmac_tfm)) {
                pr_warn("could not load driver %s needed for HMAC support\n",
                        alg->child_alg);
                return PTR_ERR(hmac_tfm);
        }

        ctx->u.sha.hmac_tfm = hmac_tfm;

        return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->u.sha.hmac_tfm)
                crypto_free_ahash(ctx->u.sha.hmac_tfm);

        ccp_sha_cra_exit(tfm);
}

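/* Initial hash values, stored big-endian to match the CCP SHA context */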
static const u32 sha1_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
        cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const u32 sha224_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const u32 sha256_init[CCP_SHA_CTXSIZE / sizeof(u32)] = {
        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

struct ccp_sha_def {
        const char *name;
        const char *drv_name;
        const u32 *init;
        enum ccp_sha_type type;
        u32 digest_size;
        u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
        {
                .name = "sha1",
                .drv_name = "sha1-ccp",
                .init = sha1_init,
                .type = CCP_SHA_TYPE_1,
                .digest_size = SHA1_DIGEST_SIZE,
                .block_size = SHA1_BLOCK_SIZE,
        },
        {
                .name = "sha224",
                .drv_name = "sha224-ccp",
                .init = sha224_init,
                .type = CCP_SHA_TYPE_224,
                .digest_size = SHA224_DIGEST_SIZE,
                .block_size = SHA224_BLOCK_SIZE,
        },
        {
                .name = "sha256",
                .drv_name = "sha256-ccp",
                .init = sha256_init,
                .type = CCP_SHA_TYPE_256,
                .digest_size = SHA256_DIGEST_SIZE,
                .block_size = SHA256_BLOCK_SIZE,
        },
};

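/*
 * Register an hmac(<alg>) variant by copying the just-registered base
 * SHA algorithm and overriding only the names, setkey and init/exit.
 */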
static int ccp_register_hmac_alg(struct list_head *head,
                                 const struct ccp_sha_def *def,
                                 const struct ccp_crypto_ahash_alg *base_alg)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        /* Copy the base algorithm and only change what's necessary */
        memcpy(ccp_alg, base_alg, sizeof(*ccp_alg));
        INIT_LIST_HEAD(&ccp_alg->entry);

        strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

        alg = &ccp_alg->alg;
        alg->setkey = ccp_sha_setkey;

        halg = &alg->halg;

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
                 def->drv_name);
        base->cra_init = ccp_hmac_sha_cra_init;
        base->cra_exit = ccp_hmac_sha_cra_exit;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return ret;
}

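/* Register a SHA algorithm from the table above, then its HMAC wrapper */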
static int ccp_register_sha_alg(struct list_head *head,
                                const struct ccp_sha_def *def)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        ccp_alg->init = def->init;
        ccp_alg->type = def->type;

        alg = &ccp_alg->alg;
        alg->init = ccp_sha_init;
        alg->update = ccp_sha_update;
        alg->final = ccp_sha_final;
        alg->finup = ccp_sha_finup;
        alg->digest = ccp_sha_digest;

        halg = &alg->halg;
        halg->digestsize = def->digest_size;

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);
        base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = def->block_size;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_type = &crypto_ahash_type;
        base->cra_init = ccp_sha_cra_init;
        base->cra_exit = ccp_sha_cra_exit;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        ret = ccp_register_hmac_alg(head, def, ccp_alg);

        return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
                ret = ccp_register_sha_alg(head, &sha_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}