/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

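/* Algorithm definition: the name the crypto API uses to look up the
 * algorithm and the unique driver name this implementation registers.
 */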
struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name = "xts(aes)",
		.drv_name = "xts-aes-ccp",
	},
};

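/* Map between a data unit size in bytes and the unit-size value that is
 * programmed into the CCP command for an XTS-AES operation.
 */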
struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

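/* The table is searched from largest to smallest unit size; the first
 * size that evenly divides the request length is selected.  Sizes the
 * CCP cannot process directly map to CCP_XTS_AES_UNIT_SIZE__LAST, which
 * causes the request to be handed to the software fallback.
 */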
static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size = 4096,
		.value = CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size = 2048,
		.value = CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size = 1024,
		.value = CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size = 512,
		.value = CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size = 256,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 128,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 64,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 32,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 16,
		.value = CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size = 1,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

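/* Completion callback: on success, copy the IV (the XTS tweak) from the
 * request context back into the caller's request.
 */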
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

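/* An XTS key is two keys of equal length concatenated: the AES cipher
 * key followed by the tweak key.  Only the 128-bit case is kept for the
 * CCP; the key is also programmed into the fallback cipher, which
 * services any key size the hardware cannot.
 */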
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only support 128-bit AES key with a 128-bit Tweak key,
	 * otherwise use the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
					key_len);
}

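/* Common encrypt/decrypt path: validate the request, pick a unit size,
 * and either hand the request to the fallback cipher or build a CCP
 * command and queue it for the hardware.
 */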
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

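	/* Find the largest unit size that evenly divides the request
	 * length.
	 */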
	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
			break;

	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
		ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
				  crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

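/* Allocate a fallback implementation of the same algorithm to handle
 * the unit sizes and key sizes the CCP does not support.
 */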
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
					       CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver %s\n",
			crypto_tfm_alg_name(tfm));
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_ablkcipher = fallback_tfm;

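	/* Reserve request context space for this driver plus whatever
	 * the fallback cipher needs, since either may service a request.
	 */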
	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
				      fallback_tfm->base.crt_ablkcipher.reqsize;

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_ablkcipher)
		crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
	ctx->u.aes.tfm_ablkcipher = NULL;
}

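/* Build a crypto_alg from an aes_xts_algs entry and register it with
 * the crypto API.
 */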
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

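	/* The min/max key sizes are doubled because an XTS key holds
	 * both the AES key and the tweak key.
	 */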
	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

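/* Register every XTS-AES algorithm variant, adding each to the caller's
 * list; registration stops at the first failure.
 */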
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}