/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <linux/bug.h>
#include <linux/module.h>

#include "internal.h"

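/*
 * The wrapper's tfm context holds nothing but a pointer to the underlying
 * legacy (a)blkcipher transform, so the extra context size is the size of
 * that single pointer.
 */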
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
	       alg->cra_type != &crypto_givcipher_type);

	return sizeof(struct crypto_ablkcipher *);
}

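/*
 * Forward a setkey request to the underlying blkcipher: propagate the
 * request flags down, set the key, then copy the result flags back up.
 */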
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

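/*
 * Run a synchronous blkcipher operation by building an on-stack
 * blkcipher_desc from the skcipher request and calling the legacy
 * encrypt/decrypt hook directly.
 */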
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

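/* Thin wrappers binding the legacy algorithm's hooks to the skcipher API. */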
static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

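/*
 * Instantiate a skcipher on top of a synchronous blkcipher: take a module
 * reference, allocate the underlying transform, stash it in the context
 * and point the skcipher ops at the blkcipher bridge functions above.
 */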
static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->has_setkey = calg->cra_blkcipher.max_keysize;

	return 0;
}

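/* As skcipher_setkey_blkcipher() above, but for the async interface. */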
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

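/*
 * Run an ablkcipher operation through a sub-request placed in the skcipher
 * request context; reqsize is sized for this in the init function below.
 */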
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

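/*
 * Instantiate a skcipher on top of an ablkcipher. In addition to the
 * blkcipher case, reqsize must cover the ablkcipher sub-request that
 * skcipher_crypt_ablkcipher() carves out of the request context.
 */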
static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;

	return 0;
}

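/*
 * Dispatch on the underlying algorithm type: synchronous blkciphers get
 * the blkcipher bridge, ablkciphers and givciphers the ablkcipher one.
 */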
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);

	return crypto_init_skcipher_ops_ablkcipher(tfm);
}

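/*
 * The lookup mask accepts any of the legacy blkcipher-compatible
 * algorithm types, so a single skcipher type serves all of them.
 */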
static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

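/**
 * crypto_alloc_skcipher() - allocate symmetric key cipher handle
 * @alg_name: name of the algorithm to look up, e.g. "cbc(aes)"
 * @type: specializes the lookup of the algorithm
 * @mask: restricts the lookup of the algorithm
 *
 * Returns the cipher handle on success; on failure IS_ERR() is true and
 * PTR_ERR() gives the error code. Callers typically pass 0 for both
 * @type and @mask.
 */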
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");