/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

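/*
 * CPACF provides the KM (cipher message), KMC (cipher message with
 * chaining) and KMCTR (cipher message with counter) instructions.
 * The function codes each instruction supports are queried once at
 * module init and cached in km_functions, kmc_functions and
 * kmctr_functions; every setkey consults these masks and switches to
 * the software fallback when the required code is missing.
 */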
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        int key_len;
        unsigned long fc;
        union {
                struct crypto_skcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        int key_len;
        unsigned long fc;
        struct crypto_skcipher *fallback;
};

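/*
 * Software fallback for the single-block cipher: if no CPACF function
 * code matches the key length, the key is handed to the generic AES
 * cipher allocated in fallback_init_cip(). Request and result flags
 * are mirrored between the two tfms so the crypto API sees consistent
 * state.
 */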
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_cip(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc | CPACF_DECRYPT,
                 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

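/*
 * Single-block "aes" cipher. The priority of 300 ranks this
 * implementation above the generic C code, so the CPACF path wins
 * whenever the function code is available.
 */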
static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-s390",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct s390_aes_ctx),
        .cra_module             = THIS_MODULE,
        .cra_init               = fallback_init_cip,
        .cra_exit               = fallback_exit_cip,
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt,
                }
        }
};

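/*
 * The ECB/CBC/CTR modes fall back to a synchronous skcipher (the
 * CRYPTO_ALG_ASYNC mask bit requests a sync implementation), which is
 * why an on-stack request is safe in the helpers below.
 */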
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        /* wipe the on-stack request, as the decrypt path does */
        skcipher_request_zero(req);
        return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

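/*
 * ECB walk loop: KM processes as many complete 16-byte blocks as the
 * current walk segment holds; ORing CPACF_DECRYPT into the function
 * code selects the decryption direction.
 */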
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;

        ret = blkcipher_walk_virt(desc, walk);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(sctx->fc | modifier, sctx->key,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
                                                   CRYPTO_ALG_ASYNC |
                                                   CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-s390",
        .cra_priority           = 400, /* combo: aes + ecb */
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct s390_aes_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ecb_aes_set_key,
                        .encrypt        = ecb_aes_encrypt,
                        .decrypt        = ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMC_AES_128 :
             (key_len == 24) ? CPACF_KMC_AES_192 :
             (key_len == 32) ? CPACF_KMC_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

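/*
 * KMC takes a parameter block of chaining value (IV) followed by the
 * key. The hardware updates the chaining value in place, so it is
 * copied back to walk->iv after the loop to keep chained requests
 * correct.
 */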
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        ret = blkcipher_walk_virt(desc, walk);
        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_kmc(sctx->fc | modifier, &param,
                          walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-s390",
        .cra_priority           = 400, /* combo: aes + cbc */
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct s390_aes_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = cbc_aes_set_key,
                        .encrypt        = cbc_aes_encrypt,
                        .decrypt        = cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
                                                     CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst, struct scatterlist *src,
                                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst, struct scatterlist *src,
                                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned long fc;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        /* In fips mode only 128 bit or 256 bit keys are valid */
        if (fips_enabled && key_len != 32 && key_len != 64) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* Pick the correct function code based on the key length */
        fc = (key_len == 32) ? CPACF_KM_XTS_128 :
             (key_len == 64) ? CPACF_KM_XTS_256 : 0;

        /* Check if the function code is available */
        xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!xts_ctx->fc)
                return xts_fallback_setkey(tfm, in_key, key_len);

        /* Split the XTS key into the two subkeys */
        key_len = key_len / 2;
        xts_ctx->key_len = key_len;
        memcpy(xts_ctx->key, in_key, key_len);
        memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
        return 0;
}

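/*
 * XTS: PCC (perform cryptographic computation) encrypts the tweak
 * with the second subkey; the resulting initial XTS parameter is fed
 * together with the first subkey to KM. Key fields are right-aligned
 * in their 32-byte slots, hence the 16-byte offset (key_len & 0x10)
 * for 128-bit subkeys.
 */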
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int offset, nbytes, n;
        int ret;
        struct {
                u8 key[32];
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        ret = blkcipher_walk_virt(desc, walk);
        offset = xts_ctx->key_len & 0x10;
        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
        cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

        memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
                                                  CRYPTO_ALG_ASYNC |
                                                  CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-s390",
        .cra_priority           = 400, /* combo: aes + xts */
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct s390_xts_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = xts_fallback_init,
        .cra_exit               = xts_fallback_exit,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aes_set_key,
                        .encrypt        = xts_aes_encrypt,
                        .decrypt        = xts_aes_decrypt,
                }
        }
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
             (key_len == 24) ? CPACF_KMCTR_AES_192 :
             (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

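/*
 * Pre-fill a page with consecutive counter values so one KMCTR call
 * can process up to PAGE_SIZE bytes. The page is shared and guarded
 * by ctrblk_lock; if the lock is contended, ctr_aes_crypt() processes
 * one block at a time using walk->iv directly.
 */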
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}

static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        unsigned int n, nbytes;
        int ret, locked;

        locked = spin_trylock(&ctrblk_lock);

        ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk->iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            walk->dst.virt.addr, walk->src.virt.addr,
                            n, ctrptr);
                if (ctrptr == ctrblk)
                        memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
                               AES_BLOCK_SIZE);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        if (locked)
                spin_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            buf, walk->src.virt.addr,
                            AES_BLOCK_SIZE, walk->iv);
                memcpy(walk->dst.virt.addr, buf, nbytes);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-s390",
        .cra_priority           = 400, /* combo: aes + ctr */
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct s390_aes_ctx),
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ctr_aes_set_key,
                        .encrypt        = ctr_aes_encrypt,
                        .decrypt        = ctr_aes_decrypt,
                }
        }
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
        int ret;

        ret = crypto_register_alg(alg);
        if (!ret)
                aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
        return ret;
}

static void aes_s390_fini(void)
{
        while (aes_s390_algs_num--)
                crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
        if (ctrblk)
                free_page((unsigned long) ctrblk);
}

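/*
 * Each algorithm is registered only if at least one of its function
 * codes is available; otherwise the generic implementation remains
 * the sole provider. Registered algorithms are recorded so they can
 * be unregistered on error or module exit.
 */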
static int __init aes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC and KMCTR */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);

        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
                ret = aes_s390_register_alg(&aes_alg);
                if (ret)
                        goto out_err;
                ret = aes_s390_register_alg(&ecb_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
                ret = aes_s390_register_alg(&cbc_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                ret = aes_s390_register_alg(&xts_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = aes_s390_register_alg(&ctr_aes_alg);
                if (ret)
                        goto out_err;
        }

        return 0;
out_err:
        aes_s390_fini();
        return ret;
}

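/*
 * Tie module loading to the message-security-assist (MSA) CPU
 * feature: the module is only loaded on machines that announce it.
 */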
module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");