/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static u8 *ctrblk;
static char keylen_flag;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	long enc;
	long dec;
	int key_len;
	union {
		struct crypto_blkcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

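/*
 * Parameter block passed to crypt_s390_pcc() in xts_aes_crypt(): the key and
 * the initial tweak are filled in before the call, and the computed XTS
 * parameter is read back from the xts field afterwards.
 */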
struct pcc_param {
	u8 key[32];
	u8 tweak[16];
	u8 block[16];
	u8 bit[16];
	u8 xts[16];
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	long enc;
	long dec;
	int key_len;
	struct crypto_blkcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and a software fallback
 * is required, or a negative number if the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
		break;
	}
	return 0;
}

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= CRYPT_S390_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

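/*
 * Walk the scatterlists and hand each chunk of complete AES blocks to the
 * KM instruction via crypt_s390_km(); any trailing partial block is given
 * back to blkcipher_walk_done().
 */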
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(sctx->fallback.blk);
	sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

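/*
 * KMC takes the IV and the key in a single parameter block, so both are
 * copied into a local struct before the data is walked; the chained IV is
 * copied back to walk->iv once the walk is complete.
 */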
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (!nbytes)
		goto out;

	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, &param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	switch (key_len) {
	case 32:
		xts_ctx->enc = KM_XTS_128_ENCRYPT;
		xts_ctx->dec = KM_XTS_128_DECRYPT;
		memcpy(xts_ctx->key + 16, in_key, 16);
		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
		break;
	case 48:
		xts_ctx->enc = 0;
		xts_ctx->dec = 0;
		xts_fallback_setkey(tfm, in_key, key_len);
		break;
	case 64:
		xts_ctx->enc = KM_XTS_256_ENCRYPT;
		xts_ctx->dec = KM_XTS_256_DECRYPT;
		memcpy(xts_ctx->key, in_key, 32);
		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	xts_ctx->key_len = key_len;
	return 0;
}

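/*
 * Derive the XTS tweak with crypt_s390_pcc() first, then hand the data key
 * together with the computed tweak to KM for the actual en-/decryption.
 * The offset skips the unused first half of the key fields for the
 * 128-bit-key case.
 */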
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_xts_ctx *xts_ctx,
			 struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	struct pcc_param pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (!nbytes)
		goto out;

	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
	if (ret < 0)
		return -EIO;

	memcpy(xts_param.key, xts_ctx->key, 32);
	memcpy(xts_param.init, pcc_param.xts, 16);
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(xts_ctx->fallback);
	xts_ctx->fallback = NULL;
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	switch (key_len) {
	case 16:
		sctx->enc = KMCTR_AES_128_ENCRYPT;
		sctx->dec = KMCTR_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMCTR_AES_192_ENCRYPT;
		sctx->dec = KMCTR_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMCTR_AES_256_ENCRYPT;
		sctx->dec = KMCTR_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

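/*
 * Fill the shared ctrblk page with consecutive counter values so that up to
 * PAGE_SIZE of data can be handed to KMCTR in one call; a final partial
 * block is encrypted into a stack buffer and only nbytes of it are copied
 * out.
 */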
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[AES_BLOCK_SIZE];
	u8 *out, *in;

	if (!walk->nbytes)
		return ret;

	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			/* only use complete blocks, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
						   nbytes & ~(AES_BLOCK_SIZE - 1);
			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
			if (ret < 0 || ret != n)
				return -EIO;
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrblk, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
				       AES_BLOCK_SIZE, ctrblk);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};

static int ctr_aes_alg_reg;

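/*
 * Probe the CPACF functions at module load and register only what the
 * machine supports: the plain cipher and ECB/CBC modes fall back to a
 * software implementation for unsupported key sizes, while XTS and CTR are
 * registered only when the required MSA extensions are present.
 */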
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
		xts_aes_alg_reg = 1;
	}

	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_aes_err;
		}
		ret = crypto_register_alg(&ctr_aes_alg);
		if (ret) {
			free_page((unsigned long) ctrblk);
			goto ctr_aes_err;
		}
		ctr_aes_alg_reg = 1;
	}

out:
	return ret;

ctr_aes_err:
	crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
	crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	if (ctr_aes_alg_reg) {
		crypto_unregister_alg(&ctr_aes_alg);
		free_page((unsigned long) ctrblk);
	}
	if (xts_aes_alg_reg)
		crypto_unregister_alg(&xts_aes_alg);
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");