/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        void *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

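/* Flush one queued bounce buffer out to its destination scatterlist. */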
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

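/* Write back and free all bounce buffers queued during a slow-path walk. */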
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

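/* Advance the output walk past a block handled via a bounce buffer, moving
 * to the next scatterlist entry whenever the current one is exhausted.
 */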
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }

        return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

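/* Complete one step of the walk after the caller has processed the data:
 * advance the scatterlist walks, then either set up the next chunk or,
 * when nothing is left, copy a bounced IV back to the request and free it.
 */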
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

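/* Slow path: allocate an aligned bounce buffer that does not straddle a
 * page, copy the next block into it and queue it for write-back on
 * completion.
 */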
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

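/* Copy a misaligned IV into a freshly allocated buffer that satisfies the
 * algorithm's alignment mask.
 */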
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

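/* Set up the next chunk of the walk, using the fast path when the chunk is
 * at least one block and both scatterlists are suitably aligned, and the
 * slow (bounce buffer) path otherwise.
 */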
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

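/* Copy the key into an aligned temporary buffer before handing it to the
 * algorithm's setkey, then wipe and free the copy.
 */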
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return "eseqiv";
}

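/* Construct the default IV-generator instance for an algorithm by
 * instantiating its geniv template around it.  Returns -EAGAIN on success
 * so that the caller redoes the lookup and picks up the new instance.
 */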
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        if (tmpl->create) {
                err = tmpl->create(tmpl, tb);
                if (err)
                        goto put_tmpl;
                goto ok;
        }

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

ok:
        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

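/* Look up a symmetric cipher by name.  If the algorithm found still needs
 * an IV generator, build the default geniv instance and return an error
 * (usually -EAGAIN) so that the caller retries the lookup.
 */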
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);