/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

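/*
 * A bounce buffer queued on the walk's slow path: @data holds @len bytes
 * that still have to be copied out to position @dst in the destination
 * scatterlist once the cipher operation has completed.
 */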
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

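/*
 * Flush all pending bounce buffers out to the destination scatterlist and
 * free them.  Called once an asynchronous operation that used the slow
 * path has finished.
 */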
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

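/*
 * Remember the current output position for @p and queue it so that its
 * contents are written back by __ablkcipher_walk_complete().
 */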
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

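/*
 * Advance the output walk past the @n bytes that were processed out of
 * line, crossing into the next scatterlist entry whenever the current
 * page is exhausted.
 */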
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

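/*
 * Finish the current step of the walk: @err is either a negative error or
 * the number of bytes the caller left unprocessed (normally 0).  Write-back
 * and advancement are handled for both the fast and the slow path; if more
 * data remains, the walk moves on to the next chunk, otherwise the
 * (possibly bounced) IV is copied back and the walk is torn down.
 */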
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

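/*
 * Slow path: the current chunk is either smaller than a full block or not
 * aligned as the algorithm requires, so bounce it through a kmalloc'ed
 * buffer.  The input is copied into the buffer here; the result is copied
 * back to the destination scatterlist by __ablkcipher_walk_complete().
 */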
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

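/*
 * The IV supplied by the caller is not aligned for this algorithm, so
 * bounce it into a freshly allocated buffer that is; walk->iv_buffer is
 * freed again in ablkcipher_walk_done().
 */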
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

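/*
 * Set up the next chunk of the walk.  The fast path hands the caller the
 * scatterlist pages directly; the slow path is taken when less than a
 * full block is contiguously available or either side is misaligned.
 */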
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

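/*
 * Start a walk: bounce the IV if it is not suitably aligned, position the
 * scatterwalks at the start of source and destination, and map the first
 * chunk.  Must not be called from hard interrupt context.
 */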
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

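/*
 * Public entry point for drivers that want to walk the request in terms
 * of pages and offsets.  A minimal sketch of the expected calling pattern
 * (assuming ablkcipher_walk_init() from <crypto/algapi.h>; error handling
 * and the actual cipher work are elided):
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		// process walk.src.page/offset into walk.dst.page/offset,
 *		// then report that all walk.nbytes bytes were consumed:
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */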
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

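/*
 * The key is not aligned as the algorithm requires: copy it into an
 * aligned scratch buffer, set it, then zero and free the copy.
 */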
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

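/*
 * Default ->setkey() wrapper: validate the key length against the
 * algorithm's limits and fall back to the bounce-buffer path if the key
 * is misaligned.
 */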
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

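/*
 * Instantiate a tfm: reject oversized IVs and wire the generic setkey
 * wrapper plus the algorithm's encrypt/decrypt into the crt_ablkcipher
 * ops.
 */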
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

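/*
 * A "givcipher" is an ablkcipher with a built-in IV generator.  The only
 * difference on the tfm side is that algorithms flagged CRYPTO_ALG_GENIV
 * provide their own ->setkey() instead of the generic wrapper above.
 */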
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);