/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <crypto/pcrypt.h>

struct pcrypt_instance {
        const char *name;
        struct padata_instance *pinst;
        struct workqueue_struct *wq;

        /*
         * Cpumask for callback CPUs. It should be
         * equal to the serial cpumask of the corresponding padata instance,
         * so it is updated when padata notifies us about a serial
         * cpumask change.
         *
         * cb_cpumask is protected by RCU. This fact prevents us from
         * using cpumask_var_t directly because the actual type of
         * cpumask_var_t depends on the kernel configuration (particularly on
         * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
         * cpumask_var_t may be either a pointer to a struct cpumask
         * or a variable allocated on the stack. Thus we cannot safely use
         * cpumask_var_t with RCU operations such as rcu_assign_pointer or
         * rcu_dereference. So cpumask_var_t is wrapped with struct
         * pcrypt_cpumask, which makes it possible to use it with RCU.
         */
        struct pcrypt_cpumask {
                cpumask_var_t mask;
        } *cb_cpumask;
        struct notifier_block nblock;
};

static struct pcrypt_instance pencrypt;
static struct pcrypt_instance pdecrypt;
static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
        struct crypto_spawn spawn;
        unsigned int tfm_count;
};

struct pcrypt_aead_ctx {
        struct crypto_aead *child;
        unsigned int cb_cpu;
};

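/*
 * Select the CPU that will run the serial completion callback for this
 * request and submit the request to padata for parallel processing.
 * The cached callback CPU is kept if it is still part of the serial
 * cpumask (or if that mask is empty); otherwise a new callback CPU is
 * derived from the current mask.
 */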
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
                              struct pcrypt_instance *pcrypt)
{
        unsigned int cpu_index, cpu, i;
        struct pcrypt_cpumask *cpumask;

        cpu = *cb_cpu;

        rcu_read_lock_bh();
        cpumask = rcu_dereference(pcrypt->cb_cpumask);
        if (cpumask_test_cpu(cpu, cpumask->mask))
                goto out;

        if (!cpumask_weight(cpumask->mask))
                goto out;

        cpu_index = cpu % cpumask_weight(cpumask->mask);

        cpu = cpumask_first(cpumask->mask);
        for (i = 0; i < cpu_index; i++)
                cpu = cpumask_next(cpu, cpumask->mask);

        *cb_cpu = cpu;

out:
        rcu_read_unlock_bh();
        return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

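/* Key handling is simply delegated to the wrapped (child) AEAD transform. */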
static int pcrypt_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

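/*
 * Serial callbacks, invoked by padata in the order the requests were
 * submitted. They complete the original request with the result the
 * parallel worker stored in padata->info.
 */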
static void pcrypt_aead_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->areq.base.data, padata->info);
}

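/*
 * Completion callback of the child transform for requests that finished
 * asynchronously. It records the result and hands the request to padata
 * for serialization.
 */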
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct padata_priv *padata = pcrypt_request_padata(preq);

        padata->info = err;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        padata_do_serial(padata);
}

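/*
 * Parallel worker for encryption. It runs the child encrypt operation;
 * unless the child completes asynchronously (-EINPROGRESS), the request
 * is queued for serialization right away.
 */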
static void pcrypt_aead_enc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_encrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

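/*
 * Encrypt entry point of the pcrypt instance. It sets up the inner
 * request for the child transform and dispatches it to the pencrypt
 * padata instance. On success -EINPROGRESS is returned and the request
 * later completes through the serial callback.
 */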
static int pcrypt_aead_encrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_enc;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_decrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

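/*
 * Decrypt entry point, mirroring pcrypt_aead_encrypt() but dispatching
 * to the pdecrypt padata instance.
 */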
static int pcrypt_aead_decrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_dec;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

static void pcrypt_aead_givenc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_givencrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

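/*
 * IV-generating encrypt entry point. Like pcrypt_aead_encrypt(), but it
 * works on an aead_givcrypt_request and also forwards the IV generator
 * state (giv/seq) to the child transform.
 */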
static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
        int err;
        struct aead_request *areq = &req->areq;
        struct pcrypt_request *preq = aead_request_ctx(areq);
        struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(areq);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_givenc;
        padata->serial = pcrypt_aead_giv_serial;

        aead_givcrypt_set_tfm(creq, ctx->child);
        aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                   pcrypt_aead_done, areq);
        aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
                                areq->cryptlen, areq->iv);
        aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
        aead_givcrypt_set_giv(creq, req->giv, req->seq);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

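/*
 * Transform initialization: spread the callback CPUs of newly allocated
 * transforms round-robin over the active CPUs and grab the child AEAD
 * transform.
 */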
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
        int cpu, cpu_index;
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        ictx->tfm_count++;

        cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

        ctx->cb_cpu = cpumask_first(cpu_active_mask);
        for (cpu = 0; cpu < cpu_index; cpu++)
                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

        cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
                + sizeof(struct aead_givcrypt_request)
                + crypto_aead_reqsize(cipher);

        return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

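/*
 * Allocate a crypto instance wrapping the given algorithm. The driver
 * name becomes "pcrypt(<alg>)" and the priority is set 100 above the
 * wrapped algorithm's, so the pcrypt instance wins lookups by cra_name.
 */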
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
        struct crypto_instance *inst;
        struct pcrypt_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto out_free_inst;

        inst->alg.cra_priority = alg->cra_priority + 100;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
                                                 u32 type, u32 mask)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = pcrypt_alloc_instance(alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_aead_type;

        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

        inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

        inst->alg.cra_init = pcrypt_aead_init_tfm;
        inst->alg.cra_exit = pcrypt_aead_exit_tfm;

        inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
        inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
        inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
        inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
        inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

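/* Template ->alloc hook: only AEAD algorithms can be wrapped by pcrypt. */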
static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                return pcrypt_alloc_aead(tb, algt->type, algt->mask);
        }

        return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
        struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

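/*
 * Called by padata when the serial cpumask of the instance changes.
 * A fresh copy of the mask is published via rcu_assign_pointer() and
 * the old one is freed after an RCU grace period.
 */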
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
{
        struct pcrypt_instance *pcrypt;
        struct pcrypt_cpumask *new_mask, *old_mask;

        if (!(val & PADATA_CPU_SERIAL))
                return 0;

        pcrypt = container_of(self, struct pcrypt_instance, nblock);
        new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
        if (!new_mask)
                return -ENOMEM;
        if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
                kfree(new_mask);
                return -ENOMEM;
        }

        old_mask = pcrypt->cb_cpumask;

        padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
        synchronize_rcu_bh();

        free_cpumask_var(old_mask->mask);
        kfree(old_mask);
        return 0;
}

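/* Expose the padata instance under the pcrypt kset in sysfs. */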
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
        int ret;

        pinst->kobj.kset = pcrypt_kset;
        ret = kobject_add(&pinst->kobj, NULL, name);
        if (!ret)
                kobject_uevent(&pinst->kobj, KOBJ_ADD);

        return ret;
}

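/*
 * Set up one pcrypt instance: create its workqueue and padata instance,
 * take an initial copy of the serial cpumask, register the cpumask
 * change notifier and add the sysfs entry.
 */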
static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt,
                                  const char *name)
{
        int ret = -ENOMEM;
        struct pcrypt_cpumask *mask;

        pcrypt->name = name;
        pcrypt->wq = create_workqueue(name);
        if (!pcrypt->wq)
                goto err;

        pcrypt->pinst = padata_alloc(pcrypt->wq);
        if (!pcrypt->pinst)
                goto err_destroy_workqueue;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (!mask)
                goto err_free_padata;
        if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
                kfree(mask);
                goto err_free_padata;
        }

        padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, mask);

        pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
        ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        if (ret)
                goto err_free_cpumask;

        ret = pcrypt_sysfs_add(pcrypt->pinst, name);
        if (ret)
                goto err_unregister_notifier;

        return ret;
err_unregister_notifier:
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
        free_cpumask_var(mask->mask);
        kfree(mask);
err_free_padata:
        padata_free(pcrypt->pinst);
err_destroy_workqueue:
        destroy_workqueue(pcrypt->wq);
err:
        return ret;
}

static void __pcrypt_deinit_instance(struct pcrypt_instance *pcrypt)
{
        kobject_put(&pcrypt->pinst->kobj);
        free_cpumask_var(pcrypt->cb_cpumask->mask);
        kfree(pcrypt->cb_cpumask);

        padata_stop(pcrypt->pinst);
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        destroy_workqueue(pcrypt->wq);
        padata_free(pcrypt->pinst);
}

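/*
 * The "pcrypt" template. Users instantiate it on top of an existing AEAD
 * algorithm, for example "pcrypt(authenc(hmac(sha1),cbc(aes)))", to have
 * requests processed in parallel while completions stay in submission order.
 */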
static struct crypto_template pcrypt_tmpl = {
        .name = "pcrypt",
        .alloc = pcrypt_alloc,
        .free = pcrypt_free,
        .module = THIS_MODULE,
};

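/*
 * Module init: create the sysfs kset, bring up the pencrypt and pdecrypt
 * padata instances and register the crypto template.
 */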
static int __init pcrypt_init(void)
{
        int err = -ENOMEM;

        pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
        if (!pcrypt_kset)
                goto err;

        err = __pcrypt_init_instance(&pencrypt, "pencrypt");
        if (err)
                goto err_unreg_kset;

        err = __pcrypt_init_instance(&pdecrypt, "pdecrypt");
        if (err)
                goto err_deinit_pencrypt;

        padata_start(pencrypt.pinst);
        padata_start(pdecrypt.pinst);

        return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
        __pcrypt_deinit_instance(&pencrypt);
err_unreg_kset:
        kset_unregister(pcrypt_kset);
err:
        return err;
}

static void __exit pcrypt_exit(void)
{
        __pcrypt_deinit_instance(&pencrypt);
        __pcrypt_deinit_instance(&pdecrypt);

        kset_unregister(pcrypt_kset);
        crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");