/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
        struct padata_instance *pinst;
        struct workqueue_struct *wq;

        /*
         * Cpumask for callback CPUs. It should be equal to the serial
         * cpumask of the corresponding padata instance, so it is updated
         * when padata notifies us about a serial cpumask change.
         *
         * cb_cpumask is protected by RCU. This prevents us from using
         * cpumask_var_t directly, because the actual type of
         * cpumask_var_t depends on the kernel configuration (in
         * particular on the CONFIG_CPUMASK_OFFSTACK option). Depending
         * on the configuration, cpumask_var_t may be either a pointer
         * to a struct cpumask or a variable allocated on the stack.
         * Thus we cannot safely use cpumask_var_t with RCU operations
         * such as rcu_assign_pointer or rcu_dereference. So
         * cpumask_var_t is wrapped in struct pcrypt_cpumask, which
         * makes it possible to use it with RCU.
         */
        struct pcrypt_cpumask {
                cpumask_var_t mask;
        } *cb_cpumask;
        struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
        struct crypto_spawn spawn;
        unsigned int tfm_count;
};

struct pcrypt_aead_ctx {
        struct crypto_aead *child;
        unsigned int cb_cpu;
};

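/*
 * Pick the CPU that will run the serial completion callback and queue
 * the request for parallel processing. If the cached callback CPU is
 * not in the current callback cpumask, a CPU is chosen from the mask
 * by taking the cached CPU modulo the mask weight, and remembered in
 * *cb_cpu for subsequent requests. The mask is read under
 * rcu_read_lock_bh() because the cpumask-change notifier below may
 * replace it concurrently.
 */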
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
                              struct padata_pcrypt *pcrypt)
{
        unsigned int cpu_index, cpu, i;
        struct pcrypt_cpumask *cpumask;

        cpu = *cb_cpu;

        rcu_read_lock_bh();
        cpumask = rcu_dereference(pcrypt->cb_cpumask);
        if (cpumask_test_cpu(cpu, cpumask->mask))
                goto out;

        if (!cpumask_weight(cpumask->mask))
                goto out;

        cpu_index = cpu % cpumask_weight(cpumask->mask);

        cpu = cpumask_first(cpumask->mask);
        for (i = 0; i < cpu_index; i++)
                cpu = cpumask_next(cpu, cpumask->mask);

        *cb_cpu = cpu;

out:
        rcu_read_unlock_bh();
        return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

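/*
 * Serial callbacks, run by padata in the original submission order so
 * that completions reach the caller in order even though the crypto
 * operations themselves ran in parallel.
 */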
static void pcrypt_aead_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->areq.base.data, padata->info);
}

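/*
 * Completion callback of the underlying (child) AEAD request: record
 * the result in padata->info and hand the request back to padata for
 * in-order completion.
 */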
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct padata_priv *padata = pcrypt_request_padata(preq);

        padata->info = err;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        padata_do_serial(padata);
}

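/*
 * Parallel worker for encryption, run on one of the parallel CPUs. If
 * the child operation completes asynchronously (-EINPROGRESS),
 * pcrypt_aead_done() performs the serialization step instead.
 */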
static void pcrypt_aead_enc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_encrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

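/*
 * Encryption entry point: build the child request inside the pcrypt
 * request context, clear CRYPTO_TFM_REQ_MAY_SLEEP (the request is
 * processed from padata context, where sleeping is not allowed), and
 * queue it on the pencrypt instance. Returns -EINPROGRESS on
 * successful submission.
 */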
static int pcrypt_aead_encrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_enc;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

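/*
 * The decryption path mirrors encryption, but dispatches to the
 * pdecrypt padata instance.
 */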
static void pcrypt_aead_dec(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_decrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_dec;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

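/*
 * The givencrypt (IV generation plus encryption) path is analogous to
 * plain encryption and also runs on the pencrypt instance.
 */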
static void pcrypt_aead_givenc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_givencrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
        int err;
        struct aead_request *areq = &req->areq;
        struct pcrypt_request *preq = aead_request_ctx(areq);
        struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(areq);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_givenc;
        padata->serial = pcrypt_aead_giv_serial;

        aead_givcrypt_set_tfm(creq, ctx->child);
        aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                   pcrypt_aead_done, areq);
        aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
                                areq->cryptlen, areq->iv);
        aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
        aead_givcrypt_set_giv(creq, req->giv, req->seq);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

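/*
 * Transform initialization: callback CPUs are assigned round-robin
 * over the active CPUs, so completion work is spread out instead of
 * piling up on a single CPU. The request size reserves room for the
 * pcrypt bookkeeping, a givcrypt request and the child's own request.
 */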
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
        int cpu, cpu_index;
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        ictx->tfm_count++;

        cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

        ctx->cb_cpu = cpumask_first(cpu_active_mask);
        for (cpu = 0; cpu < cpu_index; cpu++)
                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

        cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
                + sizeof(struct aead_givcrypt_request)
                + crypto_aead_reqsize(cipher);

        return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

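/*
 * Common instance setup: the driver name becomes "pcrypt(<child>)"
 * and the priority is raised by 100 so that, once instantiated, the
 * parallel wrapper outranks the plain underlying algorithm.
 */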
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
        struct crypto_instance *inst;
        struct pcrypt_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto out_free_inst;

        inst->alg.cra_priority = alg->cra_priority + 100;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
                                                 u32 type, u32 mask)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = pcrypt_alloc_instance(alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_aead_type;

        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

        inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

        inst->alg.cra_init = pcrypt_aead_init_tfm;
        inst->alg.cra_exit = pcrypt_aead_exit_tfm;

        inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
        inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
        inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
        inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
        inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                return pcrypt_alloc_aead(tb, algt->type, algt->mask);
        }

        return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
        struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

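/*
 * Called by padata when the serial cpumask of an instance changes:
 * allocate a new wrapper, copy in the new serial cpumask, publish it
 * with rcu_assign_pointer(), and free the old mask only after
 * synchronize_rcu_bh() guarantees that no reader still holds it.
 */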
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
{
        struct padata_pcrypt *pcrypt;
        struct pcrypt_cpumask *new_mask, *old_mask;

        if (!(val & PADATA_CPU_SERIAL))
                return 0;

        pcrypt = container_of(self, struct padata_pcrypt, nblock);
        new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
        if (!new_mask)
                return -ENOMEM;
        if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
                kfree(new_mask);
                return -ENOMEM;
        }

        old_mask = pcrypt->cb_cpumask;

        padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
        synchronize_rcu_bh();

        free_cpumask_var(old_mask->mask);
        kfree(old_mask);
        return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
        int ret;

        pinst->kobj.kset = pcrypt_kset;
        ret = kobject_add(&pinst->kobj, NULL, name);
        if (!ret)
                kobject_uevent(&pinst->kobj, KOBJ_ADD);

        return ret;
}

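/*
 * Bring up one padata instance (called once for encryption and once
 * for decryption): create the workqueue, allocate the padata instance,
 * snapshot its serial cpumask for callback CPU selection, and register
 * the cpumask notifier and the sysfs entry.
 */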
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
                              const char *name)
{
        int ret = -ENOMEM;
        struct pcrypt_cpumask *mask;

        pcrypt->wq = create_workqueue(name);
        if (!pcrypt->wq)
                goto err;

        pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
        if (!pcrypt->pinst)
                goto err_destroy_workqueue;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (!mask)
                goto err_free_padata;
        if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
                kfree(mask);
                goto err_free_padata;
        }

        padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, mask);

        pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
        ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        if (ret)
                goto err_free_cpumask;

        ret = pcrypt_sysfs_add(pcrypt->pinst, name);
        if (ret)
                goto err_unregister_notifier;

        return ret;
err_unregister_notifier:
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
        free_cpumask_var(mask->mask);
        kfree(mask);
err_free_padata:
        padata_free(pcrypt->pinst);
err_destroy_workqueue:
        destroy_workqueue(pcrypt->wq);
err:
        return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
        kobject_put(&pcrypt->pinst->kobj);
        free_cpumask_var(pcrypt->cb_cpumask->mask);
        kfree(pcrypt->cb_cpumask);

        padata_stop(pcrypt->pinst);
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        destroy_workqueue(pcrypt->wq);
        padata_free(pcrypt->pinst);
}

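/*
 * The template wraps an AEAD algorithm, yielding instance names such
 * as "pcrypt(authenc(hmac(sha1),cbc(aes)))". As an illustrative (not
 * authoritative) example, such an instance can typically be created
 * through the tcrypt test module:
 *
 *   modprobe tcrypt alg="pcrypt(authenc(hmac(sha1),cbc(aes)))" type=3
 *
 * The exact invocation depends on the kernel configuration.
 */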
static struct crypto_template pcrypt_tmpl = {
        .name = "pcrypt",
        .alloc = pcrypt_alloc,
        .free = pcrypt_free,
        .module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
        int err = -ENOMEM;

        pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
        if (!pcrypt_kset)
                goto err;

        err = pcrypt_init_padata(&pencrypt, "pencrypt");
        if (err)
                goto err_unreg_kset;

        err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
        if (err)
                goto err_deinit_pencrypt;

        padata_start(pencrypt.pinst);
        padata_start(pdecrypt.pinst);

        return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
        pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
        kset_unregister(pcrypt_kset);
err:
        return err;
}

static void __exit pcrypt_exit(void)
{
        pcrypt_fini_padata(&pencrypt);
        pcrypt_fini_padata(&pdecrypt);

        kset_unregister(pcrypt_kset);
        crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");