// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

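/*
 * Each CPU gets its own request queue and work item: requests are queued
 * on the submitting CPU and later processed on that same CPU by the
 * kcrypto_wq workqueue.
 */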
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

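/*
 * Queue a request on the current CPU's queue and kick its worker.  When
 * the transform was obtained through one of the cryptd_alloc_*() helpers
 * below, an extra reference is taken on its context refcnt so the tfm
 * cannot go away while the request is still queued; the reference is
 * dropped again in the corresponding *_complete() handler.
 */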
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one real crypto work item (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() from being called
	 * from softirq context.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

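/*
 * Carry the CRYPTO_ALG_INTERNAL bits over from the template parameters so
 * that a cryptd instance built on top of an internal-only algorithm is
 * itself marked internal and stays hidden from normal algorithm lookups.
 */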
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

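/*
 * Run the caller's completion with softirqs disabled, then drop the
 * reference taken at enqueue time; if the user has already called
 * cryptd_free_skcipher(), this is where the tfm finally gets freed.
 */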
static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

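/*
 * The actual encryption runs here, in workqueue context: the request is
 * handed to the synchronous child cipher via an on-stack sub-request and
 * the caller's completion is invoked once it is done.
 */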
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

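/*
 * Build a "cryptd(...)" skcipher instance around the requested algorithm:
 * the instance inherits the geometry of the underlying cipher (IV size,
 * chunk size, key sizes) and is advertised as asynchronous.
 */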
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

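/*
 * The hash path wraps a synchronous shash: the per-request context holds a
 * shash_desc, so the ahash request size is set up at init time to include
 * the child's descriptor size.
 */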
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

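/*
 * Every asynchronous hash entry point below just records the caller's
 * completion, substitutes the matching cryptd handler and pushes the
 * request onto the per-CPU queue; the real shash work happens later in
 * cryptd_queue_worker().
 */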
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

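/*
 * Build the ahash instance for "cryptd(hash)".  The underlying algorithm
 * must be a shash; its digest size, state size and setkey requirement are
 * carried over to the asynchronous wrapper.
 */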
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

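/*
 * Common AEAD worker: invoke the requested encrypt/decrypt operation on
 * the child transform, then run the caller's completion and drop the
 * reference taken when the request was queued.
 */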
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

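/*
 * Template entry point: dispatch on the type of the algorithm being
 * wrapped and build the matching skcipher, hash or AEAD instance.
 */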
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

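/*
 * Exported helpers for callers that want to drive a cryptd instance
 * directly rather than requesting "cryptd(...)" by name.  A minimal usage
 * sketch (the algorithm name is purely illustrative):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...			(submit requests against &ctfm->base)
 *	cryptd_free_skcipher(ctfm);
 *
 * The refcnt starts at 1 for the allocating user; each queued request
 * temporarily takes an extra reference (see cryptd_enqueue_request()).
 */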
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

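/*
 * Module init/exit: set up the per-CPU queues and register the "cryptd"
 * template; tear both down again on exit.
 */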
static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");