/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
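
/*
 * Illustrative user-space sketch (not part of this module), showing one
 * way to drive this interface through AF_ALG. It assumes the kernel
 * provides "gcm(aes)" and that the uapi definitions come from
 * <linux/if_alg.h>; the key, IV and buffer sizes are placeholders only.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	int opfd;
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * The operation socket then takes ALG_SET_OP (ALG_OP_ENCRYPT or
 * ALG_OP_DECRYPT), ALG_SET_IV (a struct af_alg_iv) and
 * ALG_SET_AEAD_ASSOCLEN as sendmsg() control messages, followed by the
 * associated data and the plaintext (or ciphertext plus tag); recvmsg()
 * returns the associated data followed by the ciphertext and tag (or the
 * plaintext when decrypting).
 */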

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

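/*
 * Per-socket AEAD context. tsgl queues the data received from user space
 * (AAD followed by plaintext or ciphertext), used counts the bytes queued
 * there, more tracks whether the sender announced further data via
 * MSG_MORE, merge flags that the tail page still has room to append to,
 * enc selects encryption vs. decryption, and aead_assoclen is the length
 * of the associated data at the front of the queued data.
 */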
struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

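/*
 * Example: for gcm(aes) with its default 16-byte tag and 8 bytes of AAD,
 * encryption may start once those 8 bytes are queued, while decryption
 * additionally needs the tag, i.e. at least 24 bytes.
 */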
static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

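/*
 * aead_sendmsg() queues the caller's data (AAD followed by plaintext or
 * ciphertext) into tsgl. The first sendmsg() of a request cycle carries
 * the control messages (operation, IV, AAD length); subsequent calls with
 * MSG_MORE set append further data to the same request.
 */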
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

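/*
 * The async request is allocated as a single block laid out as
 * struct aead_request | tfm request context | struct aead_async_req | IV,
 * which the two macros below size (GET_REQ_SIZE) and index into
 * (GET_ASYM_REQ).
 */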
#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
	((char *)req + sizeof(struct aead_request) + \
	  crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

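	/*
	 * Output is the input plus the tag when encrypting, and the input
	 * minus the tag when decrypting (cf. the comment in
	 * aead_recvmsg_sync()).
	 */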
	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk,
				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto free;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

543static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
Stephan Mueller400c40c2015-02-28 20:50:00 +0100544{
545 struct sock *sk = sock->sk;
546 struct alg_sock *ask = alg_sk(sk);
547 struct aead_ctx *ctx = ask->private;
Stephan Mueller400c40c2015-02-28 20:50:00 +0100548 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
549 struct aead_sg_list *sgl = &ctx->tsgl;
Tadeusz Struk83094e5e2016-03-11 11:50:33 -0800550 struct aead_async_rsgl *last_rsgl = NULL;
551 struct aead_async_rsgl *rsgl, *tmp;
Stephan Mueller400c40c2015-02-28 20:50:00 +0100552 int err = -EINVAL;
553 unsigned long used = 0;
554 size_t outlen = 0;
555 size_t usedpages = 0;
Stephan Mueller400c40c2015-02-28 20:50:00 +0100556
557 lock_sock(sk);
558
559 /*
Stephan Mueller3f692d52016-10-21 04:59:24 +0200560 * Please see documentation of aead_request_set_crypt for the
561 * description of the AEAD memory structure expected from the caller.
Stephan Mueller400c40c2015-02-28 20:50:00 +0100562 */
563
564 if (ctx->more) {
565 err = aead_wait_for_data(sk, flags);
566 if (err)
567 goto unlock;
568 }
569
Stephan Mueller0c1e16c2016-12-05 15:26:19 +0100570 /* data length provided by caller via sendmsg/sendpage */
Stephan Mueller400c40c2015-02-28 20:50:00 +0100571 used = ctx->used;
572
	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks there inform the data
	 * sender that something is wrong, but they are irrelevant for
	 * maintaining kernel integrity. We need the check here too in case
	 * user space decides not to honor the error returned by
	 * sendmsg/sendpage and still calls recvmsg; this check protects the
	 * kernel's integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");