/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
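
/*
 * A minimal user-space sketch of this interface (illustrative only; the
 * algorithm name "gcm(aes)" and the key/tag sizes are example values, and
 * error handling is omitted):
 *
 *	int tfmfd, opfd;
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)"
 *	};
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	opfd = accept(tfmfd, NULL, 0);
 *
 *	sendmsg(opfd, &msg, 0);    (cmsgs carry ALG_SET_OP, ALG_SET_IV and
 *	                            ALG_SET_AEAD_ASSOCLEN; the payload only
 *	                            fills the TX SGL, no cipher runs yet)
 *	recvmsg(opfd, &rmsg, 0);   (supplies the RX buffer and triggers the
 *	                            crypto operation)
 */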

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tfm {
	struct crypto_aead *aead;
	bool has_key;
	struct crypto_skcipher *null_tfm;
};

static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

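/*
 * Worked example for the check above and the output size computed in
 * recvmsg (illustrative values only): with a 16-byte tag and 20 bytes of
 * AAD, encryption of 32 bytes of plaintext requires at least 20 bytes
 * queued (the AAD) and produces 20 + 32 + 16 = 68 bytes of output, while
 * decryption of the corresponding 68 queued bytes requires at least
 * 20 + 16 = 36 bytes and produces 68 - 16 = 52 bytes (AAD plus plaintext).
 */
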
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

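/*
 * Copy an SGL from src to dst via the default null skcipher
 * ("ecb(cipher_null)"): its "encryption" moves the bytes unchanged, i.e.
 * it acts as an SGL-to-SGL memcpy. This allows the actual AEAD cipher to
 * later operate in-place on the RX SGL.
 */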
static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
	size_t processed = 0;		/* [in]  TX bufs to be consumed */

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * shall provide information to the data sender that something is
	 * wrong, but they are irrelevant for maintaining kernel integrity.
	 * We need this check here too in case user space decides to not
	 * honor the error message in sendmsg/sendpage and still call
	 * recvmsg. The check here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger than the input buffer by the tag length, as the encryption
	 * operation generates the tag. For decryption, the input buffer
	 * provides the tag which is consumed, so only the plaintext is
	 * returned to the caller, without a buffer for the tag.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure output buffer is sufficiently large. If the caller provides
	 * less buffer space, only use as much input data as the output
	 * buffer can hold. This allows AIO operation where the caller sent
	 * all data to be processed and each AIO call then performs the
	 * operation on a different chunk of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy of AAD from source to destination
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel will
	 * copy the data as it cannot see whether such an in-place operation
	 * is initiated.
	 *
	 * To ensure efficiency, the following implementation ensures that
	 * the ciphers are invoked to perform the crypto operation in-place.
	 * This is achieved by the memory management specified below.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sg;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *          |      |
		 *          | copy |
		 *          v      v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, processed);
		if (err)
			goto free;
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *          |      |     ^
		 *          | copy |     | Create SGL link.
		 *          v      v     |
		 * RX SGL: AAD || CT ----+
		 */

		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
					      areq->tsgl_entries,
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* chain the areq TX SGL holding the tag with RX SGL */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
				 areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY) {
			/* Remember output size that will be generated. */
			areq->outlen = outlen;

			return -EIOCBQUEUED;
		}

		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must issue multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	af_alg_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	af_alg_poll,
};

static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher2();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	kfree(tfm);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;
	int err;

	err = crypto_aead_setkey(tfm->aead, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	crypto_put_default_null_skcipher2();
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	ctx->rcvused = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->aead_assoclen = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (!tfm->has_key)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");