blob: 2176678a5b690820cd0de6c589e6a2d13a53917a [file] [log] [blame]
Dave Watson3c4d7552017-06-14 11:37:39 -07001/*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
Dave Watsonc46234e2018-03-22 10:10:35 -070037#include <linux/sched/signal.h>
Dave Watson3c4d7552017-06-14 11:37:39 -070038#include <linux/module.h>
39#include <crypto/aead.h>
40
Dave Watsonc46234e2018-03-22 10:10:35 -070041#include <net/strparser.h>
Dave Watson3c4d7552017-06-14 11:37:39 -070042#include <net/tls.h>
43
Kees Cookb16520f2018-04-10 17:52:34 -070044#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
45
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -070046static int __skb_nsg(struct sk_buff *skb, int offset, int len,
47 unsigned int recursion_level)
48{
49 int start = skb_headlen(skb);
50 int i, chunk = start - offset;
51 struct sk_buff *frag_iter;
52 int elt = 0;
53
54 if (unlikely(recursion_level >= 24))
55 return -EMSGSIZE;
56
57 if (chunk > 0) {
58 if (chunk > len)
59 chunk = len;
60 elt++;
61 len -= chunk;
62 if (len == 0)
63 return elt;
64 offset += chunk;
65 }
66
67 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
68 int end;
69
70 WARN_ON(start > offset + len);
71
72 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
73 chunk = end - offset;
74 if (chunk > 0) {
75 if (chunk > len)
76 chunk = len;
77 elt++;
78 len -= chunk;
79 if (len == 0)
80 return elt;
81 offset += chunk;
82 }
83 start = end;
84 }
85
86 if (unlikely(skb_has_frag_list(skb))) {
87 skb_walk_frags(skb, frag_iter) {
88 int end, ret;
89
90 WARN_ON(start > offset + len);
91
92 end = start + frag_iter->len;
93 chunk = end - offset;
94 if (chunk > 0) {
95 if (chunk > len)
96 chunk = len;
97 ret = __skb_nsg(frag_iter, offset - start, chunk,
98 recursion_level + 1);
99 if (unlikely(ret < 0))
100 return ret;
101 elt += ret;
102 len -= chunk;
103 if (len == 0)
104 return elt;
105 offset += chunk;
106 }
107 start = end;
108 }
109 }
110 BUG_ON(len);
111 return elt;
112}
113
114/* Return the number of scatterlist elements required to completely map the
115 * skb, or -EMSGSIZE if the recursion depth is exceeded.
116 */
117static int skb_nsg(struct sk_buff *skb, int offset, int len)
118{
119 return __skb_nsg(skb, offset, len, 0);
120}
121
Vakul Garg94524d82018-08-29 15:26:55 +0530122static void tls_decrypt_done(struct crypto_async_request *req, int err)
123{
124 struct aead_request *aead_req = (struct aead_request *)req;
Vakul Garg94524d82018-08-29 15:26:55 +0530125 struct scatterlist *sgout = aead_req->dst;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700126 struct tls_sw_context_rx *ctx;
127 struct tls_context *tls_ctx;
Vakul Garg94524d82018-08-29 15:26:55 +0530128 struct scatterlist *sg;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700129 struct sk_buff *skb;
Vakul Garg94524d82018-08-29 15:26:55 +0530130 unsigned int pages;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700131 int pending;
132
133 skb = (struct sk_buff *)req->data;
134 tls_ctx = tls_get_ctx(skb->sk);
135 ctx = tls_sw_ctx_rx(tls_ctx);
136 pending = atomic_dec_return(&ctx->decrypt_pending);
Vakul Garg94524d82018-08-29 15:26:55 +0530137
138 /* Propagate if there was an err */
139 if (err) {
140 ctx->async_wait.err = err;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700141 tls_err_abort(skb->sk, err);
Vakul Garg94524d82018-08-29 15:26:55 +0530142 }
143
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700144 /* After using skb->sk to propagate sk through crypto async callback
145 * we need to NULL it again.
146 */
147 skb->sk = NULL;
148
Vakul Garg94524d82018-08-29 15:26:55 +0530149 /* Release the skb, pages and memory allocated for crypto req */
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700150 kfree_skb(skb);
Vakul Garg94524d82018-08-29 15:26:55 +0530151
152 /* Skip the first S/G entry as it points to AAD */
153 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
154 if (!sg)
155 break;
156 put_page(sg_page(sg));
157 }
158
159 kfree(aead_req);
160
161 if (!pending && READ_ONCE(ctx->async_notify))
162 complete(&ctx->async_wait.completion);
163}
164
Dave Watsonc46234e2018-03-22 10:10:35 -0700165static int tls_do_decryption(struct sock *sk,
Vakul Garg94524d82018-08-29 15:26:55 +0530166 struct sk_buff *skb,
Dave Watsonc46234e2018-03-22 10:10:35 -0700167 struct scatterlist *sgin,
168 struct scatterlist *sgout,
169 char *iv_recv,
170 size_t data_len,
Vakul Garg94524d82018-08-29 15:26:55 +0530171 struct aead_request *aead_req,
172 bool async)
Dave Watsonc46234e2018-03-22 10:10:35 -0700173{
174 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300175 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -0700176 int ret;
Dave Watsonc46234e2018-03-22 10:10:35 -0700177
Vakul Garg0b243d02018-08-10 20:46:41 +0530178 aead_request_set_tfm(aead_req, ctx->aead_recv);
Dave Watsonc46234e2018-03-22 10:10:35 -0700179 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
180 aead_request_set_crypt(aead_req, sgin, sgout,
181 data_len + tls_ctx->rx.tag_size,
182 (u8 *)iv_recv);
Dave Watsonc46234e2018-03-22 10:10:35 -0700183
Vakul Garg94524d82018-08-29 15:26:55 +0530184 if (async) {
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700185 /* Using skb->sk to push sk through to crypto async callback
186 * handler. This allows propagating errors up to the socket
187 * if needed. It _must_ be cleared in the async handler
188 * before kfree_skb is called. We _know_ skb->sk is NULL
189 * because it is a clone from strparser.
190 */
191 skb->sk = sk;
Vakul Garg94524d82018-08-29 15:26:55 +0530192 aead_request_set_callback(aead_req,
193 CRYPTO_TFM_REQ_MAY_BACKLOG,
194 tls_decrypt_done, skb);
195 atomic_inc(&ctx->decrypt_pending);
196 } else {
197 aead_request_set_callback(aead_req,
198 CRYPTO_TFM_REQ_MAY_BACKLOG,
199 crypto_req_done, &ctx->async_wait);
200 }
201
202 ret = crypto_aead_decrypt(aead_req);
203 if (ret == -EINPROGRESS) {
204 if (async)
205 return ret;
206
207 ret = crypto_wait_req(ret, &ctx->async_wait);
208 }
209
210 if (async)
211 atomic_dec(&ctx->decrypt_pending);
212
Dave Watsonc46234e2018-03-22 10:10:35 -0700213 return ret;
214}
215
Dave Watson3c4d7552017-06-14 11:37:39 -0700216static void trim_sg(struct sock *sk, struct scatterlist *sg,
217 int *sg_num_elem, unsigned int *sg_size, int target_size)
218{
219 int i = *sg_num_elem - 1;
220 int trim = *sg_size - target_size;
221
222 if (trim <= 0) {
223 WARN_ON(trim < 0);
224 return;
225 }
226
227 *sg_size = target_size;
228 while (trim >= sg[i].length) {
229 trim -= sg[i].length;
230 sk_mem_uncharge(sk, sg[i].length);
231 put_page(sg_page(&sg[i]));
232 i--;
233
234 if (i < 0)
235 goto out;
236 }
237
238 sg[i].length -= trim;
239 sk_mem_uncharge(sk, trim);
240
241out:
242 *sg_num_elem = i + 1;
243}
244
245static void trim_both_sgl(struct sock *sk, int target_size)
246{
247 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300248 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530249 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700250
Vakul Garga42055e2018-09-21 09:46:13 +0530251 trim_sg(sk, rec->sg_plaintext_data,
252 &rec->sg_plaintext_num_elem,
253 &rec->sg_plaintext_size,
Dave Watson3c4d7552017-06-14 11:37:39 -0700254 target_size);
255
256 if (target_size > 0)
Dave Watsondbe42552018-03-22 10:10:06 -0700257 target_size += tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700258
Vakul Garga42055e2018-09-21 09:46:13 +0530259 trim_sg(sk, rec->sg_encrypted_data,
260 &rec->sg_encrypted_num_elem,
261 &rec->sg_encrypted_size,
Dave Watson3c4d7552017-06-14 11:37:39 -0700262 target_size);
263}
264
Dave Watson3c4d7552017-06-14 11:37:39 -0700265static int alloc_encrypted_sg(struct sock *sk, int len)
266{
267 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300268 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530269 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700270 int rc = 0;
271
John Fastabend2c3682f2018-03-18 12:56:49 -0700272 rc = sk_alloc_sg(sk, len,
Vakul Garga42055e2018-09-21 09:46:13 +0530273 rec->sg_encrypted_data, 0,
274 &rec->sg_encrypted_num_elem,
275 &rec->sg_encrypted_size, 0);
Dave Watson3c4d7552017-06-14 11:37:39 -0700276
Vakul Garg52ea9922018-09-06 21:41:40 +0530277 if (rc == -ENOSPC)
Vakul Garga42055e2018-09-21 09:46:13 +0530278 rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);
Vakul Garg52ea9922018-09-06 21:41:40 +0530279
Dave Watson3c4d7552017-06-14 11:37:39 -0700280 return rc;
281}
282
283static int alloc_plaintext_sg(struct sock *sk, int len)
284{
285 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300286 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530287 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700288 int rc = 0;
289
Vakul Garga42055e2018-09-21 09:46:13 +0530290 rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
291 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
John Fastabend2c3682f2018-03-18 12:56:49 -0700292 tls_ctx->pending_open_record_frags);
Dave Watson3c4d7552017-06-14 11:37:39 -0700293
Vakul Garg52ea9922018-09-06 21:41:40 +0530294 if (rc == -ENOSPC)
Vakul Garga42055e2018-09-21 09:46:13 +0530295 rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);
Vakul Garg52ea9922018-09-06 21:41:40 +0530296
Dave Watson3c4d7552017-06-14 11:37:39 -0700297 return rc;
298}
299
300static void free_sg(struct sock *sk, struct scatterlist *sg,
301 int *sg_num_elem, unsigned int *sg_size)
302{
303 int i, n = *sg_num_elem;
304
305 for (i = 0; i < n; ++i) {
306 sk_mem_uncharge(sk, sg[i].length);
307 put_page(sg_page(&sg[i]));
308 }
309 *sg_num_elem = 0;
310 *sg_size = 0;
311}
312
Vakul Gargc7749732018-09-25 20:21:51 +0530313static void tls_free_open_rec(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -0700314{
315 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300316 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530317 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700318
Vakul Garga42055e2018-09-21 09:46:13 +0530319 /* Return if there is no open record */
320 if (!rec)
321 return;
Dave Watson3c4d7552017-06-14 11:37:39 -0700322
Vakul Garga42055e2018-09-21 09:46:13 +0530323 free_sg(sk, rec->sg_encrypted_data,
324 &rec->sg_encrypted_num_elem,
325 &rec->sg_encrypted_size);
326
327 free_sg(sk, rec->sg_plaintext_data,
328 &rec->sg_plaintext_num_elem,
329 &rec->sg_plaintext_size);
Vakul Gargc7749732018-09-25 20:21:51 +0530330
331 kfree(rec);
Dave Watson3c4d7552017-06-14 11:37:39 -0700332}
333
Vakul Garga42055e2018-09-21 09:46:13 +0530334int tls_tx_records(struct sock *sk, int flags)
335{
336 struct tls_context *tls_ctx = tls_get_ctx(sk);
337 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
338 struct tls_rec *rec, *tmp;
339 int tx_flags, rc = 0;
340
341 if (tls_is_partially_sent_record(tls_ctx)) {
Vakul Garg9932a292018-09-24 15:35:56 +0530342 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +0530343 struct tls_rec, list);
344
345 if (flags == -1)
346 tx_flags = rec->tx_flags;
347 else
348 tx_flags = flags;
349
350 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
351 if (rc)
352 goto tx_err;
353
354 /* Full record has been transmitted.
Vakul Garg9932a292018-09-24 15:35:56 +0530355 * Remove the head of tx_list
Vakul Garga42055e2018-09-21 09:46:13 +0530356 */
Vakul Garga42055e2018-09-21 09:46:13 +0530357 list_del(&rec->list);
Vakul Gargb85135b2018-09-25 16:26:17 +0530358 free_sg(sk, rec->sg_plaintext_data,
359 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
360
Vakul Garga42055e2018-09-21 09:46:13 +0530361 kfree(rec);
362 }
363
Vakul Garg9932a292018-09-24 15:35:56 +0530364 /* Tx all ready records */
365 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
366 if (READ_ONCE(rec->tx_ready)) {
Vakul Garga42055e2018-09-21 09:46:13 +0530367 if (flags == -1)
368 tx_flags = rec->tx_flags;
369 else
370 tx_flags = flags;
371
372 rc = tls_push_sg(sk, tls_ctx,
373 &rec->sg_encrypted_data[0],
374 0, tx_flags);
375 if (rc)
376 goto tx_err;
377
Vakul Garga42055e2018-09-21 09:46:13 +0530378 list_del(&rec->list);
Vakul Gargb85135b2018-09-25 16:26:17 +0530379 free_sg(sk, rec->sg_plaintext_data,
380 &rec->sg_plaintext_num_elem,
381 &rec->sg_plaintext_size);
382
Vakul Garga42055e2018-09-21 09:46:13 +0530383 kfree(rec);
384 } else {
385 break;
386 }
387 }
388
389tx_err:
390 if (rc < 0 && rc != -EAGAIN)
391 tls_err_abort(sk, EBADMSG);
392
393 return rc;
394}
395
396static void tls_encrypt_done(struct crypto_async_request *req, int err)
397{
398 struct aead_request *aead_req = (struct aead_request *)req;
399 struct sock *sk = req->data;
400 struct tls_context *tls_ctx = tls_get_ctx(sk);
401 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
402 struct tls_rec *rec;
403 bool ready = false;
404 int pending;
405
406 rec = container_of(aead_req, struct tls_rec, aead_req);
407
408 rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
409 rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
410
Vakul Garga42055e2018-09-21 09:46:13 +0530411
412 /* Free the record if error is previously set on socket */
413 if (err || sk->sk_err) {
414 free_sg(sk, rec->sg_encrypted_data,
415 &rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);
416
417 kfree(rec);
418 rec = NULL;
419
420 /* If err is already set on socket, return the same code */
421 if (sk->sk_err) {
422 ctx->async_wait.err = sk->sk_err;
423 } else {
424 ctx->async_wait.err = err;
425 tls_err_abort(sk, err);
426 }
427 }
428
Vakul Garg9932a292018-09-24 15:35:56 +0530429 if (rec) {
430 struct tls_rec *first_rec;
431
432 /* Mark the record as ready for transmission */
433 smp_store_mb(rec->tx_ready, true);
434
435 /* If received record is at head of tx_list, schedule tx */
436 first_rec = list_first_entry(&ctx->tx_list,
437 struct tls_rec, list);
438 if (rec == first_rec)
439 ready = true;
440 }
Vakul Garga42055e2018-09-21 09:46:13 +0530441
442 pending = atomic_dec_return(&ctx->encrypt_pending);
443
444 if (!pending && READ_ONCE(ctx->async_notify))
445 complete(&ctx->async_wait.completion);
446
447 if (!ready)
448 return;
449
450 /* Schedule the transmission */
451 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
452 schedule_delayed_work(&ctx->tx_work.work, 1);
453}
454
455static int tls_do_encryption(struct sock *sk,
456 struct tls_context *tls_ctx,
Daniel Borkmanna447da72018-06-15 03:07:45 +0200457 struct tls_sw_context_tx *ctx,
458 struct aead_request *aead_req,
459 size_t data_len)
Dave Watson3c4d7552017-06-14 11:37:39 -0700460{
Vakul Garga42055e2018-09-21 09:46:13 +0530461 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700462 int rc;
463
Vakul Garga42055e2018-09-21 09:46:13 +0530464 rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
465 rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700466
467 aead_request_set_tfm(aead_req, ctx->aead_send);
468 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
Vakul Garga42055e2018-09-21 09:46:13 +0530469 aead_request_set_crypt(aead_req, rec->sg_aead_in,
470 rec->sg_aead_out,
Dave Watsondbe42552018-03-22 10:10:06 -0700471 data_len, tls_ctx->tx.iv);
Vakul Garga54667f2018-01-31 21:34:37 +0530472
473 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Vakul Garga42055e2018-09-21 09:46:13 +0530474 tls_encrypt_done, sk);
Vakul Garga54667f2018-01-31 21:34:37 +0530475
Vakul Garg9932a292018-09-24 15:35:56 +0530476 /* Add the record in tx_list */
477 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +0530478 atomic_inc(&ctx->encrypt_pending);
Dave Watson3c4d7552017-06-14 11:37:39 -0700479
Vakul Garga42055e2018-09-21 09:46:13 +0530480 rc = crypto_aead_encrypt(aead_req);
481 if (!rc || rc != -EINPROGRESS) {
482 atomic_dec(&ctx->encrypt_pending);
483 rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
484 rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
485 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700486
Vakul Garg9932a292018-09-24 15:35:56 +0530487 if (!rc) {
488 WRITE_ONCE(rec->tx_ready, true);
489 } else if (rc != -EINPROGRESS) {
490 list_del(&rec->list);
Vakul Garga42055e2018-09-21 09:46:13 +0530491 return rc;
Vakul Garg9932a292018-09-24 15:35:56 +0530492 }
Vakul Garga42055e2018-09-21 09:46:13 +0530493
494 /* Unhook the record from context if encryption is not failure */
495 ctx->open_rec = NULL;
496 tls_advance_record_sn(sk, &tls_ctx->tx);
Dave Watson3c4d7552017-06-14 11:37:39 -0700497 return rc;
498}
499
500static int tls_push_record(struct sock *sk, int flags,
501 unsigned char record_type)
502{
503 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300504 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530505 struct tls_rec *rec = ctx->open_rec;
Daniel Borkmanna447da72018-06-15 03:07:45 +0200506 struct aead_request *req;
Dave Watson3c4d7552017-06-14 11:37:39 -0700507 int rc;
508
Vakul Garga42055e2018-09-21 09:46:13 +0530509 if (!rec)
510 return 0;
Daniel Borkmanna447da72018-06-15 03:07:45 +0200511
Vakul Garga42055e2018-09-21 09:46:13 +0530512 rec->tx_flags = flags;
513 req = &rec->aead_req;
Dave Watson3c4d7552017-06-14 11:37:39 -0700514
Vakul Garga42055e2018-09-21 09:46:13 +0530515 sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
516 sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);
517
518 tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
Dave Watsondbe42552018-03-22 10:10:06 -0700519 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
Dave Watson3c4d7552017-06-14 11:37:39 -0700520 record_type);
521
522 tls_fill_prepend(tls_ctx,
Vakul Garga42055e2018-09-21 09:46:13 +0530523 page_address(sg_page(&rec->sg_encrypted_data[0])) +
524 rec->sg_encrypted_data[0].offset,
525 rec->sg_plaintext_size, record_type);
Dave Watson3c4d7552017-06-14 11:37:39 -0700526
527 tls_ctx->pending_open_record_frags = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700528
Vakul Garga42055e2018-09-21 09:46:13 +0530529 rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
530 if (rc == -EINPROGRESS)
531 return -EINPROGRESS;
532
Dave Watson3c4d7552017-06-14 11:37:39 -0700533 if (rc < 0) {
Vakul Garga42055e2018-09-21 09:46:13 +0530534 tls_err_abort(sk, EBADMSG);
535 return rc;
Dave Watson3c4d7552017-06-14 11:37:39 -0700536 }
537
Vakul Garg9932a292018-09-24 15:35:56 +0530538 return tls_tx_records(sk, flags);
Dave Watson3c4d7552017-06-14 11:37:39 -0700539}
540
541static int tls_sw_push_pending_record(struct sock *sk, int flags)
542{
543 return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
544}
545
546static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
Dave Watson69ca9292018-03-22 10:09:53 -0700547 int length, int *pages_used,
548 unsigned int *size_used,
549 struct scatterlist *to, int to_max_pages,
Doron Roberts-Kedes2da19ed2018-07-26 07:59:36 -0700550 bool charge)
Dave Watson3c4d7552017-06-14 11:37:39 -0700551{
Dave Watson3c4d7552017-06-14 11:37:39 -0700552 struct page *pages[MAX_SKB_FRAGS];
553
554 size_t offset;
555 ssize_t copied, use;
556 int i = 0;
Dave Watson69ca9292018-03-22 10:09:53 -0700557 unsigned int size = *size_used;
558 int num_elem = *pages_used;
Dave Watson3c4d7552017-06-14 11:37:39 -0700559 int rc = 0;
560 int maxpages;
561
562 while (length > 0) {
563 i = 0;
Dave Watson69ca9292018-03-22 10:09:53 -0700564 maxpages = to_max_pages - num_elem;
Dave Watson3c4d7552017-06-14 11:37:39 -0700565 if (maxpages == 0) {
566 rc = -EFAULT;
567 goto out;
568 }
569 copied = iov_iter_get_pages(from, pages,
570 length,
571 maxpages, &offset);
572 if (copied <= 0) {
573 rc = -EFAULT;
574 goto out;
575 }
576
577 iov_iter_advance(from, copied);
578
579 length -= copied;
580 size += copied;
581 while (copied) {
582 use = min_t(int, copied, PAGE_SIZE - offset);
583
Dave Watson69ca9292018-03-22 10:09:53 -0700584 sg_set_page(&to[num_elem],
Dave Watson3c4d7552017-06-14 11:37:39 -0700585 pages[i], use, offset);
Dave Watson69ca9292018-03-22 10:09:53 -0700586 sg_unmark_end(&to[num_elem]);
587 if (charge)
588 sk_mem_charge(sk, use);
Dave Watson3c4d7552017-06-14 11:37:39 -0700589
590 offset = 0;
591 copied -= use;
592
593 ++i;
594 ++num_elem;
595 }
596 }
597
Vakul Gargcfb40992018-08-02 20:43:10 +0530598 /* Mark the end in the last sg entry if newly added */
599 if (num_elem > *pages_used)
600 sg_mark_end(&to[num_elem - 1]);
Dave Watson3c4d7552017-06-14 11:37:39 -0700601out:
Doron Roberts-Kedes2da19ed2018-07-26 07:59:36 -0700602 if (rc)
603 iov_iter_revert(from, size - *size_used);
Dave Watson69ca9292018-03-22 10:09:53 -0700604 *size_used = size;
605 *pages_used = num_elem;
606
Dave Watson3c4d7552017-06-14 11:37:39 -0700607 return rc;
608}
609
610static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
611 int bytes)
612{
613 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300614 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530615 struct tls_rec *rec = ctx->open_rec;
616 struct scatterlist *sg = rec->sg_plaintext_data;
Dave Watson3c4d7552017-06-14 11:37:39 -0700617 int copy, i, rc = 0;
618
619 for (i = tls_ctx->pending_open_record_frags;
Vakul Garga42055e2018-09-21 09:46:13 +0530620 i < rec->sg_plaintext_num_elem; ++i) {
Dave Watson3c4d7552017-06-14 11:37:39 -0700621 copy = sg[i].length;
622 if (copy_from_iter(
623 page_address(sg_page(&sg[i])) + sg[i].offset,
624 copy, from) != copy) {
625 rc = -EFAULT;
626 goto out;
627 }
628 bytes -= copy;
629
630 ++tls_ctx->pending_open_record_frags;
631
632 if (!bytes)
633 break;
634 }
635
636out:
637 return rc;
638}
639
Wei Yongjunbf17b672018-09-26 12:10:48 +0000640static struct tls_rec *get_rec(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -0700641{
642 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300643 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530644 struct tls_rec *rec;
645 int mem_size;
646
647 /* Return if we already have an open record */
648 if (ctx->open_rec)
649 return ctx->open_rec;
650
651 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
652
653 rec = kzalloc(mem_size, sk->sk_allocation);
654 if (!rec)
655 return NULL;
656
657 sg_init_table(&rec->sg_plaintext_data[0],
658 ARRAY_SIZE(rec->sg_plaintext_data));
659 sg_init_table(&rec->sg_encrypted_data[0],
660 ARRAY_SIZE(rec->sg_encrypted_data));
661
662 sg_init_table(rec->sg_aead_in, 2);
663 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
664 sizeof(rec->aad_space));
665 sg_unmark_end(&rec->sg_aead_in[1]);
666 sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);
667
668 sg_init_table(rec->sg_aead_out, 2);
669 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
670 sizeof(rec->aad_space));
671 sg_unmark_end(&rec->sg_aead_out[1]);
672 sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);
673
674 ctx->open_rec = rec;
675
676 return rec;
677}
678
679int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
680{
Dave Watson3c4d7552017-06-14 11:37:39 -0700681 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
Vakul Garga42055e2018-09-21 09:46:13 +0530682 struct tls_context *tls_ctx = tls_get_ctx(sk);
683 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
684 struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
685 bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
686 unsigned char record_type = TLS_RECORD_TYPE_DATA;
687 bool is_kvec = msg->msg_iter.type & ITER_KVEC;
Dave Watson3c4d7552017-06-14 11:37:39 -0700688 bool eor = !(msg->msg_flags & MSG_MORE);
689 size_t try_to_copy, copied = 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530690 struct tls_rec *rec;
691 int required_size;
692 int num_async = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700693 bool full_record;
Vakul Garga42055e2018-09-21 09:46:13 +0530694 int record_room;
695 int num_zc = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700696 int orig_size;
Vakul Garg4128c0c2018-09-24 16:09:49 +0530697 int ret = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700698
699 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
700 return -ENOTSUPP;
701
702 lock_sock(sk);
703
Vakul Garga42055e2018-09-21 09:46:13 +0530704 /* Wait till there is any pending write on socket */
705 if (unlikely(sk->sk_write_pending)) {
706 ret = wait_on_pending_writer(sk, &timeo);
707 if (unlikely(ret))
708 goto send_end;
709 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700710
711 if (unlikely(msg->msg_controllen)) {
712 ret = tls_proccess_cmsg(sk, msg, &record_type);
Vakul Garga42055e2018-09-21 09:46:13 +0530713 if (ret) {
714 if (ret == -EINPROGRESS)
715 num_async++;
716 else if (ret != -EAGAIN)
717 goto send_end;
718 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700719 }
720
721 while (msg_data_left(msg)) {
722 if (sk->sk_err) {
r.hering@avm.de30be8f82018-01-12 15:42:06 +0100723 ret = -sk->sk_err;
Dave Watson3c4d7552017-06-14 11:37:39 -0700724 goto send_end;
725 }
726
Vakul Garga42055e2018-09-21 09:46:13 +0530727 rec = get_rec(sk);
728 if (!rec) {
729 ret = -ENOMEM;
730 goto send_end;
731 }
732
733 orig_size = rec->sg_plaintext_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700734 full_record = false;
735 try_to_copy = msg_data_left(msg);
Vakul Garga42055e2018-09-21 09:46:13 +0530736 record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700737 if (try_to_copy >= record_room) {
738 try_to_copy = record_room;
739 full_record = true;
740 }
741
Vakul Garga42055e2018-09-21 09:46:13 +0530742 required_size = rec->sg_plaintext_size + try_to_copy +
Dave Watsondbe42552018-03-22 10:10:06 -0700743 tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700744
745 if (!sk_stream_memory_free(sk))
746 goto wait_for_sndbuf;
Vakul Garga42055e2018-09-21 09:46:13 +0530747
Dave Watson3c4d7552017-06-14 11:37:39 -0700748alloc_encrypted:
749 ret = alloc_encrypted_sg(sk, required_size);
750 if (ret) {
751 if (ret != -ENOSPC)
752 goto wait_for_memory;
753
754 /* Adjust try_to_copy according to the amount that was
755 * actually allocated. The difference is due
756 * to max sg elements limit
757 */
Vakul Garga42055e2018-09-21 09:46:13 +0530758 try_to_copy -= required_size - rec->sg_encrypted_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700759 full_record = true;
760 }
Vakul Garga42055e2018-09-21 09:46:13 +0530761
762 if (!is_kvec && (full_record || eor) && !async_capable) {
Dave Watson3c4d7552017-06-14 11:37:39 -0700763 ret = zerocopy_from_iter(sk, &msg->msg_iter,
Vakul Garga42055e2018-09-21 09:46:13 +0530764 try_to_copy, &rec->sg_plaintext_num_elem,
765 &rec->sg_plaintext_size,
766 rec->sg_plaintext_data,
767 ARRAY_SIZE(rec->sg_plaintext_data),
Doron Roberts-Kedes2da19ed2018-07-26 07:59:36 -0700768 true);
Dave Watson3c4d7552017-06-14 11:37:39 -0700769 if (ret)
770 goto fallback_to_reg_send;
771
Vakul Garga42055e2018-09-21 09:46:13 +0530772 num_zc++;
Dave Watson3c4d7552017-06-14 11:37:39 -0700773 copied += try_to_copy;
774 ret = tls_push_record(sk, msg->msg_flags, record_type);
Vakul Garga42055e2018-09-21 09:46:13 +0530775 if (ret) {
776 if (ret == -EINPROGRESS)
777 num_async++;
778 else if (ret != -EAGAIN)
779 goto send_end;
780 }
Doron Roberts-Kedes5a3611e2018-07-26 07:59:35 -0700781 continue;
Dave Watson3c4d7552017-06-14 11:37:39 -0700782
Dave Watson3c4d7552017-06-14 11:37:39 -0700783fallback_to_reg_send:
Vakul Garga42055e2018-09-21 09:46:13 +0530784 trim_sg(sk, rec->sg_plaintext_data,
785 &rec->sg_plaintext_num_elem,
786 &rec->sg_plaintext_size,
Dave Watson3c4d7552017-06-14 11:37:39 -0700787 orig_size);
788 }
789
Vakul Garga42055e2018-09-21 09:46:13 +0530790 required_size = rec->sg_plaintext_size + try_to_copy;
Dave Watson3c4d7552017-06-14 11:37:39 -0700791alloc_plaintext:
792 ret = alloc_plaintext_sg(sk, required_size);
793 if (ret) {
794 if (ret != -ENOSPC)
795 goto wait_for_memory;
796
797 /* Adjust try_to_copy according to the amount that was
798 * actually allocated. The difference is due
799 * to max sg elements limit
800 */
Vakul Garga42055e2018-09-21 09:46:13 +0530801 try_to_copy -= required_size - rec->sg_plaintext_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700802 full_record = true;
803
Vakul Garga42055e2018-09-21 09:46:13 +0530804 trim_sg(sk, rec->sg_encrypted_data,
805 &rec->sg_encrypted_num_elem,
806 &rec->sg_encrypted_size,
807 rec->sg_plaintext_size +
Dave Watsondbe42552018-03-22 10:10:06 -0700808 tls_ctx->tx.overhead_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700809 }
810
811 ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
812 if (ret)
813 goto trim_sgl;
814
815 copied += try_to_copy;
816 if (full_record || eor) {
Dave Watson3c4d7552017-06-14 11:37:39 -0700817 ret = tls_push_record(sk, msg->msg_flags, record_type);
818 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +0530819 if (ret == -EINPROGRESS)
820 num_async++;
821 else if (ret != -EAGAIN)
822 goto send_end;
Dave Watson3c4d7552017-06-14 11:37:39 -0700823 }
824 }
825
826 continue;
827
828wait_for_sndbuf:
829 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
830wait_for_memory:
831 ret = sk_stream_wait_memory(sk, &timeo);
832 if (ret) {
833trim_sgl:
834 trim_both_sgl(sk, orig_size);
835 goto send_end;
836 }
837
Vakul Garga42055e2018-09-21 09:46:13 +0530838 if (rec->sg_encrypted_size < required_size)
Dave Watson3c4d7552017-06-14 11:37:39 -0700839 goto alloc_encrypted;
840
841 goto alloc_plaintext;
842 }
843
Vakul Garga42055e2018-09-21 09:46:13 +0530844 if (!num_async) {
845 goto send_end;
846 } else if (num_zc) {
847 /* Wait for pending encryptions to get completed */
848 smp_store_mb(ctx->async_notify, true);
849
850 if (atomic_read(&ctx->encrypt_pending))
851 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
852 else
853 reinit_completion(&ctx->async_wait.completion);
854
855 WRITE_ONCE(ctx->async_notify, false);
856
857 if (ctx->async_wait.err) {
858 ret = ctx->async_wait.err;
859 copied = 0;
860 }
861 }
862
863 /* Transmit if any encryptions have completed */
864 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
865 cancel_delayed_work(&ctx->tx_work.work);
866 tls_tx_records(sk, msg->msg_flags);
867 }
868
Dave Watson3c4d7552017-06-14 11:37:39 -0700869send_end:
870 ret = sk_stream_error(sk, msg->msg_flags, ret);
871
872 release_sock(sk);
873 return copied ? copied : ret;
874}
875
876int tls_sw_sendpage(struct sock *sk, struct page *page,
877 int offset, size_t size, int flags)
878{
Vakul Garga42055e2018-09-21 09:46:13 +0530879 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
Dave Watson3c4d7552017-06-14 11:37:39 -0700880 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300881 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -0700882 unsigned char record_type = TLS_RECORD_TYPE_DATA;
Vakul Garga42055e2018-09-21 09:46:13 +0530883 size_t orig_size = size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700884 struct scatterlist *sg;
Vakul Garga42055e2018-09-21 09:46:13 +0530885 struct tls_rec *rec;
886 int num_async = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700887 bool full_record;
888 int record_room;
Vakul Garg4128c0c2018-09-24 16:09:49 +0530889 int ret = 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530890 bool eor;
Dave Watson3c4d7552017-06-14 11:37:39 -0700891
892 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
893 MSG_SENDPAGE_NOTLAST))
894 return -ENOTSUPP;
895
896 /* No MSG_EOR from splice, only look at MSG_MORE */
897 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
898
899 lock_sock(sk);
900
901 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
902
Vakul Garga42055e2018-09-21 09:46:13 +0530903 /* Wait till there is any pending write on socket */
904 if (unlikely(sk->sk_write_pending)) {
905 ret = wait_on_pending_writer(sk, &timeo);
906 if (unlikely(ret))
907 goto sendpage_end;
908 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700909
910 /* Call the sk_stream functions to manage the sndbuf mem. */
911 while (size > 0) {
912 size_t copy, required_size;
913
914 if (sk->sk_err) {
r.hering@avm.de30be8f82018-01-12 15:42:06 +0100915 ret = -sk->sk_err;
Dave Watson3c4d7552017-06-14 11:37:39 -0700916 goto sendpage_end;
917 }
918
Vakul Garga42055e2018-09-21 09:46:13 +0530919 rec = get_rec(sk);
920 if (!rec) {
921 ret = -ENOMEM;
922 goto sendpage_end;
923 }
924
Dave Watson3c4d7552017-06-14 11:37:39 -0700925 full_record = false;
Vakul Garga42055e2018-09-21 09:46:13 +0530926 record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700927 copy = size;
928 if (copy >= record_room) {
929 copy = record_room;
930 full_record = true;
931 }
Vakul Garga42055e2018-09-21 09:46:13 +0530932 required_size = rec->sg_plaintext_size + copy +
Dave Watsondbe42552018-03-22 10:10:06 -0700933 tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700934
935 if (!sk_stream_memory_free(sk))
936 goto wait_for_sndbuf;
937alloc_payload:
938 ret = alloc_encrypted_sg(sk, required_size);
939 if (ret) {
940 if (ret != -ENOSPC)
941 goto wait_for_memory;
942
943 /* Adjust copy according to the amount that was
944 * actually allocated. The difference is due
945 * to max sg elements limit
946 */
Vakul Garga42055e2018-09-21 09:46:13 +0530947 copy -= required_size - rec->sg_plaintext_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700948 full_record = true;
949 }
950
951 get_page(page);
Vakul Garga42055e2018-09-21 09:46:13 +0530952 sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
Dave Watson3c4d7552017-06-14 11:37:39 -0700953 sg_set_page(sg, page, copy, offset);
Dave Watson7a8c4dd2018-01-19 12:30:13 -0800954 sg_unmark_end(sg);
955
Vakul Garga42055e2018-09-21 09:46:13 +0530956 rec->sg_plaintext_num_elem++;
Dave Watson3c4d7552017-06-14 11:37:39 -0700957
958 sk_mem_charge(sk, copy);
959 offset += copy;
960 size -= copy;
Vakul Garga42055e2018-09-21 09:46:13 +0530961 rec->sg_plaintext_size += copy;
962 tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;
Dave Watson3c4d7552017-06-14 11:37:39 -0700963
964 if (full_record || eor ||
Vakul Garga42055e2018-09-21 09:46:13 +0530965 rec->sg_plaintext_num_elem ==
966 ARRAY_SIZE(rec->sg_plaintext_data)) {
Dave Watson3c4d7552017-06-14 11:37:39 -0700967 ret = tls_push_record(sk, flags, record_type);
968 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +0530969 if (ret == -EINPROGRESS)
970 num_async++;
971 else if (ret != -EAGAIN)
972 goto sendpage_end;
Dave Watson3c4d7552017-06-14 11:37:39 -0700973 }
974 }
975 continue;
976wait_for_sndbuf:
977 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
978wait_for_memory:
979 ret = sk_stream_wait_memory(sk, &timeo);
980 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +0530981 trim_both_sgl(sk, rec->sg_plaintext_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700982 goto sendpage_end;
983 }
984
Dave Watson3c4d7552017-06-14 11:37:39 -0700985 goto alloc_payload;
986 }
987
Vakul Garga42055e2018-09-21 09:46:13 +0530988 if (num_async) {
989 /* Transmit if any encryptions have completed */
990 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
991 cancel_delayed_work(&ctx->tx_work.work);
992 tls_tx_records(sk, flags);
993 }
994 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700995sendpage_end:
996 if (orig_size > size)
997 ret = orig_size - size;
998 else
999 ret = sk_stream_error(sk, flags, ret);
1000
1001 release_sock(sk);
1002 return ret;
1003}
1004
Dave Watsonc46234e2018-03-22 10:10:35 -07001005static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
1006 long timeo, int *err)
1007{
1008 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001009 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001010 struct sk_buff *skb;
1011 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1012
1013 while (!(skb = ctx->recv_pkt)) {
1014 if (sk->sk_err) {
1015 *err = sock_error(sk);
1016 return NULL;
1017 }
1018
Doron Roberts-Kedesfcf47932018-07-18 16:22:27 -07001019 if (sk->sk_shutdown & RCV_SHUTDOWN)
1020 return NULL;
1021
Dave Watsonc46234e2018-03-22 10:10:35 -07001022 if (sock_flag(sk, SOCK_DONE))
1023 return NULL;
1024
1025 if ((flags & MSG_DONTWAIT) || !timeo) {
1026 *err = -EAGAIN;
1027 return NULL;
1028 }
1029
1030 add_wait_queue(sk_sleep(sk), &wait);
1031 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1032 sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
1033 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1034 remove_wait_queue(sk_sleep(sk), &wait);
1035
1036 /* Handle signals */
1037 if (signal_pending(current)) {
1038 *err = sock_intr_errno(timeo);
1039 return NULL;
1040 }
1041 }
1042
1043 return skb;
1044}
1045
Vakul Garg0b243d02018-08-10 20:46:41 +05301046/* This function decrypts the input skb into either out_iov or in out_sg
1047 * or in skb buffers itself. The input parameter 'zc' indicates if
1048 * zero-copy mode needs to be tried or not. With zero-copy mode, either
1049 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
1050 * NULL, then the decryption happens inside skb buffers itself, i.e.
1051 * zero-copy gets disabled and 'zc' is updated.
1052 */
1053
1054static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1055 struct iov_iter *out_iov,
1056 struct scatterlist *out_sg,
1057 int *chunk, bool *zc)
1058{
1059 struct tls_context *tls_ctx = tls_get_ctx(sk);
1060 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1061 struct strp_msg *rxm = strp_msg(skb);
1062 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1063 struct aead_request *aead_req;
1064 struct sk_buff *unused;
1065 u8 *aad, *iv, *mem = NULL;
1066 struct scatterlist *sgin = NULL;
1067 struct scatterlist *sgout = NULL;
1068 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
1069
1070 if (*zc && (out_iov || out_sg)) {
1071 if (out_iov)
1072 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1073 else
1074 n_sgout = sg_nents(out_sg);
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001075 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
1076 rxm->full_len - tls_ctx->rx.prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301077 } else {
1078 n_sgout = 0;
1079 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001080 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301081 }
1082
Vakul Garg0b243d02018-08-10 20:46:41 +05301083 if (n_sgin < 1)
1084 return -EBADMSG;
1085
1086 /* Increment to accommodate AAD */
1087 n_sgin = n_sgin + 1;
1088
1089 nsg = n_sgin + n_sgout;
1090
1091 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1092 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1093 mem_size = mem_size + TLS_AAD_SPACE_SIZE;
1094 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1095
1096 /* Allocate a single block of memory which contains
1097 * aead_req || sgin[] || sgout[] || aad || iv.
1098 * This order achieves correct alignment for aead_req, sgin, sgout.
1099 */
1100 mem = kmalloc(mem_size, sk->sk_allocation);
1101 if (!mem)
1102 return -ENOMEM;
1103
1104 /* Segment the allocated memory */
1105 aead_req = (struct aead_request *)mem;
1106 sgin = (struct scatterlist *)(mem + aead_size);
1107 sgout = sgin + n_sgin;
1108 aad = (u8 *)(sgout + n_sgout);
1109 iv = aad + TLS_AAD_SPACE_SIZE;
1110
1111 /* Prepare IV */
1112 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1113 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1114 tls_ctx->rx.iv_size);
1115 if (err < 0) {
1116 kfree(mem);
1117 return err;
1118 }
1119 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1120
1121 /* Prepare AAD */
1122 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
1123 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
1124 ctx->control);
1125
1126 /* Prepare sgin */
1127 sg_init_table(sgin, n_sgin);
1128 sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
1129 err = skb_to_sgvec(skb, &sgin[1],
1130 rxm->offset + tls_ctx->rx.prepend_size,
1131 rxm->full_len - tls_ctx->rx.prepend_size);
1132 if (err < 0) {
1133 kfree(mem);
1134 return err;
1135 }
1136
1137 if (n_sgout) {
1138 if (out_iov) {
1139 sg_init_table(sgout, n_sgout);
1140 sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
1141
1142 *chunk = 0;
1143 err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
1144 chunk, &sgout[1],
1145 (n_sgout - 1), false);
1146 if (err < 0)
1147 goto fallback_to_reg_recv;
1148 } else if (out_sg) {
1149 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1150 } else {
1151 goto fallback_to_reg_recv;
1152 }
1153 } else {
1154fallback_to_reg_recv:
1155 sgout = sgin;
1156 pages = 0;
1157 *chunk = 0;
1158 *zc = false;
1159 }
1160
1161 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301162 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1163 data_len, aead_req, *zc);
1164 if (err == -EINPROGRESS)
1165 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301166
1167 /* Release the pages in case iov was mapped to pages */
1168 for (; pages > 0; pages--)
1169 put_page(sg_page(&sgout[pages]));
1170
1171 kfree(mem);
1172 return err;
1173}
1174
Boris Pismennydafb67f2018-07-13 14:33:40 +03001175static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg0b243d02018-08-10 20:46:41 +05301176 struct iov_iter *dest, int *chunk, bool *zc)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001177{
1178 struct tls_context *tls_ctx = tls_get_ctx(sk);
1179 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1180 struct strp_msg *rxm = strp_msg(skb);
1181 int err = 0;
1182
Boris Pismenny4799ac82018-07-13 14:33:43 +03001183#ifdef CONFIG_TLS_DEVICE
1184 err = tls_device_decrypted(sk, skb);
Boris Pismennydafb67f2018-07-13 14:33:40 +03001185 if (err < 0)
1186 return err;
Boris Pismenny4799ac82018-07-13 14:33:43 +03001187#endif
1188 if (!ctx->decrypted) {
Vakul Garg0b243d02018-08-10 20:46:41 +05301189 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
Vakul Garg94524d82018-08-29 15:26:55 +05301190 if (err < 0) {
1191 if (err == -EINPROGRESS)
1192 tls_advance_record_sn(sk, &tls_ctx->rx);
1193
Boris Pismenny4799ac82018-07-13 14:33:43 +03001194 return err;
Vakul Garg94524d82018-08-29 15:26:55 +05301195 }
Boris Pismenny4799ac82018-07-13 14:33:43 +03001196 } else {
1197 *zc = false;
1198 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001199
1200 rxm->offset += tls_ctx->rx.prepend_size;
1201 rxm->full_len -= tls_ctx->rx.overhead_size;
1202 tls_advance_record_sn(sk, &tls_ctx->rx);
1203 ctx->decrypted = true;
1204 ctx->saved_data_ready(sk);
1205
1206 return err;
1207}
1208
1209int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1210 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001211{
Vakul Garg0b243d02018-08-10 20:46:41 +05301212 bool zc = true;
1213 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001214
Vakul Garg0b243d02018-08-10 20:46:41 +05301215 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
Dave Watsonc46234e2018-03-22 10:10:35 -07001216}
1217
1218static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1219 unsigned int len)
1220{
1221 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001222 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001223
Vakul Garg94524d82018-08-29 15:26:55 +05301224 if (skb) {
1225 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001226
Vakul Garg94524d82018-08-29 15:26:55 +05301227 if (len < rxm->full_len) {
1228 rxm->offset += len;
1229 rxm->full_len -= len;
1230 return false;
1231 }
1232 kfree_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001233 }
1234
1235 /* Finished with message */
1236 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001237 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001238
1239 return true;
1240}
1241
1242int tls_sw_recvmsg(struct sock *sk,
1243 struct msghdr *msg,
1244 size_t len,
1245 int nonblock,
1246 int flags,
1247 int *addr_len)
1248{
1249 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001250 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001251 unsigned char control;
1252 struct strp_msg *rxm;
1253 struct sk_buff *skb;
1254 ssize_t copied = 0;
1255 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001256 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001257 long timeo;
Doron Roberts-Kedes0a26cf32018-07-25 14:48:21 -07001258 bool is_kvec = msg->msg_iter.type & ITER_KVEC;
Vakul Garg94524d82018-08-29 15:26:55 +05301259 int num_async = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001260
1261 flags |= nonblock;
1262
1263 if (unlikely(flags & MSG_ERRQUEUE))
1264 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1265
1266 lock_sock(sk);
1267
Daniel Borkmann06030db2018-06-15 03:07:46 +02001268 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
Dave Watsonc46234e2018-03-22 10:10:35 -07001269 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1270 do {
1271 bool zc = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301272 bool async = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001273 int chunk = 0;
1274
1275 skb = tls_wait_data(sk, flags, timeo, &err);
1276 if (!skb)
1277 goto recv_end;
1278
1279 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301280
Dave Watsonc46234e2018-03-22 10:10:35 -07001281 if (!cmsg) {
1282 int cerr;
1283
1284 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1285 sizeof(ctx->control), &ctx->control);
1286 cmsg = true;
1287 control = ctx->control;
1288 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1289 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1290 err = -EIO;
1291 goto recv_end;
1292 }
1293 }
1294 } else if (control != ctx->control) {
1295 goto recv_end;
1296 }
1297
1298 if (!ctx->decrypted) {
Vakul Garg0b243d02018-08-10 20:46:41 +05301299 int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07001300
Vakul Garg0b243d02018-08-10 20:46:41 +05301301 if (!is_kvec && to_copy <= len &&
1302 likely(!(flags & MSG_PEEK)))
Dave Watsonc46234e2018-03-22 10:10:35 -07001303 zc = true;
Dave Watsonc46234e2018-03-22 10:10:35 -07001304
Vakul Garg0b243d02018-08-10 20:46:41 +05301305 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1306 &chunk, &zc);
Vakul Garg94524d82018-08-29 15:26:55 +05301307 if (err < 0 && err != -EINPROGRESS) {
Vakul Garg0b243d02018-08-10 20:46:41 +05301308 tls_err_abort(sk, EBADMSG);
1309 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001310 }
Vakul Garg94524d82018-08-29 15:26:55 +05301311
1312 if (err == -EINPROGRESS) {
1313 async = true;
1314 num_async++;
1315 goto pick_next_record;
1316 }
1317
Dave Watsonc46234e2018-03-22 10:10:35 -07001318 ctx->decrypted = true;
1319 }
1320
1321 if (!zc) {
1322 chunk = min_t(unsigned int, rxm->full_len, len);
Vakul Garg94524d82018-08-29 15:26:55 +05301323
Dave Watsonc46234e2018-03-22 10:10:35 -07001324 err = skb_copy_datagram_msg(skb, rxm->offset, msg,
1325 chunk);
1326 if (err < 0)
1327 goto recv_end;
1328 }
1329
Vakul Garg94524d82018-08-29 15:26:55 +05301330pick_next_record:
Dave Watsonc46234e2018-03-22 10:10:35 -07001331 copied += chunk;
1332 len -= chunk;
1333 if (likely(!(flags & MSG_PEEK))) {
1334 u8 control = ctx->control;
1335
Vakul Garg94524d82018-08-29 15:26:55 +05301336 /* For async, drop current skb reference */
1337 if (async)
1338 skb = NULL;
1339
Dave Watsonc46234e2018-03-22 10:10:35 -07001340 if (tls_sw_advance_skb(sk, skb, chunk)) {
1341 /* Return full control message to
1342 * userspace before trying to parse
1343 * another message type
1344 */
1345 msg->msg_flags |= MSG_EOR;
1346 if (control != TLS_RECORD_TYPE_DATA)
1347 goto recv_end;
Vakul Garg94524d82018-08-29 15:26:55 +05301348 } else {
1349 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001350 }
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001351 } else {
1352 /* MSG_PEEK right now cannot look beyond current skb
1353 * from strparser, meaning we cannot advance skb here
1354 * and thus unpause strparser since we'd loose original
1355 * one.
1356 */
1357 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001358 }
Vakul Garg94524d82018-08-29 15:26:55 +05301359
Daniel Borkmann06030db2018-06-15 03:07:46 +02001360 /* If we have a new message from strparser, continue now. */
1361 if (copied >= target && !ctx->recv_pkt)
1362 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001363 } while (len);
1364
1365recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301366 if (num_async) {
1367 /* Wait for all previously submitted records to be decrypted */
1368 smp_store_mb(ctx->async_notify, true);
1369 if (atomic_read(&ctx->decrypt_pending)) {
1370 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1371 if (err) {
1372 /* one of async decrypt failed */
1373 tls_err_abort(sk, err);
1374 copied = 0;
1375 }
1376 } else {
1377 reinit_completion(&ctx->async_wait.completion);
1378 }
1379 WRITE_ONCE(ctx->async_notify, false);
1380 }
1381
Dave Watsonc46234e2018-03-22 10:10:35 -07001382 release_sock(sk);
1383 return copied ? : err;
1384}
1385
1386ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1387 struct pipe_inode_info *pipe,
1388 size_t len, unsigned int flags)
1389{
1390 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001391 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001392 struct strp_msg *rxm = NULL;
1393 struct sock *sk = sock->sk;
1394 struct sk_buff *skb;
1395 ssize_t copied = 0;
1396 int err = 0;
1397 long timeo;
1398 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05301399 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001400
1401 lock_sock(sk);
1402
1403 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1404
1405 skb = tls_wait_data(sk, flags, timeo, &err);
1406 if (!skb)
1407 goto splice_read_end;
1408
1409 /* splice does not support reading control messages */
1410 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1411 err = -ENOTSUPP;
1412 goto splice_read_end;
1413 }
1414
1415 if (!ctx->decrypted) {
Vakul Garg0b243d02018-08-10 20:46:41 +05301416 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
Dave Watsonc46234e2018-03-22 10:10:35 -07001417
1418 if (err < 0) {
1419 tls_err_abort(sk, EBADMSG);
1420 goto splice_read_end;
1421 }
1422 ctx->decrypted = true;
1423 }
1424 rxm = strp_msg(skb);
1425
1426 chunk = min_t(unsigned int, rxm->full_len, len);
1427 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1428 if (copied < 0)
1429 goto splice_read_end;
1430
1431 if (likely(!(flags & MSG_PEEK)))
1432 tls_sw_advance_skb(sk, skb, copied);
1433
1434splice_read_end:
1435 release_sock(sk);
1436 return copied ? : err;
1437}
1438
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001439unsigned int tls_sw_poll(struct file *file, struct socket *sock,
1440 struct poll_table_struct *wait)
Dave Watsonc46234e2018-03-22 10:10:35 -07001441{
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001442 unsigned int ret;
Dave Watsonc46234e2018-03-22 10:10:35 -07001443 struct sock *sk = sock->sk;
1444 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001445 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001446
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001447 /* Grab POLLOUT and POLLHUP from the underlying socket */
1448 ret = ctx->sk_poll(file, sock, wait);
Dave Watsonc46234e2018-03-22 10:10:35 -07001449
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001450 /* Clear POLLIN bits, and set based on recv_pkt */
1451 ret &= ~(POLLIN | POLLRDNORM);
Dave Watsonc46234e2018-03-22 10:10:35 -07001452 if (ctx->recv_pkt)
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001453 ret |= POLLIN | POLLRDNORM;
Dave Watsonc46234e2018-03-22 10:10:35 -07001454
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001455 return ret;
Dave Watsonc46234e2018-03-22 10:10:35 -07001456}
1457
1458static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1459{
1460 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001461 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Kees Cook3463e512018-06-25 16:55:05 -07001462 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07001463 struct strp_msg *rxm = strp_msg(skb);
1464 size_t cipher_overhead;
1465 size_t data_len = 0;
1466 int ret;
1467
1468 /* Verify that we have a full TLS header, or wait for more data */
1469 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
1470 return 0;
1471
Kees Cook3463e512018-06-25 16:55:05 -07001472 /* Sanity-check size of on-stack buffer. */
1473 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
1474 ret = -EINVAL;
1475 goto read_failure;
1476 }
1477
Dave Watsonc46234e2018-03-22 10:10:35 -07001478 /* Linearize header to local buffer */
1479 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
1480
1481 if (ret < 0)
1482 goto read_failure;
1483
1484 ctx->control = header[0];
1485
1486 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1487
1488 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
1489
1490 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
1491 ret = -EMSGSIZE;
1492 goto read_failure;
1493 }
1494 if (data_len < cipher_overhead) {
1495 ret = -EBADMSG;
1496 goto read_failure;
1497 }
1498
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001499 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
1500 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001501 ret = -EINVAL;
1502 goto read_failure;
1503 }
1504
Boris Pismenny4799ac82018-07-13 14:33:43 +03001505#ifdef CONFIG_TLS_DEVICE
1506 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1507 *(u64*)tls_ctx->rx.rec_seq);
1508#endif
Dave Watsonc46234e2018-03-22 10:10:35 -07001509 return data_len + TLS_HEADER_SIZE;
1510
1511read_failure:
1512 tls_err_abort(strp->sk, ret);
1513
1514 return ret;
1515}
1516
1517static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1518{
1519 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001520 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001521
1522 ctx->decrypted = false;
1523
1524 ctx->recv_pkt = skb;
1525 strp_pause(strp);
1526
Vakul Gargad13acc2018-07-30 16:08:33 +05301527 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001528}
1529
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

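/* Tear down the software TX path: drain in-flight async encryptions, push
 * out whatever finished records remain, then free any partially sent or
 * still-queued records along with the AEAD transform.
 */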
void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	cancel_delayed_work_sync(&ctx->tx_work.work);

	/* Tx whatever records we can transmit and abandon the rest */
	tls_tx_records(sk, -1);

	/* Free up unsent records in tx_list. First free the partially sent
	 * record, if any, at the head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		struct scatterlist *sg = tls_ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}

		tls_ctx->partially_sent_record = NULL;

		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		free_sg(sk, rec->sg_plaintext_data,
			&rec->sg_plaintext_num_elem,
			&rec->sg_plaintext_size);

		list_del(&rec->list);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		free_sg(sk, rec->sg_encrypted_data,
			&rec->sg_encrypted_num_elem,
			&rec->sg_encrypted_size);

		free_sg(sk, rec->sg_plaintext_data,
			&rec->sg_plaintext_num_elem,
			&rec->sg_plaintext_size);

		list_del(&rec->list);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);

	kfree(ctx);
}

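/* Detach the RX path from the socket: drop any pending record, free the
 * AEAD transform and unwind strparser. The socket lock is released around
 * strp_done(), which waits for strparser work that may itself need to take
 * the lock.
 */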
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

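/* Final RX teardown: release the socket-facing resources, then free the
 * software RX context itself.
 */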
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

/* Work handler that transmits the encrypted records queued on tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
}

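/* Install the software kTLS crypto state for one direction of the socket
 * (tx != 0 configures TLS_TX, otherwise TLS_RX). As a rough sketch of how
 * userspace typically reaches this path (illustrative only; iv, rec_seq,
 * key and salt come from the application's TLS handshake):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */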
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

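	/* Pull the per-cipher parameters out of the crypto_info provided
	 * by userspace; AES-128-GCM is the only cipher supported here.
	 */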
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
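	/* The IV buffer holds the 4-byte implicit salt followed by the
	 * 8-byte explicit per-record IV; together they form the full
	 * 12-byte GCM nonce.
	 */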
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

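	/* Allocate the AEAD transform only on first use; a transform left
	 * over from an earlier setup of this context is reused and simply
	 * rekeyed below.
	 */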
	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

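	/* RX only: route incoming data through strparser for record framing,
	 * replace the socket's data-ready callback, and save the underlying
	 * poll callback for use by tls_sw_poll().
	 */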
	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}