/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

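/* Completion callback for asynchronous AEAD decryption.  It propagates
 * any error to the socket, drops the skb reference that carried the
 * socket pointer, releases the pages pinned for zero-copy output, frees
 * the request, and wakes up a waiter in tls_sw_recvmsg() once the last
 * pending decryption has finished.
 */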
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	/* Propagate if there was an err */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Release the skb, pages and memory allocated for crypto req */
	kfree_skb(skb);

	/* Skip the first S/G entry as it points to AAD */
	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
		if (!sg)
			break;
		put_page(sg_page(sg));
	}

	kfree(aead_req);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

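/* Submit one AEAD decryption request covering AAD + ciphertext + tag.
 * In async mode the skb carries the socket pointer to tls_decrypt_done()
 * and -EINPROGRESS is returned to the caller; in sync mode we wait for
 * the crypto layer to complete the request before returning.
 */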
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before kfree_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

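/* Shrink a scatterlist to target_size bytes, releasing the pages and
 * socket memory charge of any fully trimmed entries and adjusting the
 * length of the last remaining one.
 */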
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	trim_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, rec->sg_encrypted_data,
		&rec->sg_encrypted_num_elem,
		&rec->sg_encrypted_size,
		target_size);
}

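/* Allocate socket memory for the ciphertext scatterlist of the open
 * record.  On -ENOSPC the scatterlist is full: the element count is
 * clamped to the array size and the caller sends a shorter record.
 */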
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 rec->sg_encrypted_data, 0,
			 &rec->sg_encrypted_num_elem,
			 &rec->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	int rc = 0;

	rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
			 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	/* Return if there is no open record */
	if (!rec)
		return;

	free_sg(sk, rec->sg_encrypted_data,
		&rec->sg_encrypted_num_elem,
		&rec->sg_encrypted_size);

	free_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size);
}

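/* Insert an encrypted record into tx_ready_list, keeping the list sorted
 * by record sequence number (taken from the AAD).  Returns true if
 * transmission can proceed (see is_tx_ready()).
 */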
static bool append_tx_ready_list(struct tls_context *tls_ctx,
				 struct tls_sw_context_tx *ctx,
				 struct tls_rec *enc_rec)
{
	u64 new_seq = be64_to_cpup((const __be64 *)&enc_rec->aad_space);
	struct list_head *pos;

	/* Need to insert encrypted record in tx_ready_list sorted
	 * as per sequence number. Traverse linked list from tail.
	 */
	list_for_each_prev(pos, &ctx->tx_ready_list) {
		struct tls_rec *rec = (struct tls_rec *)pos;
		u64 seq = be64_to_cpup((const __be64 *)&rec->aad_space);

		if (new_seq > seq)
			break;
	}

	list_add((struct list_head *)&enc_rec->list, pos);

	return is_tx_ready(tls_ctx, ctx);
}

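/* Transmit the records on tx_ready_list whose sequence numbers are in
 * order, starting with a partially sent record if one exists.  Called
 * with the socket lock held; flags == -1 means "use the flags saved in
 * each record".
 */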
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_ready_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_ready_list
		 */
		tls_ctx->tx_seq_number++;
		list_del(&rec->list);
		kfree(rec);
	}

	/* Tx all ready records which have expected sequence number */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_ready_list, list) {
		u64 seq = be64_to_cpup((const __be64 *)&rec->aad_space);

		if (seq == tls_ctx->tx_seq_number) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			rc = tls_push_sg(sk, tls_ctx,
					 &rec->sg_encrypted_data[0],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			tls_ctx->tx_seq_number++;
			list_del(&rec->list);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

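/* Completion callback for asynchronous record encryption.  Restores the
 * header bytes hidden from the cipher, frees the plaintext pages, queues
 * the encrypted record on tx_ready_list (or frees it on error) and, if
 * the record is transmittable, schedules the tx worker.
 */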
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);

	rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	free_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);

	/* Free the record if error is previously set on socket */
	if (err || sk->sk_err) {
		free_sg(sk, rec->sg_encrypted_data,
			&rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);

		kfree(rec);
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	/* Append the record in tx queue */
	if (rec)
		ready = append_tx_ready_list(tls_ctx, ctx, rec);

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

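/* Kick off AEAD encryption of the open record.  The first ciphertext
 * entry is advanced past the TLS header so the cipher only writes
 * payload and tag.  Returns -EINPROGRESS when the request completes
 * asynchronously; on success or -EINPROGRESS the record is detached
 * from the context and the sequence number advanced.
 */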
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	struct tls_rec *rec = ctx->open_rec;
	int rc;

	rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
		rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
	}

	/* Case of encryption failure */
	if (rc && rc != -EINPROGRESS)
		return rc;

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}

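/* Close the currently open record: mark the ends of the plaintext and
 * ciphertext scatterlists, build the AAD and the TLS record header, and
 * hand the record to tls_do_encryption().  For synchronous ciphers the
 * finished record is queued and transmission attempted immediately.
 */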
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct aead_request *req;
	int rc;

	if (!rec)
		return 0;

	rec->tx_flags = flags;
	req = &rec->aead_req;

	sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
	sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);

	tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&rec->sg_encrypted_data[0])) +
			 rec->sg_encrypted_data[0].offset,
			 rec->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
	if (rc == -EINPROGRESS)
		return -EINPROGRESS;

	free_sg(sk, rec->sg_plaintext_data, &rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size);

	if (rc < 0) {
		tls_err_abort(sk, EBADMSG);
		return rc;
	}

	/* Put the record in tx_ready_list and start tx if permitted.
	 * This happens only when encryption is not asynchronous.
	 */
	if (append_tx_ready_list(tls_ctx, ctx, rec))
		return tls_tx_records(sk, flags);

	return 0;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

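/* Pin up to 'length' bytes of user pages from the iterator and map them
 * into the 'to' scatterlist without copying.  On failure the iterator is
 * reverted so the caller can fall back to copying the data instead.
 */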
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct scatterlist *sg = rec->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < rec->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

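/* Return the currently open record, or allocate a fresh one with its
 * AAD + plaintext and AAD + ciphertext scatterlist chains set up for the
 * AEAD transform.
 */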
struct tls_rec *get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	int mem_size;

	/* Return if we already have an open record */
	if (ctx->open_rec)
		return ctx->open_rec;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	sg_init_table(&rec->sg_plaintext_data[0],
		      ARRAY_SIZE(rec->sg_plaintext_data));
	sg_init_table(&rec->sg_encrypted_data[0],
		      ARRAY_SIZE(rec->sg_encrypted_data));

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_in[1]);
	sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_out[1]);
	sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);

	ctx->open_rec = rec;

	return rec;
}

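/* sendmsg() for a TLS_SW socket: data is copied (or, when possible,
 * zero-copied) into the open record, records are closed and encrypted
 * once full or at EOR, and with an async cipher we wait at the end for
 * all in-flight encryptions before transmitting what is ready.
 */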
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
	bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Wait till there is any pending write on socket */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto send_end;
	}

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		rec = get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		orig_size = rec->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = rec->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - rec->sg_encrypted_size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &rec->sg_plaintext_num_elem,
				&rec->sg_plaintext_size,
				rec->sg_plaintext_data,
				ARRAY_SIZE(rec->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;

fallback_to_reg_send:
			trim_sg(sk, rec->sg_plaintext_data,
				&rec->sg_plaintext_num_elem,
				&rec->sg_plaintext_size,
				orig_size);
		}

		required_size = rec->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - rec->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, rec->sg_encrypted_data,
				&rec->sg_encrypted_num_elem,
				&rec->sg_encrypted_size,
				rec->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (rec->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

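/* sendpage() for a TLS_SW socket: the page is attached to the open
 * record's plaintext scatterlist by reference (no copy) and the record
 * is pushed when full, at EOR, or when the scatterlist runs out of
 * entries.
 */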
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	size_t orig_size = size;
	struct scatterlist *sg;
	struct tls_rec *rec;
	int num_async = 0;
	bool full_record;
	int record_room;
	bool eor;
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Wait till there is any pending write on socket */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto sendpage_end;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		rec = get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = rec->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - rec->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		rec->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		rec->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;

		if (full_record || eor ||
		    rec->sg_plaintext_num_elem ==
		    ARRAY_SIZE(rec->sg_plaintext_data)) {
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, rec->sg_plaintext_size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

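/* Block until strparser has queued a complete record on ctx->recv_pkt,
 * honouring MSG_DONTWAIT, the receive timeout, socket errors, shutdown
 * and pending signals.
 */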
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* This function decrypts the input skb into either out_iov, out_sg, or
 * the skb buffers themselves. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
				 rxm->full_len - tls_ctx->rx.prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, *zc);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

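/* Decrypt the record at the head of the receive queue (unless the device
 * offload already decrypted it), then advance the record sequence number
 * and strip the TLS header and overhead from the strparser message.
 */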
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0) {
			if (err == -EINPROGRESS)
				tls_advance_record_sn(sk, &tls_ctx->rx);

			return err;
		}
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

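/* Consume 'len' bytes of the current record.  Returns false while part
 * of the record is still unread; once it is fully consumed the skb is
 * freed and strparser is unpaused to deliver the next record.
 */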
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		kfree_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

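/* recvmsg() for a TLS_SW socket: decrypts records as they arrive,
 * zero-copying straight into the user iovec when the whole record fits
 * and MSG_PEEK is not set.  Control-message records are surfaced via
 * cmsg; with an async cipher, decryption of several records may be in
 * flight and is reaped before returning.
 */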
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		bool async = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0 && err != -EINPROGRESS) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}

			if (err == -EINPROGRESS) {
				async = true;
				num_async++;
				goto pick_next_record;
			}

			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);

			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

pick_next_record:
		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			/* For async, drop current skb reference */
			if (async)
				skb = NULL;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			} else {
				break;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose original
			 * one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);
	}

	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

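/* strparser callback: parse the 5-byte TLS record header and return the
 * full record length so strparser can aggregate a complete record, 0 if
 * more data is needed, or a negative error on a malformed header.
 */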
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

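/* Tear down the tx path: drain in-flight async encryptions, flush what
 * can still be transmitted, then free the partially sent record and any
 * records left on tx_ready_list before releasing the AEAD and context.
 */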
void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	cancel_delayed_work_sync(&ctx->tx_work.work);

	/* Tx whatever records we can transmit and abandon the rest */
	tls_tx_records(sk, -1);

	/* Free up unsent records in tx_ready_list. First, free the
	 * partially sent record, if there is one, at the head of the list.
	 */
	if (tls_ctx->partially_sent_record) {
		struct scatterlist *sg = tls_ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}

		tls_ctx->partially_sent_record = NULL;

		rec = list_first_entry(&ctx->tx_ready_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_ready_list, list) {
		free_sg(sk, rec->sg_encrypted_data,
			&rec->sg_encrypted_num_elem,
			&rec->sg_encrypted_size);

		list_del(&rec->list);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

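/*
 * Detach the RX path from the socket: drop any queued record, free the
 * AEAD transform, restore the original sk_data_ready callback and stop
 * the strparser. The socket lock is released around strp_done(), which
 * waits for outstanding strparser work to finish.
 */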
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

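/* Full RX teardown: release the RX resources, then free the context */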
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

/* The work handler to transmit the encrypted records in tx_ready_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
}

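/*
 * Set up software TLS for one direction of a socket. Allocates the
 * per-direction context (TX or RX, selected by @tx), derives cipher
 * parameters from the crypto_info supplied via setsockopt(), allocates
 * the AEAD transform, and for RX wires the strparser and sk_data_ready
 * callbacks into the socket.
 */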
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_ready_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

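	/* Only AES-GCM-128 is supported at this point */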
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
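	/*
	 * For TLS 1.2 AES-GCM the nonce is the 4-byte implicit salt from
	 * the key block followed by the 8-byte explicit IV, so keep them
	 * contiguous: salt first, then the explicit IV.
	 */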
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

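	/*
	 * RX sockets hand incoming data to the strparser so it is split
	 * into complete TLS records; TX sockets seed the sequence counter
	 * from the supplied record sequence number.
	 */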
	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	} else {
		ctx->tx_seq_number = be64_to_cpup((const __be64 *)rec_seq);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}