/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

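/* Offload contexts are not freed in place: tls_device_queue_ctx_destruction()
 * moves them onto tls_device_gc_list and tls_device_gc_task() releases them
 * from workqueue context, where it is safe to call the driver's tls_dev_del()
 * and drop the device reference.
 */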
static void tls_device_free_ctx(struct tls_context *ctx)
{
	kfree(tls_offload_ctx_tx(ctx));

	kfree(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

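/* Every fragment of a record holds a page reference (taken in
 * tls_create_new_record() and tls_append_frag()); destroy_record() drops
 * those references before freeing the record itself.
 */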
static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

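/* ACK-clean callback, registered in tls_set_device_offload() via
 * clean_acked_data_enable() and invoked from TCP's ACK processing: records
 * whose last byte has been acked can never be retransmitted, so they are
 * freed and unacked_record_sn is advanced past them.
 */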
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	if (ctx->open_record)
		destroy_record(ctx->open_record);

	delete_all_records(ctx);
	crypto_free_aead(ctx->aead_send);
	ctx->sk_destruct(sk);
	clean_acked_data_disable(inet_csk(sk));

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL(tls_device_sk_destruct);

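/* Append @size bytes from @pfrag to the open record: extend the last
 * fragment when the new data is contiguous with it in the same page,
 * otherwise start a new fragment and take an extra page reference.
 */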
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

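/* Close the open record and hand its pages to the TCP stack. On the wire a
 * record is laid out as:
 *
 *   frags[0] : TLS header plus explicit nonce (the "prepend")
 *   ...      : plaintext payload fragments
 *   tail     : tag_size dummy bytes that the device overwrites with the
 *              authentication tag when it encrypts the record
 *
 * The record is linked into records_list (for retransmission lookups) before
 * being queued on the socket with tls_push_sg().
 */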
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - ctx->tx.prepend_size,
			 record_type);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	tls_advance_record_sn(sk, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

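/* Main transmit loop: copy data from @msg_iter into the socket's page frag
 * and append it to the open record, closing the record via tls_push_record()
 * once it reaches max_open_record_len, runs low on fragment slots, or the
 * caller has no more data and did not ask for more (MSG_MORE /
 * MSG_SENDPAGE_NOTLAST). Returns the number of bytes consumed, or a negative
 * error if nothing was copied.
 */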
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (rc < 0)
		return rc;

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      tls_ctx->tx.prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       tls_ctx->tx.prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > tls_ctx->tx.prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					       pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

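/* Record types other than application_data are requested through a cmsg, as
 * on the SW tx path. Roughly, from userspace (a sketch along the lines of
 * Documentation/networking/tls.txt; record_type 21 would be a TLS alert):
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = record_type;
 *	msg.msg_controllen = cmsg->cmsg_len;
 *
 * then point msg.msg_iov at the record payload and call sendmsg(fd, &msg, 0).
 */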
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

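/* Map a TCP sequence number to the TLS record covering it, for
 * retransmissions: the device can look up the record (and its record
 * sequence number, returned in *p_record_sn) for bytes it is asked to send
 * again. retransmit_hint caches the most recent hit so the common case does
 * not rescan the list from the start marker.
 */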
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

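/* Install TX offload on @sk: allocate the offload context, derive the crypto
 * parameters from crypto_send, seed records_list with a zero-length start
 * marker (unacked_record_sn starts at rec_seq - 1 to account for it), hook
 * the ACK-clean callback, and ask the device to install the key via
 * tls_dev_add(). device_offload_lock is taken for read so concurrent
 * setsockopt calls can proceed in parallel while NETDEV_DOWN is excluded.
 */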
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tx.tag_size = tag_size;
	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
	ctx->tx.iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	ctx->tx.rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size);

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;
	offload_ctx->sk_destruct = sk->sk_destruct;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	refcount_set(&ctx->refcount, 1);

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	ctx->netdev = netdev;

	spin_lock_irq(&tls_device_lock);
	list_add_tail(&ctx->list, &tls_device_list);
	spin_unlock_irq(&tls_device_lock);

	sk->sk_validate_xmit_skb = tls_validate_xmit_skb;
	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_destruct,
			  &tls_device_sk_destruct);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}

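/* NETDEV_DOWN handler: with the write lock blocking new offload attempts,
 * detach every context bound to this device: tell the driver to release its
 * state with tls_dev_del(), clear ctx->netdev, and drop the device
 * references. The final flush_work() makes sure no queued gc work still
 * holds a reference to the device.
 */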
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
						TLS_OFFLOAD_CTX_DIR_TX);
		ctx->netdev = NULL;
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

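/* Only devices advertising NETIF_F_HW_TLS_TX are of interest here. On
 * NETDEV_REGISTER and NETDEV_FEAT_CHANGE, returning NOTIFY_BAD vetoes a
 * device that claims the feature without supplying both tls_dev_add and
 * tls_dev_del.
 */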
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!(dev->features & NETIF_F_HW_TLS_TX))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
}