/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a follow-up set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu */
	packet->max_size = tp->pathmtu;
	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	sk = asoc->base.sk;
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one into the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

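/* skb destructor: drop the socket reference taken in
 * sctp_packet_set_owner_w() once the skb has been freed.
 */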
static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep the socket around
	 * until the packet has been transmitted.
	 */
	refcount_inc(&sk->sk_wmem_alloc);
}

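/* Merge the queued chunks into the head skb (or, with GSO, into a chain of
 * segment skbs hung off the head), compute any bundled AUTH hmac and set up
 * the SCTP checksum.  Returns 0 on failure and a non-zero count otherwise.
 */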
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		NAPI_GRO_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso) {
			if (skb_gro_receive(&head, nskb)) {
				kfree_skb(nskb);
				return 0;
			}
			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
					 sk->sk_gso_max_segs))
				return 0;
		}

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	sctp_packet_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
		 packet->overhead - sizeof(struct sctp_data_chunk) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}

/* This private function does management things when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}

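/* Decide whether a chunk of chunk_len bytes (padding included) still fits
 * in the current packet, must wait for a new packet, or may rely on
 * IP-level fragmentation.
 */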
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 * 1. The packet is empty (meaning this chunk is greater
		 *    than the MTU)
		 * 2. The packet doesn't have any data in it yet and data
		 *    requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}