/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 */

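/* A rough sketch of how the rest of the SCTP stack drives this API (the
 * association and endpoint code register their own handlers through
 * sctp_inq_set_th_handler(); sctp_assoc_bh_rcv() and sctp_endpoint_bh_rcv()
 * are the current users):
 *
 *      sctp_inq_init(&q);
 *      sctp_inq_set_th_handler(&q, handler);
 *
 *      sctp_inq_push(&q, chunk);       runs handler() synchronously
 *
 *      handler() then repeatedly calls sctp_inq_pop(&q) and feeds each
 *      whole chunk to the state machine, until it returns NULL.
 *
 *      sctp_inq_free(&q);              on teardown
 */
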
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Initialize an SCTP inqueue.  */
void sctp_inq_init(struct sctp_inq *queue)
{
        INIT_LIST_HEAD(&queue->in_chunk_list);
        queue->in_progress = NULL;

        /* Create a task for delivering data.  */
        INIT_WORK(&queue->immediate, NULL);
}

/* Release the memory associated with an SCTP inqueue.  */
void sctp_inq_free(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk, *tmp;

        /* Empty the queue.  */
        list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
                list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
        }

        /* If there is a packet which is currently being worked on,
         * free it as well.
         */
        if (queue->in_progress) {
                sctp_chunk_free(queue->in_progress);
                queue->in_progress = NULL;
        }
}

/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
        /* Directly call the packet handling routine. */
        if (chunk->rcvr->dead) {
                sctp_chunk_free(chunk);
                return;
        }

        /* We are now calling this either from the soft interrupt
         * or from the backlog processing.
         * Eventually, we should clean up inqueue to not rely
         * on the BH related data structures.
         */
        local_bh_disable();
        list_add_tail(&chunk->list, &q->in_chunk_list);
        if (chunk->asoc)
                chunk->asoc->stats.ipackets++;
        q->immediate.func(&q->immediate);
        local_bh_enable();
}

/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        chunk = queue->in_progress;
        /* If there are no more chunks in this packet, say so */
        if (chunk->singleton ||
            chunk->end_of_packet ||
            chunk->pdiscard)
                return NULL;

        ch = (sctp_chunkhdr_t *)chunk->chunk_end;

        return ch;
}


/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        /* The assumption is that we are safe to process the chunks
         * at this time.
         */

        chunk = queue->in_progress;
        if (chunk) {
                /* There is a packet that we have been working on.
                 * Any post processing work to do before we move on?
                 */
                if (chunk->singleton ||
                    chunk->end_of_packet ||
                    chunk->pdiscard) {
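                        /* If this packet came in as a GSO skb, chunk->skb is
                         * walking head_skb's frag_list, where each skb holds
                         * the chunks of one segmented packet; advance to the
                         * next segment before giving up on the packet.
                         */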
                        if (chunk->head_skb == chunk->skb) {
                                chunk->skb = skb_shinfo(chunk->skb)->frag_list;
                                goto new_skb;
                        }
                        if (chunk->skb->next) {
                                chunk->skb = chunk->skb->next;
                                goto new_skb;
                        }

                        if (chunk->head_skb)
                                chunk->skb = chunk->head_skb;
                        sctp_chunk_free(chunk);
                        chunk = queue->in_progress = NULL;
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *) chunk->chunk_end;
                        /* Force chunk->skb->data to chunk->chunk_end.  */
                        skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
                        /* We are guaranteed to pull an SCTP header. */
                }
        }

        /* Do we need to take the next packet out of the queue to process? */
        if (!chunk) {
                struct list_head *entry;

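/* Restart point: the packet just dequeued had to be dropped (failed
 * linearization or a GSO skb with no usable data), so try the next one.
 */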
next_chunk:
                /* Is the queue empty?  */
                entry = sctp_list_dequeue(&queue->in_chunk_list);
                if (!entry)
                        return NULL;

                chunk = list_entry(entry, struct sctp_chunk, list);

                /* Linearize if it's not GSO */
                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
                    skb_is_nonlinear(chunk->skb)) {
                        if (skb_linearize(chunk->skb)) {
                                __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
                                sctp_chunk_free(chunk);
                                goto next_chunk;
                        }

                        /* Update sctp_hdr as it probably changed */
                        chunk->sctp_hdr = sctp_hdr(chunk->skb);
                }

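                /* GSO skb: remember the outer skb as head_skb and, when the
                 * head carries no chunk data of its own (only the "cover
                 * letter"), start from the first frag_list segment instead.
                 */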
                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
                        /* GSO-marked skbs but without frags, handle
                         * them normally
                         */
                        if (skb_shinfo(chunk->skb)->frag_list)
                                chunk->head_skb = chunk->skb;

                        /* skbs with "cover letter" */
                        if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
                                chunk->skb = skb_shinfo(chunk->skb)->frag_list;

                        if (WARN_ON(!chunk->skb)) {
                                __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
                                sctp_chunk_free(chunk);
                                goto next_chunk;
                        }
                }

                if (chunk->asoc)
                        sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

                queue->in_progress = chunk;

new_skb:
                /* This is the first chunk in the packet.  */
                ch = (sctp_chunkhdr_t *) chunk->skb->data;
                chunk->singleton = 1;
                chunk->data_accepted = 0;
                chunk->pdiscard = 0;
                chunk->auth = 0;
                chunk->has_asconf = 0;
                chunk->end_of_packet = 0;
                chunk->ecn_ce_done = 0;
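                /* For frag_list segments, mirror the head skb's control
                 * block so SCTP_INPUT_CB(skb)->chunk is valid on the
                 * fragment skbs as well.
                 */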
                if (chunk->head_skb) {
                        struct sctp_input_cb
                                *cb = SCTP_INPUT_CB(chunk->skb),
                                *head_cb = SCTP_INPUT_CB(chunk->head_skb);

                        cb->chunk = head_cb->chunk;
                }
        }

        chunk->chunk_hdr = ch;
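        /* Chunks are laid out on 4-byte boundaries on the wire (RFC 4960,
         * sec. 3.2), so the next chunk starts at the padded length.
         */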
        chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */

        if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
            skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
                /* Discard inside state machine. */
                chunk->pdiscard = 1;
                chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
                 */
                chunk->end_of_packet = 1;
        }

        pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
                 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
                 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

        return chunk;
}


/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
        INIT_WORK(&q->immediate, callback);
}