/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode = 0;

        return ulpq;
}


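/* Editorial note: a ulpq has two holding areas.  'reasm' keeps DATA
 * fragments sorted by TSN until a complete message can be rebuilt,
 * while 'lobby' keeps complete messages sorted by stream id and SSN
 * until they can be delivered in order.
 */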
/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}

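/* Note on the return contract of sctp_ulpq_tail_data() below, as used
 * by sctp_ulpq_renege(): -ENOMEM means the chunk could not be wrapped
 * in an event; 0 means data was queued but no MSG_EOR event reached
 * the ULP; 1 means a complete message was delivered.
 */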
/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed. */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed. */
        if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on. */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
        }

        return event_eor;
}

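/* The socket-level sp->pd_mode is a count of the associations currently
 * in partial delivery; sctp_ulpq_set_pd() increments it and
 * sctp_clear_pd() below decrements it.  A return of 1 means skbs were
 * moved from the pd_lobby to the receive queue, so the caller should
 * signal that data is ready.
 */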
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        skb_queue_splice_tail_init(&sp->pd_lobby,
                                                   &sk->sk_receive_queue);
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq. */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

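        /* When the caller collected events on a temporary list,
         * __skb_queue_tail() left the sk_buff_head pointer in the first
         * skb's ->prev; recover it here so the whole list can be
         * spliced to the destination queue in one go.  It is expected
         * to be NULL when the skb is not on a list.
         */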
        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        if (!sctp_ulpevent_is_notification(event)) {
                sk_mark_napi_id(sk, skb);
                sk_incoming_cpu_update(sk);
        }
        /* Check if the user wishes to receive this event. */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */

        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sctp_sk(sk)->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                skb_queue_splice_tail_init(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sctp_sk(sk)->pending_data_ready = 1;
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                  struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by TSN. */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));

}

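/* The reasm queue therefore stays sorted by TSN at all times: in-order
 * arrivals take the O(1) tail-append fast path above, while
 * out-of-order fragments fall through to a linear scan for their slot.
 */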
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
        struct sk_buff_head *queue, struct sk_buff *f_frag,
        struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragments
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue. */
        __skb_unlink(f_frag, queue);

        /* if we did unshare, then free the old skb and re-assign */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {

                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue. */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment. */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}


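/* Note on partial delivery below: pd_point is set via the
 * SCTP_PARTIAL_DELIVERY_POINT socket option.  When the contiguous
 * leading fragments of a message reach at least pd_point bytes, they
 * may be handed to the user before the final fragment has arrived.
 */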
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value.  It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram.  Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order.  If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                             &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event.  There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}

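/* The helper above only continues an in-progress partial delivery: it
 * is called from sctp_ulpq_reasm() while pd_mode is set, and only once
 * the event's TSN is at or below the cumulative TSN, i.e. when the
 * fragments at the head of the queue are known to be next in sequence.
 */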

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message. */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

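/* A non-NULL return from sctp_ulpq_reasm() with MSG_EOR set is a
 * complete user message; without MSG_EOR it is the leading run of
 * fragments of a message being partially delivered.
 */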
/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;

                default:
                        return NULL;
                }
        }

        /* We have the reassembled event.  There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point.
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed. */
                if ((event) && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid, cssn;

        sid = event->stream;
        in = &ulpq->asoc->ssnmap->in;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN. */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far? */
                if (csid > sid)
                        break;

                /* Have we not gone far enough? */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event. */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                    struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering. */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine. */
        sid = event->stream;
        ssn = event->ssn;
        in = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID? */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found. */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}

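/* For example, if stream 1 expects SSN 5 and messages with SSN 7 and 6
 * arrive first, both sit in the lobby; when SSN 5 finally arrives,
 * sctp_ulpq_order() accepts it and sctp_ulpq_retrieve_ordered() chains
 * 6 and 7 behind it, so all three reach the socket in order.
 */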
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN. */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far? */
                if (csid > sid)
                        break;

                /* Have we not gone far enough? */
                if (csid < sid)
                        continue;

                /* see if this ssn has been marked by skipping */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on. */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event. */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* see if we have more ordered data that we can deliver */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine. */
        in = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}

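/* Reneging below walks each queue from its tail, so the newest data is
 * given up first, and it stops before touching anything at or below the
 * Cumulative TSN ACK Point, which has already been acknowledged
 * cumulatively and must not be thrown away.
 */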
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                                   struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible? */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP. */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
                }
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                int retval;
                retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}


/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sctp_sk(sk)->pending_data_ready = 1;
}