/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;
	ulpq->malloced = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
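			/* A struct sk_buff_head begins with the same
			 * next/prev pair as a struct list_head, so this
			 * resets the just-emptied lobby to the
			 * empty-queue state.
			 */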
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

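	/* The first skb on a queue has its prev pointer aimed back at the
	 * sk_buff_head itself, so this recovers the temporary list (if any)
	 * that the caller collected events on; a NULL prev means the skb
	 * is not on a list.
	 */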
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association causing the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue,
							  struct sk_buff *f_frag,
							  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  It will
	 * never be used with this value; it is referenced only after it
	 * is set, when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

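		/* If the partially reassembled message already spans the
		 * user's partial delivery point, make an event out of what
		 * we have so far and enter partial delivery mode.
		 */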
		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

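	/* Recover the temporary list the caller is collecting events on;
	 * the first skb on a queue has its prev pointer aimed back at the
	 * sk_buff_head itself.
	 */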
	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by a FORWARD TSN skipping over their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(&temp, pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}

/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
	return;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

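	/* The lobby is kept sorted by stream and SSN, so dequeueing from
	 * the tail frees the highest stream/SSN events first.
	 */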
	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in partial delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

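	/* Renege from the ordering and reassembly queues only while the
	 * socket receive queue is empty; data already queued to the user
	 * is never thrown away.
	 */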
	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}

	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}