/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;
	ulpq->malloced = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues. */
static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			int gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));
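		/* The event's skb must be on a list so that the ordering
		 * code below can append any newly ordered events behind it.
		 */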

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP. */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
	struct sctp_opt *sp = sctp_sk(sk);

	sp->pd_mode = 0;
	if (!skb_queue_empty(&sp->pd_lobby)) {
		struct list_head *list;
		sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue);
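		/* An sk_buff_head begins with the same next/prev pointer
		 * pair as a list_head, so this cast lets us reinitialize
		 * the now-empty pd_lobby in place.
		 */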
		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
		INIT_LIST_HEAD(list);
		return 1;
	}
	return 0;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk);
}

/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue;
	int clear_pd = 0;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->dead || (sk->shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (!sctp_sk(sk)->pd_mode) {
		queue = &sk->receive_queue;
	} else if (ulpq->pd_mode) {
		if (event->msg_flags & MSG_NOTIFICATION)
			queue = &sctp_sk(sk)->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->receive_queue;
		}
	} else
		queue = &sctp_sk(sk)->pd_lobby;

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (sctp_event2skb(event)->list)
		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
	else
		__skb_queue_tail(queue, sctp_event2skb(event));

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->receive_queue)
		sk->data_ready(sk, 0);
	return 1;

out_free:
	if (sctp_event2skb(event)->list)
		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
	else
		sctp_ulpevent_free(event);
	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

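	/* The walk above always terminates at an event with a larger TSN;
	 * the tail check already ruled out insertion at the very end.
	 */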
	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			new->sk = f_frag->sk;

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, f_frag->list);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, pos->list);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
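		/* __skb_unlink() cleared pos->next; restore it so the
		 * fragments stay chained together on the frag_list.
		 */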
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SctpReasmUsrMsgs);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
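			/* A first fragment or an unfragmented message here
			 * means there is no partial message continuing at
			 * the head of the queue.
			 */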
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this event's TSN is at or
		 * below the cumulative TSN ACK point and is therefore
		 * deliverable now.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
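			/* A last fragment or an unfragmented message gives
			 * us nothing to start partial delivery with.
			 */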
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	return retval;
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, pos->list);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(sctp_event2skb(event)->list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering. */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by a FORWARD TSN skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event = NULL;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, pos->list);
		if (!event) {
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event. */
			__skb_queue_tail(sctp_event2skb(event)->list, pos);
		}
	}

	/* Send event to the ULP. */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}

/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine. */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
	return;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
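		/* Tell the tsnmap we no longer hold this TSN; the peer
		 * will have to retransmit it.
		 */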
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode? */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      int gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
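		/* Room needed is the chunk's payload size: the chunk
		 * length minus the DATA chunk header.
		 */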
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}

	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->data_ready(sk, 0);
}