xref: /linux-6.15/net/sctp/ulpqueue.c (revision 178ca044)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 Nokia, Inc.
7  * Copyright (c) 2001 La Monte H.P. Yarroll
8  *
9  * This abstraction carries sctp events to the ULP (sockets).
10  *
11  * This SCTP implementation is free software;
12  * you can redistribute it and/or modify it under the terms of
13  * the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This SCTP implementation is distributed in the hope that it
18  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19  *                 ************************
20  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21  * See the GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with GNU CC; see the file COPYING.  If not, see
25  * <http://www.gnu.org/licenses/>.
26  *
27  * Please send any bug reports or fixes you make to the
28  * email address(es):
29  *    lksctp developers <[email protected]>
30  *
31  * Written or modified by:
32  *    Jon Grimm             <[email protected]>
33  *    La Monte H.P. Yarroll <[email protected]>
34  *    Sridhar Samudrala     <[email protected]>
35  */
36 
37 #include <linux/slab.h>
38 #include <linux/types.h>
39 #include <linux/skbuff.h>
40 #include <net/sock.h>
41 #include <net/busy_poll.h>
42 #include <net/sctp/structs.h>
43 #include <net/sctp/sctp.h>
44 #include <net/sctp/sm.h>
45 
46 /* Forward declarations for internal helpers.  */
47 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
48 					      struct sctp_ulpevent *);
49 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
50 					      struct sctp_ulpevent *);
51 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
52 
53 /* 1st Level Abstractions */
54 
55 /* Initialize a ULP queue from a block of memory.  */
56 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
57 				 struct sctp_association *asoc)
58 {
59 	memset(ulpq, 0, sizeof(struct sctp_ulpq));
60 
61 	ulpq->asoc = asoc;
62 	skb_queue_head_init(&ulpq->reasm);
63 	skb_queue_head_init(&ulpq->reasm_uo);
64 	skb_queue_head_init(&ulpq->lobby);
65 	ulpq->pd_mode  = 0;
66 
67 	return ulpq;
68 }
69 
70 
71 /* Flush the reassembly and ordering queues.  */
72 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
73 {
74 	struct sk_buff *skb;
75 	struct sctp_ulpevent *event;
76 
77 	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
78 		event = sctp_skb2event(skb);
79 		sctp_ulpevent_free(event);
80 	}
81 
82 	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
83 		event = sctp_skb2event(skb);
84 		sctp_ulpevent_free(event);
85 	}
86 
87 	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
88 		event = sctp_skb2event(skb);
89 		sctp_ulpevent_free(event);
90 	}
91 }
92 
93 /* Dispose of a ulpqueue.  */
94 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
95 {
96 	sctp_ulpq_flush(ulpq);
97 }
98 
99 /* Process an incoming DATA chunk.  */
100 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
101 			gfp_t gfp)
102 {
103 	struct sk_buff_head temp;
104 	struct sctp_ulpevent *event;
105 	int event_eor = 0;
106 
107 	/* Create an event from the incoming chunk. */
108 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
109 	if (!event)
110 		return -ENOMEM;
111 
112 	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
113 	event->ppid = chunk->subh.data_hdr->ppid;
114 
115 	/* Do reassembly if needed.  */
116 	event = sctp_ulpq_reasm(ulpq, event);
117 
118 	/* Do ordering if needed.  */
119 	if (event) {
120 		/* Create a temporary list to collect chunks on.  */
121 		skb_queue_head_init(&temp);
122 		__skb_queue_tail(&temp, sctp_event2skb(event));
123 
124 		if (event->msg_flags & MSG_EOR)
125 			event = sctp_ulpq_order(ulpq, event);
126 	}
127 
128 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
129 	 * very first SKB on the 'temp' list.
130 	 */
131 	if (event) {
132 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
133 		sctp_ulpq_tail_event(ulpq, event);
134 	}
135 
136 	return event_eor;
137 }
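
/* Return value summary for callers (a sketch of how sctp_ulpq_renege()
 * below reacts to it, offered as orientation rather than a prescription):
 *
 *	int ret = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 *
 *	ret < 0  : -ENOMEM, the rcvmsg event could not be allocated;
 *	ret == 0 : the data was queued for reassembly/ordering, but no
 *		   complete message (MSG_EOR) reached the ULP yet;
 *	ret == 1 : an event carrying MSG_EOR was delivered to the ULP.
 */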
138 
139 /* Clear the partial delivery mode for this socket.  Note: this is
140  * called when the association 'asoc' leaves partial delivery; if no
141  * association on the socket remains in PD, the whole lobby is flushed.
142  */
143 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
144 {
145 	struct sctp_sock *sp = sctp_sk(sk);
146 
147 	if (atomic_dec_and_test(&sp->pd_mode)) {
148 		/* This means there are no other associations in PD, so
149 		 * we can go ahead and clear out the lobby in one shot
150 		 */
151 		if (!skb_queue_empty(&sp->pd_lobby)) {
152 			skb_queue_splice_tail_init(&sp->pd_lobby,
153 						   &sk->sk_receive_queue);
154 			return 1;
155 		}
156 	} else {
157 		/* There are other associations in PD, so we only need to
158 		 * pull stuff out of the lobby that belongs to the
159 		 * association that is exiting PD (all of its notifications
160 		 * are posted here).
161 		 */
162 		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
163 			struct sk_buff *skb, *tmp;
164 			struct sctp_ulpevent *event;
165 
166 			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
167 				event = sctp_skb2event(skb);
168 				if (event->asoc == asoc) {
169 					__skb_unlink(skb, &sp->pd_lobby);
170 					__skb_queue_tail(&sk->sk_receive_queue,
171 							 skb);
172 				}
173 			}
174 		}
175 	}
176 
177 	return 0;
178 }
179 
180 /* Set the pd_mode on the socket and ulpq */
181 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
182 {
183 	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
184 
185 	atomic_inc(&sp->pd_mode);
186 	ulpq->pd_mode = 1;
187 }
188 
189 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
190 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
191 {
192 	ulpq->pd_mode = 0;
193 	sctp_ulpq_reasm_drain(ulpq);
194 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
195 }
196 
197 /* Add a new event for propagation to the ULP.  If the SKB of 'event'
198  * is on a list, it is the first such member of that list.
199  */
200 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
201 {
202 	struct sock *sk = ulpq->asoc->base.sk;
203 	struct sctp_sock *sp = sctp_sk(sk);
204 	struct sk_buff_head *queue, *skb_list;
205 	struct sk_buff *skb = sctp_event2skb(event);
206 	int clear_pd = 0;
207 
208 	skb_list = (struct sk_buff_head *) skb->prev;
209 
210 	/* If the socket is just going to throw this away, do not
211 	 * even try to deliver it.
212 	 */
213 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
214 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
215 	     !sctp_ulpevent_is_notification(event)))
216 		goto out_free;
217 
218 	if (!sctp_ulpevent_is_notification(event)) {
219 		sk_mark_napi_id(sk, skb);
220 		sk_incoming_cpu_update(sk);
221 	}
222 	/* Check if the user wishes to receive this event.  */
223 	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
224 		goto out_free;
225 
226 	/* If we are in partial delivery mode, post to the lobby until
227 	 * partial delivery is cleared, unless, of course, _this_ association
228 	 * is the cause of the partial delivery.
229 	 */
230 
231 	if (atomic_read(&sp->pd_mode) == 0) {
232 		queue = &sk->sk_receive_queue;
233 	} else {
234 		if (ulpq->pd_mode) {
235 			/* If the association is in partial delivery, we
236 			 * need to finish delivering the partially processed
237 			 * packet before passing any other data.  This is
238 			 * because we don't truly support stream interleaving.
239 			 */
240 			if ((event->msg_flags & MSG_NOTIFICATION) ||
241 			    (SCTP_DATA_NOT_FRAG ==
242 				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
243 				queue = &sp->pd_lobby;
244 			else {
245 				clear_pd = event->msg_flags & MSG_EOR;
246 				queue = &sk->sk_receive_queue;
247 			}
248 		} else {
249 			/*
250 			 * If fragment interleave is enabled, we
251 			 * can queue this to the receive queue instead
252 			 * of the lobby.
253 			 */
254 			if (sp->frag_interleave)
255 				queue = &sk->sk_receive_queue;
256 			else
257 				queue = &sp->pd_lobby;
258 		}
259 	}
260 
261 	/* If we are harvesting multiple skbs, they will be
262 	 * collected on a list.
263 	 */
264 	if (skb_list)
265 		skb_queue_splice_tail_init(skb_list, queue);
266 	else
267 		__skb_queue_tail(queue, skb);
268 
269 	/* Did we just complete partial delivery and need to get
270 	 * rolling again?  Move pending data to the receive
271 	 * queue.
272 	 */
273 	if (clear_pd)
274 		sctp_ulpq_clear_pd(ulpq);
275 
276 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
277 		if (!sock_owned_by_user(sk))
278 			sp->data_ready_signalled = 1;
279 		sk->sk_data_ready(sk);
280 	}
281 	return 1;
282 
283 out_free:
284 	if (skb_list)
285 		sctp_queue_purge_ulpevents(skb_list);
286 	else
287 		sctp_ulpevent_free(event);
288 
289 	return 0;
290 }
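
/* Queue selection in sctp_ulpq_tail_event(), summarized from the code
 * above as a quick reference:
 *
 *  - socket not in partial delivery: sk_receive_queue.
 *  - this association in partial delivery: notifications and unfragmented
 *    data go to pd_lobby; fragments of the partially delivered message go
 *    to sk_receive_queue, and MSG_EOR then clears partial delivery.
 *  - another association in partial delivery: sk_receive_queue if
 *    frag_interleave is enabled, otherwise pd_lobby.
 */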
291 
292 /* 2nd Level Abstractions */
293 
294 /* Helper function to store chunks that need to be reassembled.  */
295 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
296 					 struct sctp_ulpevent *event)
297 {
298 	struct sk_buff *pos;
299 	struct sctp_ulpevent *cevent;
300 	__u32 tsn, ctsn;
301 
302 	tsn = event->tsn;
303 
304 	/* See if it belongs at the end. */
305 	pos = skb_peek_tail(&ulpq->reasm);
306 	if (!pos) {
307 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
308 		return;
309 	}
310 
311 	/* Short circuit just dropping it at the end. */
312 	cevent = sctp_skb2event(pos);
313 	ctsn = cevent->tsn;
314 	if (TSN_lt(ctsn, tsn)) {
315 		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
316 		return;
317 	}
318 
319 	/* Find the right place in this list. We store them by TSN.  */
320 	skb_queue_walk(&ulpq->reasm, pos) {
321 		cevent = sctp_skb2event(pos);
322 		ctsn = cevent->tsn;
323 
324 		if (TSN_lt(tsn, ctsn))
325 			break;
326 	}
327 
328 	/* Insert before pos. */
329 	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
330 
331 }
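
/* Note on the insertion above: TSN_lt()/TSN_lte() compare TSNs in
 * serial-number (mod 2^32) fashion, so the sort stays correct across the
 * TSN wrap.  For example, with the reasm queue ending in TSNs 0xfffffffe
 * and 0xffffffff, a new fragment with TSN 0 is appended after them, since
 * TSN_lt(0xffffffff, 0) is true.
 */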
332 
333 /* Helper function to return an event corresponding to the reassembled
334  * datagram.
335  * This routine creates a reassembled skb given the first and last skbs
336  * as stored in the reassembly queue.  The skbs may be non-linear if the
337  * SCTP payload was fragmented on the way and IP had to reassemble them.
338  * We add the remaining skbs to the first skb's frag_list.
339  */
340 struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
341 						  struct sk_buff_head *queue,
342 						  struct sk_buff *f_frag,
343 						  struct sk_buff *l_frag)
344 {
345 	struct sk_buff *pos;
346 	struct sk_buff *new = NULL;
347 	struct sctp_ulpevent *event;
348 	struct sk_buff *pnext, *last;
349 	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
350 
351 	/* Store the pointer to the 2nd skb */
352 	if (f_frag == l_frag)
353 		pos = NULL;
354 	else
355 		pos = f_frag->next;
356 
357 	/* Get the last skb in the f_frag's frag_list if present. */
358 	for (last = list; list; last = list, list = list->next)
359 		;
360 
361 	/* Add the list of remaining fragments to the first fragment's
362 	 * frag_list.
363 	 */
364 	if (last)
365 		last->next = pos;
366 	else {
367 		if (skb_cloned(f_frag)) {
368 			/* This is a cloned skb; we can't just modify
369 			 * the frag_list.  We need a new skb to do that.
370 			 * Instead of calling skb_unshare(), we'll do it
371 			 * ourselves since we need to delay the free.
372 			 */
373 			new = skb_copy(f_frag, GFP_ATOMIC);
374 			if (!new)
375 				return NULL;	/* try again later */
376 
377 			sctp_skb_set_owner_r(new, f_frag->sk);
378 
379 			skb_shinfo(new)->frag_list = pos;
380 		} else
381 			skb_shinfo(f_frag)->frag_list = pos;
382 	}
383 
384 	/* Remove the first fragment from the reassembly queue.  */
385 	__skb_unlink(f_frag, queue);
386 
387 	/* if we did unshare, then free the old skb and re-assign */
388 	if (new) {
389 		kfree_skb(f_frag);
390 		f_frag = new;
391 	}
392 
393 	while (pos) {
394 
395 		pnext = pos->next;
396 
397 		/* Update the len and data_len fields of the first fragment. */
398 		f_frag->len += pos->len;
399 		f_frag->data_len += pos->len;
400 
401 		/* Remove the fragment from the reassembly queue.  */
402 		__skb_unlink(pos, queue);
403 
404 		/* Break if we have reached the last fragment.  */
405 		if (pos == l_frag)
406 			break;
407 		pos->next = pnext;
408 		pos = pnext;
409 	}
410 
411 	event = sctp_skb2event(f_frag);
412 	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
413 
414 	return event;
415 }
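
/* Worked example of the length accounting above, assuming linear fragment
 * skbs: reassembling three fragments that carry 1000, 1000 and 500 bytes
 * leaves f_frag with len == 2500 and data_len == 1500, i.e. skb_headlen()
 * still reports the first fragment's own 1000 bytes while the two appended
 * skbs hang off f_frag's frag_list.
 */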
416 
417 
418 /* Helper function to check if an incoming chunk has filled up the last
419  * missing fragment in an SCTP datagram and return the corresponding event.
420  */
421 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
422 {
423 	struct sk_buff *pos;
424 	struct sctp_ulpevent *cevent;
425 	struct sk_buff *first_frag = NULL;
426 	__u32 ctsn, next_tsn;
427 	struct sctp_ulpevent *retval = NULL;
428 	struct sk_buff *pd_first = NULL;
429 	struct sk_buff *pd_last = NULL;
430 	size_t pd_len = 0;
431 	struct sctp_association *asoc;
432 	u32 pd_point;
433 
434 	/* Initialized to 0 just to avoid a compiler warning.  It will
435 	 * never be used with this value.  It is referenced only after it
436 	 * is set when we find the first fragment of a message.
437 	 */
438 	next_tsn = 0;
439 
440 	/* The chunks are held in the reasm queue sorted by TSN.
441 	 * Walk through the queue sequentially and look for a sequence of
442 	 * fragmented chunks that complete a datagram.
443 	 * 'first_frag' and next_tsn are reset when we find a chunk which
444 	 * is the first fragment of a datagram. Once these 2 fields are set
445 	 * we expect to find the remaining middle fragments and the last
446 	 * fragment in order. If not, first_frag is reset to NULL and we
447 	 * start the next pass when we find another first fragment.
448 	 *
449 	 * There is a potential to do partial delivery if the user sets the
450 	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
451 	 * to see if we can do PD.
452 	 */
453 	skb_queue_walk(&ulpq->reasm, pos) {
454 		cevent = sctp_skb2event(pos);
455 		ctsn = cevent->tsn;
456 
457 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
458 		case SCTP_DATA_FIRST_FRAG:
459 			/* If this "FIRST_FRAG" is the first
460 			 * element in the queue, then count it towards
461 			 * possible PD.
462 			 */
463 			if (skb_queue_is_first(&ulpq->reasm, pos)) {
464 				pd_first = pos;
465 				pd_last = pos;
466 				pd_len = pos->len;
467 			} else {
468 				pd_first = NULL;
469 				pd_last = NULL;
470 				pd_len = 0;
471 			}
472 
473 			first_frag = pos;
474 			next_tsn = ctsn + 1;
475 			break;
476 
477 		case SCTP_DATA_MIDDLE_FRAG:
478 			if ((first_frag) && (ctsn == next_tsn)) {
479 				next_tsn++;
480 				if (pd_first) {
481 					pd_last = pos;
482 					pd_len += pos->len;
483 				}
484 			} else
485 				first_frag = NULL;
486 			break;
487 
488 		case SCTP_DATA_LAST_FRAG:
489 			if (first_frag && (ctsn == next_tsn))
490 				goto found;
491 			else
492 				first_frag = NULL;
493 			break;
494 		}
495 	}
496 
497 	asoc = ulpq->asoc;
498 	if (pd_first) {
499 		/* Make sure we can enter partial delivery.
500 		 * We can trigger partial delivery only if fragment
501 		 * interleave is set, or the socket is not already
502 		 * in partial delivery.
503 		 */
504 		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
505 		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
506 			goto done;
507 
508 		cevent = sctp_skb2event(pd_first);
509 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
510 		if (pd_point && pd_point <= pd_len) {
511 			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
512 							     &ulpq->reasm,
513 							     pd_first,
514 							     pd_last);
515 			if (retval)
516 				sctp_ulpq_set_pd(ulpq);
517 		}
518 	}
519 done:
520 	return retval;
521 found:
522 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
523 					     &ulpq->reasm, first_frag, pos);
524 	if (retval)
525 		retval->msg_flags |= MSG_EOR;
526 	goto done;
527 }
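
/* Worked example of the partial delivery check above: with
 * SCTP_PARTIAL_DELIVERY_POINT set to 4096 and the reasm queue starting
 * with a first fragment followed by consecutive middle fragments whose
 * skbs hold 1500 bytes each (TSNs n, n+1, n+2), pd_len reaches 4500,
 * which meets pd_point, so those fragments are handed to the ULP early
 * and the association enters partial delivery via sctp_ulpq_set_pd()
 * (provided fragment interleave is set or the socket is not already in
 * partial delivery).
 */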
528 
529 /* Retrieve the next set of fragments of a partial message. */
530 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
531 {
532 	struct sk_buff *pos, *last_frag, *first_frag;
533 	struct sctp_ulpevent *cevent;
534 	__u32 ctsn, next_tsn;
535 	int is_last;
536 	struct sctp_ulpevent *retval;
537 
538 	/* The chunks are held in the reasm queue sorted by TSN.
539 	 * Walk through the queue sequentially and look for the first
540 	 * sequence of fragmented chunks.
541 	 */
542 
543 	if (skb_queue_empty(&ulpq->reasm))
544 		return NULL;
545 
546 	last_frag = first_frag = NULL;
547 	retval = NULL;
548 	next_tsn = 0;
549 	is_last = 0;
550 
551 	skb_queue_walk(&ulpq->reasm, pos) {
552 		cevent = sctp_skb2event(pos);
553 		ctsn = cevent->tsn;
554 
555 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
556 		case SCTP_DATA_FIRST_FRAG:
557 			if (!first_frag)
558 				return NULL;
559 			goto done;
560 		case SCTP_DATA_MIDDLE_FRAG:
561 			if (!first_frag) {
562 				first_frag = pos;
563 				next_tsn = ctsn + 1;
564 				last_frag = pos;
565 			} else if (next_tsn == ctsn) {
566 				next_tsn++;
567 				last_frag = pos;
568 			} else
569 				goto done;
570 			break;
571 		case SCTP_DATA_LAST_FRAG:
572 			if (!first_frag)
573 				first_frag = pos;
574 			else if (ctsn != next_tsn)
575 				goto done;
576 			last_frag = pos;
577 			is_last = 1;
578 			goto done;
579 		default:
580 			return NULL;
581 		}
582 	}
583 
584 	/* We have the reassembled event. There is no need to look
585 	 * further.
586 	 */
587 done:
588 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
589 					&ulpq->reasm, first_frag, last_frag);
590 	if (retval && is_last)
591 		retval->msg_flags |= MSG_EOR;
592 
593 	return retval;
594 }
595 
596 
597 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
598  * need reassembling.
599  */
600 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
601 						struct sctp_ulpevent *event)
602 {
603 	struct sctp_ulpevent *retval = NULL;
604 
605 	/* Check if this is part of a fragmented message.  */
606 	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
607 		event->msg_flags |= MSG_EOR;
608 		return event;
609 	}
610 
611 	sctp_ulpq_store_reasm(ulpq, event);
612 	if (!ulpq->pd_mode)
613 		retval = sctp_ulpq_retrieve_reassembled(ulpq);
614 	else {
615 		__u32 ctsn, ctsnap;
616 
617 		/* Do not even bother unless this is the next tsn to
618 		 * be delivered.
619 		 */
620 		ctsn = event->tsn;
621 		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
622 		if (TSN_lte(ctsn, ctsnap))
623 			retval = sctp_ulpq_retrieve_partial(ulpq);
624 	}
625 
626 	return retval;
627 }
628 
629 /* Retrieve the first part (sequential fragments) for partial delivery.  */
630 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
631 {
632 	struct sk_buff *pos, *last_frag, *first_frag;
633 	struct sctp_ulpevent *cevent;
634 	__u32 ctsn, next_tsn;
635 	struct sctp_ulpevent *retval;
636 
637 	/* The chunks are held in the reasm queue sorted by TSN.
638 	 * Walk through the queue sequentially and look for a sequence of
639 	 * fragmented chunks that start a datagram.
640 	 */
641 
642 	if (skb_queue_empty(&ulpq->reasm))
643 		return NULL;
644 
645 	last_frag = first_frag = NULL;
646 	retval = NULL;
647 	next_tsn = 0;
648 
649 	skb_queue_walk(&ulpq->reasm, pos) {
650 		cevent = sctp_skb2event(pos);
651 		ctsn = cevent->tsn;
652 
653 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
654 		case SCTP_DATA_FIRST_FRAG:
655 			if (!first_frag) {
656 				first_frag = pos;
657 				next_tsn = ctsn + 1;
658 				last_frag = pos;
659 			} else
660 				goto done;
661 			break;
662 
663 		case SCTP_DATA_MIDDLE_FRAG:
664 			if (!first_frag)
665 				return NULL;
666 			if (ctsn == next_tsn) {
667 				next_tsn++;
668 				last_frag = pos;
669 			} else
670 				goto done;
671 			break;
672 
673 		case SCTP_DATA_LAST_FRAG:
674 			if (!first_frag)
675 				return NULL;
676 			else
677 				goto done;
678 			break;
679 
680 		default:
681 			return NULL;
682 		}
683 	}
684 
685 	/* We have the reassembled event. There is no need to look
686 	 * further.
687 	 */
688 done:
689 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
690 					&ulpq->reasm, first_frag, last_frag);
691 	return retval;
692 }
693 
694 /*
695  * Flush out stale fragments from the reassembly queue when processing
696  * a Forward TSN.
697  *
698  * RFC 3758, Section 3.6
699  *
700  * After receiving and processing a FORWARD TSN, the data receiver MUST
701  * take cautions in updating its re-assembly queue.  The receiver MUST
702  * remove any partially reassembled message, which is still missing one
703  * or more TSNs earlier than or equal to the new cumulative TSN point.
704  * In the event that the receiver has invoked the partial delivery API,
705  * a notification SHOULD also be generated to inform the upper layer API
706  * that the message being partially delivered will NOT be completed.
707  */
708 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
709 {
710 	struct sk_buff *pos, *tmp;
711 	struct sctp_ulpevent *event;
712 	__u32 tsn;
713 
714 	if (skb_queue_empty(&ulpq->reasm))
715 		return;
716 
717 	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
718 		event = sctp_skb2event(pos);
719 		tsn = event->tsn;
720 
721 		/* Since the entire message must be abandoned by the
722 		 * sender (item A3 in Section 3.5, RFC 3758), we can
723 		 * free all fragments on the list that are less than
724 		 * or equal to the ctsn_point.
725 		 */
726 		if (TSN_lte(tsn, fwd_tsn)) {
727 			__skb_unlink(pos, &ulpq->reasm);
728 			sctp_ulpevent_free(event);
729 		} else
730 			break;
731 	}
732 }
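
/* Example of the flush above: if the reasm queue holds fragments with
 * TSNs 10, 11 and 14 and a FORWARD TSN moves the cumulative TSN point to
 * 12, the events for TSNs 10 and 11 are freed and the walk stops at 14,
 * which is kept for a later reassembly attempt.
 */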
733 
734 /*
735  * Drain the reassembly queue.  If we just cleared partial delivery, it
736  * is possible that the reassembly queue will contain already reassembled
737  * messages.  Retrieve any such messages and give them to the user.
738  */
739 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
740 {
741 	struct sctp_ulpevent *event = NULL;
742 
743 	if (skb_queue_empty(&ulpq->reasm))
744 		return;
745 
746 	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
747 		struct sk_buff_head temp;
748 
749 		skb_queue_head_init(&temp);
750 		__skb_queue_tail(&temp, sctp_event2skb(event));
751 
752 		/* Do ordering if needed.  */
753 		if (event->msg_flags & MSG_EOR)
754 			event = sctp_ulpq_order(ulpq, event);
755 
756 		/* Send event to the ULP.  'event' is the
757 		 * sctp_ulpevent for the very first SKB on the 'temp' list.
758 		 */
759 		if (event)
760 			sctp_ulpq_tail_event(ulpq, event);
761 	}
762 }
763 
764 
765 /* Helper function to gather skbs that have possibly become
766  * ordered by an incoming chunk.
767  */
768 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
769 					      struct sctp_ulpevent *event)
770 {
771 	struct sk_buff_head *event_list;
772 	struct sk_buff *pos, *tmp;
773 	struct sctp_ulpevent *cevent;
774 	struct sctp_stream *stream;
775 	__u16 sid, csid, cssn;
776 
777 	sid = event->stream;
778 	stream  = &ulpq->asoc->stream;
779 
780 	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
781 
782 	/* We are holding the chunks by stream, by SSN.  */
783 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
784 		cevent = (struct sctp_ulpevent *) pos->cb;
785 		csid = cevent->stream;
786 		cssn = cevent->ssn;
787 
788 		/* Have we gone too far?  */
789 		if (csid > sid)
790 			break;
791 
792 		/* Have we not gone far enough?  */
793 		if (csid < sid)
794 			continue;
795 
796 		if (cssn != sctp_ssn_peek(stream, in, sid))
797 			break;
798 
799 		/* Found it, so mark in the stream. */
800 		sctp_ssn_next(stream, in, sid);
801 
802 		__skb_unlink(pos, &ulpq->lobby);
803 
804 		/* Attach all gathered skbs to the event.  */
805 		__skb_queue_tail(event_list, pos);
806 	}
807 }
808 
809 /* Helper function to store chunks needing ordering.  */
810 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
811 					   struct sctp_ulpevent *event)
812 {
813 	struct sk_buff *pos;
814 	struct sctp_ulpevent *cevent;
815 	__u16 sid, csid;
816 	__u16 ssn, cssn;
817 
818 	pos = skb_peek_tail(&ulpq->lobby);
819 	if (!pos) {
820 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
821 		return;
822 	}
823 
824 	sid = event->stream;
825 	ssn = event->ssn;
826 
827 	cevent = (struct sctp_ulpevent *) pos->cb;
828 	csid = cevent->stream;
829 	cssn = cevent->ssn;
830 	if (sid > csid) {
831 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
832 		return;
833 	}
834 
835 	if ((sid == csid) && SSN_lt(cssn, ssn)) {
836 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
837 		return;
838 	}
839 
840 	/* Find the right place in this list.  We store them by
841 	 * stream ID and then by SSN.
842 	 */
843 	skb_queue_walk(&ulpq->lobby, pos) {
844 		cevent = (struct sctp_ulpevent *) pos->cb;
845 		csid = cevent->stream;
846 		cssn = cevent->ssn;
847 
848 		if (csid > sid)
849 			break;
850 		if (csid == sid && SSN_lt(ssn, cssn))
851 			break;
852 	}
853 
854 
855 	/* Insert before pos. */
856 	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
857 }
858 
859 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
860 					     struct sctp_ulpevent *event)
861 {
862 	__u16 sid, ssn;
863 	struct sctp_stream *stream;
864 
865 	/* Check if this message needs ordering.  */
866 	if (event->msg_flags & SCTP_DATA_UNORDERED)
867 		return event;
868 
869 	/* Note: The stream ID must be verified before this routine.  */
870 	sid = event->stream;
871 	ssn = event->ssn;
872 	stream  = &ulpq->asoc->stream;
873 
874 	/* Is this the expected SSN for this stream ID?  */
875 	if (ssn != sctp_ssn_peek(stream, in, sid)) {
876 		/* We've received something out of order, so find where it
877 		 * needs to be placed.  We order by stream and then by SSN.
878 		 */
879 		sctp_ulpq_store_ordered(ulpq, event);
880 		return NULL;
881 	}
882 
883 	/* Mark that the next chunk has been found.  */
884 	sctp_ssn_next(stream, in, sid);
885 
886 	/* Go find any other chunks that were waiting for
887 	 * ordering.
888 	 */
889 	sctp_ulpq_retrieve_ordered(ulpq, event);
890 
891 	return event;
892 }
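
/* Worked example of the ordering above: if stream 2 expects SSN 5 and a
 * message with SSN 7 arrives first, it is parked in the lobby by
 * sctp_ulpq_store_ordered().  When SSN 5 arrives it is delivered and the
 * expected SSN advances to 6; SSN 7 stays in the lobby until SSN 6 has
 * been received (or skipped by a Forward TSN), at which point
 * sctp_ulpq_retrieve_ordered() gathers it as well.
 */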
893 
894 /* Helper function to gather skbs that have possibly become
895  * ordered by a Forward TSN skipping their dependencies.
896  */
897 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
898 {
899 	struct sk_buff *pos, *tmp;
900 	struct sctp_ulpevent *cevent;
901 	struct sctp_ulpevent *event;
902 	struct sctp_stream *stream;
903 	struct sk_buff_head temp;
904 	struct sk_buff_head *lobby = &ulpq->lobby;
905 	__u16 csid, cssn;
906 
907 	stream = &ulpq->asoc->stream;
908 
909 	/* We are holding the chunks by stream, by SSN.  */
910 	skb_queue_head_init(&temp);
911 	event = NULL;
912 	sctp_skb_for_each(pos, lobby, tmp) {
913 		cevent = (struct sctp_ulpevent *) pos->cb;
914 		csid = cevent->stream;
915 		cssn = cevent->ssn;
916 
917 		/* Have we gone too far?  */
918 		if (csid > sid)
919 			break;
920 
921 		/* Have we not gone far enough?  */
922 		if (csid < sid)
923 			continue;
924 
925 		/* see if this ssn has been marked by skipping */
926 		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
927 			break;
928 
929 		__skb_unlink(pos, lobby);
930 		if (!event)
931 			/* Create a temporary list to collect chunks on.  */
932 			event = sctp_skb2event(pos);
933 
934 		/* Attach all gathered skbs to the event.  */
935 		__skb_queue_tail(&temp, pos);
936 	}
937 
938 	/* If we didn't reap any data, see if the next expected SSN
939 	 * is next on the queue and if so, use that.
940 	 */
941 	if (event == NULL && pos != (struct sk_buff *)lobby) {
942 		cevent = (struct sctp_ulpevent *) pos->cb;
943 		csid = cevent->stream;
944 		cssn = cevent->ssn;
945 
946 		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
947 			sctp_ssn_next(stream, in, csid);
948 			__skb_unlink(pos, lobby);
949 			__skb_queue_tail(&temp, pos);
950 			event = sctp_skb2event(pos);
951 		}
952 	}
953 
954 	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
955 	 * very first SKB on the 'temp' list.
956 	 */
957 	if (event) {
958 		/* see if we have more ordered data that we can deliver */
959 		sctp_ulpq_retrieve_ordered(ulpq, event);
960 		sctp_ulpq_tail_event(ulpq, event);
961 	}
962 }
963 
964 /* Skip over an SSN. This is used during the processing of
965  * a Forward TSN chunk to skip over the abandoned ordered data.
966  */
967 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
968 {
969 	struct sctp_stream *stream;
970 
971 	/* Note: The stream ID must be verified before this routine.  */
972 	stream  = &ulpq->asoc->stream;
973 
974 	/* Is this an old SSN?  If so ignore. */
975 	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
976 		return;
977 
978 	/* Mark that we are no longer expecting this SSN or lower. */
979 	sctp_ssn_skip(stream, in, sid, ssn);
980 
981 	/* Go find any other chunks that were waiting for
982 	 * ordering and deliver them if needed.
983 	 */
984 	sctp_ulpq_reap_ordered(ulpq, sid);
985 }
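
/* Example of the skip above: if stream 3 currently expects SSN 4 and the
 * lobby holds SSNs 6 and 7 for that stream, a Forward TSN that skips up
 * to SSN 6 (sctp_ulpq_skip(ulpq, 3, 6)) makes 7 the next expected SSN;
 * sctp_ulpq_reap_ordered() then reaps the queued SSN 6, gathers SSN 7
 * behind it via sctp_ulpq_retrieve_ordered(), and delivers both.
 */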
986 
987 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
988 			    __u16 needed)
989 {
990 	__u16 freed = 0;
991 	__u32 tsn, last_tsn;
992 	struct sk_buff *skb, *flist, *last;
993 	struct sctp_ulpevent *event;
994 	struct sctp_tsnmap *tsnmap;
995 
996 	tsnmap = &ulpq->asoc->peer.tsn_map;
997 
998 	while ((skb = skb_peek_tail(list)) != NULL) {
999 		event = sctp_skb2event(skb);
1000 		tsn = event->tsn;
1001 
1002 		/* Don't renege below the Cumulative TSN ACK Point. */
1003 		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
1004 			break;
1005 
1006 		/* Events in ordering queue may have multiple fragments
1007 		 * corresponding to additional TSNs.  Sum the total
1008 		 * freed space; find the last TSN.
1009 		 */
1010 		freed += skb_headlen(skb);
1011 		flist = skb_shinfo(skb)->frag_list;
1012 		for (last = flist; flist; flist = flist->next) {
1013 			last = flist;
1014 			freed += skb_headlen(last);
1015 		}
1016 		if (last)
1017 			last_tsn = sctp_skb2event(last)->tsn;
1018 		else
1019 			last_tsn = tsn;
1020 
1021 		/* Unlink the event, then renege all applicable TSNs. */
1022 		__skb_unlink(skb, list);
1023 		sctp_ulpevent_free(event);
1024 		while (TSN_lte(tsn, last_tsn)) {
1025 			sctp_tsnmap_renege(tsnmap, tsn);
1026 			tsn++;
1027 		}
1028 		if (freed >= needed)
1029 			return freed;
1030 	}
1031 
1032 	return freed;
1033 }
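
/* Worked example of the accounting above: if the newest event on the list
 * is a partly reassembled message whose head skb holds 1200 bytes linearly
 * and carries one 800-byte skb on its frag_list, freeing it yields
 * freed == 2000, and both corresponding TSNs are reneged from the peer's
 * tsnmap (assuming they lie above the Cumulative TSN ACK Point).
 */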
1034 
1035 /* Renege 'needed' bytes from the ordering queue. */
1036 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1037 {
1038 	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1039 }
1040 
1041 /* Renege 'needed' bytes from the reassembly queue. */
1042 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1043 {
1044 	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1045 }
1046 
1047 /* Partially deliver the first message as there is pressure on the rwnd. */
1048 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1049 				gfp_t gfp)
1050 {
1051 	struct sctp_ulpevent *event;
1052 	struct sctp_association *asoc;
1053 	struct sctp_sock *sp;
1054 	__u32 ctsn;
1055 	struct sk_buff *skb;
1056 
1057 	asoc = ulpq->asoc;
1058 	sp = sctp_sk(asoc->base.sk);
1059 
1060 	/* If the association is already in Partial Delivery mode
1061 	 * we have nothing to do.
1062 	 */
1063 	if (ulpq->pd_mode)
1064 		return;
1065 
1066 	/* Data must be at or below the Cumulative TSN ACK Point to
1067 	 * start partial delivery.
1068 	 */
1069 	skb = skb_peek(&asoc->ulpq.reasm);
1070 	if (skb != NULL) {
1071 		ctsn = sctp_skb2event(skb)->tsn;
1072 		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1073 			return;
1074 	}
1075 
1076 	/* If the user enabled the fragment interleave socket option,
1077 	 * multiple associations can enter partial delivery.
1078 	 * Otherwise, we can only enter partial delivery if the
1079 	 * socket is not already in partial delivery mode.
1080 	 */
1081 	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1082 		/* Is partial delivery possible?  */
1083 		event = sctp_ulpq_retrieve_first(ulpq);
1084 		/* Send event to the ULP.   */
1085 		if (event) {
1086 			struct sk_buff_head temp;
1087 
1088 			skb_queue_head_init(&temp);
1089 			__skb_queue_tail(&temp, sctp_event2skb(event));
1090 			sctp_ulpq_tail_event(ulpq, event);
1091 			sctp_ulpq_set_pd(ulpq);
1092 			return;
1093 		}
1094 	}
1095 }
1096 
1097 /* Renege some packets to make room for an incoming chunk.  */
1098 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1099 		      gfp_t gfp)
1100 {
1101 	struct sctp_association *asoc = ulpq->asoc;
1102 	__u32 freed = 0;
1103 	__u16 needed;
1104 
1105 	needed = ntohs(chunk->chunk_hdr->length) -
1106 		 sizeof(struct sctp_data_chunk);
1107 
1108 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1109 		freed = sctp_ulpq_renege_order(ulpq, needed);
1110 		if (freed < needed)
1111 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1112 	}
1113 	/* If able to free enough room, accept this chunk. */
1114 	if (freed >= needed) {
1115 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1116 		/*
1117 		 * Enter partial delivery if the chunk has not been
1118 		 * delivered; otherwise, drain the reassembly queue.
1119 		 */
1120 		if (retval <= 0)
1121 			sctp_ulpq_partial_delivery(ulpq, gfp);
1122 		else if (retval == 1)
1123 			sctp_ulpq_reasm_drain(ulpq);
1124 	}
1125 
1126 	sk_mem_reclaim(asoc->base.sk);
1127 }
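
/* Sizing note for the 'needed' computation above: chunk_hdr->length covers
 * the DATA chunk header as well as the payload, so subtracting
 * sizeof(struct sctp_data_chunk) (the 4-byte chunk header plus the 12-byte
 * DATA header) leaves just the user data.  A DATA chunk carrying 1000
 * bytes of user data therefore needs roughly 1000 bytes reneged before it
 * can be accepted.
 */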
1128 
1129 
1130 
1131 /* Notify the application if an association is aborted and in
1132  * partial delivery mode.  Send up any pending received messages.
1133  */
1134 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1135 {
1136 	struct sctp_ulpevent *ev = NULL;
1137 	struct sctp_sock *sp;
1138 	struct sock *sk;
1139 
1140 	if (!ulpq->pd_mode)
1141 		return;
1142 
1143 	sk = ulpq->asoc->base.sk;
1144 	sp = sctp_sk(sk);
1145 	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1146 				       SCTP_PARTIAL_DELIVERY_EVENT))
1147 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1148 					      SCTP_PARTIAL_DELIVERY_ABORTED,
1149 					      0, 0, 0, gfp);
1150 	if (ev)
1151 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1152 
1153 	/* If there is data waiting, send it up the socket now. */
1154 	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1155 		sp->data_ready_signalled = 1;
1156 		sk->sk_data_ready(sk);
1157 	}
1158 }
1159