xref: /freebsd-12.1/sys/netinet/sctputil.c (revision f6ce56c6)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #if defined(INET6) || defined(INET)
55 #include <netinet/tcp_var.h>
56 #endif
57 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <netinet/in_kdtrace.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the deferred mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_association *asoc;
784 	struct sctp_nets *net;
785 
786 	asoc = &stcb->asoc;
787 
788 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
789 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
790 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
791 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
792 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
793 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
794 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
795 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
796 	}
797 }
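/*
 * Illustrative call pattern (a sketch, not taken from this file): callers
 * first move the association into the relevant shutdown state and then stop
 * the remaining timers, e.g.
 *
 *	SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *	sctp_stop_timers_for_shutdown(stcb);
 */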
798 
799 /*
800  * A list of sizes based on typical MTUs, used only if the next hop size is not
801  * returned. These values MUST be multiples of 4 and MUST be ordered.
802  */
803 static uint32_t sctp_mtu_sizes[] = {
804 	68,
805 	296,
806 	508,
807 	512,
808 	544,
809 	576,
810 	1004,
811 	1492,
812 	1500,
813 	1536,
814 	2000,
815 	2048,
816 	4352,
817 	4464,
818 	8166,
819 	17912,
820 	32000,
821 	65532
822 };
823 
824 /*
825  * Return the largest MTU in sctp_mtu_sizes smaller than val.
826  * If val is smaller than the minimum, just return the largest
827  * multiple of 4 smaller or equal to val.
828  * Ensure that the result is a multiple of 4.
829  */
830 uint32_t
831 sctp_get_prev_mtu(uint32_t val)
832 {
833 	uint32_t i;
834 
835 	val &= 0xfffffffc;
836 	if (val <= sctp_mtu_sizes[0]) {
837 		return (val);
838 	}
839 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
840 		if (val <= sctp_mtu_sizes[i]) {
841 			break;
842 		}
843 	}
844 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
845 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
846 	return (sctp_mtu_sizes[i - 1]);
847 }
848 
849 /*
850  * Return the smallest MTU in sctp_mtu_sizes larger than val.
851  * If val is larger than the maximum, just return the largest multiple of 4 smaller
852  * or equal to val.
853  * Ensure that the result is a multiple of 4.
854  */
855 uint32_t
856 sctp_get_next_mtu(uint32_t val)
857 {
858 	/* select another MTU that is just bigger than this one */
859 	uint32_t i;
860 
861 	val &= 0xfffffffc;
862 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
863 		if (val < sctp_mtu_sizes[i]) {
864 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
865 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
866 			return (sctp_mtu_sizes[i]);
867 		}
868 	}
869 	return (val);
870 }
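/*
 * Worked example (illustration only; the values follow from sctp_mtu_sizes[]
 * above, the helper below is hypothetical and not part of the SCTP code).
 */
#if 0
static void
sctp_mtu_table_example(void)
{
	uint32_t prev, next;

	prev = sctp_get_prev_mtu(1500);	/* 1492: next smaller table entry */
	next = sctp_get_next_mtu(1500);	/* 1536: next larger table entry */
	prev = sctp_get_prev_mtu(67);	/* 64: below the table minimum, so the
					 * value is only rounded down to a
					 * multiple of 4 */
	(void)prev;
	(void)next;
}
#endif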
871 
872 void
873 sctp_fill_random_store(struct sctp_pcb *m)
874 {
875 	/*
876 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
877 	 * our counter. The result becomes our good random numbers and we
878 	 * then setup to give these out. Note that we do no locking to
879 	 * protect this. This is ok, since if competing folks call this we
880 	 * will get more gobbledygook in the random store which is what we
881 	 * want. There is a danger that two guys will use the same random
882 	 * numbers, but that's ok too since that is random as well :->
883 	 */
884 	m->store_at = 0;
885 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
886 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
887 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
888 	m->random_counter++;
889 }
890 
891 uint32_t
892 sctp_select_initial_TSN(struct sctp_pcb *inp)
893 {
894 	/*
895 	 * A true implementation should use a random selection process to get
896 	 * the initial stream sequence number, using RFC1750 as a good
897 	 * guideline
898 	 */
899 	uint32_t x, *xp;
900 	uint8_t *p;
901 	int store_at, new_store;
902 
903 	if (inp->initial_sequence_debug != 0) {
904 		uint32_t ret;
905 
906 		ret = inp->initial_sequence_debug;
907 		inp->initial_sequence_debug++;
908 		return (ret);
909 	}
910 retry:
911 	store_at = inp->store_at;
912 	new_store = store_at + sizeof(uint32_t);
913 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
914 		new_store = 0;
915 	}
916 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
917 		goto retry;
918 	}
919 	if (new_store == 0) {
920 		/* Refill the random store */
921 		sctp_fill_random_store(inp);
922 	}
923 	p = &inp->random_store[store_at];
924 	xp = (uint32_t *)p;
925 	x = *xp;
926 	return (x);
927 }
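/*
 * Sketch of how the random store above is consumed (illustrative only; the
 * helper name is hypothetical): each call hands out the next 4 bytes of
 * random_store, and once store_at would run past the end it wraps to 0 and
 * sctp_fill_random_store() re-keys the store from random_numbers and the
 * incrementing counter.
 */
#if 0
static void
sctp_initial_tsn_example(struct sctp_inpcb *inp)
{
	uint32_t tsn1, tsn2;

	/* Assuming store_at starts at 0 and the store is already filled: */
	tsn1 = sctp_select_initial_TSN(&inp->sctp_ep);	/* bytes 0..3 */
	tsn2 = sctp_select_initial_TSN(&inp->sctp_ep);	/* bytes 4..7 */
	(void)tsn1;
	(void)tsn2;
}
#endif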
928 
929 uint32_t
930 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
931 {
932 	uint32_t x;
933 	struct timeval now;
934 
935 	if (check) {
936 		(void)SCTP_GETTIME_TIMEVAL(&now);
937 	}
938 	for (;;) {
939 		x = sctp_select_initial_TSN(&inp->sctp_ep);
940 		if (x == 0) {
941 			/* we never use 0 */
942 			continue;
943 		}
944 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
945 			break;
946 		}
947 	}
948 	return (x);
949 }
950 
951 int32_t
952 sctp_map_assoc_state(int kernel_state)
953 {
954 	int32_t user_state;
955 
956 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
957 		user_state = SCTP_CLOSED;
958 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
959 		user_state = SCTP_SHUTDOWN_PENDING;
960 	} else {
961 		switch (kernel_state & SCTP_STATE_MASK) {
962 		case SCTP_STATE_EMPTY:
963 			user_state = SCTP_CLOSED;
964 			break;
965 		case SCTP_STATE_INUSE:
966 			user_state = SCTP_CLOSED;
967 			break;
968 		case SCTP_STATE_COOKIE_WAIT:
969 			user_state = SCTP_COOKIE_WAIT;
970 			break;
971 		case SCTP_STATE_COOKIE_ECHOED:
972 			user_state = SCTP_COOKIE_ECHOED;
973 			break;
974 		case SCTP_STATE_OPEN:
975 			user_state = SCTP_ESTABLISHED;
976 			break;
977 		case SCTP_STATE_SHUTDOWN_SENT:
978 			user_state = SCTP_SHUTDOWN_SENT;
979 			break;
980 		case SCTP_STATE_SHUTDOWN_RECEIVED:
981 			user_state = SCTP_SHUTDOWN_RECEIVED;
982 			break;
983 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
984 			user_state = SCTP_SHUTDOWN_ACK_SENT;
985 			break;
986 		default:
987 			user_state = SCTP_CLOSED;
988 			break;
989 		}
990 	}
991 	return (user_state);
992 }
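/*
 * Spot checks of the mapping (illustration only): the WAS_ABORTED and
 * SHUTDOWN_PENDING bits are tested before the masked state is consulted.
 */
#if 0
	sctp_map_assoc_state(SCTP_STATE_OPEN);		/* SCTP_ESTABLISHED */
	sctp_map_assoc_state(SCTP_STATE_OPEN |
	    SCTP_STATE_SHUTDOWN_PENDING);		/* SCTP_SHUTDOWN_PENDING */
	sctp_map_assoc_state(SCTP_STATE_OPEN |
	    SCTP_STATE_WAS_ABORTED);			/* SCTP_CLOSED */
#endif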
993 
994 int
995 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
996     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
997 {
998 	struct sctp_association *asoc;
999 
1000 	/*
1001 	 * Anything set to zero is taken care of by the allocation routine's
1002 	 * bzero
1003 	 */
1004 
1005 	/*
1006 	 * Up front select what scoping to apply on addresses I tell my peer
1007 	 * Not sure what to do with these right now, we will need to come up
1008 	 * with a way to set them. We may need to pass them through from the
1009 	 * caller in the sctp_aloc_assoc() function.
1010 	 */
1011 	int i;
1012 #if defined(SCTP_DETAILED_STR_STATS)
1013 	int j;
1014 #endif
1015 
1016 	asoc = &stcb->asoc;
1017 	/* init all variables to a known value. */
1018 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1019 	asoc->max_burst = inp->sctp_ep.max_burst;
1020 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1021 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1022 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1023 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1024 	asoc->ecn_supported = inp->ecn_supported;
1025 	asoc->prsctp_supported = inp->prsctp_supported;
1026 	asoc->idata_supported = inp->idata_supported;
1027 	asoc->auth_supported = inp->auth_supported;
1028 	asoc->asconf_supported = inp->asconf_supported;
1029 	asoc->reconfig_supported = inp->reconfig_supported;
1030 	asoc->nrsack_supported = inp->nrsack_supported;
1031 	asoc->pktdrop_supported = inp->pktdrop_supported;
1032 	asoc->idata_supported = inp->idata_supported;
1033 	asoc->sctp_cmt_pf = (uint8_t)0;
1034 	asoc->sctp_frag_point = inp->sctp_frag_point;
1035 	asoc->sctp_features = inp->sctp_features;
1036 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1037 	asoc->max_cwnd = inp->max_cwnd;
1038 #ifdef INET6
1039 	if (inp->sctp_ep.default_flowlabel) {
1040 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1041 	} else {
1042 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1043 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1044 			asoc->default_flowlabel &= 0x000fffff;
1045 			asoc->default_flowlabel |= 0x80000000;
1046 		} else {
1047 			asoc->default_flowlabel = 0;
1048 		}
1049 	}
1050 #endif
1051 	asoc->sb_send_resv = 0;
1052 	if (override_tag) {
1053 		asoc->my_vtag = override_tag;
1054 	} else {
1055 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1056 	}
1057 	/* Get the nonce tags */
1058 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1059 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1060 	asoc->vrf_id = vrf_id;
1061 
1062 #ifdef SCTP_ASOCLOG_OF_TSNS
1063 	asoc->tsn_in_at = 0;
1064 	asoc->tsn_out_at = 0;
1065 	asoc->tsn_in_wrapped = 0;
1066 	asoc->tsn_out_wrapped = 0;
1067 	asoc->cumack_log_at = 0;
1068 	asoc->cumack_log_atsnt = 0;
1069 #endif
1070 #ifdef SCTP_FS_SPEC_LOG
1071 	asoc->fs_index = 0;
1072 #endif
1073 	asoc->refcnt = 0;
1074 	asoc->assoc_up_sent = 0;
1075 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1076 	    sctp_select_initial_TSN(&inp->sctp_ep);
1077 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1078 	/* we are optimistic here */
1079 	asoc->peer_supports_nat = 0;
1080 	asoc->sent_queue_retran_cnt = 0;
1081 
1082 	/* for CMT */
1083 	asoc->last_net_cmt_send_started = NULL;
1084 
1085 	/* This will need to be adjusted */
1086 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1087 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1088 	asoc->asconf_seq_in = asoc->last_acked_seq;
1089 
1090 	/* here we are different, we hold the next one we expect */
1091 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1092 
1093 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1094 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1095 
1096 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1097 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1098 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1099 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1100 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1101 	asoc->free_chunk_cnt = 0;
1102 
1103 	asoc->iam_blocking = 0;
1104 	asoc->context = inp->sctp_context;
1105 	asoc->local_strreset_support = inp->local_strreset_support;
1106 	asoc->def_send = inp->def_send;
1107 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1108 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1109 	asoc->pr_sctp_cnt = 0;
1110 	asoc->total_output_queue_size = 0;
1111 
1112 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1113 		asoc->scope.ipv6_addr_legal = 1;
1114 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1115 			asoc->scope.ipv4_addr_legal = 1;
1116 		} else {
1117 			asoc->scope.ipv4_addr_legal = 0;
1118 		}
1119 	} else {
1120 		asoc->scope.ipv6_addr_legal = 0;
1121 		asoc->scope.ipv4_addr_legal = 1;
1122 	}
1123 
1124 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1125 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1126 
1127 	asoc->smallest_mtu = inp->sctp_frag_point;
1128 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1129 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1130 
1131 	asoc->stream_locked_on = 0;
1132 	asoc->ecn_echo_cnt_onq = 0;
1133 	asoc->stream_locked = 0;
1134 
1135 	asoc->send_sack = 1;
1136 
1137 	LIST_INIT(&asoc->sctp_restricted_addrs);
1138 
1139 	TAILQ_INIT(&asoc->nets);
1140 	TAILQ_INIT(&asoc->pending_reply_queue);
1141 	TAILQ_INIT(&asoc->asconf_ack_sent);
1142 	/* Setup to fill the hb random cache at first HB */
1143 	asoc->hb_random_idx = 4;
1144 
1145 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1146 
1147 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1148 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1149 
1150 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1151 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1152 
1153 	/*
1154 	 * Now the stream parameters, here we allocate space for all streams
1155 	 * that we request by default.
1156 	 */
1157 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1158 	    o_strms;
1159 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1160 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1161 	    SCTP_M_STRMO);
1162 	if (asoc->strmout == NULL) {
1163 		/* big trouble no memory */
1164 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1165 		return (ENOMEM);
1166 	}
1167 	for (i = 0; i < asoc->streamoutcnt; i++) {
1168 		/*
1169 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1170 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1171 		 * the count (streamoutcnt), but first check whether we sent
1172 		 * on any of the upper streams that were dropped (if some
1173 		 * were). Anything sent on a dropped stream must be reported
1174 		 * to the upper layer as failed to send.
1175 		 */
1176 		asoc->strmout[i].next_mid_ordered = 0;
1177 		asoc->strmout[i].next_mid_unordered = 0;
1178 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1179 		asoc->strmout[i].chunks_on_queues = 0;
1180 #if defined(SCTP_DETAILED_STR_STATS)
1181 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1182 			asoc->strmout[i].abandoned_sent[j] = 0;
1183 			asoc->strmout[i].abandoned_unsent[j] = 0;
1184 		}
1185 #else
1186 		asoc->strmout[i].abandoned_sent[0] = 0;
1187 		asoc->strmout[i].abandoned_unsent[0] = 0;
1188 #endif
1189 		asoc->strmout[i].sid = i;
1190 		asoc->strmout[i].last_msg_incomplete = 0;
1191 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1192 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1193 	}
1194 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1195 
1196 	/* Now the mapping array */
1197 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1198 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1199 	    SCTP_M_MAP);
1200 	if (asoc->mapping_array == NULL) {
1201 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1202 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1203 		return (ENOMEM);
1204 	}
1205 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1206 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1207 	    SCTP_M_MAP);
1208 	if (asoc->nr_mapping_array == NULL) {
1209 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1210 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1215 
1216 	/* Now the init of the other outqueues */
1217 	TAILQ_INIT(&asoc->free_chunks);
1218 	TAILQ_INIT(&asoc->control_send_queue);
1219 	TAILQ_INIT(&asoc->asconf_send_queue);
1220 	TAILQ_INIT(&asoc->send_queue);
1221 	TAILQ_INIT(&asoc->sent_queue);
1222 	TAILQ_INIT(&asoc->resetHead);
1223 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1224 	TAILQ_INIT(&asoc->asconf_queue);
1225 	/* authentication fields */
1226 	asoc->authinfo.random = NULL;
1227 	asoc->authinfo.active_keyid = 0;
1228 	asoc->authinfo.assoc_key = NULL;
1229 	asoc->authinfo.assoc_keyid = 0;
1230 	asoc->authinfo.recv_key = NULL;
1231 	asoc->authinfo.recv_keyid = 0;
1232 	LIST_INIT(&asoc->shared_keys);
1233 	asoc->marked_retrans = 0;
1234 	asoc->port = inp->sctp_ep.port;
1235 	asoc->timoinit = 0;
1236 	asoc->timodata = 0;
1237 	asoc->timosack = 0;
1238 	asoc->timoshutdown = 0;
1239 	asoc->timoheartbeat = 0;
1240 	asoc->timocookie = 0;
1241 	asoc->timoshutdownack = 0;
1242 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1243 	asoc->discontinuity_time = asoc->start_time;
1244 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1245 		asoc->abandoned_unsent[i] = 0;
1246 		asoc->abandoned_sent[i] = 0;
1247 	}
1248 	/*
1249 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1250 	 * freed later when the association is freed.
1251 	 */
1252 	return (0);
1253 }
1254 
1255 void
1256 sctp_print_mapping_array(struct sctp_association *asoc)
1257 {
1258 	unsigned int i, limit;
1259 
1260 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1261 	    asoc->mapping_array_size,
1262 	    asoc->mapping_array_base_tsn,
1263 	    asoc->cumulative_tsn,
1264 	    asoc->highest_tsn_inside_map,
1265 	    asoc->highest_tsn_inside_nr_map);
1266 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1267 		if (asoc->mapping_array[limit - 1] != 0) {
1268 			break;
1269 		}
1270 	}
1271 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1272 	for (i = 0; i < limit; i++) {
1273 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1274 	}
1275 	if (limit % 16)
1276 		SCTP_PRINTF("\n");
1277 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1278 		if (asoc->nr_mapping_array[limit - 1]) {
1279 			break;
1280 		}
1281 	}
1282 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1283 	for (i = 0; i < limit; i++) {
1284 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1285 	}
1286 	if (limit % 16)
1287 		SCTP_PRINTF("\n");
1288 }
1289 
1290 int
1291 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1292 {
1293 	/* mapping array needs to grow */
1294 	uint8_t *new_array1, *new_array2;
1295 	uint32_t new_size;
1296 
1297 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1298 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1299 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1300 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1301 		/* can't get more, forget it */
1302 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1303 		if (new_array1) {
1304 			SCTP_FREE(new_array1, SCTP_M_MAP);
1305 		}
1306 		if (new_array2) {
1307 			SCTP_FREE(new_array2, SCTP_M_MAP);
1308 		}
1309 		return (-1);
1310 	}
1311 	memset(new_array1, 0, new_size);
1312 	memset(new_array2, 0, new_size);
1313 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1314 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1315 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1316 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1317 	asoc->mapping_array = new_array1;
1318 	asoc->nr_mapping_array = new_array2;
1319 	asoc->mapping_array_size = new_size;
1320 	return (0);
1321 }
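/*
 * Sizing sketch (illustration only): "needed" counts additional TSN slots,
 * i.e. bits, so the growth is needed/8 bytes rounded up plus
 * SCTP_MAPPING_ARRAY_INCR bytes of slack.  For example, assuming a current
 * mapping_array_size of 16 bytes, needed = 100 and an increment of 32:
 *
 *	new_size = 16 + ((100 + 7) / 8 + 32) = 16 + (13 + 32) = 61 bytes
 */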
1322 
1323 
1324 static void
1325 sctp_iterator_work(struct sctp_iterator *it)
1326 {
1327 	int iteration_count = 0;
1328 	int inp_skip = 0;
1329 	int first_in = 1;
1330 	struct sctp_inpcb *tinp;
1331 
1332 	SCTP_INP_INFO_RLOCK();
1333 	SCTP_ITERATOR_LOCK();
1334 	sctp_it_ctl.cur_it = it;
1335 	if (it->inp) {
1336 		SCTP_INP_RLOCK(it->inp);
1337 		SCTP_INP_DECR_REF(it->inp);
1338 	}
1339 	if (it->inp == NULL) {
1340 		/* iterator is complete */
1341 done_with_iterator:
1342 		sctp_it_ctl.cur_it = NULL;
1343 		SCTP_ITERATOR_UNLOCK();
1344 		SCTP_INP_INFO_RUNLOCK();
1345 		if (it->function_atend != NULL) {
1346 			(*it->function_atend) (it->pointer, it->val);
1347 		}
1348 		SCTP_FREE(it, SCTP_M_ITER);
1349 		return;
1350 	}
1351 select_a_new_ep:
1352 	if (first_in) {
1353 		first_in = 0;
1354 	} else {
1355 		SCTP_INP_RLOCK(it->inp);
1356 	}
1357 	while (((it->pcb_flags) &&
1358 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1359 	    ((it->pcb_features) &&
1360 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1361 		/* endpoint flags or features don't match, so keep looking */
1362 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1363 			SCTP_INP_RUNLOCK(it->inp);
1364 			goto done_with_iterator;
1365 		}
1366 		tinp = it->inp;
1367 		it->inp = LIST_NEXT(it->inp, sctp_list);
1368 		SCTP_INP_RUNLOCK(tinp);
1369 		if (it->inp == NULL) {
1370 			goto done_with_iterator;
1371 		}
1372 		SCTP_INP_RLOCK(it->inp);
1373 	}
1374 	/* now go through each assoc which is in the desired state */
1375 	if (it->done_current_ep == 0) {
1376 		if (it->function_inp != NULL)
1377 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1378 		it->done_current_ep = 1;
1379 	}
1380 	if (it->stcb == NULL) {
1381 		/* run the per instance function */
1382 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1383 	}
1384 	if ((inp_skip) || it->stcb == NULL) {
1385 		if (it->function_inp_end != NULL) {
1386 			inp_skip = (*it->function_inp_end) (it->inp,
1387 			    it->pointer,
1388 			    it->val);
1389 		}
1390 		SCTP_INP_RUNLOCK(it->inp);
1391 		goto no_stcb;
1392 	}
1393 	while (it->stcb) {
1394 		SCTP_TCB_LOCK(it->stcb);
1395 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1396 			/* not in the right state... keep looking */
1397 			SCTP_TCB_UNLOCK(it->stcb);
1398 			goto next_assoc;
1399 		}
1400 		/* see if we have limited out the iterator loop */
1401 		iteration_count++;
1402 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1403 			/* Pause to let others grab the lock */
1404 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1405 			SCTP_TCB_UNLOCK(it->stcb);
1406 			SCTP_INP_INCR_REF(it->inp);
1407 			SCTP_INP_RUNLOCK(it->inp);
1408 			SCTP_ITERATOR_UNLOCK();
1409 			SCTP_INP_INFO_RUNLOCK();
1410 			SCTP_INP_INFO_RLOCK();
1411 			SCTP_ITERATOR_LOCK();
1412 			if (sctp_it_ctl.iterator_flags) {
1413 				/* We won't be staying here */
1414 				SCTP_INP_DECR_REF(it->inp);
1415 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1416 				if (sctp_it_ctl.iterator_flags &
1417 				    SCTP_ITERATOR_STOP_CUR_IT) {
1418 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1419 					goto done_with_iterator;
1420 				}
1421 				if (sctp_it_ctl.iterator_flags &
1422 				    SCTP_ITERATOR_STOP_CUR_INP) {
1423 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1424 					goto no_stcb;
1425 				}
1426 				/* If we reach here huh? */
1427 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1428 				    sctp_it_ctl.iterator_flags);
1429 				sctp_it_ctl.iterator_flags = 0;
1430 			}
1431 			SCTP_INP_RLOCK(it->inp);
1432 			SCTP_INP_DECR_REF(it->inp);
1433 			SCTP_TCB_LOCK(it->stcb);
1434 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1435 			iteration_count = 0;
1436 		}
1437 
1438 		/* run function on this one */
1439 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1440 
1441 		/*
1442 		 * we lie here, it really needs to have its own type but
1443 		 * first I must verify that this won't affect things :-0
1444 		 */
1445 		if (it->no_chunk_output == 0)
1446 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1447 
1448 		SCTP_TCB_UNLOCK(it->stcb);
1449 next_assoc:
1450 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1451 		if (it->stcb == NULL) {
1452 			/* Run last function */
1453 			if (it->function_inp_end != NULL) {
1454 				inp_skip = (*it->function_inp_end) (it->inp,
1455 				    it->pointer,
1456 				    it->val);
1457 			}
1458 		}
1459 	}
1460 	SCTP_INP_RUNLOCK(it->inp);
1461 no_stcb:
1462 	/* done with all assocs on this endpoint, move on to next endpoint */
1463 	it->done_current_ep = 0;
1464 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1465 		it->inp = NULL;
1466 	} else {
1467 		it->inp = LIST_NEXT(it->inp, sctp_list);
1468 	}
1469 	if (it->inp == NULL) {
1470 		goto done_with_iterator;
1471 	}
1472 	goto select_a_new_ep;
1473 }
1474 
1475 void
1476 sctp_iterator_worker(void)
1477 {
1478 	struct sctp_iterator *it, *nit;
1479 
1480 	/* This function is called with the WQ lock in place */
1481 
1482 	sctp_it_ctl.iterator_running = 1;
1483 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1484 		/* now lets work on this one */
1485 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1486 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1487 		CURVNET_SET(it->vn);
1488 		sctp_iterator_work(it);
1489 		CURVNET_RESTORE();
1490 		SCTP_IPI_ITERATOR_WQ_LOCK();
1491 		/* sa_ignore FREED_MEMORY */
1492 	}
1493 	sctp_it_ctl.iterator_running = 0;
1494 	return;
1495 }
1496 
1497 
1498 static void
1499 sctp_handle_addr_wq(void)
1500 {
1501 	/* deal with the ADDR wq from the rtsock calls */
1502 	struct sctp_laddr *wi, *nwi;
1503 	struct sctp_asconf_iterator *asc;
1504 
1505 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1506 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1507 	if (asc == NULL) {
1508 		/* Try later, no memory */
1509 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1510 		    (struct sctp_inpcb *)NULL,
1511 		    (struct sctp_tcb *)NULL,
1512 		    (struct sctp_nets *)NULL);
1513 		return;
1514 	}
1515 	LIST_INIT(&asc->list_of_work);
1516 	asc->cnt = 0;
1517 
1518 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1519 		LIST_REMOVE(wi, sctp_nxt_addr);
1520 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1521 		asc->cnt++;
1522 	}
1523 
1524 	if (asc->cnt == 0) {
1525 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1526 	} else {
1527 		int ret;
1528 
1529 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1530 		    sctp_asconf_iterator_stcb,
1531 		    NULL,	/* No ep end for boundall */
1532 		    SCTP_PCB_FLAGS_BOUNDALL,
1533 		    SCTP_PCB_ANY_FEATURES,
1534 		    SCTP_ASOC_ANY_STATE,
1535 		    (void *)asc, 0,
1536 		    sctp_asconf_iterator_end, NULL, 0);
1537 		if (ret) {
1538 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1539 			/*
1540 			 * Freeing if we are stopping or put back on the
1541 			 * addr_wq.
1542 			 */
1543 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1544 				sctp_asconf_iterator_end(asc, 0);
1545 			} else {
1546 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1547 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1548 				}
1549 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1550 			}
1551 		}
1552 	}
1553 }
1554 
1555 void
1556 sctp_timeout_handler(void *t)
1557 {
1558 	struct sctp_inpcb *inp;
1559 	struct sctp_tcb *stcb;
1560 	struct sctp_nets *net;
1561 	struct sctp_timer *tmr;
1562 	struct mbuf *op_err;
1563 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1564 	struct socket *so;
1565 #endif
1566 	int did_output;
1567 	int type;
1568 
1569 	tmr = (struct sctp_timer *)t;
1570 	inp = (struct sctp_inpcb *)tmr->ep;
1571 	stcb = (struct sctp_tcb *)tmr->tcb;
1572 	net = (struct sctp_nets *)tmr->net;
1573 	CURVNET_SET((struct vnet *)tmr->vnet);
1574 	did_output = 1;
1575 
1576 #ifdef SCTP_AUDITING_ENABLED
1577 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1578 	sctp_auditing(3, inp, stcb, net);
1579 #endif
1580 
1581 	/* sanity checks... */
1582 	if (tmr->self != (void *)tmr) {
1583 		/*
1584 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1585 		 * (void *)tmr);
1586 		 */
1587 		CURVNET_RESTORE();
1588 		return;
1589 	}
1590 	tmr->stopped_from = 0xa001;
1591 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1592 		/*
1593 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1594 		 * tmr->type);
1595 		 */
1596 		CURVNET_RESTORE();
1597 		return;
1598 	}
1599 	tmr->stopped_from = 0xa002;
1600 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1601 		CURVNET_RESTORE();
1602 		return;
1603 	}
1604 	/* if this is an iterator timeout, get the struct and clear inp */
1605 	tmr->stopped_from = 0xa003;
1606 	if (inp) {
1607 		SCTP_INP_INCR_REF(inp);
1608 		if ((inp->sctp_socket == NULL) &&
1609 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1610 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1611 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1612 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1613 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1614 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1615 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1616 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1617 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1618 			SCTP_INP_DECR_REF(inp);
1619 			CURVNET_RESTORE();
1620 			return;
1621 		}
1622 	}
1623 	tmr->stopped_from = 0xa004;
1624 	if (stcb) {
1625 		atomic_add_int(&stcb->asoc.refcnt, 1);
1626 		if (stcb->asoc.state == 0) {
1627 			atomic_add_int(&stcb->asoc.refcnt, -1);
1628 			if (inp) {
1629 				SCTP_INP_DECR_REF(inp);
1630 			}
1631 			CURVNET_RESTORE();
1632 			return;
1633 		}
1634 	}
1635 	type = tmr->type;
1636 	tmr->stopped_from = 0xa005;
1637 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1638 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1639 		if (inp) {
1640 			SCTP_INP_DECR_REF(inp);
1641 		}
1642 		if (stcb) {
1643 			atomic_add_int(&stcb->asoc.refcnt, -1);
1644 		}
1645 		CURVNET_RESTORE();
1646 		return;
1647 	}
1648 	tmr->stopped_from = 0xa006;
1649 
1650 	if (stcb) {
1651 		SCTP_TCB_LOCK(stcb);
1652 		atomic_add_int(&stcb->asoc.refcnt, -1);
1653 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1654 		    ((stcb->asoc.state == 0) ||
1655 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1656 			SCTP_TCB_UNLOCK(stcb);
1657 			if (inp) {
1658 				SCTP_INP_DECR_REF(inp);
1659 			}
1660 			CURVNET_RESTORE();
1661 			return;
1662 		}
1663 	} else if (inp != NULL) {
1664 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1665 			SCTP_INP_WLOCK(inp);
1666 		}
1667 	} else {
1668 		SCTP_WQ_ADDR_LOCK();
1669 	}
1670 	/* record in stopped_from which timeout occurred */
1671 	tmr->stopped_from = type;
1672 
1673 	/* mark as being serviced now */
1674 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1675 		/*
1676 		 * Callout has been rescheduled.
1677 		 */
1678 		goto get_out;
1679 	}
1680 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1681 		/*
1682 		 * Not active, so no action.
1683 		 */
1684 		goto get_out;
1685 	}
1686 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1687 
1688 	/* call the handler for the appropriate timer type */
1689 	switch (type) {
1690 	case SCTP_TIMER_TYPE_ADDR_WQ:
1691 		sctp_handle_addr_wq();
1692 		break;
1693 	case SCTP_TIMER_TYPE_SEND:
1694 		if ((stcb == NULL) || (inp == NULL)) {
1695 			break;
1696 		}
1697 		SCTP_STAT_INCR(sctps_timodata);
1698 		stcb->asoc.timodata++;
1699 		stcb->asoc.num_send_timers_up--;
1700 		if (stcb->asoc.num_send_timers_up < 0) {
1701 			stcb->asoc.num_send_timers_up = 0;
1702 		}
1703 		SCTP_TCB_LOCK_ASSERT(stcb);
1704 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1705 			/* no need to unlock on tcb, it's gone */
1706 
1707 			goto out_decr;
1708 		}
1709 		SCTP_TCB_LOCK_ASSERT(stcb);
1710 #ifdef SCTP_AUDITING_ENABLED
1711 		sctp_auditing(4, inp, stcb, net);
1712 #endif
1713 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1714 		if ((stcb->asoc.num_send_timers_up == 0) &&
1715 		    (stcb->asoc.sent_queue_cnt > 0)) {
1716 			struct sctp_tmit_chunk *chk;
1717 
1718 			/*
1719 			 * Safeguard: if something is on the sent queue but
1720 			 * no timers are running, something is wrong. So we
1721 			 * start a timer on the first chunk of the sent queue,
1722 			 * on whatever net it is sent to.
1723 			 */
1724 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1725 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1726 			    chk->whoTo);
1727 		}
1728 		break;
1729 	case SCTP_TIMER_TYPE_INIT:
1730 		if ((stcb == NULL) || (inp == NULL)) {
1731 			break;
1732 		}
1733 		SCTP_STAT_INCR(sctps_timoinit);
1734 		stcb->asoc.timoinit++;
1735 		if (sctp_t1init_timer(inp, stcb, net)) {
1736 			/* no need to unlock on tcb, it's gone */
1737 			goto out_decr;
1738 		}
1739 		/* We do output but not here */
1740 		did_output = 0;
1741 		break;
1742 	case SCTP_TIMER_TYPE_RECV:
1743 		if ((stcb == NULL) || (inp == NULL)) {
1744 			break;
1745 		}
1746 		SCTP_STAT_INCR(sctps_timosack);
1747 		stcb->asoc.timosack++;
1748 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1749 #ifdef SCTP_AUDITING_ENABLED
1750 		sctp_auditing(4, inp, stcb, net);
1751 #endif
1752 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1753 		break;
1754 	case SCTP_TIMER_TYPE_SHUTDOWN:
1755 		if ((stcb == NULL) || (inp == NULL)) {
1756 			break;
1757 		}
1758 		if (sctp_shutdown_timer(inp, stcb, net)) {
1759 			/* no need to unlock on tcb, it's gone */
1760 			goto out_decr;
1761 		}
1762 		SCTP_STAT_INCR(sctps_timoshutdown);
1763 		stcb->asoc.timoshutdown++;
1764 #ifdef SCTP_AUDITING_ENABLED
1765 		sctp_auditing(4, inp, stcb, net);
1766 #endif
1767 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1768 		break;
1769 	case SCTP_TIMER_TYPE_HEARTBEAT:
1770 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1771 			break;
1772 		}
1773 		SCTP_STAT_INCR(sctps_timoheartbeat);
1774 		stcb->asoc.timoheartbeat++;
1775 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1776 			/* no need to unlock on tcb, it's gone */
1777 			goto out_decr;
1778 		}
1779 #ifdef SCTP_AUDITING_ENABLED
1780 		sctp_auditing(4, inp, stcb, net);
1781 #endif
1782 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1783 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1784 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1785 		}
1786 		break;
1787 	case SCTP_TIMER_TYPE_COOKIE:
1788 		if ((stcb == NULL) || (inp == NULL)) {
1789 			break;
1790 		}
1791 
1792 		if (sctp_cookie_timer(inp, stcb, net)) {
1793 			/* no need to unlock on tcb, it's gone */
1794 			goto out_decr;
1795 		}
1796 		SCTP_STAT_INCR(sctps_timocookie);
1797 		stcb->asoc.timocookie++;
1798 #ifdef SCTP_AUDITING_ENABLED
1799 		sctp_auditing(4, inp, stcb, net);
1800 #endif
1801 		/*
1802 		 * We consider T3 and Cookie timer pretty much the same with
1803 		 * respect to where from in chunk_output.
1804 		 */
1805 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1806 		break;
1807 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1808 		{
1809 			struct timeval tv;
1810 			int i, secret;
1811 
1812 			if (inp == NULL) {
1813 				break;
1814 			}
1815 			SCTP_STAT_INCR(sctps_timosecret);
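			/*
			 * Rotate the cookie secret: record when the change
			 * happened, advance the current secret number
			 * (wrapping around), and refill that slot with fresh
			 * random words.
			 */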
1816 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1817 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1818 			inp->sctp_ep.last_secret_number =
1819 			    inp->sctp_ep.current_secret_number;
1820 			inp->sctp_ep.current_secret_number++;
1821 			if (inp->sctp_ep.current_secret_number >=
1822 			    SCTP_HOW_MANY_SECRETS) {
1823 				inp->sctp_ep.current_secret_number = 0;
1824 			}
1825 			secret = (int)inp->sctp_ep.current_secret_number;
1826 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1827 				inp->sctp_ep.secret_key[secret][i] =
1828 				    sctp_select_initial_TSN(&inp->sctp_ep);
1829 			}
1830 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1831 		}
1832 		did_output = 0;
1833 		break;
1834 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		SCTP_STAT_INCR(sctps_timopathmtu);
1839 		sctp_pathmtu_timer(inp, stcb, net);
1840 		did_output = 0;
1841 		break;
1842 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1847 			/* no need to unlock on tcb, it's gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoshutdownack);
1851 		stcb->asoc.timoshutdownack++;
1852 #ifdef SCTP_AUDITING_ENABLED
1853 		sctp_auditing(4, inp, stcb, net);
1854 #endif
1855 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1856 		break;
1857 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1858 		if ((stcb == NULL) || (inp == NULL)) {
1859 			break;
1860 		}
1861 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1862 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1863 		    "Shutdown guard timer expired");
1864 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1865 		/* no need to unlock on tcb, it's gone */
1866 		goto out_decr;
1867 
1868 	case SCTP_TIMER_TYPE_STRRESET:
1869 		if ((stcb == NULL) || (inp == NULL)) {
1870 			break;
1871 		}
1872 		if (sctp_strreset_timer(inp, stcb, net)) {
1873 			/* no need to unlock on tcb, it's gone */
1874 			goto out_decr;
1875 		}
1876 		SCTP_STAT_INCR(sctps_timostrmrst);
1877 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1878 		break;
1879 	case SCTP_TIMER_TYPE_ASCONF:
1880 		if ((stcb == NULL) || (inp == NULL)) {
1881 			break;
1882 		}
1883 		if (sctp_asconf_timer(inp, stcb, net)) {
1884 			/* no need to unlock on tcb, it's gone */
1885 			goto out_decr;
1886 		}
1887 		SCTP_STAT_INCR(sctps_timoasconf);
1888 #ifdef SCTP_AUDITING_ENABLED
1889 		sctp_auditing(4, inp, stcb, net);
1890 #endif
1891 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1892 		break;
1893 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1894 		if ((stcb == NULL) || (inp == NULL)) {
1895 			break;
1896 		}
1897 		sctp_delete_prim_timer(inp, stcb, net);
1898 		SCTP_STAT_INCR(sctps_timodelprim);
1899 		break;
1900 
1901 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1902 		if ((stcb == NULL) || (inp == NULL)) {
1903 			break;
1904 		}
1905 		SCTP_STAT_INCR(sctps_timoautoclose);
1906 		sctp_autoclose_timer(inp, stcb, net);
1907 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1908 		did_output = 0;
1909 		break;
1910 	case SCTP_TIMER_TYPE_ASOCKILL:
1911 		if ((stcb == NULL) || (inp == NULL)) {
1912 			break;
1913 		}
1914 		SCTP_STAT_INCR(sctps_timoassockill);
1915 		/* Can we free it yet? */
1916 		SCTP_INP_DECR_REF(inp);
1917 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1918 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1919 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1920 		so = SCTP_INP_SO(inp);
1921 		atomic_add_int(&stcb->asoc.refcnt, 1);
1922 		SCTP_TCB_UNLOCK(stcb);
1923 		SCTP_SOCKET_LOCK(so, 1);
1924 		SCTP_TCB_LOCK(stcb);
1925 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1926 #endif
1927 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1928 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1929 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1930 		SCTP_SOCKET_UNLOCK(so, 1);
1931 #endif
1932 		/*
1933 		 * sctp_free_assoc() always unlocks (or destroys) the lock,
1934 		 * so prevent a duplicate unlock or an unlock of a freed mtx.
1935 		 */
1936 		stcb = NULL;
1937 		goto out_no_decr;
1938 	case SCTP_TIMER_TYPE_INPKILL:
1939 		SCTP_STAT_INCR(sctps_timoinpkill);
1940 		if (inp == NULL) {
1941 			break;
1942 		}
1943 		/*
1944 		 * special case, take away our increment since WE are the
1945 		 * killer
1946 		 */
1947 		SCTP_INP_DECR_REF(inp);
1948 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1949 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1950 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1951 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1952 		inp = NULL;
1953 		goto out_no_decr;
1954 	default:
1955 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1956 		    type);
1957 		break;
1958 	}
1959 #ifdef SCTP_AUDITING_ENABLED
1960 	sctp_audit_log(0xF1, (uint8_t)type);
1961 	if (inp)
1962 		sctp_auditing(5, inp, stcb, net);
1963 #endif
1964 	if ((did_output) && stcb) {
1965 		/*
1966 		 * Now we need to clean up the control chunk chain if an
1967 		 * ECNE is on it. It must be marked as UNSENT again so the
1968 		 * next call will continue to send it until we get a CWR,
1969 		 * which removes it. It is, however, unlikely that we will
1970 		 * find an ECN echo on the chain.
1971 		 */
1972 		sctp_fix_ecn_echo(&stcb->asoc);
1973 	}
1974 get_out:
1975 	if (stcb) {
1976 		SCTP_TCB_UNLOCK(stcb);
1977 	} else if (inp != NULL) {
1978 		SCTP_INP_WUNLOCK(inp);
1979 	} else {
1980 		SCTP_WQ_ADDR_UNLOCK();
1981 	}
1982 
1983 out_decr:
1984 	if (inp) {
1985 		SCTP_INP_DECR_REF(inp);
1986 	}
1987 
1988 out_no_decr:
1989 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1990 	CURVNET_RESTORE();
1991 }
1992 
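/*
 * Pick the timer structure and timeout (in ticks) for the given timer type
 * and arm it.  If that timer is already pending, it is left untouched.
 */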
1993 void
1994 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1995     struct sctp_nets *net)
1996 {
1997 	uint32_t to_ticks;
1998 	struct sctp_timer *tmr;
1999 
2000 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2001 		return;
2002 
2003 	tmr = NULL;
2004 	if (stcb) {
2005 		SCTP_TCB_LOCK_ASSERT(stcb);
2006 	}
2007 	switch (t_type) {
2008 	case SCTP_TIMER_TYPE_ADDR_WQ:
2009 		/* Only 1 tick away :-) */
2010 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2011 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2012 		break;
2013 	case SCTP_TIMER_TYPE_SEND:
2014 		/* Here we use the RTO timer */
2015 		{
2016 			int rto_val;
2017 
2018 			if ((stcb == NULL) || (net == NULL)) {
2019 				return;
2020 			}
2021 			tmr = &net->rxt_timer;
2022 			if (net->RTO == 0) {
2023 				rto_val = stcb->asoc.initial_rto;
2024 			} else {
2025 				rto_val = net->RTO;
2026 			}
2027 			to_ticks = MSEC_TO_TICKS(rto_val);
2028 		}
2029 		break;
2030 	case SCTP_TIMER_TYPE_INIT:
2031 		/*
2032 		 * Here we use the INIT timer default, usually about 1
2033 		 * minute.
2034 		 */
2035 		if ((stcb == NULL) || (net == NULL)) {
2036 			return;
2037 		}
2038 		tmr = &net->rxt_timer;
2039 		if (net->RTO == 0) {
2040 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2041 		} else {
2042 			to_ticks = MSEC_TO_TICKS(net->RTO);
2043 		}
2044 		break;
2045 	case SCTP_TIMER_TYPE_RECV:
2046 		/*
2047 		 * Here we use the Delayed-Ack timer value from the inp,
2048 		 * usually about 200ms.
2049 		 */
2050 		if (stcb == NULL) {
2051 			return;
2052 		}
2053 		tmr = &stcb->asoc.dack_timer;
2054 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2055 		break;
2056 	case SCTP_TIMER_TYPE_SHUTDOWN:
2057 		/* Here we use the RTO of the destination. */
2058 		if ((stcb == NULL) || (net == NULL)) {
2059 			return;
2060 		}
2061 		if (net->RTO == 0) {
2062 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2063 		} else {
2064 			to_ticks = MSEC_TO_TICKS(net->RTO);
2065 		}
2066 		tmr = &net->rxt_timer;
2067 		break;
2068 	case SCTP_TIMER_TYPE_HEARTBEAT:
2069 		/*
2070 		 * The net is used here so that we can add in the RTO, even
2071 		 * though we use a different timer. We also add the HB delay
2072 		 * PLUS a random jitter.
2073 		 */
2074 		if ((stcb == NULL) || (net == NULL)) {
2075 			return;
2076 		} else {
2077 			uint32_t rndval;
2078 			uint32_t jitter;
2079 
2080 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2081 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2082 				return;
2083 			}
2084 			if (net->RTO == 0) {
2085 				to_ticks = stcb->asoc.initial_rto;
2086 			} else {
2087 				to_ticks = net->RTO;
2088 			}
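			/*
			 * Apply a random jitter of up to half the base
			 * interval, so the effective delay lands roughly in
			 * [base/2, 3*base/2).
			 */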
2089 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2090 			jitter = rndval % to_ticks;
2091 			if (jitter >= (to_ticks >> 1)) {
2092 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2093 			} else {
2094 				to_ticks = to_ticks - jitter;
2095 			}
2096 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2097 			    !(net->dest_state & SCTP_ADDR_PF)) {
2098 				to_ticks += net->heart_beat_delay;
2099 			}
2100 			/*
2101 			 * Now we must convert the to_ticks that are now in
2102 			 * ms to ticks.
2103 			 */
2104 			to_ticks = MSEC_TO_TICKS(to_ticks);
2105 			tmr = &net->hb_timer;
2106 		}
2107 		break;
2108 	case SCTP_TIMER_TYPE_COOKIE:
2109 		/*
2110 		 * Here we can use the RTO timer from the network since one
2111 		 * RTT was complete. If a retransmission happened, then we
2112 		 * will be using the initial RTO value.
2113 		 */
2114 		if ((stcb == NULL) || (net == NULL)) {
2115 			return;
2116 		}
2117 		if (net->RTO == 0) {
2118 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2119 		} else {
2120 			to_ticks = MSEC_TO_TICKS(net->RTO);
2121 		}
2122 		tmr = &net->rxt_timer;
2123 		break;
2124 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2125 		/*
2126 		 * nothing needed but the endpoint here, usually about 60
2127 		 * minutes.
2128 		 */
2129 		tmr = &inp->sctp_ep.signature_change;
2130 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2131 		break;
2132 	case SCTP_TIMER_TYPE_ASOCKILL:
2133 		if (stcb == NULL) {
2134 			return;
2135 		}
2136 		tmr = &stcb->asoc.strreset_timer;
2137 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2138 		break;
2139 	case SCTP_TIMER_TYPE_INPKILL:
2140 		/*
2141 		 * The inp is set up to die. We re-use the signature_change
2142 		 * timer since that has stopped and we are in the GONE
2143 		 * state.
2144 		 */
2145 		tmr = &inp->sctp_ep.signature_change;
2146 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2147 		break;
2148 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2149 		/*
2150 		 * Here we use the value found in the EP for PMTU, usually
2151 		 * about 10 minutes.
2152 		 */
2153 		if ((stcb == NULL) || (net == NULL)) {
2154 			return;
2155 		}
2156 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2157 			return;
2158 		}
2159 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2160 		tmr = &net->pmtu_timer;
2161 		break;
2162 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2163 		/* Here we use the RTO of the destination */
2164 		if ((stcb == NULL) || (net == NULL)) {
2165 			return;
2166 		}
2167 		if (net->RTO == 0) {
2168 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2169 		} else {
2170 			to_ticks = MSEC_TO_TICKS(net->RTO);
2171 		}
2172 		tmr = &net->rxt_timer;
2173 		break;
2174 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2175 		/*
2176 		 * Here we use the endpoint's shutdown guard timer, usually
2177 		 * about 3 minutes.
2178 		 */
2179 		if (stcb == NULL) {
2180 			return;
2181 		}
2182 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2183 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2184 		} else {
2185 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2186 		}
2187 		tmr = &stcb->asoc.shut_guard_timer;
2188 		break;
2189 	case SCTP_TIMER_TYPE_STRRESET:
2190 		/*
2191 		 * Here the timer comes from the stcb but its value is from
2192 		 * the net's RTO.
2193 		 */
2194 		if ((stcb == NULL) || (net == NULL)) {
2195 			return;
2196 		}
2197 		if (net->RTO == 0) {
2198 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2199 		} else {
2200 			to_ticks = MSEC_TO_TICKS(net->RTO);
2201 		}
2202 		tmr = &stcb->asoc.strreset_timer;
2203 		break;
2204 	case SCTP_TIMER_TYPE_ASCONF:
2205 		/*
2206 		 * Here the timer comes from the stcb but its value is from
2207 		 * the net's RTO.
2208 		 */
2209 		if ((stcb == NULL) || (net == NULL)) {
2210 			return;
2211 		}
2212 		if (net->RTO == 0) {
2213 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2214 		} else {
2215 			to_ticks = MSEC_TO_TICKS(net->RTO);
2216 		}
2217 		tmr = &stcb->asoc.asconf_timer;
2218 		break;
2219 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2220 		if ((stcb == NULL) || (net != NULL)) {
2221 			return;
2222 		}
2223 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2224 		tmr = &stcb->asoc.delete_prim_timer;
2225 		break;
2226 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2227 		if (stcb == NULL) {
2228 			return;
2229 		}
2230 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2231 			/*
2232 			 * Really an error since stcb is NOT set to
2233 			 * autoclose
2234 			 */
2235 			return;
2236 		}
2237 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2238 		tmr = &stcb->asoc.autoclose_timer;
2239 		break;
2240 	default:
2241 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2242 		    __func__, t_type);
2243 		return;
2244 		break;
2245 	}
2246 	if ((to_ticks <= 0) || (tmr == NULL)) {
2247 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2248 		    __func__, t_type, to_ticks, (void *)tmr);
2249 		return;
2250 	}
2251 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2252 		/*
2253 		 * We do NOT allow the timer to be already running. If it
2254 		 * is, we leave the current one up unchanged.
2255 		 */
2256 		return;
2257 	}
2258 	/* At this point we can proceed */
2259 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2260 		stcb->asoc.num_send_timers_up++;
2261 	}
2262 	tmr->stopped_from = 0;
2263 	tmr->type = t_type;
2264 	tmr->ep = (void *)inp;
2265 	tmr->tcb = (void *)stcb;
2266 	tmr->net = (void *)net;
2267 	tmr->self = (void *)tmr;
2268 	tmr->vnet = (void *)curvnet;
2269 	tmr->ticks = sctp_get_tick_count();
2270 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2271 	return;
2272 }
2273 
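/*
 * Find the timer structure for the given timer type and stop it, recording
 * 'from' in stopped_from so the last stopper can be identified when
 * debugging.
 */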
2274 void
2275 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2276     struct sctp_nets *net, uint32_t from)
2277 {
2278 	struct sctp_timer *tmr;
2279 
2280 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2281 	    (inp == NULL))
2282 		return;
2283 
2284 	tmr = NULL;
2285 	if (stcb) {
2286 		SCTP_TCB_LOCK_ASSERT(stcb);
2287 	}
2288 	switch (t_type) {
2289 	case SCTP_TIMER_TYPE_ADDR_WQ:
2290 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2291 		break;
2292 	case SCTP_TIMER_TYPE_SEND:
2293 		if ((stcb == NULL) || (net == NULL)) {
2294 			return;
2295 		}
2296 		tmr = &net->rxt_timer;
2297 		break;
2298 	case SCTP_TIMER_TYPE_INIT:
2299 		if ((stcb == NULL) || (net == NULL)) {
2300 			return;
2301 		}
2302 		tmr = &net->rxt_timer;
2303 		break;
2304 	case SCTP_TIMER_TYPE_RECV:
2305 		if (stcb == NULL) {
2306 			return;
2307 		}
2308 		tmr = &stcb->asoc.dack_timer;
2309 		break;
2310 	case SCTP_TIMER_TYPE_SHUTDOWN:
2311 		if ((stcb == NULL) || (net == NULL)) {
2312 			return;
2313 		}
2314 		tmr = &net->rxt_timer;
2315 		break;
2316 	case SCTP_TIMER_TYPE_HEARTBEAT:
2317 		if ((stcb == NULL) || (net == NULL)) {
2318 			return;
2319 		}
2320 		tmr = &net->hb_timer;
2321 		break;
2322 	case SCTP_TIMER_TYPE_COOKIE:
2323 		if ((stcb == NULL) || (net == NULL)) {
2324 			return;
2325 		}
2326 		tmr = &net->rxt_timer;
2327 		break;
2328 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2329 		/* nothing needed but the endpoint here */
2330 		tmr = &inp->sctp_ep.signature_change;
2331 		/*
2332 		 * We re-use the newcookie timer for the INP kill timer. We
2333 		 * must ensure that we do not kill it by accident.
2334 		 */
2335 		break;
2336 	case SCTP_TIMER_TYPE_ASOCKILL:
2337 		/*
2338 		 * Stop the asoc kill timer.
2339 		 */
2340 		if (stcb == NULL) {
2341 			return;
2342 		}
2343 		tmr = &stcb->asoc.strreset_timer;
2344 		break;
2345 
2346 	case SCTP_TIMER_TYPE_INPKILL:
2347 		/*
2348 		 * The inp is set up to die. We re-use the signature_change
2349 		 * timer since that has stopped and we are in the GONE
2350 		 * state.
2351 		 */
2352 		tmr = &inp->sctp_ep.signature_change;
2353 		break;
2354 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2355 		if ((stcb == NULL) || (net == NULL)) {
2356 			return;
2357 		}
2358 		tmr = &net->pmtu_timer;
2359 		break;
2360 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2361 		if ((stcb == NULL) || (net == NULL)) {
2362 			return;
2363 		}
2364 		tmr = &net->rxt_timer;
2365 		break;
2366 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2367 		if (stcb == NULL) {
2368 			return;
2369 		}
2370 		tmr = &stcb->asoc.shut_guard_timer;
2371 		break;
2372 	case SCTP_TIMER_TYPE_STRRESET:
2373 		if (stcb == NULL) {
2374 			return;
2375 		}
2376 		tmr = &stcb->asoc.strreset_timer;
2377 		break;
2378 	case SCTP_TIMER_TYPE_ASCONF:
2379 		if (stcb == NULL) {
2380 			return;
2381 		}
2382 		tmr = &stcb->asoc.asconf_timer;
2383 		break;
2384 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2385 		if (stcb == NULL) {
2386 			return;
2387 		}
2388 		tmr = &stcb->asoc.delete_prim_timer;
2389 		break;
2390 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2391 		if (stcb == NULL) {
2392 			return;
2393 		}
2394 		tmr = &stcb->asoc.autoclose_timer;
2395 		break;
2396 	default:
2397 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2398 		    __func__, t_type);
2399 		break;
2400 	}
2401 	if (tmr == NULL) {
2402 		return;
2403 	}
2404 	if ((tmr->type != t_type) && tmr->type) {
2405 		/*
2406 		 * Ok, we have a timer that is under joint use; the cookie
2407 		 * timer, for instance, shares a structure with the SEND
2408 		 * timer. We therefore are NOT running the timer that the
2409 		 * caller wants stopped, so just return.
2410 		 */
2411 		return;
2412 	}
2413 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2414 		stcb->asoc.num_send_timers_up--;
2415 		if (stcb->asoc.num_send_timers_up < 0) {
2416 			stcb->asoc.num_send_timers_up = 0;
2417 		}
2418 	}
2419 	tmr->self = NULL;
2420 	tmr->stopped_from = from;
2421 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2422 	return;
2423 }
2424 
2425 uint32_t
2426 sctp_calculate_len(struct mbuf *m)
2427 {
2428 	uint32_t tlen = 0;
2429 	struct mbuf *at;
2430 
2431 	at = m;
2432 	while (at) {
2433 		tlen += SCTP_BUF_LEN(at);
2434 		at = SCTP_BUF_NEXT(at);
2435 	}
2436 	return (tlen);
2437 }
2438 
2439 void
2440 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2441     struct sctp_association *asoc, uint32_t mtu)
2442 {
2443 	/*
2444 	 * Reset the P-MTU size on this association. This involves changing
2445 	 * the asoc MTU and going through ANY chunk whose size plus overhead
2446 	 * is larger than mtu, to allow the DF flag to be cleared for it.
2447 	 */
2448 	struct sctp_tmit_chunk *chk;
2449 	unsigned int eff_mtu, ovh;
2450 
2451 	asoc->smallest_mtu = mtu;
2452 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2453 		ovh = SCTP_MIN_OVERHEAD;
2454 	} else {
2455 		ovh = SCTP_MIN_V4_OVERHEAD;
2456 	}
2457 	eff_mtu = mtu - ovh;
2458 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2459 		if (chk->send_size > eff_mtu) {
2460 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2461 		}
2462 	}
2463 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2464 		if (chk->send_size > eff_mtu) {
2465 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2466 		}
2467 	}
2468 }
2469 
2470 
2471 /*
2472  * Given an association and starting time of the current RTT period, update
2473  * RTO in number of msecs. net should point to the current network.
2474  * Return 1 if an RTO update was performed; return 0 if no update was
2475  * performed due to invalid starting point.
2476  */
2477 
2478 int
2479 sctp_calculate_rto(struct sctp_tcb *stcb,
2480     struct sctp_association *asoc,
2481     struct sctp_nets *net,
2482     struct timeval *old,
2483     int rtt_from_sack)
2484 {
2485 	struct timeval now;
2486 	uint64_t rtt_us;	/* RTT in us */
2487 	int32_t rtt;		/* RTT in ms */
2488 	uint32_t new_rto;
2489 	int first_measure = 0;
2490 
2491 	/************************/
2492 	/* 1. calculate new RTT */
2493 	/************************/
2494 	/* get the current time */
2495 	if (stcb->asoc.use_precise_time) {
2496 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2497 	} else {
2498 		(void)SCTP_GETTIME_TIMEVAL(&now);
2499 	}
2500 	if ((old->tv_sec > now.tv_sec) ||
2501 	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
2502 		/* The starting point is in the future. */
2503 		return (0);
2504 	}
2505 	timevalsub(&now, old);
2506 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2507 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2508 		/* The RTT is larger than a sane value. */
2509 		return (0);
2510 	}
2511 	/* store the current RTT in us */
2512 	net->rtt = rtt_us;
2513 	/* compute rtt in ms */
2514 	rtt = (int32_t)(net->rtt / 1000);
2515 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2516 		/*
2517 		 * Tell the CC module that a new update has just occurred
2518 		 * from a sack
2519 		 */
2520 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2521 	}
2522 	/*
2523 	 * Do we need to determine the LAN type? We do this only on SACKs,
2524 	 * i.e. RTT determined from data, not from non-data (HB/INIT->INITACK).
2525 	 */
2526 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2527 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2528 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2529 			net->lan_type = SCTP_LAN_INTERNET;
2530 		} else {
2531 			net->lan_type = SCTP_LAN_LOCAL;
2532 		}
2533 	}
2534 
2535 	/***************************/
2536 	/* 2. update RTTVAR & SRTT */
2537 	/***************************/
2538 	/*-
2539 	 * Compute the scaled average lastsa and the
2540 	 * scaled variance lastsv as described in van Jacobson
2541 	 * Paper "Congestion Avoidance and Control", Annex A.
2542 	 *
2543 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2544 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2545 	 */
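	/*
	 * In unscaled terms (with the usual SCTP_RTT_SHIFT = 3 and
	 * SCTP_RTT_VAR_SHIFT = 2, i.e. alpha = 1/8 and beta = 1/4 as
	 * recommended in RFC 4960, Section 6.3.1) this is:
	 *
	 *	SRTT   <- SRTT   + alpha * (RTT - SRTT)
	 *	RTTVAR <- RTTVAR + beta  * (|RTT - SRTT| - RTTVAR)
	 */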
2546 	if (net->RTO_measured) {
2547 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2548 		net->lastsa += rtt;
2549 		if (rtt < 0) {
2550 			rtt = -rtt;
2551 		}
2552 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2553 		net->lastsv += rtt;
2554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2555 			rto_logging(net, SCTP_LOG_RTTVAR);
2556 		}
2557 	} else {
2558 		/* First RTO measurement */
2559 		net->RTO_measured = 1;
2560 		first_measure = 1;
2561 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2562 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2563 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2564 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2565 		}
2566 	}
2567 	if (net->lastsv == 0) {
2568 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2569 	}
2570 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2571 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2572 	    (stcb->asoc.sat_network_lockout == 0)) {
2573 		stcb->asoc.sat_network = 1;
2574 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2575 		stcb->asoc.sat_network = 0;
2576 		stcb->asoc.sat_network_lockout = 1;
2577 	}
2578 	/* bound it, per C6/C7 in Section 5.3.1 */
2579 	if (new_rto < stcb->asoc.minrto) {
2580 		new_rto = stcb->asoc.minrto;
2581 	}
2582 	if (new_rto > stcb->asoc.maxrto) {
2583 		new_rto = stcb->asoc.maxrto;
2584 	}
2585 	net->RTO = new_rto;
2586 	return (1);
2587 }
2588 
2589 /*
2590  * Return a pointer to a contiguous piece of data from the given mbuf chain
2591  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2592  * one mbuf, a copy is made at 'in_ptr'; the caller must ensure the buffer
2593  * is >= 'len' bytes.  Returns NULL if there aren't 'len' bytes in the chain.
2594  */
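/*
 * Illustrative usage (a sketch only; 'chunk_buf' is a hypothetical
 * caller-supplied buffer of at least sizeof(struct sctp_chunkhdr) bytes):
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_chunkhdr), (uint8_t *)chunk_buf);
 *	if (ch == NULL)
 *		return;
 */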
2595 caddr_t
2596 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2597 {
2598 	uint32_t count;
2599 	uint8_t *ptr;
2600 
2601 	ptr = in_ptr;
2602 	if ((off < 0) || (len <= 0))
2603 		return (NULL);
2604 
2605 	/* find the desired start location */
2606 	while ((m != NULL) && (off > 0)) {
2607 		if (off < SCTP_BUF_LEN(m))
2608 			break;
2609 		off -= SCTP_BUF_LEN(m);
2610 		m = SCTP_BUF_NEXT(m);
2611 	}
2612 	if (m == NULL)
2613 		return (NULL);
2614 
2615 	/* is the current mbuf large enough (eg. contiguous)? */
2616 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2617 		return (mtod(m, caddr_t)+off);
2618 	} else {
2619 		/* else, it spans more than one mbuf, so save a temp copy... */
2620 		while ((m != NULL) && (len > 0)) {
2621 			count = min(SCTP_BUF_LEN(m) - off, len);
2622 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2623 			len -= count;
2624 			ptr += count;
2625 			off = 0;
2626 			m = SCTP_BUF_NEXT(m);
2627 		}
2628 		if ((m == NULL) && (len > 0))
2629 			return (NULL);
2630 		else
2631 			return ((caddr_t)in_ptr);
2632 	}
2633 }
2634 
2635 
2636 
2637 struct sctp_paramhdr *
2638 sctp_get_next_param(struct mbuf *m,
2639     int offset,
2640     struct sctp_paramhdr *pull,
2641     int pull_limit)
2642 {
2643 	/* This just provides a typed signature to Peter's Pull routine */
2644 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2645 	    (uint8_t *)pull));
2646 }
2647 
2648 
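/*
 * Append 'padlen' (at most 3) zero bytes of padding to the end of 'm',
 * growing the chain with a new mbuf if there is no trailing space left.
 * Returns the mbuf holding the padding, or NULL on failure.
 */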
2649 struct mbuf *
2650 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2651 {
2652 	struct mbuf *m_last;
2653 	caddr_t dp;
2654 
2655 	if (padlen > 3) {
2656 		return (NULL);
2657 	}
2658 	if (padlen <= M_TRAILINGSPACE(m)) {
2659 		/*
2660 		 * The easy way. We hope the majority of the time we hit
2661 		 * here :)
2662 		 */
2663 		m_last = m;
2664 	} else {
2665 		/* Hard way we must grow the mbuf chain */
2666 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2667 		if (m_last == NULL) {
2668 			return (NULL);
2669 		}
2670 		SCTP_BUF_LEN(m_last) = 0;
2671 		SCTP_BUF_NEXT(m_last) = NULL;
2672 		SCTP_BUF_NEXT(m) = m_last;
2673 	}
2674 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2675 	SCTP_BUF_LEN(m_last) += padlen;
2676 	memset(dp, 0, padlen);
2677 	return (m_last);
2678 }
2679 
2680 struct mbuf *
2681 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2682 {
2683 	/* find the last mbuf in chain and pad it */
2684 	struct mbuf *m_at;
2685 
2686 	if (last_mbuf != NULL) {
2687 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2688 	} else {
2689 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2690 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2691 				return (sctp_add_pad_tombuf(m_at, padval));
2692 			}
2693 		}
2694 	}
2695 	return (NULL);
2696 }
2697 
2698 static void
2699 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2700     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2701 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2702     SCTP_UNUSED
2703 #endif
2704 )
2705 {
2706 	struct mbuf *m_notify;
2707 	struct sctp_assoc_change *sac;
2708 	struct sctp_queued_to_read *control;
2709 	unsigned int notif_len;
2710 	uint16_t abort_len;
2711 	unsigned int i;
2712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2713 	struct socket *so;
2714 #endif
2715 
2716 	if (stcb == NULL) {
2717 		return;
2718 	}
2719 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2720 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2721 		if (abort != NULL) {
2722 			abort_len = ntohs(abort->ch.chunk_length);
2723 			/*
2724 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
2725 			 * contiguous.
2726 			 */
2727 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
2728 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
2729 			}
2730 		} else {
2731 			abort_len = 0;
2732 		}
2733 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2734 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2735 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2736 			notif_len += abort_len;
2737 		}
2738 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2739 		if (m_notify == NULL) {
2740 			/* Retry with smaller value. */
2741 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2742 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2743 			if (m_notify == NULL) {
2744 				goto set_error;
2745 			}
2746 		}
2747 		SCTP_BUF_NEXT(m_notify) = NULL;
2748 		sac = mtod(m_notify, struct sctp_assoc_change *);
2749 		memset(sac, 0, notif_len);
2750 		sac->sac_type = SCTP_ASSOC_CHANGE;
2751 		sac->sac_flags = 0;
2752 		sac->sac_length = sizeof(struct sctp_assoc_change);
2753 		sac->sac_state = state;
2754 		sac->sac_error = error;
2755 		/* XXX verify these stream counts */
2756 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2757 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2758 		sac->sac_assoc_id = sctp_get_associd(stcb);
2759 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2760 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2761 				i = 0;
2762 				if (stcb->asoc.prsctp_supported == 1) {
2763 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2764 				}
2765 				if (stcb->asoc.auth_supported == 1) {
2766 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2767 				}
2768 				if (stcb->asoc.asconf_supported == 1) {
2769 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2770 				}
2771 				if (stcb->asoc.idata_supported == 1) {
2772 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2773 				}
2774 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2775 				if (stcb->asoc.reconfig_supported == 1) {
2776 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2777 				}
2778 				sac->sac_length += i;
2779 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2780 				memcpy(sac->sac_info, abort, abort_len);
2781 				sac->sac_length += abort_len;
2782 			}
2783 		}
2784 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2785 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2786 		    0, 0, stcb->asoc.context, 0, 0, 0,
2787 		    m_notify);
2788 		if (control != NULL) {
2789 			control->length = SCTP_BUF_LEN(m_notify);
2790 			control->spec_flags = M_NOTIFICATION;
2791 			/* not that we need this */
2792 			control->tail_mbuf = m_notify;
2793 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2794 			    control,
2795 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2796 			    so_locked);
2797 		} else {
2798 			sctp_m_freem(m_notify);
2799 		}
2800 	}
2801 	/*
2802 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2803 	 * comes in.
2804 	 */
2805 set_error:
2806 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2807 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2808 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2809 		SOCK_LOCK(stcb->sctp_socket);
2810 		if (from_peer) {
2811 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
2812 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2813 				stcb->sctp_socket->so_error = ECONNREFUSED;
2814 			} else {
2815 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2816 				stcb->sctp_socket->so_error = ECONNRESET;
2817 			}
2818 		} else {
2819 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
2820 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
2821 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2822 				stcb->sctp_socket->so_error = ETIMEDOUT;
2823 			} else {
2824 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2825 				stcb->sctp_socket->so_error = ECONNABORTED;
2826 			}
2827 		}
2828 		SOCK_UNLOCK(stcb->sctp_socket);
2829 	}
2830 	/* Wake ANY sleepers */
2831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2832 	so = SCTP_INP_SO(stcb->sctp_ep);
2833 	if (!so_locked) {
2834 		atomic_add_int(&stcb->asoc.refcnt, 1);
2835 		SCTP_TCB_UNLOCK(stcb);
2836 		SCTP_SOCKET_LOCK(so, 1);
2837 		SCTP_TCB_LOCK(stcb);
2838 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2839 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2840 			SCTP_SOCKET_UNLOCK(so, 1);
2841 			return;
2842 		}
2843 	}
2844 #endif
2845 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2846 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2847 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2848 		socantrcvmore(stcb->sctp_socket);
2849 	}
2850 	sorwakeup(stcb->sctp_socket);
2851 	sowwakeup(stcb->sctp_socket);
2852 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2853 	if (!so_locked) {
2854 		SCTP_SOCKET_UNLOCK(so, 1);
2855 	}
2856 #endif
2857 }
2858 
2859 static void
2860 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2861     struct sockaddr *sa, uint32_t error, int so_locked
2862 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2863     SCTP_UNUSED
2864 #endif
2865 )
2866 {
2867 	struct mbuf *m_notify;
2868 	struct sctp_paddr_change *spc;
2869 	struct sctp_queued_to_read *control;
2870 
2871 	if ((stcb == NULL) ||
2872 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2873 		/* event not enabled */
2874 		return;
2875 	}
2876 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2877 	if (m_notify == NULL)
2878 		return;
2879 	SCTP_BUF_LEN(m_notify) = 0;
2880 	spc = mtod(m_notify, struct sctp_paddr_change *);
2881 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2882 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2883 	spc->spc_flags = 0;
2884 	spc->spc_length = sizeof(struct sctp_paddr_change);
2885 	switch (sa->sa_family) {
2886 #ifdef INET
2887 	case AF_INET:
2888 #ifdef INET6
2889 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2890 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2891 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2892 		} else {
2893 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2894 		}
2895 #else
2896 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2897 #endif
2898 		break;
2899 #endif
2900 #ifdef INET6
2901 	case AF_INET6:
2902 		{
2903 			struct sockaddr_in6 *sin6;
2904 
2905 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2906 
2907 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2908 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2909 				if (sin6->sin6_scope_id == 0) {
2910 					/* recover scope_id for user */
2911 					(void)sa6_recoverscope(sin6);
2912 				} else {
2913 					/* clear embedded scope_id for user */
2914 					in6_clearscope(&sin6->sin6_addr);
2915 				}
2916 			}
2917 			break;
2918 		}
2919 #endif
2920 	default:
2921 		/* TSNH */
2922 		break;
2923 	}
2924 	spc->spc_state = state;
2925 	spc->spc_error = error;
2926 	spc->spc_assoc_id = sctp_get_associd(stcb);
2927 
2928 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2929 	SCTP_BUF_NEXT(m_notify) = NULL;
2930 
2931 	/* append to socket */
2932 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2933 	    0, 0, stcb->asoc.context, 0, 0, 0,
2934 	    m_notify);
2935 	if (control == NULL) {
2936 		/* no memory */
2937 		sctp_m_freem(m_notify);
2938 		return;
2939 	}
2940 	control->length = SCTP_BUF_LEN(m_notify);
2941 	control->spec_flags = M_NOTIFICATION;
2942 	/* not that we need this */
2943 	control->tail_mbuf = m_notify;
2944 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2945 	    control,
2946 	    &stcb->sctp_socket->so_rcv, 1,
2947 	    SCTP_READ_LOCK_NOT_HELD,
2948 	    so_locked);
2949 }
2950 
2951 
2952 static void
2953 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2954     struct sctp_tmit_chunk *chk, int so_locked
2955 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2956     SCTP_UNUSED
2957 #endif
2958 )
2959 {
2960 	struct mbuf *m_notify;
2961 	struct sctp_send_failed *ssf;
2962 	struct sctp_send_failed_event *ssfe;
2963 	struct sctp_queued_to_read *control;
2964 	struct sctp_chunkhdr *chkhdr;
2965 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2966 
2967 	if ((stcb == NULL) ||
2968 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2969 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2970 		/* event not enabled */
2971 		return;
2972 	}
2973 
2974 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2975 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2976 	} else {
2977 		notifhdr_len = sizeof(struct sctp_send_failed);
2978 	}
2979 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2980 	if (m_notify == NULL)
2981 		/* no space left */
2982 		return;
2983 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2984 	if (stcb->asoc.idata_supported) {
2985 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2986 	} else {
2987 		chkhdr_len = sizeof(struct sctp_data_chunk);
2988 	}
2989 	/* Use some defaults in case we can't access the chunk header */
2990 	if (chk->send_size >= chkhdr_len) {
2991 		payload_len = chk->send_size - chkhdr_len;
2992 	} else {
2993 		payload_len = 0;
2994 	}
2995 	padding_len = 0;
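	/*
	 * If the chunk header is readable, recompute the payload length,
	 * excluding both the chunk header and any trailing padding (chunks
	 * are padded to a 4-byte boundary, so at most 3 bytes).
	 */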
2996 	if (chk->data != NULL) {
2997 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2998 		if (chkhdr != NULL) {
2999 			chk_len = ntohs(chkhdr->chunk_length);
3000 			if ((chk_len >= chkhdr_len) &&
3001 			    (chk->send_size >= chk_len) &&
3002 			    (chk->send_size - chk_len < 4)) {
3003 				padding_len = chk->send_size - chk_len;
3004 				payload_len = chk->send_size - chkhdr_len - padding_len;
3005 			}
3006 		}
3007 	}
3008 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3009 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3010 		memset(ssfe, 0, notifhdr_len);
3011 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3012 		if (sent) {
3013 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3014 		} else {
3015 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3016 		}
3017 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3018 		ssfe->ssfe_error = error;
3019 		/* not exactly what the user sent in, but should be close :) */
3020 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3021 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3022 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3023 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3024 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3025 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3026 	} else {
3027 		ssf = mtod(m_notify, struct sctp_send_failed *);
3028 		memset(ssf, 0, notifhdr_len);
3029 		ssf->ssf_type = SCTP_SEND_FAILED;
3030 		if (sent) {
3031 			ssf->ssf_flags = SCTP_DATA_SENT;
3032 		} else {
3033 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3034 		}
3035 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3036 		ssf->ssf_error = error;
3037 		/* not exactly what the user sent in, but should be close :) */
3038 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3039 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3040 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3041 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3042 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3043 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3044 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3045 	}
3046 	if (chk->data != NULL) {
3047 		/* Trim off the sctp chunk header (it should be there) */
3048 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3049 			m_adj(chk->data, chkhdr_len);
3050 			m_adj(chk->data, -padding_len);
3051 			sctp_mbuf_crush(chk->data);
3052 			chk->send_size -= (chkhdr_len + padding_len);
3053 		}
3054 	}
3055 	SCTP_BUF_NEXT(m_notify) = chk->data;
3056 	/* Steal off the mbuf */
3057 	chk->data = NULL;
3058 	/*
3059 	 * For this case, we check the actual socket buffer: since the assoc
3060 	 * is going away, we don't want to overfill the socket buffer for a
3061 	 * non-reader.
3062 	 */
3063 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3064 		sctp_m_freem(m_notify);
3065 		return;
3066 	}
3067 	/* append to socket */
3068 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3069 	    0, 0, stcb->asoc.context, 0, 0, 0,
3070 	    m_notify);
3071 	if (control == NULL) {
3072 		/* no memory */
3073 		sctp_m_freem(m_notify);
3074 		return;
3075 	}
3076 	control->length = SCTP_BUF_LEN(m_notify);
3077 	control->spec_flags = M_NOTIFICATION;
3078 	/* not that we need this */
3079 	control->tail_mbuf = m_notify;
3080 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3081 	    control,
3082 	    &stcb->sctp_socket->so_rcv, 1,
3083 	    SCTP_READ_LOCK_NOT_HELD,
3084 	    so_locked);
3085 }
3086 
3087 
3088 static void
3089 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3090     struct sctp_stream_queue_pending *sp, int so_locked
3091 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3092     SCTP_UNUSED
3093 #endif
3094 )
3095 {
3096 	struct mbuf *m_notify;
3097 	struct sctp_send_failed *ssf;
3098 	struct sctp_send_failed_event *ssfe;
3099 	struct sctp_queued_to_read *control;
3100 	int notifhdr_len;
3101 
3102 	if ((stcb == NULL) ||
3103 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3104 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3105 		/* event not enabled */
3106 		return;
3107 	}
3108 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3109 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3110 	} else {
3111 		notifhdr_len = sizeof(struct sctp_send_failed);
3112 	}
3113 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3114 	if (m_notify == NULL) {
3115 		/* no space left */
3116 		return;
3117 	}
3118 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3119 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3120 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3121 		memset(ssfe, 0, notifhdr_len);
3122 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3123 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3124 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3125 		ssfe->ssfe_error = error;
3126 		/* not exactly what the user sent in, but should be close :) */
3127 		ssfe->ssfe_info.snd_sid = sp->sid;
3128 		if (sp->some_taken) {
3129 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3130 		} else {
3131 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3132 		}
3133 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3134 		ssfe->ssfe_info.snd_context = sp->context;
3135 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3136 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3137 	} else {
3138 		ssf = mtod(m_notify, struct sctp_send_failed *);
3139 		memset(ssf, 0, notifhdr_len);
3140 		ssf->ssf_type = SCTP_SEND_FAILED;
3141 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3142 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3143 		ssf->ssf_error = error;
3144 		/* not exactly what the user sent in, but should be close :) */
3145 		ssf->ssf_info.sinfo_stream = sp->sid;
3146 		ssf->ssf_info.sinfo_ssn = 0;
3147 		if (sp->some_taken) {
3148 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3149 		} else {
3150 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3151 		}
3152 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3153 		ssf->ssf_info.sinfo_context = sp->context;
3154 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3155 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3156 	}
3157 	SCTP_BUF_NEXT(m_notify) = sp->data;
3158 
3159 	/* Steal off the mbuf */
3160 	sp->data = NULL;
3161 	/*
3162 	 * For this case, we check the actual socket buffer: since the assoc
3163 	 * is going away, we don't want to overfill the socket buffer for a
3164 	 * non-reader.
3165 	 */
3166 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3167 		sctp_m_freem(m_notify);
3168 		return;
3169 	}
3170 	/* append to socket */
3171 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3172 	    0, 0, stcb->asoc.context, 0, 0, 0,
3173 	    m_notify);
3174 	if (control == NULL) {
3175 		/* no memory */
3176 		sctp_m_freem(m_notify);
3177 		return;
3178 	}
3179 	control->length = SCTP_BUF_LEN(m_notify);
3180 	control->spec_flags = M_NOTIFICATION;
3181 	/* not that we need this */
3182 	control->tail_mbuf = m_notify;
3183 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3184 	    control,
3185 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3186 }
3187 
3188 
3189 
3190 static void
3191 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3192 {
3193 	struct mbuf *m_notify;
3194 	struct sctp_adaptation_event *sai;
3195 	struct sctp_queued_to_read *control;
3196 
3197 	if ((stcb == NULL) ||
3198 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3199 		/* event not enabled */
3200 		return;
3201 	}
3202 
3203 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3204 	if (m_notify == NULL)
3205 		/* no space left */
3206 		return;
3207 	SCTP_BUF_LEN(m_notify) = 0;
3208 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3209 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3210 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3211 	sai->sai_flags = 0;
3212 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3213 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3214 	sai->sai_assoc_id = sctp_get_associd(stcb);
3215 
3216 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3217 	SCTP_BUF_NEXT(m_notify) = NULL;
3218 
3219 	/* append to socket */
3220 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3221 	    0, 0, stcb->asoc.context, 0, 0, 0,
3222 	    m_notify);
3223 	if (control == NULL) {
3224 		/* no memory */
3225 		sctp_m_freem(m_notify);
3226 		return;
3227 	}
3228 	control->length = SCTP_BUF_LEN(m_notify);
3229 	control->spec_flags = M_NOTIFICATION;
3230 	/* not that we need this */
3231 	control->tail_mbuf = m_notify;
3232 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3233 	    control,
3234 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3235 }
3236 
3237 /* This always must be called with the read-queue LOCKED in the INP */
3238 static void
3239 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3240     uint32_t val, int so_locked
3241 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3242     SCTP_UNUSED
3243 #endif
3244 )
3245 {
3246 	struct mbuf *m_notify;
3247 	struct sctp_pdapi_event *pdapi;
3248 	struct sctp_queued_to_read *control;
3249 	struct sockbuf *sb;
3250 
3251 	if ((stcb == NULL) ||
3252 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3253 		/* event not enabled */
3254 		return;
3255 	}
3256 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3257 		return;
3258 	}
3259 
3260 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3261 	if (m_notify == NULL)
3262 		/* no space left */
3263 		return;
3264 	SCTP_BUF_LEN(m_notify) = 0;
3265 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3266 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3267 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3268 	pdapi->pdapi_flags = 0;
3269 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3270 	pdapi->pdapi_indication = error;
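	/*
	 * 'val' packs the stream id in the upper 16 bits and the sequence
	 * number in the lower 16 bits.
	 */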
3271 	pdapi->pdapi_stream = (val >> 16);
3272 	pdapi->pdapi_seq = (val & 0x0000ffff);
3273 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3274 
3275 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3276 	SCTP_BUF_NEXT(m_notify) = NULL;
3277 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3278 	    0, 0, stcb->asoc.context, 0, 0, 0,
3279 	    m_notify);
3280 	if (control == NULL) {
3281 		/* no memory */
3282 		sctp_m_freem(m_notify);
3283 		return;
3284 	}
3285 	control->length = SCTP_BUF_LEN(m_notify);
3286 	control->spec_flags = M_NOTIFICATION;
3287 	/* not that we need this */
3288 	control->tail_mbuf = m_notify;
3289 	sb = &stcb->sctp_socket->so_rcv;
3290 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3291 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3292 	}
3293 	sctp_sballoc(stcb, sb, m_notify);
3294 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3295 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3296 	}
3297 	control->end_added = 1;
3298 	if (stcb->asoc.control_pdapi)
3299 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3300 	else {
3301 		/* we really should not see this case */
3302 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3303 	}
3304 	if (stcb->sctp_ep && stcb->sctp_socket) {
3305 		/* This should always be the case */
3306 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3307 		struct socket *so;
3308 
3309 		so = SCTP_INP_SO(stcb->sctp_ep);
3310 		if (!so_locked) {
3311 			atomic_add_int(&stcb->asoc.refcnt, 1);
3312 			SCTP_TCB_UNLOCK(stcb);
3313 			SCTP_SOCKET_LOCK(so, 1);
3314 			SCTP_TCB_LOCK(stcb);
3315 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3316 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3317 				SCTP_SOCKET_UNLOCK(so, 1);
3318 				return;
3319 			}
3320 		}
3321 #endif
3322 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3323 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3324 		if (!so_locked) {
3325 			SCTP_SOCKET_UNLOCK(so, 1);
3326 		}
3327 #endif
3328 	}
3329 }
3330 
3331 static void
3332 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3333 {
3334 	struct mbuf *m_notify;
3335 	struct sctp_shutdown_event *sse;
3336 	struct sctp_queued_to_read *control;
3337 
3338 	/*
3339 	 * For TCP model AND UDP connected sockets we will send an error up
3340 	 * when a SHUTDOWN completes.
3341 	 */
3342 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3343 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3344 		/* mark socket closed for read/write and wakeup! */
3345 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3346 		struct socket *so;
3347 
3348 		so = SCTP_INP_SO(stcb->sctp_ep);
3349 		atomic_add_int(&stcb->asoc.refcnt, 1);
3350 		SCTP_TCB_UNLOCK(stcb);
3351 		SCTP_SOCKET_LOCK(so, 1);
3352 		SCTP_TCB_LOCK(stcb);
3353 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3354 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3355 			SCTP_SOCKET_UNLOCK(so, 1);
3356 			return;
3357 		}
3358 #endif
3359 		socantsendmore(stcb->sctp_socket);
3360 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3361 		SCTP_SOCKET_UNLOCK(so, 1);
3362 #endif
3363 	}
3364 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3365 		/* event not enabled */
3366 		return;
3367 	}
3368 
3369 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3370 	if (m_notify == NULL)
3371 		/* no space left */
3372 		return;
3373 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3374 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3375 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3376 	sse->sse_flags = 0;
3377 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3378 	sse->sse_assoc_id = sctp_get_associd(stcb);
3379 
3380 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3381 	SCTP_BUF_NEXT(m_notify) = NULL;
3382 
3383 	/* append to socket */
3384 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3385 	    0, 0, stcb->asoc.context, 0, 0, 0,
3386 	    m_notify);
3387 	if (control == NULL) {
3388 		/* no memory */
3389 		sctp_m_freem(m_notify);
3390 		return;
3391 	}
3392 	control->length = SCTP_BUF_LEN(m_notify);
3393 	control->spec_flags = M_NOTIFICATION;
3394 	/* not that we need this */
3395 	control->tail_mbuf = m_notify;
3396 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3397 	    control,
3398 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3399 }
3400 
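/*
 * Queue an SCTP_SENDER_DRY_EVENT notification on the socket's read queue,
 * if the sender-dry event has been enabled for this association.
 */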
3401 static void
3402 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3403     int so_locked
3404 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3405     SCTP_UNUSED
3406 #endif
3407 )
3408 {
3409 	struct mbuf *m_notify;
3410 	struct sctp_sender_dry_event *event;
3411 	struct sctp_queued_to_read *control;
3412 
3413 	if ((stcb == NULL) ||
3414 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3415 		/* event not enabled */
3416 		return;
3417 	}
3418 
3419 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3420 	if (m_notify == NULL) {
3421 		/* no space left */
3422 		return;
3423 	}
3424 	SCTP_BUF_LEN(m_notify) = 0;
3425 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3426 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3427 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3428 	event->sender_dry_flags = 0;
3429 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3430 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3431 
3432 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3433 	SCTP_BUF_NEXT(m_notify) = NULL;
3434 
3435 	/* append to socket */
3436 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3437 	    0, 0, stcb->asoc.context, 0, 0, 0,
3438 	    m_notify);
3439 	if (control == NULL) {
3440 		/* no memory */
3441 		sctp_m_freem(m_notify);
3442 		return;
3443 	}
3444 	control->length = SCTP_BUF_LEN(m_notify);
3445 	control->spec_flags = M_NOTIFICATION;
3446 	/* not that we need this */
3447 	control->tail_mbuf = m_notify;
3448 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3449 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3450 }
3451 
3452 
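/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification carrying the new number
 * of inbound and outbound streams.  The event is suppressed when the
 * stream addition was requested by the peer.
 */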
3453 void
3454 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3455 {
3456 	struct mbuf *m_notify;
3457 	struct sctp_queued_to_read *control;
3458 	struct sctp_stream_change_event *stradd;
3459 
3460 	if ((stcb == NULL) ||
3461 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3462 		/* event not enabled */
3463 		return;
3464 	}
3465 	if ((stcb->asoc.peer_req_out) && flag) {
3466 		/* Peer made the request, don't tell the local user */
3467 		stcb->asoc.peer_req_out = 0;
3468 		return;
3469 	}
3470 	stcb->asoc.peer_req_out = 0;
3471 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3472 	if (m_notify == NULL)
3473 		/* no space left */
3474 		return;
3475 	SCTP_BUF_LEN(m_notify) = 0;
3476 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3477 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3478 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3479 	stradd->strchange_flags = flag;
3480 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3481 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3482 	stradd->strchange_instrms = numberin;
3483 	stradd->strchange_outstrms = numberout;
3484 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3485 	SCTP_BUF_NEXT(m_notify) = NULL;
3486 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3487 		/* no space */
3488 		sctp_m_freem(m_notify);
3489 		return;
3490 	}
3491 	/* append to socket */
3492 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3493 	    0, 0, stcb->asoc.context, 0, 0, 0,
3494 	    m_notify);
3495 	if (control == NULL) {
3496 		/* no memory */
3497 		sctp_m_freem(m_notify);
3498 		return;
3499 	}
3500 	control->length = SCTP_BUF_LEN(m_notify);
3501 	control->spec_flags = M_NOTIFICATION;
3502 	/* not that we need this */
3503 	control->tail_mbuf = m_notify;
3504 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3505 	    control,
3506 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3507 }
3508 
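/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification reporting the local and
 * remote TSNs after an association (TSN) reset.
 */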
3509 void
3510 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3511 {
3512 	struct mbuf *m_notify;
3513 	struct sctp_queued_to_read *control;
3514 	struct sctp_assoc_reset_event *strasoc;
3515 
3516 	if ((stcb == NULL) ||
3517 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3518 		/* event not enabled */
3519 		return;
3520 	}
3521 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3522 	if (m_notify == NULL)
3523 		/* no space left */
3524 		return;
3525 	SCTP_BUF_LEN(m_notify) = 0;
3526 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3527 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3528 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3529 	strasoc->assocreset_flags = flag;
3530 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3531 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3532 	strasoc->assocreset_local_tsn = sending_tsn;
3533 	strasoc->assocreset_remote_tsn = recv_tsn;
3534 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3535 	SCTP_BUF_NEXT(m_notify) = NULL;
3536 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3537 		/* no space */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	/* append to socket */
3542 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3543 	    0, 0, stcb->asoc.context, 0, 0, 0,
3544 	    m_notify);
3545 	if (control == NULL) {
3546 		/* no memory */
3547 		sctp_m_freem(m_notify);
3548 		return;
3549 	}
3550 	control->length = SCTP_BUF_LEN(m_notify);
3551 	control->spec_flags = M_NOTIFICATION;
3552 	/* not that we need this */
3553 	control->tail_mbuf = m_notify;
3554 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3555 	    control,
3556 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3557 }
3558 
3559 
3560 
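/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the streams
 * affected by an incoming or outgoing stream reset.
 */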
3561 static void
3562 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3563     int number_entries, uint16_t *list, int flag)
3564 {
3565 	struct mbuf *m_notify;
3566 	struct sctp_queued_to_read *control;
3567 	struct sctp_stream_reset_event *strreset;
3568 	int len;
3569 
3570 	if ((stcb == NULL) ||
3571 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3572 		/* event not enabled */
3573 		return;
3574 	}
3575 
3576 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3577 	if (m_notify == NULL)
3578 		/* no space left */
3579 		return;
3580 	SCTP_BUF_LEN(m_notify) = 0;
3581 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3582 	if (len > M_TRAILINGSPACE(m_notify)) {
3583 		/* never enough room */
3584 		sctp_m_freem(m_notify);
3585 		return;
3586 	}
3587 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3588 	memset(strreset, 0, len);
3589 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3590 	strreset->strreset_flags = flag;
3591 	strreset->strreset_length = len;
3592 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3593 	if (number_entries) {
3594 		int i;
3595 
3596 		for (i = 0; i < number_entries; i++) {
3597 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3598 		}
3599 	}
3600 	SCTP_BUF_LEN(m_notify) = len;
3601 	SCTP_BUF_NEXT(m_notify) = NULL;
3602 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3603 		/* no space */
3604 		sctp_m_freem(m_notify);
3605 		return;
3606 	}
3607 	/* append to socket */
3608 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3609 	    0, 0, stcb->asoc.context, 0, 0, 0,
3610 	    m_notify);
3611 	if (control == NULL) {
3612 		/* no memory */
3613 		sctp_m_freem(m_notify);
3614 		return;
3615 	}
3616 	control->length = SCTP_BUF_LEN(m_notify);
3617 	control->spec_flags = M_NOTIFICATION;
3618 	/* not that we need this */
3619 	control->tail_mbuf = m_notify;
3620 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3621 	    control,
3622 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3623 }
3624 
3625 
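/*
 * Queue an SCTP_REMOTE_ERROR notification, copying in as much of the
 * received ERROR chunk as is guaranteed to be contiguous.
 */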
3626 static void
3627 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3628 {
3629 	struct mbuf *m_notify;
3630 	struct sctp_remote_error *sre;
3631 	struct sctp_queued_to_read *control;
3632 	unsigned int notif_len;
3633 	uint16_t chunk_len;
3634 
3635 	if ((stcb == NULL) ||
3636 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3637 		return;
3638 	}
3639 	if (chunk != NULL) {
3640 		chunk_len = ntohs(chunk->ch.chunk_length);
3641 		/*
3642 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3643 		 * contiguous.
3644 		 */
3645 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3646 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3647 		}
3648 	} else {
3649 		chunk_len = 0;
3650 	}
3651 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3652 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3653 	if (m_notify == NULL) {
3654 		/* Retry with smaller value. */
3655 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3656 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3657 		if (m_notify == NULL) {
3658 			return;
3659 		}
3660 	}
3661 	SCTP_BUF_NEXT(m_notify) = NULL;
3662 	sre = mtod(m_notify, struct sctp_remote_error *);
3663 	memset(sre, 0, notif_len);
3664 	sre->sre_type = SCTP_REMOTE_ERROR;
3665 	sre->sre_flags = 0;
3666 	sre->sre_length = sizeof(struct sctp_remote_error);
3667 	sre->sre_error = error;
3668 	sre->sre_assoc_id = sctp_get_associd(stcb);
3669 	if (notif_len > sizeof(struct sctp_remote_error)) {
3670 		memcpy(sre->sre_data, chunk, chunk_len);
3671 		sre->sre_length += chunk_len;
3672 	}
3673 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3674 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3675 	    0, 0, stcb->asoc.context, 0, 0, 0,
3676 	    m_notify);
3677 	if (control != NULL) {
3678 		control->length = SCTP_BUF_LEN(m_notify);
3679 		control->spec_flags = M_NOTIFICATION;
3680 		/* not that we need this */
3681 		control->tail_mbuf = m_notify;
3682 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3683 		    control,
3684 		    &stcb->sctp_socket->so_rcv, 1,
3685 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3686 	} else {
3687 		sctp_m_freem(m_notify);
3688 	}
3689 }
3690 
3691 
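/*
 * Central dispatcher for ULP notifications: bail out if the socket is
 * gone or closed, filter interface events while still in the COOKIE
 * states, and hand each notification type to its helper above.
 */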
3692 void
3693 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3694     uint32_t error, void *data, int so_locked
3695 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3696     SCTP_UNUSED
3697 #endif
3698 )
3699 {
3700 	if ((stcb == NULL) ||
3701 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3702 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3703 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3704 		/* If the socket is gone we are out of here */
3705 		return;
3706 	}
3707 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3708 		return;
3709 	}
3710 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3711 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3712 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3713 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3714 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3715 			/* Don't report these in front states */
3716 			return;
3717 		}
3718 	}
3719 	switch (notification) {
3720 	case SCTP_NOTIFY_ASSOC_UP:
3721 		if (stcb->asoc.assoc_up_sent == 0) {
3722 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3723 			stcb->asoc.assoc_up_sent = 1;
3724 		}
3725 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3726 			sctp_notify_adaptation_layer(stcb);
3727 		}
3728 		if (stcb->asoc.auth_supported == 0) {
3729 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3730 			    NULL, so_locked);
3731 		}
3732 		break;
3733 	case SCTP_NOTIFY_ASSOC_DOWN:
3734 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3735 		break;
3736 	case SCTP_NOTIFY_INTERFACE_DOWN:
3737 		{
3738 			struct sctp_nets *net;
3739 
3740 			net = (struct sctp_nets *)data;
3741 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3742 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3743 			break;
3744 		}
3745 	case SCTP_NOTIFY_INTERFACE_UP:
3746 		{
3747 			struct sctp_nets *net;
3748 
3749 			net = (struct sctp_nets *)data;
3750 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3751 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3752 			break;
3753 		}
3754 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3755 		{
3756 			struct sctp_nets *net;
3757 
3758 			net = (struct sctp_nets *)data;
3759 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3760 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3761 			break;
3762 		}
3763 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3764 		sctp_notify_send_failed2(stcb, error,
3765 		    (struct sctp_stream_queue_pending *)data, so_locked);
3766 		break;
3767 	case SCTP_NOTIFY_SENT_DG_FAIL:
3768 		sctp_notify_send_failed(stcb, 1, error,
3769 		    (struct sctp_tmit_chunk *)data, so_locked);
3770 		break;
3771 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3772 		sctp_notify_send_failed(stcb, 0, error,
3773 		    (struct sctp_tmit_chunk *)data, so_locked);
3774 		break;
3775 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3776 		{
3777 			uint32_t val;
3778 
3779 			val = *((uint32_t *)data);
3780 
3781 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3782 			break;
3783 		}
3784 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3785 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3786 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3787 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3788 		} else {
3789 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3790 		}
3791 		break;
3792 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3793 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3794 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3795 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3796 		} else {
3797 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3798 		}
3799 		break;
3800 	case SCTP_NOTIFY_ASSOC_RESTART:
3801 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3802 		if (stcb->asoc.auth_supported == 0) {
3803 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3804 			    NULL, so_locked);
3805 		}
3806 		break;
3807 	case SCTP_NOTIFY_STR_RESET_SEND:
3808 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3809 		break;
3810 	case SCTP_NOTIFY_STR_RESET_RECV:
3811 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3812 		break;
3813 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3814 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3815 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3816 		break;
3817 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3818 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3819 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3820 		break;
3821 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3822 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3823 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3824 		break;
3825 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3826 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3827 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3828 		break;
3829 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3830 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3831 		    error, so_locked);
3832 		break;
3833 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3834 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3835 		    error, so_locked);
3836 		break;
3837 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3838 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3839 		    error, so_locked);
3840 		break;
3841 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3842 		sctp_notify_shutdown_event(stcb);
3843 		break;
3844 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3845 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3846 		    (uint16_t)(uintptr_t)data,
3847 		    so_locked);
3848 		break;
3849 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3850 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3851 		    (uint16_t)(uintptr_t)data,
3852 		    so_locked);
3853 		break;
3854 	case SCTP_NOTIFY_NO_PEER_AUTH:
3855 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3856 		    (uint16_t)(uintptr_t)data,
3857 		    so_locked);
3858 		break;
3859 	case SCTP_NOTIFY_SENDER_DRY:
3860 		sctp_notify_sender_dry_event(stcb, so_locked);
3861 		break;
3862 	case SCTP_NOTIFY_REMOTE_ERROR:
3863 		sctp_notify_remote_error(stcb, error, data);
3864 		break;
3865 	default:
3866 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3867 		    __func__, notification, notification);
3868 		break;
3869 	}			/* end switch */
3870 }
3871 
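/*
 * Report every chunk still sitting on the sent, send, and per-stream
 * output queues as failed (sent or unsent) to the ULP and release the
 * associated mbufs and chunk structures.
 */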
3872 void
3873 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked
3874 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3875     SCTP_UNUSED
3876 #endif
3877 )
3878 {
3879 	struct sctp_association *asoc;
3880 	struct sctp_stream_out *outs;
3881 	struct sctp_tmit_chunk *chk, *nchk;
3882 	struct sctp_stream_queue_pending *sp, *nsp;
3883 	int i;
3884 
3885 	if (stcb == NULL) {
3886 		return;
3887 	}
3888 	asoc = &stcb->asoc;
3889 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3890 		/* already being freed */
3891 		return;
3892 	}
3893 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3894 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3895 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3896 		return;
3897 	}
3898 	/* now go through all the gunk, freeing chunks */
3899 	/* sent queue SHOULD be empty */
3900 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3901 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3902 		asoc->sent_queue_cnt--;
3903 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3904 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3905 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3906 #ifdef INVARIANTS
3907 			} else {
3908 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3909 #endif
3910 			}
3911 		}
3912 		if (chk->data != NULL) {
3913 			sctp_free_bufspace(stcb, asoc, chk, 1);
3914 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3915 			    error, chk, so_locked);
3916 			if (chk->data) {
3917 				sctp_m_freem(chk->data);
3918 				chk->data = NULL;
3919 			}
3920 		}
3921 		sctp_free_a_chunk(stcb, chk, so_locked);
3922 		/* sa_ignore FREED_MEMORY */
3923 	}
3924 	/* pending send queue SHOULD be empty */
3925 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3926 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3927 		asoc->send_queue_cnt--;
3928 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3929 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3930 #ifdef INVARIANTS
3931 		} else {
3932 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3933 #endif
3934 		}
3935 		if (chk->data != NULL) {
3936 			sctp_free_bufspace(stcb, asoc, chk, 1);
3937 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3938 			    error, chk, so_locked);
3939 			if (chk->data) {
3940 				sctp_m_freem(chk->data);
3941 				chk->data = NULL;
3942 			}
3943 		}
3944 		sctp_free_a_chunk(stcb, chk, so_locked);
3945 		/* sa_ignore FREED_MEMORY */
3946 	}
3947 	for (i = 0; i < asoc->streamoutcnt; i++) {
3948 		/* For each stream */
3949 		outs = &asoc->strmout[i];
3950 		/* clean up any sends there */
3951 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3952 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3953 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3954 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
3955 			sctp_free_spbufspace(stcb, asoc, sp);
3956 			if (sp->data) {
3957 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3958 				    error, (void *)sp, so_locked);
3959 				if (sp->data) {
3960 					sctp_m_freem(sp->data);
3961 					sp->data = NULL;
3962 					sp->tail_mbuf = NULL;
3963 					sp->length = 0;
3964 				}
3965 			}
3966 			if (sp->net) {
3967 				sctp_free_remote_addr(sp->net);
3968 				sp->net = NULL;
3969 			}
3970 			/* Free the chunk */
3971 			sctp_free_a_strmoq(stcb, sp, so_locked);
3972 			/* sa_ignore FREED_MEMORY */
3973 		}
3974 	}
3975 }
3976 
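/*
 * Mark the association as aborted, report all outbound data as failed,
 * and tell the ULP whether the ABORT came from the peer or was generated
 * locally.
 */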
3977 void
3978 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3979     struct sctp_abort_chunk *abort, int so_locked
3980 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3981     SCTP_UNUSED
3982 #endif
3983 )
3984 {
3985 	if (stcb == NULL) {
3986 		return;
3987 	}
3988 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3989 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3990 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3991 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3992 	}
3993 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3994 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3995 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3996 		return;
3997 	}
3998 	SCTP_TCB_SEND_LOCK(stcb);
3999 	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4000 	/* Tell them we lost the asoc */
4001 	sctp_report_all_outbound(stcb, error, so_locked);
4002 	SCTP_TCB_SEND_UNLOCK(stcb);
4003 	if (from_peer) {
4004 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4005 	} else {
4006 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4007 	}
4008 }
4009 
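/*
 * Send an ABORT in response to the given packet and, if a TCB exists,
 * notify the ULP and free the association.
 */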
4010 void
4011 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4012     struct mbuf *m, int iphlen,
4013     struct sockaddr *src, struct sockaddr *dst,
4014     struct sctphdr *sh, struct mbuf *op_err,
4015     uint8_t mflowtype, uint32_t mflowid,
4016     uint32_t vrf_id, uint16_t port)
4017 {
4018 	uint32_t vtag;
4019 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4020 	struct socket *so;
4021 #endif
4022 
4023 	vtag = 0;
4024 	if (stcb != NULL) {
4025 		vtag = stcb->asoc.peer_vtag;
4026 		vrf_id = stcb->asoc.vrf_id;
4027 	}
4028 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4029 	    mflowtype, mflowid, inp->fibnum,
4030 	    vrf_id, port);
4031 	if (stcb != NULL) {
4032 		/* We have a TCB to abort, send notification too */
4033 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4034 		/* Ok, now lets free it */
4035 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4036 		so = SCTP_INP_SO(inp);
4037 		atomic_add_int(&stcb->asoc.refcnt, 1);
4038 		SCTP_TCB_UNLOCK(stcb);
4039 		SCTP_SOCKET_LOCK(so, 1);
4040 		SCTP_TCB_LOCK(stcb);
4041 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4042 #endif
4043 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4044 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4045 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4046 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4047 		}
4048 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4049 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4050 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4051 		SCTP_SOCKET_UNLOCK(so, 1);
4052 #endif
4053 	}
4054 }
4055 #ifdef SCTP_ASOCLOG_OF_TSNS
4056 void
4057 sctp_print_out_track_log(struct sctp_tcb *stcb)
4058 {
4059 #ifdef NOISY_PRINTS
4060 	int i;
4061 
4062 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4063 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4064 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4065 		SCTP_PRINTF("None rcvd\n");
4066 		goto none_in;
4067 	}
4068 	if (stcb->asoc.tsn_in_wrapped) {
4069 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4070 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4071 			    stcb->asoc.in_tsnlog[i].tsn,
4072 			    stcb->asoc.in_tsnlog[i].strm,
4073 			    stcb->asoc.in_tsnlog[i].seq,
4074 			    stcb->asoc.in_tsnlog[i].flgs,
4075 			    stcb->asoc.in_tsnlog[i].sz);
4076 		}
4077 	}
4078 	if (stcb->asoc.tsn_in_at) {
4079 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4080 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4081 			    stcb->asoc.in_tsnlog[i].tsn,
4082 			    stcb->asoc.in_tsnlog[i].strm,
4083 			    stcb->asoc.in_tsnlog[i].seq,
4084 			    stcb->asoc.in_tsnlog[i].flgs,
4085 			    stcb->asoc.in_tsnlog[i].sz);
4086 		}
4087 	}
4088 none_in:
4089 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4090 	if ((stcb->asoc.tsn_out_at == 0) &&
4091 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4092 		SCTP_PRINTF("None sent\n");
4093 	}
4094 	if (stcb->asoc.tsn_out_wrapped) {
4095 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4096 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4097 			    stcb->asoc.out_tsnlog[i].tsn,
4098 			    stcb->asoc.out_tsnlog[i].strm,
4099 			    stcb->asoc.out_tsnlog[i].seq,
4100 			    stcb->asoc.out_tsnlog[i].flgs,
4101 			    stcb->asoc.out_tsnlog[i].sz);
4102 		}
4103 	}
4104 	if (stcb->asoc.tsn_out_at) {
4105 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4106 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4107 			    stcb->asoc.out_tsnlog[i].tsn,
4108 			    stcb->asoc.out_tsnlog[i].strm,
4109 			    stcb->asoc.out_tsnlog[i].seq,
4110 			    stcb->asoc.out_tsnlog[i].flgs,
4111 			    stcb->asoc.out_tsnlog[i].sz);
4112 		}
4113 	}
4114 #endif
4115 }
4116 #endif
4117 
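/*
 * Abort an existing association: send an ABORT chunk to the peer, notify
 * the ULP unless the socket is already gone, and free the TCB.
 */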
4118 void
4119 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4120     struct mbuf *op_err,
4121     int so_locked
4122 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4123     SCTP_UNUSED
4124 #endif
4125 )
4126 {
4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4128 	struct socket *so;
4129 #endif
4130 
4131 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4132 	so = SCTP_INP_SO(inp);
4133 #endif
4134 	if (stcb == NULL) {
4135 		/* Got to have a TCB */
4136 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4137 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4138 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4139 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4140 			}
4141 		}
4142 		return;
4143 	}
4144 	/* notify the peer */
4145 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4146 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4147 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4148 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4149 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4150 	}
4151 	/* notify the ulp */
4152 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4153 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4154 	}
4155 	/* now free the asoc */
4156 #ifdef SCTP_ASOCLOG_OF_TSNS
4157 	sctp_print_out_track_log(stcb);
4158 #endif
4159 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4160 	if (!so_locked) {
4161 		atomic_add_int(&stcb->asoc.refcnt, 1);
4162 		SCTP_TCB_UNLOCK(stcb);
4163 		SCTP_SOCKET_LOCK(so, 1);
4164 		SCTP_TCB_LOCK(stcb);
4165 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4166 	}
4167 #endif
4168 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4169 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4170 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4171 	if (!so_locked) {
4172 		SCTP_SOCKET_UNLOCK(so, 1);
4173 	}
4174 #endif
4175 }
4176 
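/*
 * Handle an out-of-the-blue packet: answer SHUTDOWN ACK with SHUTDOWN
 * COMPLETE, ignore ABORT, SHUTDOWN COMPLETE and PACKET DROPPED chunks,
 * and otherwise send an ABORT, subject to the sctp_blackhole sysctl.
 */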
4177 void
4178 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4179     struct sockaddr *src, struct sockaddr *dst,
4180     struct sctphdr *sh, struct sctp_inpcb *inp,
4181     struct mbuf *cause,
4182     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4183     uint32_t vrf_id, uint16_t port)
4184 {
4185 	struct sctp_chunkhdr *ch, chunk_buf;
4186 	unsigned int chk_length;
4187 	int contains_init_chunk;
4188 
4189 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4190 	/* Generate a TO address for future reference */
4191 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4192 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4193 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4194 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4195 		}
4196 	}
4197 	contains_init_chunk = 0;
4198 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4199 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4200 	while (ch != NULL) {
4201 		chk_length = ntohs(ch->chunk_length);
4202 		if (chk_length < sizeof(*ch)) {
4203 			/* break to abort land */
4204 			break;
4205 		}
4206 		switch (ch->chunk_type) {
4207 		case SCTP_INIT:
4208 			contains_init_chunk = 1;
4209 			break;
4210 		case SCTP_PACKET_DROPPED:
4211 			/* we don't respond to pkt-dropped */
4212 			return;
4213 		case SCTP_ABORT_ASSOCIATION:
4214 			/* we don't respond with an ABORT to an ABORT */
4215 			return;
4216 		case SCTP_SHUTDOWN_COMPLETE:
4217 			/*
4218 			 * we ignore it since we are not waiting for it and
4219 			 * peer is gone
4220 			 */
4221 			return;
4222 		case SCTP_SHUTDOWN_ACK:
4223 			sctp_send_shutdown_complete2(src, dst, sh,
4224 			    mflowtype, mflowid, fibnum,
4225 			    vrf_id, port);
4226 			return;
4227 		default:
4228 			break;
4229 		}
4230 		offset += SCTP_SIZE32(chk_length);
4231 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4232 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4233 	}
4234 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4235 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4236 	    (contains_init_chunk == 0))) {
4237 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4238 		    mflowtype, mflowid, fibnum,
4239 		    vrf_id, port);
4240 	}
4241 }
4242 
4243 /*
4244  * check the inbound datagram to make sure there is not an abort inside it,
4245  * if there is return 1, else return 0.
4246  */
4247 int
4248 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4249 {
4250 	struct sctp_chunkhdr *ch;
4251 	struct sctp_init_chunk *init_chk, chunk_buf;
4252 	int offset;
4253 	unsigned int chk_length;
4254 
4255 	offset = iphlen + sizeof(struct sctphdr);
4256 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4257 	    (uint8_t *)&chunk_buf);
4258 	while (ch != NULL) {
4259 		chk_length = ntohs(ch->chunk_length);
4260 		if (chk_length < sizeof(*ch)) {
4261 			/* packet is probably corrupt */
4262 			break;
4263 		}
4264 		/* we seem to be ok, is it an abort? */
4265 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4266 			/* yep, tell them */
4267 			return (1);
4268 		}
4269 		if (ch->chunk_type == SCTP_INITIATION) {
4270 			/* need to update the Vtag */
4271 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4272 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4273 			if (init_chk != NULL) {
4274 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4275 			}
4276 		}
4277 		/* Nope, move to the next chunk */
4278 		offset += SCTP_SIZE32(chk_length);
4279 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4280 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4281 	}
4282 	return (0);
4283 }
4284 
4285 /*
4286  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4287  * set (i.e. it's 0), so create this function to compare link-local scopes
4288  */
4289 #ifdef INET6
4290 uint32_t
4291 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4292 {
4293 	struct sockaddr_in6 a, b;
4294 
4295 	/* save copies */
4296 	a = *addr1;
4297 	b = *addr2;
4298 
4299 	if (a.sin6_scope_id == 0)
4300 		if (sa6_recoverscope(&a)) {
4301 			/* can't get scope, so can't match */
4302 			return (0);
4303 		}
4304 	if (b.sin6_scope_id == 0)
4305 		if (sa6_recoverscope(&b)) {
4306 			/* can't get scope, so can't match */
4307 			return (0);
4308 		}
4309 	if (a.sin6_scope_id != b.sin6_scope_id)
4310 		return (0);
4311 
4312 	return (1);
4313 }
4314 
4315 /*
4316  * returns a sockaddr_in6 with embedded scope recovered and removed
4317  */
4318 struct sockaddr_in6 *
4319 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4320 {
4321 	/* check and strip embedded scope junk */
4322 	if (addr->sin6_family == AF_INET6) {
4323 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4324 			if (addr->sin6_scope_id == 0) {
4325 				*store = *addr;
4326 				if (!sa6_recoverscope(store)) {
4327 					/* use the recovered scope */
4328 					addr = store;
4329 				}
4330 			} else {
4331 				/* else, return the original "to" addr */
4332 				in6_clearscope(&addr->sin6_addr);
4333 			}
4334 		}
4335 	}
4336 	return (addr);
4337 }
4338 #endif
4339 
4340 /*
4341  * are the two addresses the same?  currently a "scopeless" check returns: 1
4342  * if same, 0 if not
4343  */
4344 int
4345 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4346 {
4347 
4348 	/* must be valid */
4349 	if (sa1 == NULL || sa2 == NULL)
4350 		return (0);
4351 
4352 	/* must be the same family */
4353 	if (sa1->sa_family != sa2->sa_family)
4354 		return (0);
4355 
4356 	switch (sa1->sa_family) {
4357 #ifdef INET6
4358 	case AF_INET6:
4359 		{
4360 			/* IPv6 addresses */
4361 			struct sockaddr_in6 *sin6_1, *sin6_2;
4362 
4363 			sin6_1 = (struct sockaddr_in6 *)sa1;
4364 			sin6_2 = (struct sockaddr_in6 *)sa2;
4365 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4366 			    sin6_2));
4367 		}
4368 #endif
4369 #ifdef INET
4370 	case AF_INET:
4371 		{
4372 			/* IPv4 addresses */
4373 			struct sockaddr_in *sin_1, *sin_2;
4374 
4375 			sin_1 = (struct sockaddr_in *)sa1;
4376 			sin_2 = (struct sockaddr_in *)sa2;
4377 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4378 		}
4379 #endif
4380 	default:
4381 		/* we don't do these... */
4382 		return (0);
4383 	}
4384 }
4385 
4386 void
4387 sctp_print_address(struct sockaddr *sa)
4388 {
4389 #ifdef INET6
4390 	char ip6buf[INET6_ADDRSTRLEN];
4391 #endif
4392 
4393 	switch (sa->sa_family) {
4394 #ifdef INET6
4395 	case AF_INET6:
4396 		{
4397 			struct sockaddr_in6 *sin6;
4398 
4399 			sin6 = (struct sockaddr_in6 *)sa;
4400 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4401 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4402 			    ntohs(sin6->sin6_port),
4403 			    sin6->sin6_scope_id);
4404 			break;
4405 		}
4406 #endif
4407 #ifdef INET
4408 	case AF_INET:
4409 		{
4410 			struct sockaddr_in *sin;
4411 			unsigned char *p;
4412 
4413 			sin = (struct sockaddr_in *)sa;
4414 			p = (unsigned char *)&sin->sin_addr;
4415 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4416 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4417 			break;
4418 		}
4419 #endif
4420 	default:
4421 		SCTP_PRINTF("?\n");
4422 		break;
4423 	}
4424 }
4425 
4426 void
4427 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4428     struct sctp_inpcb *new_inp,
4429     struct sctp_tcb *stcb,
4430     int waitflags)
4431 {
4432 	/*
4433 	 * go through our old INP and pull off any control structures that
4434 	 * belong to stcb and move them to the new inp.
4435 	 */
4436 	struct socket *old_so, *new_so;
4437 	struct sctp_queued_to_read *control, *nctl;
4438 	struct sctp_readhead tmp_queue;
4439 	struct mbuf *m;
4440 	int error = 0;
4441 
4442 	old_so = old_inp->sctp_socket;
4443 	new_so = new_inp->sctp_socket;
4444 	TAILQ_INIT(&tmp_queue);
4445 	error = sblock(&old_so->so_rcv, waitflags);
4446 	if (error) {
4447 		/*
4448 		 * Gak, can't get sblock, we have a problem. data will be
4449 		 * other thread may be reading something. Oh well, it's a
4450 		 * screwed-up app that does a peeloff OR an accept while
4451 		 * reading from the main socket... actually it's only the
4452 		 * reading from the main socket... actually its only the
4453 		 * peeloff() case, since I think read will fail on a
4454 		 * listening socket..
4455 		 */
4456 		return;
4457 	}
4458 	/* lock the socket buffers */
4459 	SCTP_INP_READ_LOCK(old_inp);
4460 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4461 		/* Pull off all for our target stcb */
4462 		if (control->stcb == stcb) {
4463 			/* remove it we want it */
4464 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4465 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4466 			m = control->data;
4467 			while (m) {
4468 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4469 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4470 				}
4471 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4472 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4473 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4474 				}
4475 				m = SCTP_BUF_NEXT(m);
4476 			}
4477 		}
4478 	}
4479 	SCTP_INP_READ_UNLOCK(old_inp);
4480 	/* Remove the sb-lock on the old socket */
4481 
4482 	sbunlock(&old_so->so_rcv);
4483 	/* Now we move them over to the new socket buffer */
4484 	SCTP_INP_READ_LOCK(new_inp);
4485 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4486 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4487 		m = control->data;
4488 		while (m) {
4489 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4490 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4491 			}
4492 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4493 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4494 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4495 			}
4496 			m = SCTP_BUF_NEXT(m);
4497 		}
4498 	}
4499 	SCTP_INP_READ_UNLOCK(new_inp);
4500 }
4501 
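/*
 * Wake up any reader sleeping on the endpoint's receive socket buffer,
 * acquiring the socket lock first on platforms that require it.
 */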
4502 void
4503 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4504     struct sctp_tcb *stcb,
4505     int so_locked
4506 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4507     SCTP_UNUSED
4508 #endif
4509 )
4510 {
4511 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4512 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4513 		struct socket *so;
4514 
4515 		so = SCTP_INP_SO(inp);
4516 		if (!so_locked) {
4517 			if (stcb) {
4518 				atomic_add_int(&stcb->asoc.refcnt, 1);
4519 				SCTP_TCB_UNLOCK(stcb);
4520 			}
4521 			SCTP_SOCKET_LOCK(so, 1);
4522 			if (stcb) {
4523 				SCTP_TCB_LOCK(stcb);
4524 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4525 			}
4526 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4527 				SCTP_SOCKET_UNLOCK(so, 1);
4528 				return;
4529 			}
4530 		}
4531 #endif
4532 		sctp_sorwakeup(inp, inp->sctp_socket);
4533 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4534 		if (!so_locked) {
4535 			SCTP_SOCKET_UNLOCK(so, 1);
4536 		}
4537 #endif
4538 	}
4539 }
4540 
4541 void
4542 sctp_add_to_readq(struct sctp_inpcb *inp,
4543     struct sctp_tcb *stcb,
4544     struct sctp_queued_to_read *control,
4545     struct sockbuf *sb,
4546     int end,
4547     int inp_read_lock_held,
4548     int so_locked
4549 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4550     SCTP_UNUSED
4551 #endif
4552 )
4553 {
4554 	/*
4555 	 * Here we must place the control on the end of the socket read
4556 	 * queue AND increment sb_cc so that select will work properly on
4557 	 * read.
4558 	 */
4559 	struct mbuf *m, *prev = NULL;
4560 
4561 	if (inp == NULL) {
4562 		/* Gak, TSNH!! */
4563 #ifdef INVARIANTS
4564 		panic("Gak, inp NULL on add_to_readq");
4565 #endif
4566 		return;
4567 	}
4568 	if (inp_read_lock_held == 0)
4569 		SCTP_INP_READ_LOCK(inp);
4570 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4571 		sctp_free_remote_addr(control->whoFrom);
4572 		if (control->data) {
4573 			sctp_m_freem(control->data);
4574 			control->data = NULL;
4575 		}
4576 		sctp_free_a_readq(stcb, control);
4577 		if (inp_read_lock_held == 0)
4578 			SCTP_INP_READ_UNLOCK(inp);
4579 		return;
4580 	}
4581 	if (!(control->spec_flags & M_NOTIFICATION)) {
4582 		atomic_add_int(&inp->total_recvs, 1);
4583 		if (!control->do_not_ref_stcb) {
4584 			atomic_add_int(&stcb->total_recvs, 1);
4585 		}
4586 	}
4587 	m = control->data;
4588 	control->held_length = 0;
4589 	control->length = 0;
4590 	while (m) {
4591 		if (SCTP_BUF_LEN(m) == 0) {
4592 			/* Skip mbufs with NO length */
4593 			if (prev == NULL) {
4594 				/* First one */
4595 				control->data = sctp_m_free(m);
4596 				m = control->data;
4597 			} else {
4598 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4599 				m = SCTP_BUF_NEXT(prev);
4600 			}
4601 			if (m == NULL) {
4602 				control->tail_mbuf = prev;
4603 			}
4604 			continue;
4605 		}
4606 		prev = m;
4607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4608 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4609 		}
4610 		sctp_sballoc(stcb, sb, m);
4611 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4612 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4613 		}
4614 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4615 		m = SCTP_BUF_NEXT(m);
4616 	}
4617 	if (prev != NULL) {
4618 		control->tail_mbuf = prev;
4619 	} else {
4620 		/* Everything got collapsed out?? */
4621 		sctp_free_remote_addr(control->whoFrom);
4622 		sctp_free_a_readq(stcb, control);
4623 		if (inp_read_lock_held == 0)
4624 			SCTP_INP_READ_UNLOCK(inp);
4625 		return;
4626 	}
4627 	if (end) {
4628 		control->end_added = 1;
4629 	}
4630 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4631 	control->on_read_q = 1;
4632 	if (inp_read_lock_held == 0)
4633 		SCTP_INP_READ_UNLOCK(inp);
4634 	if (inp && inp->sctp_socket) {
4635 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4636 	}
4637 }
4638 
4639 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4640  *************ALTERNATE ROUTING CODE
4641  */
4642 
4643 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4644  *************ALTERNATE ROUTING CODE
4645  */
4646 
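/*
 * Build an mbuf holding a generic error cause with the given cause code
 * and textual info; returns NULL if the info is too long or no mbuf is
 * available.
 */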
4647 struct mbuf *
4648 sctp_generate_cause(uint16_t code, char *info)
4649 {
4650 	struct mbuf *m;
4651 	struct sctp_gen_error_cause *cause;
4652 	size_t info_len;
4653 	uint16_t len;
4654 
4655 	if ((code == 0) || (info == NULL)) {
4656 		return (NULL);
4657 	}
4658 	info_len = strlen(info);
4659 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4660 		return (NULL);
4661 	}
4662 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4663 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4664 	if (m != NULL) {
4665 		SCTP_BUF_LEN(m) = len;
4666 		cause = mtod(m, struct sctp_gen_error_cause *);
4667 		cause->code = htons(code);
4668 		cause->length = htons(len);
4669 		memcpy(cause->info, info, info_len);
4670 	}
4671 	return (m);
4672 }
4673 
4674 struct mbuf *
4675 sctp_generate_no_user_data_cause(uint32_t tsn)
4676 {
4677 	struct mbuf *m;
4678 	struct sctp_error_no_user_data *no_user_data_cause;
4679 	uint16_t len;
4680 
4681 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4682 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4683 	if (m != NULL) {
4684 		SCTP_BUF_LEN(m) = len;
4685 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4686 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4687 		no_user_data_cause->cause.length = htons(len);
4688 		no_user_data_cause->tsn = htonl(tsn);
4689 	}
4690 	return (m);
4691 }
4692 
4693 #ifdef SCTP_MBCNT_LOGGING
4694 void
4695 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4696     struct sctp_tmit_chunk *tp1, int chk_cnt)
4697 {
4698 	if (tp1->data == NULL) {
4699 		return;
4700 	}
4701 	asoc->chunks_on_out_queue -= chk_cnt;
4702 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4703 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4704 		    asoc->total_output_queue_size,
4705 		    tp1->book_size,
4706 		    0,
4707 		    tp1->mbcnt);
4708 	}
4709 	if (asoc->total_output_queue_size >= tp1->book_size) {
4710 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4711 	} else {
4712 		asoc->total_output_queue_size = 0;
4713 	}
4714 
4715 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4716 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4717 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4718 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4719 		} else {
4720 			stcb->sctp_socket->so_snd.sb_cc = 0;
4721 
4722 		}
4723 	}
4724 }
4725 
4726 #endif
4727 
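/*
 * Abandon a (possibly fragmented) PR-SCTP message: mark its chunks for
 * FORWARD-TSN skipping across the sent, send, and stream-out queues,
 * notify the ULP of the failure, and return the number of bytes released.
 */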
4728 int
4729 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4730     uint8_t sent, int so_locked
4731 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4732     SCTP_UNUSED
4733 #endif
4734 )
4735 {
4736 	struct sctp_stream_out *strq;
4737 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4738 	struct sctp_stream_queue_pending *sp;
4739 	uint32_t mid;
4740 	uint16_t sid;
4741 	uint8_t foundeom = 0;
4742 	int ret_sz = 0;
4743 	int notdone;
4744 	int do_wakeup_routine = 0;
4745 
4746 	sid = tp1->rec.data.sid;
4747 	mid = tp1->rec.data.mid;
4748 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4749 		stcb->asoc.abandoned_sent[0]++;
4750 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4751 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4752 #if defined(SCTP_DETAILED_STR_STATS)
4753 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4754 #endif
4755 	} else {
4756 		stcb->asoc.abandoned_unsent[0]++;
4757 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4758 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4759 #if defined(SCTP_DETAILED_STR_STATS)
4760 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4761 #endif
4762 	}
4763 	do {
4764 		ret_sz += tp1->book_size;
4765 		if (tp1->data != NULL) {
4766 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4767 				sctp_flight_size_decrease(tp1);
4768 				sctp_total_flight_decrease(stcb, tp1);
4769 			}
4770 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4771 			stcb->asoc.peers_rwnd += tp1->send_size;
4772 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4773 			if (sent) {
4774 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4775 			} else {
4776 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4777 			}
4778 			if (tp1->data) {
4779 				sctp_m_freem(tp1->data);
4780 				tp1->data = NULL;
4781 			}
4782 			do_wakeup_routine = 1;
4783 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4784 				stcb->asoc.sent_queue_cnt_removeable--;
4785 			}
4786 		}
4787 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4788 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4789 		    SCTP_DATA_NOT_FRAG) {
4790 			/* not frag'ed, we are done */
4791 			notdone = 0;
4792 			foundeom = 1;
4793 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4794 			/* end of frag, we are done */
4795 			notdone = 0;
4796 			foundeom = 1;
4797 		} else {
4798 			/*
4799 			 * It's a begin or middle piece, we must mark all of
4800 			 * it
4801 			 */
4802 			notdone = 1;
4803 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4804 		}
4805 	} while (tp1 && notdone);
4806 	if (foundeom == 0) {
4807 		/*
4808 		 * The multi-part message was scattered across the send and
4809 		 * sent queue.
4810 		 */
4811 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4812 			if ((tp1->rec.data.sid != sid) ||
4813 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4814 				break;
4815 			}
4816 			/*
4817 			 * save to chk in case we have some on stream out
4818 			 * queue. If so and we have an un-transmitted one we
4819 			 * don't have to fudge the TSN.
4820 			 */
4821 			chk = tp1;
4822 			ret_sz += tp1->book_size;
4823 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4824 			if (sent) {
4825 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4826 			} else {
4827 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4828 			}
4829 			if (tp1->data) {
4830 				sctp_m_freem(tp1->data);
4831 				tp1->data = NULL;
4832 			}
4833 			/* No flight involved here book the size to 0 */
4834 			tp1->book_size = 0;
4835 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4836 				foundeom = 1;
4837 			}
4838 			do_wakeup_routine = 1;
4839 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4840 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4841 			/*
4842 			 * on to the sent queue so we can wait for it to be
4843 			 * passed by.
4844 			 */
4845 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4846 			    sctp_next);
4847 			stcb->asoc.send_queue_cnt--;
4848 			stcb->asoc.sent_queue_cnt++;
4849 		}
4850 	}
4851 	if (foundeom == 0) {
4852 		/*
4853 		 * Still no eom found. That means there is stuff left on the
4854 		 * stream out queue.. yuck.
4855 		 */
4856 		SCTP_TCB_SEND_LOCK(stcb);
4857 		strq = &stcb->asoc.strmout[sid];
4858 		sp = TAILQ_FIRST(&strq->outqueue);
4859 		if (sp != NULL) {
4860 			sp->discard_rest = 1;
4861 			/*
4862 			 * We may need to put a chunk on the queue that
4863 			 * holds the TSN that would have been sent with the
4864 			 * LAST bit.
4865 			 */
4866 			if (chk == NULL) {
4867 				/* Yep, we have to */
4868 				sctp_alloc_a_chunk(stcb, chk);
4869 				if (chk == NULL) {
4870 					/*
4871 					 * we are hosed. All we can do is
4872 					 * nothing.. which will cause an
4873 					 * abort if the peer is paying
4874 					 * attention.
4875 					 */
4876 					goto oh_well;
4877 				}
4878 				memset(chk, 0, sizeof(*chk));
4879 				chk->rec.data.rcv_flags = 0;
4880 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4881 				chk->asoc = &stcb->asoc;
4882 				if (stcb->asoc.idata_supported == 0) {
4883 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4884 						chk->rec.data.mid = 0;
4885 					} else {
4886 						chk->rec.data.mid = strq->next_mid_ordered;
4887 					}
4888 				} else {
4889 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4890 						chk->rec.data.mid = strq->next_mid_unordered;
4891 					} else {
4892 						chk->rec.data.mid = strq->next_mid_ordered;
4893 					}
4894 				}
4895 				chk->rec.data.sid = sp->sid;
4896 				chk->rec.data.ppid = sp->ppid;
4897 				chk->rec.data.context = sp->context;
4898 				chk->flags = sp->act_flags;
4899 				chk->whoTo = NULL;
4900 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4901 				strq->chunks_on_queues++;
4902 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4903 				stcb->asoc.sent_queue_cnt++;
4904 				stcb->asoc.pr_sctp_cnt++;
4905 			}
4906 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4907 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4908 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4909 			}
4910 			if (stcb->asoc.idata_supported == 0) {
4911 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4912 					strq->next_mid_ordered++;
4913 				}
4914 			} else {
4915 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4916 					strq->next_mid_unordered++;
4917 				} else {
4918 					strq->next_mid_ordered++;
4919 				}
4920 			}
4921 	oh_well:
4922 			if (sp->data) {
4923 				/*
4924 				 * Pull any data to free up the SB and allow
4925 				 * sender to "add more" while we will throw
4926 				 * away :-)
4927 				 */
4928 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4929 				ret_sz += sp->length;
4930 				do_wakeup_routine = 1;
4931 				sp->some_taken = 1;
4932 				sctp_m_freem(sp->data);
4933 				sp->data = NULL;
4934 				sp->tail_mbuf = NULL;
4935 				sp->length = 0;
4936 			}
4937 		}
4938 		SCTP_TCB_SEND_UNLOCK(stcb);
4939 	}
4940 	if (do_wakeup_routine) {
4941 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4942 		struct socket *so;
4943 
4944 		so = SCTP_INP_SO(stcb->sctp_ep);
4945 		if (!so_locked) {
4946 			atomic_add_int(&stcb->asoc.refcnt, 1);
4947 			SCTP_TCB_UNLOCK(stcb);
4948 			SCTP_SOCKET_LOCK(so, 1);
4949 			SCTP_TCB_LOCK(stcb);
4950 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4951 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4952 				/* assoc was freed while we were unlocked */
4953 				SCTP_SOCKET_UNLOCK(so, 1);
4954 				return (ret_sz);
4955 			}
4956 		}
4957 #endif
4958 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4959 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4960 		if (!so_locked) {
4961 			SCTP_SOCKET_UNLOCK(so, 1);
4962 		}
4963 #endif
4964 	}
4965 	return (ret_sz);
4966 }
4967 
4968 /*
4969  * checks to see if the given address, sa, is one that is currently known by
4970  * the kernel. Note: can't distinguish the same address on multiple interfaces
4971  * and doesn't handle multiple addresses with different zone/scope ids.
4972  * Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4973  */
4974 struct sctp_ifa *
4975 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4976     int holds_lock)
4977 {
4978 	struct sctp_laddr *laddr;
4979 
4980 	if (holds_lock == 0) {
4981 		SCTP_INP_RLOCK(inp);
4982 	}
4983 
4984 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4985 		if (laddr->ifa == NULL)
4986 			continue;
4987 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4988 			continue;
4989 #ifdef INET
4990 		if (addr->sa_family == AF_INET) {
4991 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4992 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4993 				/* found him. */
4994 				if (holds_lock == 0) {
4995 					SCTP_INP_RUNLOCK(inp);
4996 				}
4997 				return (laddr->ifa);
4998 				break;
4999 			}
5000 		}
5001 #endif
5002 #ifdef INET6
5003 		if (addr->sa_family == AF_INET6) {
5004 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5005 			    &laddr->ifa->address.sin6)) {
5006 				/* found him. */
5007 				if (holds_lock == 0) {
5008 					SCTP_INP_RUNLOCK(inp);
5009 				}
5010 				return (laddr->ifa);
5011 				break;
5012 			}
5013 		}
5014 #endif
5015 	}
5016 	if (holds_lock == 0) {
5017 		SCTP_INP_RUNLOCK(inp);
5018 	}
5019 	return (NULL);
5020 }
5021 
5022 uint32_t
5023 sctp_get_ifa_hash_val(struct sockaddr *addr)
5024 {
5025 	switch (addr->sa_family) {
5026 #ifdef INET
5027 	case AF_INET:
5028 		{
5029 			struct sockaddr_in *sin;
5030 
5031 			sin = (struct sockaddr_in *)addr;
5032 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5033 		}
5034 #endif
5035 #ifdef INET6
5036 	case AF_INET6:
5037 		{
5038 			struct sockaddr_in6 *sin6;
5039 			uint32_t hash_of_addr;
5040 
5041 			sin6 = (struct sockaddr_in6 *)addr;
5042 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5043 			    sin6->sin6_addr.s6_addr32[1] +
5044 			    sin6->sin6_addr.s6_addr32[2] +
5045 			    sin6->sin6_addr.s6_addr32[3]);
5046 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5047 			return (hash_of_addr);
5048 		}
5049 #endif
5050 	default:
5051 		break;
5052 	}
5053 	return (0);
5054 }
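/*
 * A minimal user-space sketch of the XOR-fold used above; the helper
 * name fold32 is illustrative only and not part of this file.  Folding
 * the upper 16 bits into the lower 16 means the bucket index computed
 * later as (hash & vrf_addr_hashmark) still depends on the whole
 * address even for small hash tables; for IPv6 the four 32-bit words
 * are summed first and the sum is folded the same way.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	fold32(uint32_t v)
 *	{
 *		return (v ^ (v >> 16));
 *	}
 */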
5055 
5056 struct sctp_ifa *
5057 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5058 {
5059 	struct sctp_ifa *sctp_ifap;
5060 	struct sctp_vrf *vrf;
5061 	struct sctp_ifalist *hash_head;
5062 	uint32_t hash_of_addr;
5063 
5064 	if (holds_lock == 0)
5065 		SCTP_IPI_ADDR_RLOCK();
5066 
5067 	vrf = sctp_find_vrf(vrf_id);
5068 	if (vrf == NULL) {
5069 		if (holds_lock == 0)
5070 			SCTP_IPI_ADDR_RUNLOCK();
5071 		return (NULL);
5072 	}
5073 
5074 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5075 
5076 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5077 	if (hash_head == NULL) {
5078 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5079 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5080 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5081 		sctp_print_address(addr);
5082 		SCTP_PRINTF("No such bucket for address\n");
5083 		if (holds_lock == 0)
5084 			SCTP_IPI_ADDR_RUNLOCK();
5085 
5086 		return (NULL);
5087 	}
5088 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5089 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5090 			continue;
5091 #ifdef INET
5092 		if (addr->sa_family == AF_INET) {
5093 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5094 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5095 				/* found him. */
5096 				if (holds_lock == 0)
5097 					SCTP_IPI_ADDR_RUNLOCK();
5098 				return (sctp_ifap);
5099 				break;
5100 			}
5101 		}
5102 #endif
5103 #ifdef INET6
5104 		if (addr->sa_family == AF_INET6) {
5105 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5106 			    &sctp_ifap->address.sin6)) {
5107 				/* found him. */
5108 				if (holds_lock == 0)
5109 					SCTP_IPI_ADDR_RUNLOCK();
5110 				return (sctp_ifap);
5111 				break;
5112 			}
5113 		}
5114 #endif
5115 	}
5116 	if (holds_lock == 0)
5117 		SCTP_IPI_ADDR_RUNLOCK();
5118 	return (NULL);
5119 }
5120 
5121 static void
5122 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5123     uint32_t rwnd_req)
5124 {
5125 	/* User pulled some data, do we need a rwnd update? */
5126 	int r_unlocked = 0;
5127 	uint32_t dif, rwnd;
5128 	struct socket *so = NULL;
5129 
5130 	if (stcb == NULL)
5131 		return;
5132 
5133 	atomic_add_int(&stcb->asoc.refcnt, 1);
5134 
5135 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5136 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5137 		/* Pre-check: if we are freeing, no update */
5138 		goto no_lock;
5139 	}
5140 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5141 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5142 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5143 		goto out;
5144 	}
5145 	so = stcb->sctp_socket;
5146 	if (so == NULL) {
5147 		goto out;
5148 	}
5149 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5150 	/* Have you freed enough to look? */
5151 	*freed_so_far = 0;
5152 	/* Yep, it's worth a look and the lock overhead */
5153 
5154 	/* Figure out what the rwnd would be */
5155 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5156 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5157 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5158 	} else {
5159 		dif = 0;
5160 	}
5161 	if (dif >= rwnd_req) {
5162 		if (hold_rlock) {
5163 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5164 			r_unlocked = 1;
5165 		}
5166 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5167 			/*
5168 			 * One last check before we possibly let the guy in.
5169 			 * There is a race where the guy has not yet reached
5170 			 * the gate; in that case we just skip the update.
5171 			 */
5172 			goto out;
5173 		}
5174 		SCTP_TCB_LOCK(stcb);
5175 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5176 			/* No reports here */
5177 			SCTP_TCB_UNLOCK(stcb);
5178 			goto out;
5179 		}
5180 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5181 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5182 
5183 		sctp_chunk_output(stcb->sctp_ep, stcb,
5184 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5185 		/* make sure no timer is running */
5186 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5187 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5188 		SCTP_TCB_UNLOCK(stcb);
5189 	} else {
5190 		/* Update how much we have pending */
5191 		stcb->freed_by_sorcv_sincelast = dif;
5192 	}
5193 out:
5194 	if (so && r_unlocked && hold_rlock) {
5195 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5196 	}
5197 
5198 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5199 no_lock:
5200 	atomic_add_int(&stcb->asoc.refcnt, -1);
5201 	return;
5202 }
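/*
 * A worked example of the threshold test above, with illustrative
 * numbers only: if the last rwnd reported to the peer was 16000 bytes
 * and sctp_calc_rwnd() now yields 40000, then dif is 24000; with an
 * rwnd_req of, say, 8192, dif >= rwnd_req holds and a window-update
 * SACK goes out.  Had the reader only opened the window by 4000 bytes,
 * the difference would merely be parked in freed_by_sorcv_sincelast
 * until enough further data has been consumed.
 */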
5203 
5204 int
5205 sctp_sorecvmsg(struct socket *so,
5206     struct uio *uio,
5207     struct mbuf **mp,
5208     struct sockaddr *from,
5209     int fromlen,
5210     int *msg_flags,
5211     struct sctp_sndrcvinfo *sinfo,
5212     int filling_sinfo)
5213 {
5214 	/*
5215 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
5216 	 * MSG_PEEK - look, don't touch :-D (only valid without an mbuf copy,
5217 	 * i.e. mp == NULL, so uio is the copy method to userland). MSG_WAITALL - ??
5218 	 * On the way out we may set any combination of:
5219 	 * MSG_NOTIFICATION MSG_EOR
5220 	 *
5221 	 */
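	/*
	 * A hedged userland sketch of how the out-flags surface through
	 * sctp_recvmsg(3); sd and the buffer size are illustrative:
	 *
	 *	char buf[2048];
	 *	int flags = 0;
	 *	ssize_t n;
	 *
	 *	n = sctp_recvmsg(sd, buf, sizeof(buf), NULL, NULL, NULL,
	 *	    &flags);
	 *	if (n > 0 && (flags & MSG_NOTIFICATION))
	 *		;	// buf starts with a union sctp_notification
	 *	else if (n > 0 && (flags & MSG_EOR))
	 *		;	// a complete user message was returned
	 */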
5222 	struct sctp_inpcb *inp = NULL;
5223 	ssize_t my_len = 0;
5224 	ssize_t cp_len = 0;
5225 	int error = 0;
5226 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5227 	struct mbuf *m = NULL;
5228 	struct sctp_tcb *stcb = NULL;
5229 	int wakeup_read_socket = 0;
5230 	int freecnt_applied = 0;
5231 	int out_flags = 0, in_flags = 0;
5232 	int block_allowed = 1;
5233 	uint32_t freed_so_far = 0;
5234 	ssize_t copied_so_far = 0;
5235 	int in_eeor_mode = 0;
5236 	int no_rcv_needed = 0;
5237 	uint32_t rwnd_req = 0;
5238 	int hold_sblock = 0;
5239 	int hold_rlock = 0;
5240 	ssize_t slen = 0;
5241 	uint32_t held_length = 0;
5242 	int sockbuf_lock = 0;
5243 
5244 	if (uio == NULL) {
5245 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5246 		return (EINVAL);
5247 	}
5248 
5249 	if (msg_flags) {
5250 		in_flags = *msg_flags;
5251 		if (in_flags & MSG_PEEK)
5252 			SCTP_STAT_INCR(sctps_read_peeks);
5253 	} else {
5254 		in_flags = 0;
5255 	}
5256 	slen = uio->uio_resid;
5257 
5258 	/* Pull in and set up our int flags */
5259 	if (in_flags & MSG_OOB) {
5260 		/* Out-of-band data is NOT supported */
5261 		return (EOPNOTSUPP);
5262 	}
5263 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5264 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5265 		return (EINVAL);
5266 	}
5267 	if ((in_flags & (MSG_DONTWAIT
5268 	    | MSG_NBIO
5269 	    )) ||
5270 	    SCTP_SO_IS_NBIO(so)) {
5271 		block_allowed = 0;
5272 	}
5273 	/* setup the endpoint */
5274 	inp = (struct sctp_inpcb *)so->so_pcb;
5275 	if (inp == NULL) {
5276 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5277 		return (EFAULT);
5278 	}
5279 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5280 	/* Must be at least a MTU's worth */
5281 	if (rwnd_req < SCTP_MIN_RWND)
5282 		rwnd_req = SCTP_MIN_RWND;
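	/*
	 * Illustrative arithmetic only: with a 64 kB receive buffer and a
	 * hiwat shift of 3, rwnd_req comes out as 8 kB, i.e. a window
	 * update is considered once roughly 1/8 of the socket buffer has
	 * been drained, but never for less than SCTP_MIN_RWND.
	 */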
5283 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5284 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5285 		sctp_misc_ints(SCTP_SORECV_ENTER,
5286 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5287 	}
5288 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5289 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5290 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5291 	}
5292 
5293 
5294 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5295 	if (error) {
5296 		goto release_unlocked;
5297 	}
5298 	sockbuf_lock = 1;
5299 restart:
5300 
5301 
5302 restart_nosblocks:
5303 	if (hold_sblock == 0) {
5304 		SOCKBUF_LOCK(&so->so_rcv);
5305 		hold_sblock = 1;
5306 	}
5307 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5308 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5309 		goto out;
5310 	}
5311 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5312 		if (so->so_error) {
5313 			error = so->so_error;
5314 			if ((in_flags & MSG_PEEK) == 0)
5315 				so->so_error = 0;
5316 			goto out;
5317 		} else {
5318 			if (so->so_rcv.sb_cc == 0) {
5319 				/* indicate EOF */
5320 				error = 0;
5321 				goto out;
5322 			}
5323 		}
5324 	}
5325 	if (so->so_rcv.sb_cc <= held_length) {
5326 		if (so->so_error) {
5327 			error = so->so_error;
5328 			if ((in_flags & MSG_PEEK) == 0) {
5329 				so->so_error = 0;
5330 			}
5331 			goto out;
5332 		}
5333 		if ((so->so_rcv.sb_cc == 0) &&
5334 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5335 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5336 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5337 				/*
5338 				 * For the active open side, clear flags for
5339 				 * re-use; the passive open side is blocked
5340 				 * by connect.
5341 				 */
5342 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5343 					/*
5344 					 * You were aborted, passive side
5345 					 * always hits here
5346 					 */
5347 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5348 					error = ECONNRESET;
5349 				}
5350 				so->so_state &= ~(SS_ISCONNECTING |
5351 				    SS_ISDISCONNECTING |
5352 				    SS_ISCONFIRMING |
5353 				    SS_ISCONNECTED);
5354 				if (error == 0) {
5355 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5356 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5357 						error = ENOTCONN;
5358 					}
5359 				}
5360 				goto out;
5361 			}
5362 		}
5363 		if (block_allowed) {
5364 			error = sbwait(&so->so_rcv);
5365 			if (error) {
5366 				goto out;
5367 			}
5368 			held_length = 0;
5369 			goto restart_nosblocks;
5370 		} else {
5371 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5372 			error = EWOULDBLOCK;
5373 			goto out;
5374 		}
5375 	}
5376 	if (hold_sblock == 1) {
5377 		SOCKBUF_UNLOCK(&so->so_rcv);
5378 		hold_sblock = 0;
5379 	}
5380 	/* we possibly have data we can read */
5381 	/* sa_ignore FREED_MEMORY */
5382 	control = TAILQ_FIRST(&inp->read_queue);
5383 	if (control == NULL) {
5384 		/*
5385 		 * This could happen since the appender did the
5386 		 * increment but has not yet done the tailq insert onto the
5387 		 * read_queue
5388 		 */
5389 		if (hold_rlock == 0) {
5390 			SCTP_INP_READ_LOCK(inp);
5391 		}
5392 		control = TAILQ_FIRST(&inp->read_queue);
5393 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5394 #ifdef INVARIANTS
5395 			panic("Huh, its non zero and nothing on control?");
5396 #endif
5397 			so->so_rcv.sb_cc = 0;
5398 		}
5399 		SCTP_INP_READ_UNLOCK(inp);
5400 		hold_rlock = 0;
5401 		goto restart;
5402 	}
5403 
5404 	if ((control->length == 0) &&
5405 	    (control->do_not_ref_stcb)) {
5406 		/*
5407 		 * Clean-up code for a freed assoc that left behind a
5408 		 * pdapi; maybe a peer in EEOR mode that just closed after
5409 		 * sending and never indicated an EOR.
5410 		 */
5411 		if (hold_rlock == 0) {
5412 			hold_rlock = 1;
5413 			SCTP_INP_READ_LOCK(inp);
5414 		}
5415 		control->held_length = 0;
5416 		if (control->data) {
5417 			/* Hmm there is data here .. fix */
5418 			struct mbuf *m_tmp;
5419 			int cnt = 0;
5420 
5421 			m_tmp = control->data;
5422 			while (m_tmp) {
5423 				cnt += SCTP_BUF_LEN(m_tmp);
5424 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5425 					control->tail_mbuf = m_tmp;
5426 					control->end_added = 1;
5427 				}
5428 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5429 			}
5430 			control->length = cnt;
5431 		} else {
5432 			/* remove it */
5433 			TAILQ_REMOVE(&inp->read_queue, control, next);
5434 			/* Add back any hidden data */
5435 			sctp_free_remote_addr(control->whoFrom);
5436 			sctp_free_a_readq(stcb, control);
5437 		}
5438 		if (hold_rlock) {
5439 			hold_rlock = 0;
5440 			SCTP_INP_READ_UNLOCK(inp);
5441 		}
5442 		goto restart;
5443 	}
5444 	if ((control->length == 0) &&
5445 	    (control->end_added == 1)) {
5446 		/*
5447 		 * Do we also need to check for (control->pdapi_aborted ==
5448 		 * 1)?
5449 		 */
5450 		if (hold_rlock == 0) {
5451 			hold_rlock = 1;
5452 			SCTP_INP_READ_LOCK(inp);
5453 		}
5454 		TAILQ_REMOVE(&inp->read_queue, control, next);
5455 		if (control->data) {
5456 #ifdef INVARIANTS
5457 			panic("control->data not null but control->length == 0");
5458 #else
5459 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5460 			sctp_m_freem(control->data);
5461 			control->data = NULL;
5462 #endif
5463 		}
5464 		if (control->aux_data) {
5465 			sctp_m_free(control->aux_data);
5466 			control->aux_data = NULL;
5467 		}
5468 #ifdef INVARIANTS
5469 		if (control->on_strm_q) {
5470 			panic("About to free ctl:%p so:%p and its in %d",
5471 			    control, so, control->on_strm_q);
5472 		}
5473 #endif
5474 		sctp_free_remote_addr(control->whoFrom);
5475 		sctp_free_a_readq(stcb, control);
5476 		if (hold_rlock) {
5477 			hold_rlock = 0;
5478 			SCTP_INP_READ_UNLOCK(inp);
5479 		}
5480 		goto restart;
5481 	}
5482 	if (control->length == 0) {
5483 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5484 		    (filling_sinfo)) {
5485 			/* find a more suitable one than this */
5486 			ctl = TAILQ_NEXT(control, next);
5487 			while (ctl) {
5488 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5489 				    (ctl->some_taken ||
5490 				    (ctl->spec_flags & M_NOTIFICATION) ||
5491 				    ((ctl->do_not_ref_stcb == 0) &&
5492 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5493 				    ) {
5494 					/*-
5495 					 * If we have a different TCB next, and there is data
5496 					 * present, and we have already taken some (pdapi), OR we can
5497 					 * ref the tcb and no delivery has started on this stream, we
5498 					 * take it. Note we allow a notification on a different
5499 					 * assoc to be delivered.
5500 					 */
5501 					control = ctl;
5502 					goto found_one;
5503 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5504 					    (ctl->length) &&
5505 					    ((ctl->some_taken) ||
5506 					    ((ctl->do_not_ref_stcb == 0) &&
5507 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5508 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5509 					/*-
5510 					 * If we have the same tcb, and there is data present, and we
5511 					 * have the strm interleave feature present, then if we have
5512 					 * taken some (pdapi) or we can refer to that tcb AND we have
5513 					 * not started a delivery for this stream, we can take it.
5514 					 * Note we do NOT allow a notification on the same assoc to
5515 					 * be delivered.
5516 					 */
5517 					control = ctl;
5518 					goto found_one;
5519 				}
5520 				ctl = TAILQ_NEXT(ctl, next);
5521 			}
5522 		}
5523 		/*
5524 		 * if we reach here, no suitable replacement is available
5525 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5526 		 * into our held count, and it's time to sleep again.
5527 		 */
5528 		held_length = so->so_rcv.sb_cc;
5529 		control->held_length = so->so_rcv.sb_cc;
5530 		goto restart;
5531 	}
5532 	/* Clear the held length since there is something to read */
5533 	control->held_length = 0;
5534 found_one:
5535 	/*
5536 	 * If we reach here, control has some data for us to read off.
5537 	 * Note that stcb COULD be NULL.
5538 	 */
5539 	if (hold_rlock == 0) {
5540 		hold_rlock = 1;
5541 		SCTP_INP_READ_LOCK(inp);
5542 	}
5543 	control->some_taken++;
5544 	stcb = control->stcb;
5545 	if (stcb) {
5546 		if ((control->do_not_ref_stcb == 0) &&
5547 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5548 			if (freecnt_applied == 0)
5549 				stcb = NULL;
5550 		} else if (control->do_not_ref_stcb == 0) {
5551 			/* you can't free it on me please */
5552 			/*
5553 			 * The lock on the socket buffer protects us so the
5554 			 * free code will stop. But since we used the
5555 			 * socketbuf lock and the sender uses the tcb_lock
5556 			 * to increment, we need to use the atomic add to
5557 			 * the refcnt
5558 			 */
5559 			if (freecnt_applied) {
5560 #ifdef INVARIANTS
5561 				panic("refcnt already incremented");
5562 #else
5563 				SCTP_PRINTF("refcnt already incremented?\n");
5564 #endif
5565 			} else {
5566 				atomic_add_int(&stcb->asoc.refcnt, 1);
5567 				freecnt_applied = 1;
5568 			}
5569 			/*
5570 			 * Set up to remember how much we have not yet told
5571 			 * the peer our rwnd has opened up. Note we grab the
5572 			 * value from the tcb from last time. Note too that
5573 			 * sack sending clears this when a sack is sent,
5574 			 * which is fine. Once we hit the rwnd_req, we then
5575 			 * will go to the sctp_user_rcvd() that will not
5576 			 * lock until it KNOWs it MUST send a WUP-SACK.
5577 			 */
5578 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5579 			stcb->freed_by_sorcv_sincelast = 0;
5580 		}
5581 	}
5582 	if (stcb &&
5583 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5584 	    control->do_not_ref_stcb == 0) {
5585 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5586 	}
5587 
5588 	/* First let's get off the sinfo and sockaddr info */
5589 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5590 		sinfo->sinfo_stream = control->sinfo_stream;
5591 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5592 		sinfo->sinfo_flags = control->sinfo_flags;
5593 		sinfo->sinfo_ppid = control->sinfo_ppid;
5594 		sinfo->sinfo_context = control->sinfo_context;
5595 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5596 		sinfo->sinfo_tsn = control->sinfo_tsn;
5597 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5598 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5599 		nxt = TAILQ_NEXT(control, next);
5600 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5601 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5602 			struct sctp_extrcvinfo *s_extra;
5603 
5604 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5605 			if ((nxt) &&
5606 			    (nxt->length)) {
5607 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5608 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5609 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5610 				}
5611 				if (nxt->spec_flags & M_NOTIFICATION) {
5612 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5613 				}
5614 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5615 				s_extra->serinfo_next_length = nxt->length;
5616 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5617 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5618 				if (nxt->tail_mbuf != NULL) {
5619 					if (nxt->end_added) {
5620 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5621 					}
5622 				}
5623 			} else {
5624 				/*
5625 				 * we explicitly zero these, since the memcpy
5626 				 * picked up some other things beyond the older
5627 				 * sinfo_ fields that are on the control's
5628 				 * structure :-D
5629 				 */
5630 				nxt = NULL;
5631 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5632 				s_extra->serinfo_next_aid = 0;
5633 				s_extra->serinfo_next_length = 0;
5634 				s_extra->serinfo_next_ppid = 0;
5635 				s_extra->serinfo_next_stream = 0;
5636 			}
5637 		}
5638 		/*
5639 		 * update off the real current cum-ack, if we have an stcb.
5640 		 */
5641 		if ((control->do_not_ref_stcb == 0) && stcb)
5642 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5643 		/*
5644 		 * mask off the high bits, we keep the actual chunk bits in
5645 		 * there.
5646 		 */
5647 		sinfo->sinfo_flags &= 0x00ff;
5648 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5649 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5650 		}
5651 	}
5652 #ifdef SCTP_ASOCLOG_OF_TSNS
5653 	{
5654 		int index, newindex;
5655 		struct sctp_pcbtsn_rlog *entry;
5656 
5657 		do {
5658 			index = inp->readlog_index;
5659 			newindex = index + 1;
5660 			if (newindex >= SCTP_READ_LOG_SIZE) {
5661 				newindex = 0;
5662 			}
5663 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5664 		entry = &inp->readlog[index];
5665 		entry->vtag = control->sinfo_assoc_id;
5666 		entry->strm = control->sinfo_stream;
5667 		entry->seq = (uint16_t)control->mid;
5668 		entry->sz = control->length;
5669 		entry->flgs = control->sinfo_flags;
5670 	}
5671 #endif
5672 	if ((fromlen > 0) && (from != NULL)) {
5673 		union sctp_sockstore store;
5674 		size_t len;
5675 
5676 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5677 #ifdef INET6
5678 		case AF_INET6:
5679 			len = sizeof(struct sockaddr_in6);
5680 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5681 			store.sin6.sin6_port = control->port_from;
5682 			break;
5683 #endif
5684 #ifdef INET
5685 		case AF_INET:
5686 #ifdef INET6
5687 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5688 				len = sizeof(struct sockaddr_in6);
5689 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5690 				    &store.sin6);
5691 				store.sin6.sin6_port = control->port_from;
5692 			} else {
5693 				len = sizeof(struct sockaddr_in);
5694 				store.sin = control->whoFrom->ro._l_addr.sin;
5695 				store.sin.sin_port = control->port_from;
5696 			}
5697 #else
5698 			len = sizeof(struct sockaddr_in);
5699 			store.sin = control->whoFrom->ro._l_addr.sin;
5700 			store.sin.sin_port = control->port_from;
5701 #endif
5702 			break;
5703 #endif
5704 		default:
5705 			len = 0;
5706 			break;
5707 		}
5708 		memcpy(from, &store, min((size_t)fromlen, len));
5709 #ifdef INET6
5710 		{
5711 			struct sockaddr_in6 lsa6, *from6;
5712 
5713 			from6 = (struct sockaddr_in6 *)from;
5714 			sctp_recover_scope_mac(from6, (&lsa6));
5715 		}
5716 #endif
5717 	}
5718 	if (hold_rlock) {
5719 		SCTP_INP_READ_UNLOCK(inp);
5720 		hold_rlock = 0;
5721 	}
5722 	if (hold_sblock) {
5723 		SOCKBUF_UNLOCK(&so->so_rcv);
5724 		hold_sblock = 0;
5725 	}
5726 	/* now copy out what data we can */
5727 	if (mp == NULL) {
5728 		/* copy out each mbuf in the chain up to length */
5729 get_more_data:
5730 		m = control->data;
5731 		while (m) {
5732 			/* Move out all we can */
5733 			cp_len = uio->uio_resid;
5734 			my_len = SCTP_BUF_LEN(m);
5735 			if (cp_len > my_len) {
5736 				/* not enough in this buf */
5737 				cp_len = my_len;
5738 			}
5739 			if (hold_rlock) {
5740 				SCTP_INP_READ_UNLOCK(inp);
5741 				hold_rlock = 0;
5742 			}
5743 			if (cp_len > 0)
5744 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5745 			/* re-read */
5746 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5747 				goto release;
5748 			}
5749 
5750 			if ((control->do_not_ref_stcb == 0) && stcb &&
5751 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5752 				no_rcv_needed = 1;
5753 			}
5754 			if (error) {
5755 				/* error we are out of here */
5756 				goto release;
5757 			}
5758 			SCTP_INP_READ_LOCK(inp);
5759 			hold_rlock = 1;
5760 			if (cp_len == SCTP_BUF_LEN(m)) {
5761 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5762 				    (control->end_added)) {
5763 					out_flags |= MSG_EOR;
5764 					if ((control->do_not_ref_stcb == 0) &&
5765 					    (control->stcb != NULL) &&
5766 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5767 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5768 				}
5769 				if (control->spec_flags & M_NOTIFICATION) {
5770 					out_flags |= MSG_NOTIFICATION;
5771 				}
5772 				/* we ate up the mbuf */
5773 				if (in_flags & MSG_PEEK) {
5774 					/* just looking */
5775 					m = SCTP_BUF_NEXT(m);
5776 					copied_so_far += cp_len;
5777 				} else {
5778 					/* dispose of the mbuf */
5779 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5780 						sctp_sblog(&so->so_rcv,
5781 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5782 					}
5783 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5784 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5785 						sctp_sblog(&so->so_rcv,
5786 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5787 					}
5788 					copied_so_far += cp_len;
5789 					freed_so_far += (uint32_t)cp_len;
5790 					freed_so_far += MSIZE;
5791 					atomic_subtract_int(&control->length, cp_len);
5792 					control->data = sctp_m_free(m);
5793 					m = control->data;
5794 					/*
5795 					 * been through it all; we must hold the sb
5796 					 * lock, so it is ok to null the tail
5797 					 */
5798 					if (control->data == NULL) {
5799 #ifdef INVARIANTS
5800 						if ((control->end_added == 0) ||
5801 						    (TAILQ_NEXT(control, next) == NULL)) {
5802 							/*
5803 							 * If the end is not
5804 							 * added, OR the
5805 							 * next is NOT null
5806 							 * we MUST have the
5807 							 * lock.
5808 							 */
5809 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5810 								panic("Hmm we don't own the lock?");
5811 							}
5812 						}
5813 #endif
5814 						control->tail_mbuf = NULL;
5815 #ifdef INVARIANTS
5816 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5817 							panic("end_added, nothing left and no MSG_EOR");
5818 						}
5819 #endif
5820 					}
5821 				}
5822 			} else {
5823 				/* Do we need to trim the mbuf? */
5824 				if (control->spec_flags & M_NOTIFICATION) {
5825 					out_flags |= MSG_NOTIFICATION;
5826 				}
5827 				if ((in_flags & MSG_PEEK) == 0) {
5828 					SCTP_BUF_RESV_UF(m, cp_len);
5829 					SCTP_BUF_LEN(m) -= (int)cp_len;
5830 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5831 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5832 					}
5833 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5834 					if ((control->do_not_ref_stcb == 0) &&
5835 					    stcb) {
5836 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5837 					}
5838 					copied_so_far += cp_len;
5839 					freed_so_far += (uint32_t)cp_len;
5840 					freed_so_far += MSIZE;
5841 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5842 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5843 						    SCTP_LOG_SBRESULT, 0);
5844 					}
5845 					atomic_subtract_int(&control->length, cp_len);
5846 				} else {
5847 					copied_so_far += cp_len;
5848 				}
5849 			}
5850 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5851 				break;
5852 			}
5853 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5854 			    (control->do_not_ref_stcb == 0) &&
5855 			    (freed_so_far >= rwnd_req)) {
5856 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5857 			}
5858 		}		/* end while(m) */
5859 		/*
5860 		 * At this point we have looked at it all and we either have
5861 		 * a MSG_EOR, or have read all the user wants... <OR>
5862 		 * control->length == 0.
5863 		 */
5864 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5865 			/* we are done with this control */
5866 			if (control->length == 0) {
5867 				if (control->data) {
5868 #ifdef INVARIANTS
5869 					panic("control->data not null at read eor?");
5870 #else
5871 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5872 					sctp_m_freem(control->data);
5873 					control->data = NULL;
5874 #endif
5875 				}
5876 		done_with_control:
5877 				if (hold_rlock == 0) {
5878 					SCTP_INP_READ_LOCK(inp);
5879 					hold_rlock = 1;
5880 				}
5881 				TAILQ_REMOVE(&inp->read_queue, control, next);
5882 				/* Add back any hidden data */
5883 				if (control->held_length) {
5884 					held_length = 0;
5885 					control->held_length = 0;
5886 					wakeup_read_socket = 1;
5887 				}
5888 				if (control->aux_data) {
5889 					sctp_m_free(control->aux_data);
5890 					control->aux_data = NULL;
5891 				}
5892 				no_rcv_needed = control->do_not_ref_stcb;
5893 				sctp_free_remote_addr(control->whoFrom);
5894 				control->data = NULL;
5895 #ifdef INVARIANTS
5896 				if (control->on_strm_q) {
5897 					panic("About to free ctl:%p so:%p and its in %d",
5898 					    control, so, control->on_strm_q);
5899 				}
5900 #endif
5901 				sctp_free_a_readq(stcb, control);
5902 				control = NULL;
5903 				if ((freed_so_far >= rwnd_req) &&
5904 				    (no_rcv_needed == 0))
5905 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5906 
5907 			} else {
5908 				/*
5909 				 * The user did not read all of this
5910 				 * message, turn off the returned MSG_EOR
5911 				 * since we are leaving more behind on the
5912 				 * control to read.
5913 				 */
5914 #ifdef INVARIANTS
5915 				if (control->end_added &&
5916 				    (control->data == NULL) &&
5917 				    (control->tail_mbuf == NULL)) {
5918 					panic("Gak, control->length is corrupt?");
5919 				}
5920 #endif
5921 				no_rcv_needed = control->do_not_ref_stcb;
5922 				out_flags &= ~MSG_EOR;
5923 			}
5924 		}
5925 		if (out_flags & MSG_EOR) {
5926 			goto release;
5927 		}
5928 		if ((uio->uio_resid == 0) ||
5929 		    ((in_eeor_mode) &&
5930 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5931 			goto release;
5932 		}
5933 		/*
5934 		 * If I hit here the receiver wants more and this message is
5935 		 * NOT done (pd-api). So two questions: can we block? If not,
5936 		 * we are done. Did the user NOT set MSG_WAITALL?
5937 		 */
5938 		if (block_allowed == 0) {
5939 			goto release;
5940 		}
5941 		/*
5942 		 * We need to wait for more data. A few things: - We don't
5943 		 * sbunlock() so we don't get someone else reading. - We
5944 		 * must be sure to account for the case where what is added
5945 		 * is NOT to our control when we wakeup.
5946 		 */
5947 
5948 		/*
5949 		 * Do we need to tell the transport a rwnd update might be
5950 		 * needed before we go to sleep?
5951 		 */
5952 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5953 		    ((freed_so_far >= rwnd_req) &&
5954 		    (control->do_not_ref_stcb == 0) &&
5955 		    (no_rcv_needed == 0))) {
5956 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5957 		}
5958 wait_some_more:
5959 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5960 			goto release;
5961 		}
5962 
5963 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5964 			goto release;
5965 
5966 		if (hold_rlock == 1) {
5967 			SCTP_INP_READ_UNLOCK(inp);
5968 			hold_rlock = 0;
5969 		}
5970 		if (hold_sblock == 0) {
5971 			SOCKBUF_LOCK(&so->so_rcv);
5972 			hold_sblock = 1;
5973 		}
5974 		if ((copied_so_far) && (control->length == 0) &&
5975 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5976 			goto release;
5977 		}
5978 		if (so->so_rcv.sb_cc <= control->held_length) {
5979 			error = sbwait(&so->so_rcv);
5980 			if (error) {
5981 				goto release;
5982 			}
5983 			control->held_length = 0;
5984 		}
5985 		if (hold_sblock) {
5986 			SOCKBUF_UNLOCK(&so->so_rcv);
5987 			hold_sblock = 0;
5988 		}
5989 		if (control->length == 0) {
5990 			/* still nothing here */
5991 			if (control->end_added == 1) {
5992 				/* he aborted, or is done, i.e. did a shutdown */
5993 				out_flags |= MSG_EOR;
5994 				if (control->pdapi_aborted) {
5995 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5996 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5997 
5998 					out_flags |= MSG_TRUNC;
5999 				} else {
6000 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6001 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6002 				}
6003 				goto done_with_control;
6004 			}
6005 			if (so->so_rcv.sb_cc > held_length) {
6006 				control->held_length = so->so_rcv.sb_cc;
6007 				held_length = 0;
6008 			}
6009 			goto wait_some_more;
6010 		} else if (control->data == NULL) {
6011 			/*
6012 			 * we must re-sync since data is probably being
6013 			 * added
6014 			 */
6015 			SCTP_INP_READ_LOCK(inp);
6016 			if ((control->length > 0) && (control->data == NULL)) {
6017 				/*
6018 				 * big trouble; we have the lock and it's
6019 				 * corrupt?
6020 				 */
6021 #ifdef INVARIANTS
6022 				panic("Impossible data==NULL length !=0");
6023 #endif
6024 				out_flags |= MSG_EOR;
6025 				out_flags |= MSG_TRUNC;
6026 				control->length = 0;
6027 				SCTP_INP_READ_UNLOCK(inp);
6028 				goto done_with_control;
6029 			}
6030 			SCTP_INP_READ_UNLOCK(inp);
6031 			/* We will fall around to get more data */
6032 		}
6033 		goto get_more_data;
6034 	} else {
6035 		/*-
6036 		 * Give caller back the mbuf chain,
6037 		 * store in uio_resid the length
6038 		 */
6039 		wakeup_read_socket = 0;
6040 		if ((control->end_added == 0) ||
6041 		    (TAILQ_NEXT(control, next) == NULL)) {
6042 			/* Need to get rlock */
6043 			if (hold_rlock == 0) {
6044 				SCTP_INP_READ_LOCK(inp);
6045 				hold_rlock = 1;
6046 			}
6047 		}
6048 		if (control->end_added) {
6049 			out_flags |= MSG_EOR;
6050 			if ((control->do_not_ref_stcb == 0) &&
6051 			    (control->stcb != NULL) &&
6052 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6053 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6054 		}
6055 		if (control->spec_flags & M_NOTIFICATION) {
6056 			out_flags |= MSG_NOTIFICATION;
6057 		}
6058 		uio->uio_resid = control->length;
6059 		*mp = control->data;
6060 		m = control->data;
6061 		while (m) {
6062 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6063 				sctp_sblog(&so->so_rcv,
6064 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6065 			}
6066 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6067 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6068 			freed_so_far += MSIZE;
6069 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6070 				sctp_sblog(&so->so_rcv,
6071 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6072 			}
6073 			m = SCTP_BUF_NEXT(m);
6074 		}
6075 		control->data = control->tail_mbuf = NULL;
6076 		control->length = 0;
6077 		if (out_flags & MSG_EOR) {
6078 			/* Done with this control */
6079 			goto done_with_control;
6080 		}
6081 	}
6082 release:
6083 	if (hold_rlock == 1) {
6084 		SCTP_INP_READ_UNLOCK(inp);
6085 		hold_rlock = 0;
6086 	}
6087 	if (hold_sblock == 1) {
6088 		SOCKBUF_UNLOCK(&so->so_rcv);
6089 		hold_sblock = 0;
6090 	}
6091 
6092 	sbunlock(&so->so_rcv);
6093 	sockbuf_lock = 0;
6094 
6095 release_unlocked:
6096 	if (hold_sblock) {
6097 		SOCKBUF_UNLOCK(&so->so_rcv);
6098 		hold_sblock = 0;
6099 	}
6100 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6101 		if ((freed_so_far >= rwnd_req) &&
6102 		    (control && (control->do_not_ref_stcb == 0)) &&
6103 		    (no_rcv_needed == 0))
6104 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6105 	}
6106 out:
6107 	if (msg_flags) {
6108 		*msg_flags = out_flags;
6109 	}
6110 	if (((out_flags & MSG_EOR) == 0) &&
6111 	    ((in_flags & MSG_PEEK) == 0) &&
6112 	    (sinfo) &&
6113 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6114 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6115 		struct sctp_extrcvinfo *s_extra;
6116 
6117 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6118 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6119 	}
6120 	if (hold_rlock == 1) {
6121 		SCTP_INP_READ_UNLOCK(inp);
6122 	}
6123 	if (hold_sblock) {
6124 		SOCKBUF_UNLOCK(&so->so_rcv);
6125 	}
6126 	if (sockbuf_lock) {
6127 		sbunlock(&so->so_rcv);
6128 	}
6129 
6130 	if (freecnt_applied) {
6131 		/*
6132 		 * The lock on the socket buffer protects us so the free
6133 		 * code will stop. But since we used the socketbuf lock and
6134 		 * the sender uses the tcb_lock to increment, we need to use
6135 		 * the atomic add to the refcnt.
6136 		 */
6137 		if (stcb == NULL) {
6138 #ifdef INVARIANTS
6139 			panic("stcb for refcnt has gone NULL?");
6140 			goto stage_left;
6141 #else
6142 			goto stage_left;
6143 #endif
6144 		}
6145 		/* Save the value back for next time */
6146 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6147 		atomic_add_int(&stcb->asoc.refcnt, -1);
6148 	}
6149 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6150 		if (stcb) {
6151 			sctp_misc_ints(SCTP_SORECV_DONE,
6152 			    freed_so_far,
6153 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6154 			    stcb->asoc.my_rwnd,
6155 			    so->so_rcv.sb_cc);
6156 		} else {
6157 			sctp_misc_ints(SCTP_SORECV_DONE,
6158 			    freed_so_far,
6159 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6160 			    0,
6161 			    so->so_rcv.sb_cc);
6162 		}
6163 	}
6164 stage_left:
6165 	if (wakeup_read_socket) {
6166 		sctp_sorwakeup(inp, so);
6167 	}
6168 	return (error);
6169 }
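/*
 * The pd-api behaviour above means one user message can be returned in
 * several pieces; a hedged userland sketch of draining a single message
 * with sctp_recvmsg(3), looping until MSG_EOR is set (sd and the chunk
 * size are illustrative):
 *
 *	char chunk[4096];
 *	int flags;
 *	ssize_t n;
 *
 *	do {
 *		flags = 0;
 *		n = sctp_recvmsg(sd, chunk, sizeof(chunk), NULL, NULL,
 *		    NULL, &flags);
 *		if (n <= 0)
 *			break;
 *		// append chunk[0..n) to the reassembled message here
 *	} while ((flags & MSG_EOR) == 0);
 */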
6170 
6171 
6172 #ifdef SCTP_MBUF_LOGGING
6173 struct mbuf *
6174 sctp_m_free(struct mbuf *m)
6175 {
6176 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6177 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6178 	}
6179 	return (m_free(m));
6180 }
6181 
6182 void
6183 sctp_m_freem(struct mbuf *mb)
6184 {
6185 	while (mb != NULL)
6186 		mb = sctp_m_free(mb);
6187 }
6188 
6189 #endif
6190 
6191 int
6192 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6193 {
6194 	/*
6195 	 * Given a local address, for all associations that hold the
6196 	 * address, request a peer-set-primary.
6197 	 */
6198 	struct sctp_ifa *ifa;
6199 	struct sctp_laddr *wi;
6200 
6201 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6202 	if (ifa == NULL) {
6203 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6204 		return (EADDRNOTAVAIL);
6205 	}
6206 	/*
6207 	 * Now that we have the ifa we must awaken the iterator with this
6208 	 * message.
6209 	 */
6210 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6211 	if (wi == NULL) {
6212 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6213 		return (ENOMEM);
6214 	}
6215 	/* Now incr the count and init the wi structure */
6216 	SCTP_INCR_LADDR_COUNT();
6217 	memset(wi, 0, sizeof(*wi));
6218 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6219 	wi->ifa = ifa;
6220 	wi->action = SCTP_SET_PRIM_ADDR;
6221 	atomic_add_int(&ifa->refcount, 1);
6222 
6223 	/* Now add it to the work queue */
6224 	SCTP_WQ_ADDR_LOCK();
6225 	/*
6226 	 * Should this really be a tailq? As it is we will process the
6227 	 * newest first :-0
6228 	 */
6229 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6230 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6231 	    (struct sctp_inpcb *)NULL,
6232 	    (struct sctp_tcb *)NULL,
6233 	    (struct sctp_nets *)NULL);
6234 	SCTP_WQ_ADDR_UNLOCK();
6235 	return (0);
6236 }
6237 
6238 
6239 int
6240 sctp_soreceive(struct socket *so,
6241     struct sockaddr **psa,
6242     struct uio *uio,
6243     struct mbuf **mp0,
6244     struct mbuf **controlp,
6245     int *flagsp)
6246 {
6247 	int error, fromlen;
6248 	uint8_t sockbuf[256];
6249 	struct sockaddr *from;
6250 	struct sctp_extrcvinfo sinfo;
6251 	int filling_sinfo = 1;
6252 	int flags;
6253 	struct sctp_inpcb *inp;
6254 
6255 	inp = (struct sctp_inpcb *)so->so_pcb;
6256 	/* pickup the assoc we are reading from */
6257 	if (inp == NULL) {
6258 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6259 		return (EINVAL);
6260 	}
6261 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6262 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6263 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6264 	    (controlp == NULL)) {
6265 		/* user does not want the sndrcv ctl */
6266 		filling_sinfo = 0;
6267 	}
6268 	if (psa) {
6269 		from = (struct sockaddr *)sockbuf;
6270 		fromlen = sizeof(sockbuf);
6271 		from->sa_len = 0;
6272 	} else {
6273 		from = NULL;
6274 		fromlen = 0;
6275 	}
6276 
6277 	if (filling_sinfo) {
6278 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6279 	}
6280 	if (flagsp != NULL) {
6281 		flags = *flagsp;
6282 	} else {
6283 		flags = 0;
6284 	}
6285 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6286 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6287 	if (flagsp != NULL) {
6288 		*flagsp = flags;
6289 	}
6290 	if (controlp != NULL) {
6291 		/* copy back the sinfo in a CMSG format */
6292 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6293 			*controlp = sctp_build_ctl_nchunk(inp,
6294 			    (struct sctp_sndrcvinfo *)&sinfo);
6295 		} else {
6296 			*controlp = NULL;
6297 		}
6298 	}
6299 	if (psa) {
6300 		/* copy back the address info */
6301 		if (from && from->sa_len) {
6302 			*psa = sodupsockaddr(from, M_NOWAIT);
6303 		} else {
6304 			*psa = NULL;
6305 		}
6306 	}
6307 	return (error);
6308 }
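/*
 * When controlp is non-NULL the sinfo gathered above is handed back as
 * ancillary data; a hedged sketch of picking it up in userland with
 * plain recvmsg(2), assuming the per-message sndrcvinfo events have
 * been enabled on the socket sd (buffer sizes are illustrative):
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(sinfo))];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(sd, &msg, 0) > 0)
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *			    cmsg->cmsg_type == SCTP_SNDRCV)
 *				memcpy(&sinfo, CMSG_DATA(cmsg),
 *				    sizeof(sinfo));
 */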
6309 
6310 
6311 
6312 
6313 
6314 int
6315 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6316     int totaddr, int *error)
6317 {
6318 	int added = 0;
6319 	int i;
6320 	struct sctp_inpcb *inp;
6321 	struct sockaddr *sa;
6322 	size_t incr = 0;
6323 #ifdef INET
6324 	struct sockaddr_in *sin;
6325 #endif
6326 #ifdef INET6
6327 	struct sockaddr_in6 *sin6;
6328 #endif
6329 
6330 	sa = addr;
6331 	inp = stcb->sctp_ep;
6332 	*error = 0;
6333 	for (i = 0; i < totaddr; i++) {
6334 		switch (sa->sa_family) {
6335 #ifdef INET
6336 		case AF_INET:
6337 			incr = sizeof(struct sockaddr_in);
6338 			sin = (struct sockaddr_in *)sa;
6339 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6340 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6341 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6342 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6343 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6344 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6345 				*error = EINVAL;
6346 				goto out_now;
6347 			}
6348 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6349 			    SCTP_DONOT_SETSCOPE,
6350 			    SCTP_ADDR_IS_CONFIRMED)) {
6351 				/* assoc gone no un-lock */
6352 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6353 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6354 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6355 				*error = ENOBUFS;
6356 				goto out_now;
6357 			}
6358 			added++;
6359 			break;
6360 #endif
6361 #ifdef INET6
6362 		case AF_INET6:
6363 			incr = sizeof(struct sockaddr_in6);
6364 			sin6 = (struct sockaddr_in6 *)sa;
6365 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6366 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6367 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6368 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6369 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6370 				*error = EINVAL;
6371 				goto out_now;
6372 			}
6373 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6374 			    SCTP_DONOT_SETSCOPE,
6375 			    SCTP_ADDR_IS_CONFIRMED)) {
6376 				/* assoc gone no un-lock */
6377 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6378 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6379 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6380 				*error = ENOBUFS;
6381 				goto out_now;
6382 			}
6383 			added++;
6384 			break;
6385 #endif
6386 		default:
6387 			break;
6388 		}
6389 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6390 	}
6391 out_now:
6392 	return (added);
6393 }
6394 
6395 int
6396 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6397     unsigned int totaddr,
6398     unsigned int *num_v4, unsigned int *num_v6,
6399     unsigned int limit)
6400 {
6401 	struct sockaddr *sa;
6402 	struct sctp_tcb *stcb;
6403 	unsigned int incr, at, i;
6404 
6405 	at = 0;
6406 	sa = addr;
6407 	*num_v6 = *num_v4 = 0;
6408 	/* account and validate addresses */
6409 	if (totaddr == 0) {
6410 		return (EINVAL);
6411 	}
6412 	for (i = 0; i < totaddr; i++) {
6413 		if (at + sizeof(struct sockaddr) > limit) {
6414 			return (EINVAL);
6415 		}
6416 		switch (sa->sa_family) {
6417 #ifdef INET
6418 		case AF_INET:
6419 			incr = (unsigned int)sizeof(struct sockaddr_in);
6420 			if (sa->sa_len != incr) {
6421 				return (EINVAL);
6422 			}
6423 			(*num_v4) += 1;
6424 			break;
6425 #endif
6426 #ifdef INET6
6427 		case AF_INET6:
6428 			{
6429 				struct sockaddr_in6 *sin6;
6430 
6431 				sin6 = (struct sockaddr_in6 *)sa;
6432 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6433 					/* Must be non-mapped for connectx */
6434 					return (EINVAL);
6435 				}
6436 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6437 				if (sa->sa_len != incr) {
6438 					return (EINVAL);
6439 				}
6440 				(*num_v6) += 1;
6441 				break;
6442 			}
6443 #endif
6444 		default:
6445 			return (EINVAL);
6446 		}
6447 		if ((at + incr) > limit) {
6448 			return (EINVAL);
6449 		}
6450 		SCTP_INP_INCR_REF(inp);
6451 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6452 		if (stcb != NULL) {
6453 			SCTP_TCB_UNLOCK(stcb);
6454 			return (EALREADY);
6455 		} else {
6456 			SCTP_INP_DECR_REF(inp);
6457 		}
6458 		at += incr;
6459 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6460 	}
6461 	return (0);
6462 }
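/*
 * Both connectx helpers above walk a packed array of sockaddrs: each
 * entry begins right after the previous one and its size comes from the
 * address family (sa_len must match).  A hedged userland sketch of
 * building such a buffer for sctp_connectx(3); sd, the port and the
 * addresses are illustrative:
 *
 *	char pkd[sizeof(struct sockaddr_in) + sizeof(struct sockaddr_in6)];
 *	struct sockaddr_in *sin = (struct sockaddr_in *)pkd;
 *	struct sockaddr_in6 *sin6 =
 *	    (struct sockaddr_in6 *)(pkd + sizeof(*sin));
 *	sctp_assoc_t aid;
 *
 *	memset(pkd, 0, sizeof(pkd));
 *	sin->sin_family = AF_INET;
 *	sin->sin_len = sizeof(*sin);
 *	sin->sin_port = htons(5001);
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	sin6->sin6_family = AF_INET6;
 *	sin6->sin6_len = sizeof(*sin6);
 *	sin6->sin6_port = htons(5001);
 *	inet_pton(AF_INET6, "2001:db8::1", &sin6->sin6_addr);
 *	(void)sctp_connectx(sd, (struct sockaddr *)pkd, 2, &aid);
 */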
6463 
6464 /*
6465  * sctp_bindx(ADD) for one address.
6466  * assumes all arguments are valid/checked by caller.
6467  */
6468 void
6469 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6470     struct sockaddr *sa, sctp_assoc_t assoc_id,
6471     uint32_t vrf_id, int *error, void *p)
6472 {
6473 	struct sockaddr *addr_touse;
6474 #if defined(INET) && defined(INET6)
6475 	struct sockaddr_in sin;
6476 #endif
6477 
6478 	/* see if we're bound all already! */
6479 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6480 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6481 		*error = EINVAL;
6482 		return;
6483 	}
6484 	addr_touse = sa;
6485 #ifdef INET6
6486 	if (sa->sa_family == AF_INET6) {
6487 #ifdef INET
6488 		struct sockaddr_in6 *sin6;
6489 
6490 #endif
6491 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		}
6496 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6497 			/* can only bind v6 on PF_INET6 sockets */
6498 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6499 			*error = EINVAL;
6500 			return;
6501 		}
6502 #ifdef INET
6503 		sin6 = (struct sockaddr_in6 *)addr_touse;
6504 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6505 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6506 			    SCTP_IPV6_V6ONLY(inp)) {
6507 				/* can't bind v4-mapped on PF_INET sockets */
6508 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6509 				*error = EINVAL;
6510 				return;
6511 			}
6512 			in6_sin6_2_sin(&sin, sin6);
6513 			addr_touse = (struct sockaddr *)&sin;
6514 		}
6515 #endif
6516 	}
6517 #endif
6518 #ifdef INET
6519 	if (sa->sa_family == AF_INET) {
6520 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6521 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6522 			*error = EINVAL;
6523 			return;
6524 		}
6525 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6526 		    SCTP_IPV6_V6ONLY(inp)) {
6527 			/* can't bind v4 on PF_INET sockets */
6528 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6529 			*error = EINVAL;
6530 			return;
6531 		}
6532 	}
6533 #endif
6534 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6535 		if (p == NULL) {
6536 			/* Can't get proc for Net/Open BSD */
6537 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6538 			*error = EINVAL;
6539 			return;
6540 		}
6541 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6542 		return;
6543 	}
6544 	/*
6545 	 * No locks required here since bind and mgmt_ep_sa all do their own
6546 	 * locking. If we do something for the FIX: below we may need to
6547 	 * lock in that case.
6548 	 */
6549 	if (assoc_id == 0) {
6550 		/* add the address */
6551 		struct sctp_inpcb *lep;
6552 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6553 
6554 		/* validate the incoming port */
6555 		if ((lsin->sin_port != 0) &&
6556 		    (lsin->sin_port != inp->sctp_lport)) {
6557 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6558 			*error = EINVAL;
6559 			return;
6560 		} else {
6561 			/* user specified 0 port, set it to existing port */
6562 			lsin->sin_port = inp->sctp_lport;
6563 		}
6564 
6565 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6566 		if (lep != NULL) {
6567 			/*
6568 			 * We must decrement the refcount since we have the
6569 			 * ep already and are binding. No remove going on
6570 			 * here.
6571 			 */
6572 			SCTP_INP_DECR_REF(lep);
6573 		}
6574 		if (lep == inp) {
6575 			/* already bound to it.. ok */
6576 			return;
6577 		} else if (lep == NULL) {
6578 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6579 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6580 			    SCTP_ADD_IP_ADDRESS,
6581 			    vrf_id, NULL);
6582 		} else {
6583 			*error = EADDRINUSE;
6584 		}
6585 		if (*error)
6586 			return;
6587 	} else {
6588 		/*
6589 		 * FIX: decide whether we allow assoc based bindx
6590 		 */
6591 	}
6592 }
6593 
6594 /*
6595  * sctp_bindx(DELETE) for one address.
6596  * assumes all arguments are valid/checked by caller.
6597  */
6598 void
6599 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6600     struct sockaddr *sa, sctp_assoc_t assoc_id,
6601     uint32_t vrf_id, int *error)
6602 {
6603 	struct sockaddr *addr_touse;
6604 #if defined(INET) && defined(INET6)
6605 	struct sockaddr_in sin;
6606 #endif
6607 
6608 	/* see if we're bound all already! */
6609 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6610 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6611 		*error = EINVAL;
6612 		return;
6613 	}
6614 	addr_touse = sa;
6615 #ifdef INET6
6616 	if (sa->sa_family == AF_INET6) {
6617 #ifdef INET
6618 		struct sockaddr_in6 *sin6;
6619 #endif
6620 
6621 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6622 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6623 			*error = EINVAL;
6624 			return;
6625 		}
6626 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6627 			/* can only bind v6 on PF_INET6 sockets */
6628 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6629 			*error = EINVAL;
6630 			return;
6631 		}
6632 #ifdef INET
6633 		sin6 = (struct sockaddr_in6 *)addr_touse;
6634 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6635 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6636 			    SCTP_IPV6_V6ONLY(inp)) {
6637 				/* can't bind mapped-v4 on PF_INET sockets */
6638 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6639 				*error = EINVAL;
6640 				return;
6641 			}
6642 			in6_sin6_2_sin(&sin, sin6);
6643 			addr_touse = (struct sockaddr *)&sin;
6644 		}
6645 #endif
6646 	}
6647 #endif
6648 #ifdef INET
6649 	if (sa->sa_family == AF_INET) {
6650 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6651 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6652 			*error = EINVAL;
6653 			return;
6654 		}
6655 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6656 		    SCTP_IPV6_V6ONLY(inp)) {
6657 			/* can't bind v4 on PF_INET sockets */
6658 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6659 			*error = EINVAL;
6660 			return;
6661 		}
6662 	}
6663 #endif
6664 	/*
6665 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6666 	 * below is ever changed we may need to lock before calling
6667 	 * association level binding.
6668 	 */
6669 	if (assoc_id == 0) {
6670 		/* delete the address */
6671 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6672 		    SCTP_DEL_IP_ADDRESS,
6673 		    vrf_id, NULL);
6674 	} else {
6675 		/*
6676 		 * FIX: decide whether we allow assoc based bindx
6677 		 */
6678 	}
6679 }
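/*
 * The two bindx helpers above back the userland sctp_bindx(3) call; a
 * hedged sketch of adding and later removing one extra IPv4 address on
 * a socket sd that is already bound to a specific address (not the
 * wildcard); the address and sd are illustrative:
 *
 *	struct sockaddr_in extra;
 *
 *	memset(&extra, 0, sizeof(extra));
 *	extra.sin_family = AF_INET;
 *	extra.sin_len = sizeof(extra);
 *	extra.sin_port = 0;	// 0 means "use the port already bound"
 *	inet_pton(AF_INET, "192.0.2.7", &extra.sin_addr);
 *	(void)sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *	    SCTP_BINDX_ADD_ADDR);
 *	// ... and later, to give the address back:
 *	(void)sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *	    SCTP_BINDX_REM_ADDR);
 */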
6680 
6681 /*
6682  * returns the valid local address count for an assoc, taking into account
6683  * all scoping rules
6684  */
6685 int
6686 sctp_local_addr_count(struct sctp_tcb *stcb)
6687 {
6688 	int loopback_scope;
6689 #if defined(INET)
6690 	int ipv4_local_scope, ipv4_addr_legal;
6691 #endif
6692 #if defined (INET6)
6693 	int local_scope, site_scope, ipv6_addr_legal;
6694 #endif
6695 	struct sctp_vrf *vrf;
6696 	struct sctp_ifn *sctp_ifn;
6697 	struct sctp_ifa *sctp_ifa;
6698 	int count = 0;
6699 
6700 	/* Turn on all the appropriate scopes */
6701 	loopback_scope = stcb->asoc.scope.loopback_scope;
6702 #if defined(INET)
6703 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6704 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6705 #endif
6706 #if defined(INET6)
6707 	local_scope = stcb->asoc.scope.local_scope;
6708 	site_scope = stcb->asoc.scope.site_scope;
6709 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6710 #endif
6711 	SCTP_IPI_ADDR_RLOCK();
6712 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6713 	if (vrf == NULL) {
6714 		/* no vrf, no addresses */
6715 		SCTP_IPI_ADDR_RUNLOCK();
6716 		return (0);
6717 	}
6718 
6719 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6720 		/*
6721 		 * bound all case: go through all ifns on the vrf
6722 		 */
6723 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6724 			if ((loopback_scope == 0) &&
6725 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6726 				continue;
6727 			}
6728 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6729 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6730 					continue;
6731 				switch (sctp_ifa->address.sa.sa_family) {
6732 #ifdef INET
6733 				case AF_INET:
6734 					if (ipv4_addr_legal) {
6735 						struct sockaddr_in *sin;
6736 
6737 						sin = &sctp_ifa->address.sin;
6738 						if (sin->sin_addr.s_addr == 0) {
6739 							/*
6740 							 * skip unspecified
6741 							 * addrs
6742 							 */
6743 							continue;
6744 						}
6745 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6746 						    &sin->sin_addr) != 0) {
6747 							continue;
6748 						}
6749 						if ((ipv4_local_scope == 0) &&
6750 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6751 							continue;
6752 						}
6753 						/* count this one */
6754 						count++;
6755 					} else {
6756 						continue;
6757 					}
6758 					break;
6759 #endif
6760 #ifdef INET6
6761 				case AF_INET6:
6762 					if (ipv6_addr_legal) {
6763 						struct sockaddr_in6 *sin6;
6764 
6765 						sin6 = &sctp_ifa->address.sin6;
6766 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6767 							continue;
6768 						}
6769 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6770 						    &sin6->sin6_addr) != 0) {
6771 							continue;
6772 						}
6773 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6774 							if (local_scope == 0)
6775 								continue;
6776 							if (sin6->sin6_scope_id == 0) {
6777 								if (sa6_recoverscope(sin6) != 0)
6778 									/* bad link local address */
6787 									continue;
6788 							}
6789 						}
6790 						if ((site_scope == 0) &&
6791 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6792 							continue;
6793 						}
6794 						/* count this one */
6795 						count++;
6796 					}
6797 					break;
6798 #endif
6799 				default:
6800 					/* TSNH - this should not happen */
6801 					break;
6802 				}
6803 			}
6804 		}
6805 	} else {
6806 		/*
6807 		 * subset bound case
6808 		 */
6809 		struct sctp_laddr *laddr;
6810 
6811 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6812 		    sctp_nxt_addr) {
6813 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6814 				continue;
6815 			}
6816 			/* count this one */
6817 			count++;
6818 		}
6819 	}
6820 	SCTP_IPI_ADDR_RUNLOCK();
6821 	return (count);
6822 }
6823 
6824 #if defined(SCTP_LOCAL_TRACE_BUF)
6825 
6826 void
6827 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6828 {
6829 	uint32_t saveindex, newindex;
6830 
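	/*
	 * Atomically claim the next slot in the circular trace buffer,
	 * retrying if another CPU advanced the index first; an index at or
	 * beyond SCTP_MAX_LOGGING_SIZE wraps to the start of the buffer.
	 */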
6831 	do {
6832 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6833 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6834 			newindex = 1;
6835 		} else {
6836 			newindex = saveindex + 1;
6837 		}
6838 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6839 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6840 		saveindex = 0;
6841 	}
6842 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6843 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6844 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6845 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6846 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6847 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6848 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6849 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6850 }
6851 
6852 #endif
6853 static void
6854 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6855     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6856 {
6857 	struct ip *iph;
6858 #ifdef INET6
6859 	struct ip6_hdr *ip6;
6860 #endif
6861 	struct mbuf *sp, *last;
6862 	struct udphdr *uhdr;
6863 	uint16_t port;
6864 
6865 	if ((m->m_flags & M_PKTHDR) == 0) {
6866 		/* Can't handle one that is not a pkt hdr */
6867 		goto out;
6868 	}
6869 	/* Pull the src port */
6870 	iph = mtod(m, struct ip *);
6871 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6872 	port = uhdr->uh_sport;
6873 	/*
6874 	 * Split out the mbuf chain. Leave the IP header in m, place the
6875 	 * rest in the sp.
6876 	 */
6877 	sp = m_split(m, off, M_NOWAIT);
6878 	if (sp == NULL) {
6879 		/* Gak, drop packet, we can't do a split */
6880 		goto out;
6881 	}
6882 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6883 		/* Gak, packet can't have an SCTP header in it - too small */
6884 		m_freem(sp);
6885 		goto out;
6886 	}
6887 	/* Now pull up the UDP header and SCTP header together */
6888 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6889 	if (sp == NULL) {
6890 		/* Gak pullup failed */
6891 		goto out;
6892 	}
6893 	/* Trim out the UDP header */
6894 	m_adj(sp, sizeof(struct udphdr));
6895 
6896 	/* Now reconstruct the mbuf chain */
6897 	for (last = m; last->m_next; last = last->m_next);
6898 	last->m_next = sp;
6899 	m->m_pkthdr.len += sp->m_pkthdr.len;
6900 	/*
6901 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6902 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6903 	 * CSUM_SCTP_VALID, this would imply that the HW also verified the
6904 	 * SCTP checksum. Therefore, clear the bit.
6905 	 */
6906 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6907 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6908 	    m->m_pkthdr.len,
6909 	    if_name(m->m_pkthdr.rcvif),
6910 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6911 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
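	/*
	 * Shrink the IP payload length to account for the stripped UDP
	 * header and hand the packet to the matching SCTP input routine.
	 */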
6912 	iph = mtod(m, struct ip *);
6913 	switch (iph->ip_v) {
6914 #ifdef INET
6915 	case IPVERSION:
6916 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6917 		sctp_input_with_port(m, off, port);
6918 		break;
6919 #endif
6920 #ifdef INET6
6921 	case IPV6_VERSION >> 4:
6922 		ip6 = mtod(m, struct ip6_hdr *);
6923 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6924 		sctp6_input_with_port(&m, &off, port);
6925 		break;
6926 #endif
6927 	default:
6928 		goto out;
6929 		break;
6930 	}
6931 	return;
6932 out:
6933 	m_freem(m);
6934 }
6935 
6936 #ifdef INET
6937 static void
6938 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6939 {
6940 	struct ip *outer_ip, *inner_ip;
6941 	struct sctphdr *sh;
6942 	struct icmp *icmp;
6943 	struct udphdr *udp;
6944 	struct sctp_inpcb *inp;
6945 	struct sctp_tcb *stcb;
6946 	struct sctp_nets *net;
6947 	struct sctp_init_chunk *ch;
6948 	struct sockaddr_in src, dst;
6949 	uint8_t type, code;
6950 
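	/*
	 * 'vip' points at the quoted inner IP header carried in the ICMP
	 * error.  Step backwards from it to locate the ICMP header and the
	 * outer IP header, then make sure the quoted data is long enough to
	 * hold the inner IP header, the UDP header, and the first 8 bytes
	 * of the SCTP common header.
	 */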
6951 	inner_ip = (struct ip *)vip;
6952 	icmp = (struct icmp *)((caddr_t)inner_ip -
6953 	    (sizeof(struct icmp) - sizeof(struct ip)));
6954 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6955 	if (ntohs(outer_ip->ip_len) <
6956 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6957 		return;
6958 	}
6959 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6960 	sh = (struct sctphdr *)(udp + 1);
6961 	memset(&src, 0, sizeof(struct sockaddr_in));
6962 	src.sin_family = AF_INET;
6963 	src.sin_len = sizeof(struct sockaddr_in);
6964 	src.sin_port = sh->src_port;
6965 	src.sin_addr = inner_ip->ip_src;
6966 	memset(&dst, 0, sizeof(struct sockaddr_in));
6967 	dst.sin_family = AF_INET;
6968 	dst.sin_len = sizeof(struct sockaddr_in);
6969 	dst.sin_port = sh->dest_port;
6970 	dst.sin_addr = inner_ip->ip_dst;
6971 	/*
6972 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6973 	 * holds our local endpoint address. Thus we reverse the dst and the
6974 	 * src in the lookup.
6975 	 */
6976 	inp = NULL;
6977 	net = NULL;
6978 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6979 	    (struct sockaddr *)&src,
6980 	    &inp, &net, 1,
6981 	    SCTP_DEFAULT_VRFID);
6982 	if ((stcb != NULL) &&
6983 	    (net != NULL) &&
6984 	    (inp != NULL)) {
6985 		/* Check the UDP port numbers */
6986 		if ((udp->uh_dport != net->port) ||
6987 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6988 			SCTP_TCB_UNLOCK(stcb);
6989 			return;
6990 		}
6991 		/* Check the verification tag */
6992 		if (ntohl(sh->v_tag) != 0) {
6993 			/*
6994 			 * This must be the verification tag used for
6995 			 * sending out packets. We don't consider packets
6996 			 * reflecting the verification tag.
6997 			 */
6998 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6999 				SCTP_TCB_UNLOCK(stcb);
7000 				return;
7001 			}
7002 		} else {
7003 			if (ntohs(outer_ip->ip_len) >=
7004 			    sizeof(struct ip) +
7005 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7006 				/*
7007 				 * In this case we can check if we got an
7008 				 * INIT chunk and if the initiate tag
7009 				 * matches.
7010 				 */
7011 				ch = (struct sctp_init_chunk *)(sh + 1);
7012 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7013 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7014 					SCTP_TCB_UNLOCK(stcb);
7015 					return;
7016 				}
7017 			} else {
7018 				SCTP_TCB_UNLOCK(stcb);
7019 				return;
7020 			}
7021 		}
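		/*
		 * A port unreachable for the tunneling UDP port means the
		 * peer is not listening for SCTP over UDP, so report it as
		 * protocol unreachable.
		 */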
7022 		type = icmp->icmp_type;
7023 		code = icmp->icmp_code;
7024 		if ((type == ICMP_UNREACH) &&
7025 		    (code == ICMP_UNREACH_PORT)) {
7026 			code = ICMP_UNREACH_PROTOCOL;
7027 		}
7028 		sctp_notify(inp, stcb, net, type, code,
7029 		    ntohs(inner_ip->ip_len),
7030 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7031 	} else {
7032 		if ((stcb == NULL) && (inp != NULL)) {
7033 			/* reduce ref-count */
7034 			SCTP_INP_WLOCK(inp);
7035 			SCTP_INP_DECR_REF(inp);
7036 			SCTP_INP_WUNLOCK(inp);
7037 		}
7038 		if (stcb) {
7039 			SCTP_TCB_UNLOCK(stcb);
7040 		}
7041 	}
7042 	return;
7043 }
7044 #endif
7045 
7046 #ifdef INET6
7047 static void
7048 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7049 {
7050 	struct ip6ctlparam *ip6cp;
7051 	struct sctp_inpcb *inp;
7052 	struct sctp_tcb *stcb;
7053 	struct sctp_nets *net;
7054 	struct sctphdr sh;
7055 	struct udphdr udp;
7056 	struct sockaddr_in6 src, dst;
7057 	uint8_t type, code;
7058 
7059 	ip6cp = (struct ip6ctlparam *)d;
7060 	/*
7061 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
7062 	 */
7063 	if (ip6cp->ip6c_m == NULL) {
7064 		return;
7065 	}
7066 	/*
7067 	 * Check if we can safely examine the ports and the verification tag
7068 	 * of the SCTP common header.
7069 	 */
7070 	if (ip6cp->ip6c_m->m_pkthdr.len <
7071 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7072 		return;
7073 	}
7074 	/* Copy out the UDP header. */
7075 	memset(&udp, 0, sizeof(struct udphdr));
7076 	m_copydata(ip6cp->ip6c_m,
7077 	    ip6cp->ip6c_off,
7078 	    sizeof(struct udphdr),
7079 	    (caddr_t)&udp);
7080 	/* Copy out the port numbers and the verification tag. */
7081 	memset(&sh, 0, sizeof(struct sctphdr));
7082 	m_copydata(ip6cp->ip6c_m,
7083 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7084 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7085 	    (caddr_t)&sh);
7086 	memset(&src, 0, sizeof(struct sockaddr_in6));
7087 	src.sin6_family = AF_INET6;
7088 	src.sin6_len = sizeof(struct sockaddr_in6);
7089 	src.sin6_port = sh.src_port;
7090 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7091 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7092 		return;
7093 	}
7094 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7095 	dst.sin6_family = AF_INET6;
7096 	dst.sin6_len = sizeof(struct sockaddr_in6);
7097 	dst.sin6_port = sh.dest_port;
7098 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7099 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7100 		return;
7101 	}
7102 	inp = NULL;
7103 	net = NULL;
7104 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7105 	    (struct sockaddr *)&src,
7106 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7107 	if ((stcb != NULL) &&
7108 	    (net != NULL) &&
7109 	    (inp != NULL)) {
7110 		/* Check the UDP port numbers */
7111 		if ((udp.uh_dport != net->port) ||
7112 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7113 			SCTP_TCB_UNLOCK(stcb);
7114 			return;
7115 		}
7116 		/* Check the verification tag */
7117 		if (ntohl(sh.v_tag) != 0) {
7118 			/*
7119 			 * This must be the verification tag used for
7120 			 * sending out packets. We don't consider packets
7121 			 * reflecting the verification tag.
7122 			 */
7123 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7124 				SCTP_TCB_UNLOCK(stcb);
7125 				return;
7126 			}
7127 		} else {
7128 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7129 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7130 			    sizeof(struct sctphdr) +
7131 			    sizeof(struct sctp_chunkhdr) +
7132 			    offsetof(struct sctp_init, a_rwnd)) {
7133 				/*
7134 				 * In this case we can check if we got an
7135 				 * INIT chunk and if the initiate tag
7136 				 * matches.
7137 				 */
7138 				uint32_t initiate_tag;
7139 				uint8_t chunk_type;
7140 
7141 				m_copydata(ip6cp->ip6c_m,
7142 				    ip6cp->ip6c_off +
7143 				    sizeof(struct udphdr) +
7144 				    sizeof(struct sctphdr),
7145 				    sizeof(uint8_t),
7146 				    (caddr_t)&chunk_type);
7147 				m_copydata(ip6cp->ip6c_m,
7148 				    ip6cp->ip6c_off +
7149 				    sizeof(struct udphdr) +
7150 				    sizeof(struct sctphdr) +
7151 				    sizeof(struct sctp_chunkhdr),
7152 				    sizeof(uint32_t),
7153 				    (caddr_t)&initiate_tag);
7154 				if ((chunk_type != SCTP_INITIATION) ||
7155 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7156 					SCTP_TCB_UNLOCK(stcb);
7157 					return;
7158 				}
7159 			} else {
7160 				SCTP_TCB_UNLOCK(stcb);
7161 				return;
7162 			}
7163 		}
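		/*
		 * As in the IPv4 case, map a port unreachable for the
		 * tunneling UDP port to an unrecognized-next-header error,
		 * the IPv6 analogue of protocol unreachable.
		 */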
7164 		type = ip6cp->ip6c_icmp6->icmp6_type;
7165 		code = ip6cp->ip6c_icmp6->icmp6_code;
7166 		if ((type == ICMP6_DST_UNREACH) &&
7167 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7168 			type = ICMP6_PARAM_PROB;
7169 			code = ICMP6_PARAMPROB_NEXTHEADER;
7170 		}
7171 		sctp6_notify(inp, stcb, net, type, code,
7172 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7173 	} else {
7174 		if ((stcb == NULL) && (inp != NULL)) {
7175 			/* reduce inp's ref-count */
7176 			SCTP_INP_WLOCK(inp);
7177 			SCTP_INP_DECR_REF(inp);
7178 			SCTP_INP_WUNLOCK(inp);
7179 		}
7180 		if (stcb) {
7181 			SCTP_TCB_UNLOCK(stcb);
7182 		}
7183 	}
7184 }
7185 #endif
7186 
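/*
 * Close and forget the UDP tunneling sockets, if any are open.
 */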
7187 void
7188 sctp_over_udp_stop(void)
7189 {
7190 	/*
7191 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7192 	 * for writing.
7193 	 */
7194 #ifdef INET
7195 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7196 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7197 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7198 	}
7199 #endif
7200 #ifdef INET6
7201 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7202 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7203 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7204 	}
7205 #endif
7206 }
7207 
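/*
 * Create the UDP tunneling sockets for the configured port, register the
 * SCTP packet and ICMP handlers on them, and bind them to the port.
 */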
7208 int
7209 sctp_over_udp_start(void)
7210 {
7211 	uint16_t port;
7212 	int ret;
7213 #ifdef INET
7214 	struct sockaddr_in sin;
7215 #endif
7216 #ifdef INET6
7217 	struct sockaddr_in6 sin6;
7218 #endif
7219 	/*
7220 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7221 	 * for writing.
7222 	 */
7223 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7224 	if (ntohs(port) == 0) {
7225 		/* Must have a port set */
7226 		return (EINVAL);
7227 	}
7228 #ifdef INET
7229 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7230 		/* Already running -- must stop first */
7231 		return (EALREADY);
7232 	}
7233 #endif
7234 #ifdef INET6
7235 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7236 		/* Already running -- must stop first */
7237 		return (EALREADY);
7238 	}
7239 #endif
7240 #ifdef INET
7241 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7242 	    SOCK_DGRAM, IPPROTO_UDP,
7243 	    curthread->td_ucred, curthread))) {
7244 		sctp_over_udp_stop();
7245 		return (ret);
7246 	}
7247 	/* Call the special UDP hook. */
7248 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7249 	    sctp_recv_udp_tunneled_packet,
7250 	    sctp_recv_icmp_tunneled_packet,
7251 	    NULL))) {
7252 		sctp_over_udp_stop();
7253 		return (ret);
7254 	}
7255 	/* Ok, we have a socket, bind it to the port. */
7256 	memset(&sin, 0, sizeof(struct sockaddr_in));
7257 	sin.sin_len = sizeof(struct sockaddr_in);
7258 	sin.sin_family = AF_INET;
7259 	sin.sin_port = htons(port);
7260 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7261 	    (struct sockaddr *)&sin, curthread))) {
7262 		sctp_over_udp_stop();
7263 		return (ret);
7264 	}
7265 #endif
7266 #ifdef INET6
7267 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7268 	    SOCK_DGRAM, IPPROTO_UDP,
7269 	    curthread->td_ucred, curthread))) {
7270 		sctp_over_udp_stop();
7271 		return (ret);
7272 	}
7273 	/* Call the special UDP hook. */
7274 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7275 	    sctp_recv_udp_tunneled_packet,
7276 	    sctp_recv_icmp6_tunneled_packet,
7277 	    NULL))) {
7278 		sctp_over_udp_stop();
7279 		return (ret);
7280 	}
7281 	/* Ok, we have a socket, bind it to the port. */
7282 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7283 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7284 	sin6.sin6_family = AF_INET6;
7285 	sin6.sin6_port = htons(port);
7286 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7287 	    (struct sockaddr *)&sin6, curthread))) {
7288 		sctp_over_udp_stop();
7289 		return (ret);
7290 	}
7291 #endif
7292 	return (0);
7293 }
7294 
7295 /*
7296  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7297  * If all arguments are zero, zero is returned.
7298  */
7299 uint32_t
7300 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7301 {
7302 	if (mtu1 > 0) {
7303 		if (mtu2 > 0) {
7304 			if (mtu3 > 0) {
7305 				return (min(mtu1, min(mtu2, mtu3)));
7306 			} else {
7307 				return (min(mtu1, mtu2));
7308 			}
7309 		} else {
7310 			if (mtu3 > 0) {
7311 				return (min(mtu1, mtu3));
7312 			} else {
7313 				return (mtu1);
7314 			}
7315 		}
7316 	} else {
7317 		if (mtu2 > 0) {
7318 			if (mtu3 > 0) {
7319 				return (min(mtu2, mtu3));
7320 			} else {
7321 				return (mtu2);
7322 			}
7323 		} else {
7324 			return (mtu3);
7325 		}
7326 	}
7327 }
7328 
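/*
 * Record the path MTU for the given remote address in the host cache.
 */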
7329 void
7330 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7331 {
7332 	struct in_conninfo inc;
7333 
7334 	memset(&inc, 0, sizeof(struct in_conninfo));
7335 	inc.inc_fibnum = fibnum;
7336 	switch (addr->sa.sa_family) {
7337 #ifdef INET
7338 	case AF_INET:
7339 		inc.inc_faddr = addr->sin.sin_addr;
7340 		break;
7341 #endif
7342 #ifdef INET6
7343 	case AF_INET6:
7344 		inc.inc_flags |= INC_ISIPV6;
7345 		inc.inc6_faddr = addr->sin6.sin6_addr;
7346 		break;
7347 #endif
7348 	default:
7349 		return;
7350 	}
7351 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7352 }
7353 
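/*
 * Look up the cached path MTU for the given remote address.
 * Returns 0 if the host cache has no entry.
 */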
7354 uint32_t
7355 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7356 {
7357 	struct in_conninfo inc;
7358 
7359 	memset(&inc, 0, sizeof(struct in_conninfo));
7360 	inc.inc_fibnum = fibnum;
7361 	switch (addr->sa.sa_family) {
7362 #ifdef INET
7363 	case AF_INET:
7364 		inc.inc_faddr = addr->sin.sin_addr;
7365 		break;
7366 #endif
7367 #ifdef INET6
7368 	case AF_INET6:
7369 		inc.inc_flags |= INC_ISIPV6;
7370 		inc.inc6_faddr = addr->sin6.sin6_addr;
7371 		break;
7372 #endif
7373 	default:
7374 		return (0);
7375 	}
7376 	return ((uint32_t)tcp_hc_getmtu(&inc));
7377 }
7378 
7379 void
7380 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7381 {
7382 #if defined(KDTRACE_HOOKS)
7383 	int old_state = stcb->asoc.state;
7384 #endif
7385 
7386 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7387 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7388 	    new_state));
7389 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
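	/* Entering a shutdown state clears any pending-shutdown substate. */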
7390 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7391 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7392 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7393 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7394 	}
7395 #if defined(KDTRACE_HOOKS)
7396 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7397 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7398 	    (new_state == SCTP_STATE_INUSE))) {
7399 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7400 	}
7401 #endif
7402 }
7403 
7404 void
7405 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7406 {
7407 #if defined(KDTRACE_HOOKS)
7408 	int old_state = stcb->asoc.state;
7409 #endif
7410 
7411 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7412 	    ("sctp_add_substate: Can't set state (substate = %x)",
7413 	    substate));
7414 	stcb->asoc.state |= substate;
7415 #if defined(KDTRACE_HOOKS)
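	/*
	 * Fire the DTrace state-change probe only when ABOUT_TO_BE_FREED or
	 * SHUTDOWN_PENDING is newly set.
	 */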
7416 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7417 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7418 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7419 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7420 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7421 	}
7422 #endif
7423 }
7424