/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_kdtrace.h>

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8
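
/*
 * The SHIFT_MPTCP_MULTI* constants are fixed-point scale factors for the
 * MPTCP-like coupled increase below: per-path terms are scaled up by 2^40
 * and 2^16 so that the integer division computing mptcp_like_alpha is left
 * with a 2^8 (SHIFT_MPTCP_MULTI) scale, which is shifted back out when the
 * cwnd increment is applied.
 */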

static void
sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
{
    if ((assoc->max_cwnd > 0) &&
        (net->cwnd > assoc->max_cwnd) &&
        (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
        net->cwnd = assoc->max_cwnd;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
}

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *assoc;
    uint32_t cwnd_in_mtu;

    assoc = &stcb->asoc;
    cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
    if (cwnd_in_mtu == 0) {
        /* Using 0 means that the value of RFC 4960 is used. */
        net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    } else {
        /*
         * We take the minimum of the burst limit and the initial
         * congestion window.
         */
        if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
            cwnd_in_mtu = assoc->max_burst;
        net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
    }
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        /* In case of resource pooling initialize appropriately */
        net->cwnd /= assoc->numnets;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
    sctp_enforce_cwnd_limit(assoc, net);
    net->ssthresh = assoc->peers_rwnd;
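    /*
     * The DTrace probe identifies the association by its verification tag
     * and by the local/remote port pair packed into a single argument,
     * followed by the old (here 0) and new cwnd values.
     */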
    SDT_PROBE5(sctp, cwnd, net, init,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        0, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
        (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            if (net->lastsa > 0) {
                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
    }
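    /*
     * The totals gathered above drive the resource-pooling reductions
     * below: RPV1 scales each path's ssthresh by its share of t_ssthresh,
     * while RPV2 uses the cwnd/SRTT sum (t_ucwnd_sbw) instead.
     */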

    /*-
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of a RFC2582 Fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per Section 7.2.3: were there any destinations
                 * that had a fast retransmit sent to them? If so,
                 * we need to adjust their ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
                    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                    }
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
                        uint32_t srtt;

                        srtt = net->lastsa;
                        /*
                         * lastsa >> 3; we don't need
                         * to divide ...
                         */
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        /*
                         * Short Version => Equal to
                         * Contel Version MBe
                         */
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                    }
                    if ((net->cwnd > t_cwnd / 2) &&
                        (net->ssthresh < net->cwnd - t_cwnd / 2)) {
                        net->ssthresh = net->cwnd - t_cwnd / 2;
                    }
                    if (net->ssthresh < net->mtu) {
                        net->ssthresh = net->mtu;
                    }
                } else {
                    net->ssthresh = net->cwnd / 2;
                    if (net->ssthresh < (net->mtu * 2)) {
                        net->ssthresh = 2 * net->mtu;
                    }
                }
                net->cwnd = net->ssthresh;
                sctp_enforce_cwnd_limit(asoc, net);
                SDT_PROBE5(sctp, cwnd, net, fr,
                    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
                    old_cwnd, net->cwnd);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1 /* Losing to other flows */
#define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
#define SCTP_INST_GAINING 3 /* Gaining, step down possible */
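
/*
 * These classify how the bandwidth measured over just the last RTT compares
 * with the longer-running average: the result biases the step-down logic
 * below (a voluntary step down is skipped while we appear to be losing
 * ground) and is also packed into the DTrace probe points.
 */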

static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

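    /*
     * probepoint packs the current cwnd into the upper 32 bits and a probe
     * identifier plus a result flag into the low bits ((id << 16) | flag),
     * so a single DTrace argument tells which decision point fired.
     */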
    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /*
         * rtt increased we don't update bw.. so we don't update the
         * rtt either.
         */
        /* Probe point 5 */
        probepoint |= ((5 << 16) | 1);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
            if (net->cc_mod.rtcc.last_step_state == 5)
                net->cc_mod.rtcc.step_cnt++;
            else
                net->cc_mod.rtcc.step_cnt = 1;
            net->cc_mod.rtcc.last_step_state = 5;
            if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
                ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
                ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
                /* Try a step down */
                oth = net->cc_mod.rtcc.vol_reduce;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.step_cnt;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.last_step_state;
                SDT_PROBE5(sctp, cwnd, net, rttstep,
                    vtag,
                    ((net->cc_mod.rtcc.lbw << 32) | nbw),
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    oth,
                    probepoint);
                if (net->cwnd > (4 * net->mtu)) {
                    net->cwnd -= net->mtu;
                    net->cc_mod.rtcc.vol_reduce++;
                } else {
                    net->cc_mod.rtcc.step_cnt = 0;
                }
            }
        }
        return (1);
    }
    if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /*
         * rtt decreased, there could be more room. we update both
         * the bw and the rtt here to lock this in as a good step
         * down.
         */
        /* Probe point 6 */
        probepoint |= ((6 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.last_step_state == 5) &&
                (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
                /* Step down worked */
                net->cc_mod.rtcc.step_cnt = 0;
                return (1);
            } else {
                net->cc_mod.rtcc.last_step_state = 6;
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
        net->cc_mod.rtcc.lbw = nbw;
        net->cc_mod.rtcc.lbw_rtt = net->rtt;
        net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
        if (inst_ind == SCTP_INST_GAINING)
            return (1);
        else if (inst_ind == SCTP_INST_NEUTRAL)
            return (1);
        else
            return (0);
    }
    /*
     * OK, bw and rtt remained the same; no update to either.
     */
    /* Probe point 7 */
    probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
        if (net->cc_mod.rtcc.last_step_state == 5)
            net->cc_mod.rtcc.step_cnt++;
        else
            net->cc_mod.rtcc.step_cnt = 1;
        net->cc_mod.rtcc.last_step_state = 5;
        if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
            ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
            ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
            /* Try a step down */
            if (net->cwnd > (4 * net->mtu)) {
                net->cwnd -= net->mtu;
                net->cc_mod.rtcc.vol_reduce++;
                return (1);
            } else {
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
    }
    if (inst_ind == SCTP_INST_GAINING)
        return (1);
    else if (inst_ind == SCTP_INST_NEUTRAL)
        return (1);
    else
        return ((int)net->cc_mod.rtcc.ret_from_eq);
}

static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    /* Bandwidth decreased. */
    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /* rtt increased */
        /* Did we add more */
        if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
            (inst_ind != SCTP_INST_LOOSING)) {
            /* We caused it maybe.. back off? */
            /* PROBE POINT 1 */
            probepoint |= ((1 << 16) | 1);
            SDT_PROBE5(sctp, cwnd, net, rttvar,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                net->flight_size,
                probepoint);
            if (net->cc_mod.rtcc.ret_from_eq) {
                /*
                 * Switch over to CA if we are less
                 * aggressive
                 */
                net->ssthresh = net->cwnd - 1;
                net->partial_bytes_acked = 0;
            }
            return (1);
        }
        /* Probe point 2 */
        probepoint |= ((2 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        /* Someone else - fight for more? */
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            /*
             * Did we voluntarily give up some? if so take one
             * back please
             */
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 2;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /* bw & rtt decreased */
        /* Probe point 3 */
        probepoint |= ((3 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 3;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    }
    /* The bw decreased but rtt stayed the same */
    /* Probe point 4 */
    probepoint |= ((4 << 16) | 0);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE5(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        if ((net->cc_mod.rtcc.vol_reduce) &&
            (inst_ind != SCTP_INST_GAINING)) {
            net->cwnd += net->mtu;
            sctp_enforce_cwnd_limit(&stcb->asoc, net);
            net->cc_mod.rtcc.vol_reduce--;
        }
        net->cc_mod.rtcc.last_step_state = 4;
        net->cc_mod.rtcc.step_cnt = 0;
    }
out_decision:
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    if (inst_ind == SCTP_INST_GAINING) {
        return (1);
    } else {
        return (0);
    }
}

static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
    uint64_t oth, probepoint;

    /*
     * BW increased, so update and return 0, since all actions in our
     * table say to do the normal CC update. Note that we pay no
     * attention to the inst_ind since our overall sum is increasing.
     */
    /* PROBE POINT 0 */
    probepoint = (((uint64_t)net->cwnd) << 32);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE5(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        net->cc_mod.rtcc.last_step_state = 0;
        net->cc_mod.rtcc.step_cnt = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
    }
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    return (0);
}

/* RTCC Algorithm to limit growth of cwnd, return
 * true if you want to NOT allow cwnd growth
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
    uint64_t bw_offset, rtt_offset;
    uint64_t probepoint, rtt, vtag;
    uint64_t bytes_for_this_rtt, inst_bw;
    uint64_t div, inst_off;
    int bw_shift;
    uint8_t inst_ind;
    int ret;

    /*-
     * Here we need to see if we want
     * to limit cwnd growth due to increase
     * in overall rtt but no increase in bw.
     * We use the following table to figure
     * out what we should do. When we return
     * 0, cc update goes on as planned. If we
     * return 1, then no cc update happens and cwnd
     * stays where it is.
     * ----------------------------------
     * BW   | RTT  | Action
     * *********************************
     * INC  | INC  | return 0
     * ----------------------------------
     * INC  | SAME | return 0
     * ----------------------------------
     * INC  | DECR | return 0
     * ----------------------------------
     * SAME | INC  | return 1
     * ----------------------------------
     * SAME | SAME | return 1
     * ----------------------------------
     * SAME | DECR | return 0
     * ----------------------------------
     * DECR | INC  | return 0 or 1, based on whether we caused it.
     * ----------------------------------
     * DECR | SAME | return 0
     * ----------------------------------
     * DECR | DECR | return 0
     * ----------------------------------
     *
     * We are a bit fuzzy on what counts as an increase or
     * decrease. For BW it is considered the same if it did
     * not change by more than 1/64th. For RTT it is
     * considered the same if it did not change by more
     * than 1/32nd.
     */
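    /*
     * A worked example of the fuzz bands above, assuming a bw shift of 6
     * (1/64th) and an rtt shift of 5 (1/32nd): with lbw = 64000 bytes/ms,
     * bw_offset = 1000, so anything from 63000 to 65000 counts as the
     * "SAME" bw; with lbw_rtt = 32000 us, rtt_offset = 1000 us bounds the
     * "SAME" rtt band in the same way.
     */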
    bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    rtt = stcb->asoc.my_vtag;
    vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
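    /*
     * vtag packs my_vtag, the local port, and the remote port into one
     * probe argument; the rtt variable is only borrowed as scratch space
     * for the shift and is reloaded from net->rtt right below.
     */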
    probepoint = (((uint64_t)net->cwnd) << 32);
    rtt = net->rtt;
    if (net->cc_mod.rtcc.rtt_set_this_sack) {
        net->cc_mod.rtcc.rtt_set_this_sack = 0;
        bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
        if (net->rtt) {
            div = net->rtt / 1000;
            if (div) {
                inst_bw = bytes_for_this_rtt / div;
                inst_off = inst_bw >> bw_shift;
                if (inst_bw > nbw)
                    inst_ind = SCTP_INST_GAINING;
                else if ((inst_bw + inst_off) < nbw)
                    inst_ind = SCTP_INST_LOOSING;
                else
                    inst_ind = SCTP_INST_NEUTRAL;
                probepoint |= ((0xb << 16) | inst_ind);
            } else {
                inst_ind = net->cc_mod.rtcc.last_inst_ind;
                inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
                /* Can't determine do not change */
                probepoint |= ((0xc << 16) | inst_ind);
            }
        } else {
            inst_ind = net->cc_mod.rtcc.last_inst_ind;
            inst_bw = bytes_for_this_rtt;
            /* Can't determine do not change */
            probepoint |= ((0xd << 16) | inst_ind);
        }
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((nbw << 32) | inst_bw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
            net->flight_size,
            probepoint);
    } else {
        /* No rtt measurement, use last one */
        inst_ind = net->cc_mod.rtcc.last_inst_ind;
    }
    bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
        ret = cc_bw_increase(stcb, net, nbw, vtag);
        goto out;
    }
    rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
        ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
        goto out;
    }
    /*
     * If we reach here then we are in a situation where the bw stayed
     * the same.
     */
    ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
    net->cc_mod.rtcc.last_inst_ind = inst_ind;
    return (ret);
}

static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
    struct sctp_nets *net;
    int old_cwnd;
    uint32_t t_ssthresh, t_cwnd, incr;
    uint64_t t_ucwnd_sbw;
    uint64_t t_path_mptcp;
    uint64_t mptcp_like_alpha;
    uint32_t srtt;
    uint64_t max_path;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    t_path_mptcp = 0;
    mptcp_like_alpha = 1;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
        max_path = 0;
        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            /* lastsa >> 3; we don't need to divide ... */
            srtt = net->lastsa;
            if (srtt > 0) {
                uint64_t tmp;

                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
                t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
                    (((uint64_t)net->mtu) * (uint64_t)srtt);
                tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
                    ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
                if (tmp > max_path) {
                    max_path = tmp;
                }
            }
        }
        if (t_path_mptcp > 0) {
            mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
        } else {
            mptcp_like_alpha = 1;
        }
    }
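    /*
     * mptcp_like_alpha now approximates max_i(cwnd_i / (mtu_i * srtt_i^2))
     * divided by (sum_i(cwnd_i / (mtu_i * srtt_i)))^2, carried with
     * SHIFT_MPTCP_MULTI (8) fractional bits; the increase code below
     * shifts that scale back out.
     */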
    if (t_ssthresh == 0) {
        t_ssthresh = 1;
    }
    if (t_ucwnd_sbw == 0) {
        t_ucwnd_sbw = 1;
    }
    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif

        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * Did any measurements go on for this network?
         */
        if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
            uint64_t nbw;

            /*
             * At this point our bw_bytes has been updated by
             * incoming sack information.
             *
             * But our bw may not yet be set.
             *
             */
            if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
                nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
            } else {
                nbw = net->cc_mod.rtcc.bw_bytes;
            }
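            /*
             * new_tot_time is in microseconds, so nbw is in bytes per
             * millisecond; the instantaneous bw in cc_bw_limit() is
             * computed in the same unit, making the two comparable.
             */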
            if (net->cc_mod.rtcc.lbw) {
                if (cc_bw_limit(stcb, net, nbw)) {
                    /* Hold here, no update */
                    continue;
                }
            } else {
                uint64_t vtag, probepoint;

                probepoint = (((uint64_t)net->cwnd) << 32);
                probepoint |= ((0xa << 16) | 0);
                vtag = (net->rtt << 32) |
                    (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
                    (stcb->rport);

                SDT_PROBE5(sctp, cwnd, net, rttvar,
                    vtag,
                    nbw,
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    net->flight_size,
                    probepoint);
                net->cc_mod.rtcc.lbw = nbw;
                net->cc_mod.rtcc.lbw_rtt = net->rtt;
                if (net->cc_mod.rtcc.rtt_set_this_sack) {
                    net->cc_mod.rtcc.rtt_set_this_sack = 0;
                    net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
                }
            }
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    uint32_t limit;

                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa >> 3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            mptcp_like_alpha *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
                            SHIFT_MPTCP_MULTI);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            mptcp_like_alpha) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr > net->net_ack) {
                            incr = net->net_ack;
                        }
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->net_ack;
                        if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
                            incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
                        }
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, incr,
                            SCTP_CWND_LOG_FROM_SS);
                    }
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                /*
                 * Add to pba
                 */
                net->partial_bytes_acked += net->net_ack;

                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        incr = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa >> 3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        incr = (uint32_t)((uint64_t)net->mtu *
                            (uint64_t)net->cwnd /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        incr = (uint32_t)((mptcp_like_alpha *
                            (uint64_t)net->cwnd) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->mtu;
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd;

    old_cwnd = net->cwnd;
    net->cwnd = net->mtu;
    SDT_PROBE5(sctp, cwnd, net, ack,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        old_cwnd, net->cwnd);
    SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
        (void *)net, net->cwnd);
}

static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        struct sctp_nets *lnet;
        uint32_t srtt;

        t_ucwnd_sbw = 0;
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += lnet->ssthresh;
            t_cwnd += lnet->cwnd;
            srtt = lnet->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt > 0) {
                t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
            }
        }
        if (t_ssthresh < 1) {
            t_ssthresh = 1;
        }
        if (t_ucwnd_sbw < 1) {
            t_ucwnd_sbw = 1;
        }
        if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
            net->ssthresh = (uint32_t)(((uint64_t)4 *
                (uint64_t)net->mtu *
                (uint64_t)net->ssthresh) /
                (uint64_t)t_ssthresh);
        } else {
            uint64_t cc_delta;

            srtt = net->lastsa;
            /* lastsa >> 3; we don't need to divide ... */
            if (srtt == 0) {
                srtt = 1;
            }
            cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
            if (cc_delta < t_cwnd) {
                net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
            } else {
                net->ssthresh = net->mtu;
            }
        }
        if ((net->cwnd > t_cwnd / 2) &&
            (net->ssthresh < net->cwnd - t_cwnd / 2)) {
            net->ssthresh = net->cwnd - t_cwnd / 2;
        }
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
        }
    } else {
        net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
    }
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    SDT_PROBE5(sctp, cwnd, net, to,
        stcb->asoc.my_vtag,
        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
        net,
        old_cwnd, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
    int old_cwnd = net->cwnd;

    if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
        /* Data center Congestion Control */
        if (in_window == 0) {
            /*
             * Go to CA with the cwnd at the point we sent the
             * TSN that was marked with a CE.
             */
            if (net->ecn_prev_cwnd < net->cwnd) {
                /* Restore to prev cwnd */
                net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
            } else {
                /* Just cut in 1/2 */
                net->cwnd /= 2;
            }
            /* Drop to CA */
            net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        } else {
            /*
             * Further tuning down required over the drastic
             * original cut
             */
            net->ssthresh -= (net->mtu * num_pkt_lost);
            net->cwnd -= (net->mtu * num_pkt_lost);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
    } else {
        if (in_window == 0) {
            SCTP_STAT_INCR(sctps_ecnereducedcwnd);
            net->ssthresh = net->cwnd / 2;
            if (net->ssthresh < net->mtu) {
                net->ssthresh = net->mtu;
                /*
                 * here back off the timer as well, to slow
                 * us down
                 */
                net->RTO <<= 1;
            }
            net->cwnd = net->ssthresh;
            SDT_PROBE5(sctp, cwnd, net, ecn,
                stcb->asoc.my_vtag,
                ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                net,
                old_cwnd, net->cwnd);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
    }
}

static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
    uint32_t bw_avail;
    unsigned int incr;
    int old_cwnd = net->cwnd;

    /* get bottle neck bw */
    *bottle_bw = ntohl(cp->bottle_bw);
    /* and whats on queue */
    *on_queue = ntohl(cp->current_onq);
    /*
     * Adjust the on-queue value upward if our flight size is larger; it
     * could be that the router has not yet seen all of the data "in
     * flight" to it.
     */
    if (*on_queue < net->flight_size) {
        *on_queue = net->flight_size;
    }
    /* rtt is measured in micro seconds, bottle_bw in bytes per second */
    bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
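    /*
     * Example: a reported bottle_bw of 1000000 bytes/sec and an rtt of
     * 50000 us yield bw_avail = 50000 bytes, the amount the pipe can hold.
     */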
    if (bw_avail > *bottle_bw) {
        /*
         * Cap the growth to no more than the bottleneck. This can
         * happen as RTT slides up due to queues. It also means that
         * with more than a one second RTT and an empty queue you will
         * be limited to bottle_bw bytes per second, even if other
         * paths have half the RTT and could get more out...
         */
        bw_avail = *bottle_bw;
    }
    if (*on_queue > bw_avail) {
        /*
         * No room for anything else don't allow anything else to be
         * "added to the fire".
         */
        int seg_inflight, seg_onqueue, my_portion;

        net->partial_bytes_acked = 0;
        /* how much are we over queue size? */
        incr = *on_queue - bw_avail;
        if (stcb->asoc.seen_a_sack_this_pkt) {
            /*
             * undo any cwnd adjustment that the sack might have
             * made
             */
            net->cwnd = net->prev_cwnd;
        }
        /* Now how much of that is mine? */
        seg_inflight = net->flight_size / net->mtu;
        seg_onqueue = *on_queue / net->mtu;
        my_portion = (incr * seg_inflight) / seg_onqueue;

        /* Have I made an adjustment already */
        if (net->cwnd > net->flight_size) {
            /*
             * for this flight I made an adjustment we need to
             * decrease the portion by a share our previous
             * adjustment.
             */
            int diff_adj;

            diff_adj = net->cwnd - net->flight_size;
            if (diff_adj > my_portion)
                my_portion = 0;
            else
                my_portion -= diff_adj;
        }
        /*
         * back down to the previous cwnd (assume we have had a sack
         * before this packet). minus what ever portion of the
         * overage is my fault.
         */
        net->cwnd -= my_portion;

        /* we will NOT back down more than 1 MTU */
        if (net->cwnd <= net->mtu) {
            net->cwnd = net->mtu;
        }
        /* force into CA */
        net->ssthresh = net->cwnd - 1;
    } else {
        /*
         * Take 1/4 of the space left or max burst up .. whichever
         * is less.
         */
        incr = (bw_avail - *on_queue) >> 2;
        if ((stcb->asoc.max_burst > 0) &&
            (stcb->asoc.max_burst * net->mtu < incr)) {
            incr = stcb->asoc.max_burst * net->mtu;
        }
        net->cwnd += incr;
    }
    if (net->cwnd > bw_avail) {
        /* We can't exceed the pipe size */
        net->cwnd = bw_avail;
    }
    if (net->cwnd < net->mtu) {
        /* We always have 1 MTU */
        net->cwnd = net->mtu;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (net->cwnd - old_cwnd != 0) {
        /* log only changes */
        SDT_PROBE5(sctp, cwnd, net, pd,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                SCTP_CWND_LOG_FROM_SAT);
        }
    }
}

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
    int old_cwnd = net->cwnd;

    if (net->ssthresh < net->cwnd)
        net->ssthresh = net->cwnd;
    if (burst_limit) {
        net->cwnd = (net->flight_size + (burst_limit * net->mtu));
        sctp_enforce_cwnd_limit(&stcb->asoc, net);
        SDT_PROBE5(sctp, cwnd, net, bl,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
        }
    }
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a zero argument in last disables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    /* Passing a zero argument in last disables the rtcc algorithm */
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/* Here starts the RTCCVAR type CC invented by RRS which
 * is a slight mod to RFC2581. We reuse a common routine or
 * two since these algorithms are so close and need to
 * remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set > 0) {
        /* We had a bw measurement going on */
        struct timeval ltls;

        SCTP_GETPTIME_TIMEVAL(&ltls);
        timevalsub(&ltls, &net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
    }
}

static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    if (net->cc_mod.rtcc.lbw) {
        /* Clear the old bw.. we went to 0 in-flight */
        vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
            (stcb->rport);
        probepoint = (((uint64_t)net->cwnd) << 32);
        /* Probe point 8 */
        probepoint |= ((8 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | 0),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        net->cc_mod.rtcc.lbw_rtt = 0;
        net->cc_mod.rtcc.cwnd_at_bw_set = 0;
        net->cc_mod.rtcc.lbw = 0;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
        net->cc_mod.rtcc.bw_tot_time = 0;
        net->cc_mod.rtcc.bw_bytes = 0;
        net->cc_mod.rtcc.tls_needs_set = 0;
        if (net->cc_mod.rtcc.steady_step) {
            net->cc_mod.rtcc.vol_reduce = 0;
            net->cc_mod.rtcc.step_cnt = 0;
            net->cc_mod.rtcc.last_step_state = 0;
        }
        if (net->cc_mod.rtcc.ret_from_eq) {
            /* less aggressive one - reset cwnd too */
            uint32_t cwnd_in_mtu, cwnd;

            cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
            if (cwnd_in_mtu == 0) {
                /*
                 * Using 0 means that the value of RFC 4960
                 * is used.
                 */
                cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
            } else {
                /*
                 * We take the minimum of the burst limit
                 * and the initial congestion window.
                 */
                if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
                    cwnd_in_mtu = stcb->asoc.max_burst;
                cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
            }
            if (net->cwnd > cwnd) {
                /*
                 * Only set if we are not a timeout (i.e.
                 * down to 1 mtu)
                 */
                net->cwnd = cwnd;
            }
        }
    }
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    sctp_set_initial_cc_param(stcb, net);
    stcb->asoc.use_precise_time = 1;
    probepoint = (((uint64_t)net->cwnd) << 32);
    probepoint |= ((9 << 16) | 0);
    vtag = (net->rtt << 32) |
        (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
        (stcb->rport);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        0,
        0,
        0,
        probepoint);
    net->cc_mod.rtcc.lbw_rtt = 0;
    net->cc_mod.rtcc.cwnd_at_bw_set = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.lbw = 0;
    net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
    net->cc_mod.rtcc.bw_tot_time = 0;
    net->cc_mod.rtcc.bw_bytes = 0;
    net->cc_mod.rtcc.tls_needs_set = 0;
    net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
    net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
    net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
    net->cc_mod.rtcc.step_cnt = 0;
    net->cc_mod.rtcc.last_step_state = 0;
}

static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
    struct sctp_nets *net;

    if (setorget == 1) {
        /* a set */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
            }
        } else {
            return (EINVAL);
        }
    } else {
        /* a get */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
        } else {
            return (EINVAL);
        }
    }
    return (0);
}

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set == 0) {
        SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.tls_needs_set = 2;
    }
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a one argument at the last enables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
    net->cc_mod.rtcc.rtt_set_this_sack = 1;
}

/* Here starts Sally Floyd's HS-TCP */

struct sctp_hs_raise_drop {
    int32_t cwnd;
    int8_t increase;
    int8_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73

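/*
 * Each row applies while cwnd (in units of 1024 bytes, see the >> 10 in the
 * lookups below) is under 'cwnd': on growth the window is raised by
 * 'increase' units of 1024 bytes, and on loss it is cut by 'drop_percent'
 * percent.
 */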
static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
    {38, 1, 50},     /* 0 */
    {118, 2, 44},    /* 1 */
    {221, 3, 41},    /* 2 */
    {347, 4, 38},    /* 3 */
    {495, 5, 37},    /* 4 */
    {663, 6, 35},    /* 5 */
    {851, 7, 34},    /* 6 */
    {1058, 8, 33},   /* 7 */
    {1284, 9, 32},   /* 8 */
    {1529, 10, 31},  /* 9 */
    {1793, 11, 30},  /* 10 */
    {2076, 12, 29},  /* 11 */
    {2378, 13, 28},  /* 12 */
    {2699, 14, 28},  /* 13 */
    {3039, 15, 27},  /* 14 */
    {3399, 16, 27},  /* 15 */
    {3778, 17, 26},  /* 16 */
    {4177, 18, 26},  /* 17 */
    {4596, 19, 25},  /* 18 */
    {5036, 20, 25},  /* 19 */
    {5497, 21, 24},  /* 20 */
    {5979, 22, 24},  /* 21 */
    {6483, 23, 23},  /* 22 */
    {7009, 24, 23},  /* 23 */
    {7558, 25, 22},  /* 24 */
    {8130, 26, 22},  /* 25 */
    {8726, 27, 22},  /* 26 */
    {9346, 28, 21},  /* 27 */
    {9991, 29, 21},  /* 28 */
    {10661, 30, 21}, /* 29 */
    {11358, 31, 20}, /* 30 */
    {12082, 32, 20}, /* 31 */
    {12834, 33, 20}, /* 32 */
    {13614, 34, 19}, /* 33 */
    {14424, 35, 19}, /* 34 */
    {15265, 36, 19}, /* 35 */
    {16137, 37, 19}, /* 36 */
    {17042, 38, 18}, /* 37 */
    {17981, 39, 18}, /* 38 */
    {18955, 40, 18}, /* 39 */
    {19965, 41, 17}, /* 40 */
    {21013, 42, 17}, /* 41 */
    {22101, 43, 17}, /* 42 */
    {23230, 44, 17}, /* 43 */
    {24402, 45, 16}, /* 44 */
    {25618, 46, 16}, /* 45 */
    {26881, 47, 16}, /* 46 */
    {28193, 48, 16}, /* 47 */
    {29557, 49, 15}, /* 48 */
    {30975, 50, 15}, /* 49 */
    {32450, 51, 15}, /* 50 */
    {33986, 52, 15}, /* 51 */
    {35586, 53, 14}, /* 52 */
    {37253, 54, 14}, /* 53 */
    {38992, 55, 14}, /* 54 */
    {40808, 56, 14}, /* 55 */
    {42707, 57, 13}, /* 56 */
    {44694, 58, 13}, /* 57 */
    {46776, 59, 13}, /* 58 */
    {48961, 60, 13}, /* 59 */
    {51258, 61, 13}, /* 60 */
    {53677, 62, 12}, /* 61 */
    {56230, 63, 12}, /* 62 */
    {58932, 64, 12}, /* 63 */
    {61799, 65, 12}, /* 64 */
    {64851, 66, 11}, /* 65 */
    {68113, 67, 11}, /* 66 */
    {71617, 68, 11}, /* 67 */
    {75401, 69, 10}, /* 68 */
    {79517, 70, 10}, /* 69 */
    {84035, 71, 10}, /* 70 */
    {89053, 72, 10}, /* 71 */
    {94717, 73, 9}   /* 72 */
};

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx, incr;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    indx = SCTP_HS_TABLE_SIZE - 1;

    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        if (net->net_ack > net->mtu) {
            net->cwnd += net->mtu;
        } else {
            net->cwnd += net->net_ack;
        }
    } else {
        for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
            if (cur_val < sctp_cwnd_adjust[i].cwnd) {
                indx = i;
                break;
            }
        }
        net->last_hs_used = indx;
        incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
        net->cwnd += incr;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
    }
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        net->ssthresh = net->cwnd / 2;
        if (net->ssthresh < (net->mtu * 2)) {
            net->ssthresh = 2 * net->mtu;
        }
        net->cwnd = net->ssthresh;
    } else {
        /* drop by the proper amount */
        net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
            (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
        net->cwnd = net->ssthresh;
        /* now where are we */
        indx = net->last_hs_used;
        cur_val = net->cwnd >> 10;
        /* reset where we are in the table */
        if (cur_val < sctp_cwnd_adjust[0].cwnd) {
            /* fell out of HS */
            net->last_hs_used = 0;
        } else {
            for (i = indx; i >= 1; i--) {
                if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
                    break;
                }
            }
            /* record the row the scan above settled on */
            net->last_hs_used = i;
        }
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
    }
}

static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of a RFC2582 Fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * Per Section 7.2.3: were there any destinations
                 * that had a fast retransmit sent to them? If so,
                 * we need to adjust their ssthresh and cwnd.
                 */
1701 struct sctp_tmit_chunk *lchk;
1702
1703 sctp_hs_cwnd_decrease(stcb, net);
1704
1705 lchk = TAILQ_FIRST(&asoc->send_queue);
1706
1707 net->partial_bytes_acked = 0;
1708 /* Turn on fast recovery window */
1709 asoc->fast_retran_loss_recovery = 1;
1710 if (lchk == NULL) {
1711 /* Mark end of the window */
1712 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
1713 } else {
1714 asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
1715 }
1716
1717 /*
1718 * CMT fast recovery -- per destination
1719 * recovery variable.
1720 */
1721 net->fast_retran_loss_recovery = 1;
1722
1723 if (lchk == NULL) {
1724 /* Mark end of the window */
1725 net->fast_recovery_tsn = asoc->sending_seq - 1;
1726 } else {
1727 net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
1728 }
1729
1730 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
1731 stcb->sctp_ep, stcb, net,
1732 SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
1733 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1734 stcb->sctp_ep, stcb, net);
1735 }
1736 } else if (net->net_ack > 0) {
1737 /*
1738 * Mark a peg that we WOULD have done a cwnd
1739 * reduction but RFC2582 prevented this action.
1740 */
1741 SCTP_STAT_INCR(sctps_fastretransinrtt);
1742 }
1743 }
1744 }
1745
1746 static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb * stcb,struct sctp_association * asoc,int accum_moved,int reneged_all SCTP_UNUSED,int will_exit)1747 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
1748 struct sctp_association *asoc,
1749 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
1750 {
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					sctp_hs_cwnd_increase(stcb, net);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				net->partial_bytes_acked += net->net_ack;
				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					net->cwnd += net->mtu;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 * "H-TCP: TCP for high-speed and long-distance networks"
 * Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

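/*
 * Note on fixed-point arithmetic in the code below: alpha and beta
 * are kept in <<7 fixed point (128 == 1.0) and bandwidth/scale
 * ratios in <<3 fixed point (8 == 1.0); times are in ticks of hz.
 */

/*
 * Wrap-safe sequence comparison: true when seq1 lies within
 * [seq2, seq3] in modulo-2^32 arithmetic.  For example,
 * between(5, 0xfffffffe, 10) holds since, as uint32_t,
 * 10 - 0xfffffffe == 12 and 5 - 0xfffffffe == 7, and 12 >= 7.
 */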
static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return (seq3 - seq2 >= seq1 - seq2);
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return (sctp_get_tick_count() - ca->last_cong);
}

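/*
 * Number of RTTs elapsed since the last congestion event, estimated
 * as elapsed ticks divided by minRTT (raw ticks before any RTT
 * sample has been taken).
 */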
static inline uint32_t
htcp_ccount(struct htcp *ca)
{
	return (ca->minRTT == 0 ? htcp_cong_time(ca) : htcp_cong_time(ca) / ca->minRTT);
}

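/*
 * Snapshot the state needed to undo a window reduction (consumed by
 * the currently unused htcp_cwnd_undo() below) and restart the
 * congestion-epoch clock.
 */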
static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + sctp_msecs_to_ticks(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}

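/*
 * Measure the throughput achieved between congestion events.  cur_Bi
 * is in MTUs per second; Bi smooths it with a 3/4-old, 1/4-new
 * moving average, and maxB/minB record the extremes consumed by the
 * bandwidth-switch heuristic in htcp_beta_update().
 */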
static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}

	net->cc_mod.htcp_ca.bytecount += net->net_ack;
	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
	    (net->cc_mod.htcp_ca.minRTT > 0)) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}

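/*
 * beta is the multiplicative-decrease factor in <<7 fixed point.  In
 * adaptive mode it tracks minRTT/maxRTT: e.g. minRTT = 10 ticks and
 * maxRTT = 40 ticks gives (10 << 7) / 40 = 32, clamped to the range
 * [BETA_MIN, BETA_MAX].  If the achieved bandwidth moved more than
 * about 20% since the last event (the between() test), revert to the
 * conservative BETA_MIN and leave adaptive mode.
 */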
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}

	if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

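/*
 * alpha is the additive-increase step in <<7 fixed point.  Once a
 * congestion epoch is more than one second old, H-TCP grows it as
 * alpha = 2 * factor * (1 - beta), where
 * factor = 1 + 10 * Delta + (Delta / 2)^2 and Delta is the time in
 * seconds beyond the first, optionally scaled by minRTT to reduce
 * unfairness between flows with different RTTs.
 */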
static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t)hz) {
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}

	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
								 * interval [0.5,10]<<3 */
		factor = (factor << 3) / scale;
		if (factor == 0)
			factor = 1;
	}

	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (ca->alpha == 0)
		ca->alpha = ALPHA_BASE;
}

/* After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from a consistent
 * set of data.
 *
 * This function should be called when we hit a congestion event, since only
 * at that point do we have a real sense of maxRTT (the queues en route were
 * getting just too full now).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/*
	 * add slowly fading memory for maxRTT to accommodate routing
	 * changes etc
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

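/*
 * On a congestion event the new ssthresh is beta * cwnd: e.g. a cwnd
 * of 100 MTUs with beta = 64 (0.5 in <<7 fixed point) yields
 * ((100 * 64) >> 7) = 50 MTUs, floored at 2 MTUs.
 */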
static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
}

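/*
 * Slow start below is standard ABC-style growth (bounded by the
 * sctp_L2_abc_variable sysctl); in congestion avoidance the window
 * opens by one MTU each time alpha/128 cwnd's worth of new data has
 * been acked, i.e. roughly alpha/128 MTUs per RTT.
 */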
static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *	return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(net);

		/*
		 * In dangerous area, increase slowly. In theory this is
		 * net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
			htcp_alpha_update(&net->cc_mod.htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	}
}

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return (net->ssthresh);
}
#endif

static void
htcp_init(struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Use the RFC 4960 initial window: the maximum of 2 MTUs and
	 * SCTP_INITIAL_CWND, capped at 4 MTUs of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				sctp_enforce_cwnd_limit(asoc, net);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		sctp_enforce_cwnd_limit(&stcb->asoc, net);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}

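/*
 * Dispatch table for the pluggable congestion control modules.  The
 * index corresponds to the SCTP_CC_* socket-option values: 0 is the
 * RFC 4960 default, 1 is HS-TCP, 2 is H-TCP, and 3 is the RTCC
 * module, which additionally implements the optional rate-tracking
 * hooks at the end of the structure.
 */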
const struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
		.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
	}
};