/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008-2010 Lawrence Stewart <[email protected]>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology, made
 * possible in part by a grant from the Cisco University Research Program Fund
 * at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the CUBIC congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
 * Originally released as part of the NewTCP research project at Swinburne
 * University of Technology's Centre for Advanced Internet Architectures,
 * Melbourne, Australia, which was made possible in part by a grant from the
 * Cisco University Research Program Fund at Community Foundation Silicon
 * Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_cubic.h>
#include <netinet/cc/cc_module.h>

static void cubic_ack_received(struct cc_var *ccv, uint16_t type);
static void cubic_cb_destroy(struct cc_var *ccv);
static int cubic_cb_init(struct cc_var *ccv);
static void cubic_cong_signal(struct cc_var *ccv, uint32_t type);
static void cubic_conn_init(struct cc_var *ccv);
static int cubic_mod_init(void);
static void cubic_post_recovery(struct cc_var *ccv);
static void cubic_record_rtt(struct cc_var *ccv);
static void cubic_ssthresh_update(struct cc_var *ccv, uint32_t maxseg);
static void cubic_after_idle(struct cc_var *ccv);

struct cubic {
        /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */
        int64_t K;
        /* Sum of RTT samples across an epoch in ticks. */
        int64_t sum_rtt_ticks;
        /* cwnd at the most recent congestion event. */
        unsigned long max_cwnd;
        /* cwnd at the previous congestion event. */
        unsigned long prev_max_cwnd;
        /* A copy of prev_max_cwnd. Used for CC_RTO_ERR. */
        unsigned long prev_max_cwnd_cp;
        /* Various flags. */
        uint32_t flags;
#define CUBICFLAG_CONG_EVENT    0x00000001      /* congestion experienced */
#define CUBICFLAG_IN_SLOWSTART  0x00000002      /* in slow start */
#define CUBICFLAG_IN_APPLIMIT   0x00000004      /* application limited */
#define CUBICFLAG_RTO_EVENT     0x00000008      /* RTO experienced */
        /* Minimum observed rtt in ticks. */
        int min_rtt_ticks;
        /* Mean observed rtt between congestion epochs. */
        int mean_rtt_ticks;
        /* ACKs since last congestion event. */
        int epoch_ack_count;
        /* Timestamp (in ticks) of arriving in congestion avoidance from last
         * congestion event.
         */
        int t_last_cong;
        /* Timestamp (in ticks) of a previous congestion event. Used for
         * CC_RTO_ERR.
         */
        int t_last_cong_prev;
};
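
/*
 * For reference, the window function implemented by this file is the one
 * from RFC 8312, Section 4.1:
 *
 *     W_cubic(t) = C * (t - K)^3 + W_max
 *
 * where W_max is the congestion window at the last congestion event, t is
 * the elapsed time since then, and K = cubic_root(W_max * (1 - beta_cubic) /
 * C) is the time the function takes to climb back to W_max. The fixed-point
 * constants and the helpers cubic_k(), cubic_cwnd() and tf_cwnd() used below
 * are defined in cc_cubic.h.
 */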

static MALLOC_DEFINE(M_CUBIC, "cubic data",
    "Per connection data required for the CUBIC congestion control algorithm");

struct cc_algo cubic_cc_algo = {
        .name = "cubic",
        .ack_received = cubic_ack_received,
        .cb_destroy = cubic_cb_destroy,
        .cb_init = cubic_cb_init,
        .cong_signal = cubic_cong_signal,
        .conn_init = cubic_conn_init,
        .mod_init = cubic_mod_init,
        .post_recovery = cubic_post_recovery,
        .after_idle = cubic_after_idle,
};

static void
cubic_ack_received(struct cc_var *ccv, uint16_t type)
{
        struct cubic *cubic_data;
        unsigned long w_tf, w_cubic_next;
        int ticks_since_cong;

        cubic_data = ccv->cc_data;
        cubic_record_rtt(ccv);

        /*
         * On a regular ACK, when we're not in congestion/fast recovery and
         * the connection is cwnd limited, always recalculate cwnd.
         */
        if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
            (ccv->flags & CCF_CWND_LIMITED)) {
                /* Use the logic in NewReno ack_received() for slow start. */
                if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
                    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) {
                        cubic_data->flags |= CUBICFLAG_IN_SLOWSTART;
                        newreno_cc_algo.ack_received(ccv, type);
                } else {
                        if ((cubic_data->flags & CUBICFLAG_RTO_EVENT) &&
                            (cubic_data->flags & CUBICFLAG_IN_SLOWSTART)) {
                                /* RFC8312 Section 4.7 */
                                cubic_data->flags &= ~(CUBICFLAG_RTO_EVENT |
                                    CUBICFLAG_IN_SLOWSTART);
                                cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
                                cubic_data->K = 0;
                        } else if (cubic_data->flags & (CUBICFLAG_IN_SLOWSTART |
                            CUBICFLAG_IN_APPLIMIT)) {
                                cubic_data->flags &= ~(CUBICFLAG_IN_SLOWSTART |
                                    CUBICFLAG_IN_APPLIMIT);
                                cubic_data->t_last_cong = ticks;
                                cubic_data->K = cubic_k(cubic_data->max_cwnd /
                                    CCV(ccv, t_maxseg));
                        }
                        if ((ticks_since_cong =
                            ticks - cubic_data->t_last_cong) < 0) {
                                /*
                                 * The ticks counter has wrapped, making
                                 * ticks_since_cong negative. Clamp the epoch
                                 * age to INT_MAX and drag t_last_cong along
                                 * with the wrapped counter.
                                 */
                                ticks_since_cong = INT_MAX;
                                cubic_data->t_last_cong = ticks - INT_MAX;
                        }
                        /*
                         * The mean RTT is used to best reflect the equations in
                         * the I-D. Using min_rtt in the tf_cwnd calculation
                         * causes w_tf to grow much faster than it should if the
                         * RTT is dominated by network buffering rather than
                         * propagation delay.
                         */
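                        /*
                         * For reference, tf_cwnd() approximates the
                         * TCP-friendly window estimate from the I-D
                         * (RFC 8312, Section 4.2):
                         *
                         *   W_est(t) = W_max * beta_cubic +
                         *       (3 * (1 - beta_cubic) / (1 + beta_cubic)) *
                         *       (t / RTT)
                         *
                         * i.e. the window a NewReno-like flow would have
                         * reached by time t after the last congestion event.
                         */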
                        w_tf = tf_cwnd(ticks_since_cong,
                            cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
                            CCV(ccv, t_maxseg));

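                        /*
                         * Evaluate the cubic function one mean RTT ahead of
                         * now: RFC 8312, Section 4.1 uses W_cubic(t + RTT)
                         * as the window to aim for over the next RTT.
                         */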
                        w_cubic_next = cubic_cwnd(ticks_since_cong +
                            cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
                            CCV(ccv, t_maxseg), cubic_data->K);

                        ccv->flags &= ~CCF_ABC_SENTAWND;

                        if (w_cubic_next < w_tf) {
                                /*
                                 * TCP-friendly region, follow tf
                                 * cwnd growth.
                                 */
                                if (CCV(ccv, snd_cwnd) < w_tf)
                                        CCV(ccv, snd_cwnd) = ulmin(w_tf, INT_MAX);
                        } else if (CCV(ccv, snd_cwnd) < w_cubic_next) {
                                /*
                                 * Concave or convex region, follow CUBIC
                                 * cwnd growth.
                                 * Only update snd_cwnd if it doesn't shrink.
                                 */
                                CCV(ccv, snd_cwnd) = ulmin(w_cubic_next,
                                    INT_MAX);
                        }

                        /*
                         * If we're not in slow start and we're probing for a
                         * new cwnd limit at the start of a connection
                         * (happens when hostcache has a relevant entry),
                         * keep updating our current estimate of the
                         * max_cwnd.
                         */
                        if (((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) &&
                            cubic_data->max_cwnd < CCV(ccv, snd_cwnd)) {
                                cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
                                cubic_data->K = cubic_k(cubic_data->max_cwnd /
                                    CCV(ccv, t_maxseg));
                        }
                }
        } else if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
            !(ccv->flags & CCF_CWND_LIMITED)) {
                cubic_data->flags |= CUBICFLAG_IN_APPLIMIT;
        }
}

/*
 * This is a CUBIC-specific implementation of after_idle.
 *   - Reset cwnd by calling the NewReno implementation of after_idle.
 *   - Reset t_last_cong.
 */
static void
cubic_after_idle(struct cc_var *ccv)
{
        struct cubic *cubic_data;

        cubic_data = ccv->cc_data;

        cubic_data->max_cwnd = ulmax(cubic_data->max_cwnd, CCV(ccv, snd_cwnd));
        cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg));

        newreno_cc_algo.after_idle(ccv);
        cubic_data->t_last_cong = ticks;
}

static void
cubic_cb_destroy(struct cc_var *ccv)
{
        free(ccv->cc_data, M_CUBIC);
}

static int
cubic_cb_init(struct cc_var *ccv)
{
        struct cubic *cubic_data;

        cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO);

        if (cubic_data == NULL)
                return (ENOMEM);

        /* Init some key variables with sensible defaults. */
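        /*
         * t_last_cong is primed with the current time so the first
         * congestion epoch starts now; min_rtt_ticks at TCPTV_SRTTBASE
         * flags "no RTT sample seen yet"; mean_rtt_ticks starts at 1 tick
         * so the cwnd calculations have a non-zero RTT to divide by before
         * real samples arrive.
         */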
        cubic_data->t_last_cong = ticks;
        cubic_data->min_rtt_ticks = TCPTV_SRTTBASE;
        cubic_data->mean_rtt_ticks = 1;

        ccv->cc_data = cubic_data;

        return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
        struct cubic *cubic_data;
        u_int mss;

        cubic_data = ccv->cc_data;
        mss = tcp_maxseg(ccv->ccvc.tcp);

        switch (type) {
        case CC_NDUPACK:
                if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
                        if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
                                cubic_ssthresh_update(ccv, mss);
                                cubic_data->flags |= CUBICFLAG_CONG_EVENT;
                                cubic_data->t_last_cong = ticks;
                                cubic_data->K = cubic_k(cubic_data->max_cwnd / mss);
                        }
                        ENTER_RECOVERY(CCV(ccv, t_flags));
                }
                break;

        case CC_ECN:
                if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
                        cubic_ssthresh_update(ccv, mss);
                        cubic_data->flags |= CUBICFLAG_CONG_EVENT;
                        cubic_data->t_last_cong = ticks;
                        cubic_data->K = cubic_k(cubic_data->max_cwnd / mss);
                        CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
                        ENTER_CONGRECOVERY(CCV(ccv, t_flags));
                }
                break;

        case CC_RTO:
                /* RFC8312 Section 4.7 */
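                /*
                 * Snapshot the undo state on the first RTO only, so that a
                 * later CC_RTO_ERR (spurious RTO) can restore the pre-RTO
                 * epoch even after multiple retransmission timeouts.
                 */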
                if (CCV(ccv, t_rxtshift) == 1) {
                        cubic_data->t_last_cong_prev = cubic_data->t_last_cong;
                        cubic_data->prev_max_cwnd_cp = cubic_data->prev_max_cwnd;
                }
                cubic_data->flags |= CUBICFLAG_CONG_EVENT | CUBICFLAG_RTO_EVENT;
                cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
                CCV(ccv, snd_ssthresh) = ((uint64_t)CCV(ccv, snd_cwnd) *
                    CUBIC_BETA) >> CUBIC_SHIFT;
                CCV(ccv, snd_cwnd) = mss;
                break;

        case CC_RTO_ERR:
                cubic_data->flags &= ~(CUBICFLAG_CONG_EVENT | CUBICFLAG_RTO_EVENT);
                cubic_data->max_cwnd = cubic_data->prev_max_cwnd;
                cubic_data->prev_max_cwnd = cubic_data->prev_max_cwnd_cp;
                cubic_data->t_last_cong = cubic_data->t_last_cong_prev;
                cubic_data->K = cubic_k(cubic_data->max_cwnd / mss);
                break;
        }
}

static void
cubic_conn_init(struct cc_var *ccv)
{
        struct cubic *cubic_data;

        cubic_data = ccv->cc_data;

        /*
         * Ensure we have a sane initial value for max_cwnd recorded. Without
         * it, the CUBIC calculations operate on a garbage max_cwnd when
         * snd_cwnd is seeded from an entry in the TCP hostcache.
         */
        cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
}

static int
cubic_mod_init(void)
{
        return (0);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
cubic_post_recovery(struct cc_var *ccv)
{
        struct cubic *cubic_data;
        int pipe;

        cubic_data = ccv->cc_data;
        pipe = 0;

        if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
                /*
                 * If inflight data is less than ssthresh, set cwnd
                 * conservatively to avoid a burst of data, as suggested in
                 * the NewReno RFC. Otherwise, use the CUBIC method.
                 *
                 * XXXLAS: Find a way to do this without needing curack
                 */
                if (V_tcp_do_rfc6675_pipe)
                        pipe = tcp_compute_pipe(ccv->ccvc.tcp);
                else
                        pipe = CCV(ccv, snd_max) - ccv->curack;

                if (pipe < CCV(ccv, snd_ssthresh))
                        /*
                         * Ensure that cwnd does not collapse to 1 MSS under
                         * adverse conditions. Implements the window deflation
                         * from RFC 6582.
                         */
                        CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
                            CCV(ccv, t_maxseg);
                else
                        /* Update cwnd based on beta and adjusted max_cwnd. */
                        CCV(ccv, snd_cwnd) = max(((uint64_t)cubic_data->max_cwnd *
                            CUBIC_BETA) >> CUBIC_SHIFT,
                            2 * CCV(ccv, t_maxseg));
        }

        /* Calculate the average RTT between congestion epochs. */
        if (cubic_data->epoch_ack_count > 0 &&
            cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) {
                cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
                    cubic_data->epoch_ack_count);
        }

        cubic_data->epoch_ack_count = 0;
        cubic_data->sum_rtt_ticks = 0;
}

/*
 * Record the min RTT and sum samples for the epoch average RTT calculation.
 */
static void
cubic_record_rtt(struct cc_var *ccv)
{
        struct cubic *cubic_data;
        int t_srtt_ticks;

        /* Ignore srtt until a minimum number of samples has been taken. */
        if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) {
                cubic_data = ccv->cc_data;
                t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE;

                /*
                 * Record the current SRTT as our minrtt if it's the smallest
                 * we've seen or minrtt is currently equal to its initialised
                 * value.
                 *
                 * XXXLAS: Should there be some hysteresis for minrtt?
                 */
                if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
                    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) {
                        cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);

                        /*
                         * If the connection is within its first congestion
                         * epoch, ensure we prime mean_rtt_ticks with a
                         * reasonable value until the epoch average RTT is
                         * calculated in cubic_post_recovery().
                         */
                        if (cubic_data->min_rtt_ticks >
                            cubic_data->mean_rtt_ticks)
                                cubic_data->mean_rtt_ticks =
                                    cubic_data->min_rtt_ticks;
                }

                /* Sum samples for epoch average RTT calculation. */
                cubic_data->sum_rtt_ticks += t_srtt_ticks;
                cubic_data->epoch_ack_count++;
        }
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
cubic_ssthresh_update(struct cc_var *ccv, uint32_t maxseg)
{
        struct cubic *cubic_data;
        uint32_t ssthresh;
        uint32_t cwnd;

        cubic_data = ccv->cc_data;
        cwnd = CCV(ccv, snd_cwnd);

        /* Fast convergence heuristic. */
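        /*
         * If cwnd is still below the max_cwnd of the previous epoch, the
         * flow is most likely competing with newer flows; per RFC 8312,
         * Section 4.6, remember a further reduced W_max so bandwidth is
         * released early. CUBIC_FC_FACTOR is ~0.85, i.e.
         * (1 + beta_cubic) / 2 with beta_cubic of 0.7 (see cc_cubic.h).
         */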
        if (cwnd < cubic_data->max_cwnd) {
                cwnd = ((uint64_t)cwnd * CUBIC_FC_FACTOR) >> CUBIC_SHIFT;
        }
        cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
        cubic_data->max_cwnd = cwnd;

        /*
         * On the first congestion event, set ssthresh to cwnd * 0.5
         * and reduce max_cwnd to cwnd * beta. This aligns the cubic concave
         * region appropriately. On subsequent congestion events, set
         * ssthresh to cwnd * beta.
         */
        if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
                ssthresh = cwnd >> 1;
                cubic_data->max_cwnd = ((uint64_t)cwnd *
                    CUBIC_BETA) >> CUBIC_SHIFT;
        } else {
                ssthresh = ((uint64_t)cwnd *
                    CUBIC_BETA) >> CUBIC_SHIFT;
        }
        CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * maxseg);
}

DECLARE_CC_MODULE(cubic, &cubic_cc_algo);
MODULE_VERSION(cubic, 1);
