/*	$NetBSD: tcp_congctl.c,v 1.23 2017/01/02 09:29:38 skrll Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2001, 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
 * Facility, NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Rui Paulo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 *      @(#)COPYRIGHT   1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 *      This product includes software developed at the Information
 *      Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.23 2017/01/02 09:29:38 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_tcp_debug.h"
#include "opt_tcp_congctl.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/pool.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/mutex.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet/icmp6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_congctl.h>
#ifdef TCP_DEBUG
#include <netinet/tcp_debug.h>
#endif

/*
 * TODO:
 *   consider moving the actual implementations into a separate file.
 */

static void tcp_common_congestion_exp(struct tcpcb *, int, int);

static int  tcp_reno_do_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static int  tcp_reno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_slow_retransmit(struct tcpcb *);
static void tcp_reno_fast_retransmit_newack(struct tcpcb *,
    const struct tcphdr *);
static void tcp_reno_newack(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_congestion_exp(struct tcpcb *tp);

static int  tcp_newreno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_newreno_fast_retransmit_newack(struct tcpcb *,
	const struct tcphdr *);
static void tcp_newreno_newack(struct tcpcb *, const struct tcphdr *);

static int tcp_cubic_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_cubic_slow_retransmit(struct tcpcb *tp);
static void tcp_cubic_newack(struct tcpcb *, const struct tcphdr *);
static void tcp_cubic_congestion_exp(struct tcpcb *);

static void tcp_congctl_fillnames(void);

extern int tcprexmtthresh;

MALLOC_DEFINE(M_TCPCONGCTL, "tcpcongctl", "TCP congestion control structures");

/* currently selected global congestion control */
char tcp_congctl_global_name[TCPCC_MAXLEN];

/* available global congestion control algorithms */
char tcp_congctl_avail[10 * TCPCC_MAXLEN];

/*
 * Used to list the available congestion control algorithms.
 */
TAILQ_HEAD(, tcp_congctlent) tcp_congctlhd =
    TAILQ_HEAD_INITIALIZER(tcp_congctlhd);

static struct tcp_congctlent * tcp_congctl_global;

static kmutex_t tcp_congctl_mtx;

void
tcp_congctl_init(void)
{
	int r __diagused;

	mutex_init(&tcp_congctl_mtx, MUTEX_DEFAULT, IPL_NONE);

	/* Base algorithms. */
	r = tcp_congctl_register("reno", &tcp_reno_ctl);
	KASSERT(r == 0);
	r = tcp_congctl_register("newreno", &tcp_newreno_ctl);
	KASSERT(r == 0);
	r = tcp_congctl_register("cubic", &tcp_cubic_ctl);
	KASSERT(r == 0);

	/* NewReno is the default. */
#ifndef TCP_CONGCTL_DEFAULT
#define TCP_CONGCTL_DEFAULT "newreno"
#endif

	r = tcp_congctl_select(NULL, TCP_CONGCTL_DEFAULT);
	KASSERT(r == 0);
}

/*
 * Register a congestion algorithm and select it if we have none.
 */
int
tcp_congctl_register(const char *name, const struct tcp_congctl *tcc)
{
	struct tcp_congctlent *ntcc, *tccp;

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
		if (!strcmp(name, tccp->congctl_name)) {
			/* name already registered */
			return EEXIST;
		}

	ntcc = malloc(sizeof(*ntcc), M_TCPCONGCTL, M_WAITOK|M_ZERO);

	strlcpy(ntcc->congctl_name, name, sizeof(ntcc->congctl_name));
	ntcc->congctl_ctl = tcc;

	TAILQ_INSERT_TAIL(&tcp_congctlhd, ntcc, congctl_ent);
	tcp_congctl_fillnames();

	if (TAILQ_FIRST(&tcp_congctlhd) == ntcc)
		tcp_congctl_select(NULL, name);

	return 0;
}
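
/*
 * Example (hypothetical): a new algorithm is plugged in by filling in a
 * struct tcp_congctl and registering it under an unused name; "foo" and
 * the tcp_foo_* callbacks below are illustrative only, not real code:
 *
 *	static const struct tcp_congctl tcp_foo_ctl = {
 *		.fast_retransmit = tcp_foo_fast_retransmit,
 *		.slow_retransmit = tcp_foo_slow_retransmit,
 *		.fast_retransmit_newack = tcp_foo_fast_retransmit_newack,
 *		.newack = tcp_foo_newack,
 *		.cong_exp = tcp_foo_congestion_exp,
 *	};
 *
 *	error = tcp_congctl_register("foo", &tcp_foo_ctl);
 *
 * tcp_congctl_register() returns EEXIST if the name is already taken.
 */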

int
tcp_congctl_unregister(const char *name)
{
	struct tcp_congctlent *tccp, *rtccp;
	unsigned int size;

	rtccp = NULL;
	size = 0;
	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (!strcmp(name, tccp->congctl_name))
			rtccp = tccp;
		size++;
	}

	if (!rtccp)
		return ENOENT;

	if (size <= 1 || tcp_congctl_global == rtccp || rtccp->congctl_refcnt)
		return EBUSY;

	TAILQ_REMOVE(&tcp_congctlhd, rtccp, congctl_ent);
	free(rtccp, M_TCPCONGCTL);
	tcp_congctl_fillnames();

	return 0;
}

/*
 * Select a congestion algorithm by name.
 */
int
tcp_congctl_select(struct tcpcb *tp, const char *name)
{
	struct tcp_congctlent *tccp, *old_tccp, *new_tccp;
	bool old_found, new_found;

	KASSERT(name);

	old_found = (tp == NULL || tp->t_congctl == NULL);
	old_tccp = NULL;
	new_found = false;
	new_tccp = NULL;

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (!old_found && tccp->congctl_ctl == tp->t_congctl) {
			old_tccp = tccp;
			old_found = true;
		}

		if (!new_found && !strcmp(name, tccp->congctl_name)) {
			new_tccp = tccp;
			new_found = true;
		}

		if (new_found && old_found) {
			if (tp) {
				mutex_enter(&tcp_congctl_mtx);
				if (old_tccp)
					old_tccp->congctl_refcnt--;
				tp->t_congctl = new_tccp->congctl_ctl;
				new_tccp->congctl_refcnt++;
				mutex_exit(&tcp_congctl_mtx);
			} else {
				tcp_congctl_global = new_tccp;
				strlcpy(tcp_congctl_global_name,
				    new_tccp->congctl_name,
				    sizeof(tcp_congctl_global_name));
			}
			return 0;
		}
	}

	return EINVAL;
}
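
/*
 * Usage sketch: tcp_congctl_select(NULL, "cubic") switches the global
 * default used for new connections, while tcp_congctl_select(tp, "reno")
 * rebinds a single connection, moving one reference from its old
 * algorithm to the new one under tcp_congctl_mtx.
 */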

void
tcp_congctl_release(struct tcpcb *tp)
{
	struct tcp_congctlent *tccp;

	KASSERT(tp->t_congctl);

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (tccp->congctl_ctl == tp->t_congctl) {
			tccp->congctl_refcnt--;
			return;
		}
	}
}

/*
 * Returns the name of a congestion algorithm.
 */
const char *
tcp_congctl_bystruct(const struct tcp_congctl *tcc)
{
	struct tcp_congctlent *tccp;

	KASSERT(tcc);

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
		if (tccp->congctl_ctl == tcc)
			return tccp->congctl_name;

	return NULL;
}

static void
tcp_congctl_fillnames(void)
{
	struct tcp_congctlent *tccp;
	const char *delim = " ";

	tcp_congctl_avail[0] = '\0';
	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		strlcat(tcp_congctl_avail, tccp->congctl_name,
		    sizeof(tcp_congctl_avail));
		if (TAILQ_NEXT(tccp, congctl_ent))
			strlcat(tcp_congctl_avail, delim,
			    sizeof(tcp_congctl_avail));
	}
}

/* ------------------------------------------------------------------------ */

/*
 * Common stuff
 */

/* Window reduction (1-beta) for [New]Reno: 0.5 */
#define RENO_BETAA 1
#define RENO_BETAB 2
/* Window reduction (1-beta) for Cubic: 0.8 */
#define CUBIC_BETAA 4
#define CUBIC_BETAB 5
/* Draft Rhee Section 4.1 */
#define CUBIC_CA 4
#define CUBIC_CB 10

static void
tcp_common_congestion_exp(struct tcpcb *tp, int betaa, int betab)
{
	u_int win;

	/*
	 * Reduce the congestion window and the slow start threshold.
	 */
	win = min(tp->snd_wnd, tp->snd_cwnd) * betaa / betab / tp->t_segsz;
	if (win < 2)
		win = 2;

	tp->snd_ssthresh = win * tp->t_segsz;
	tp->snd_recover = tp->snd_max;
	tp->snd_cwnd = tp->snd_ssthresh;

	/*
	 * When using TCP ECN, notify the peer that
	 * we reduced the cwnd.
	 */
	if (TCP_ECN_ALLOWED(tp))
		tp->t_flags |= TF_ECN_SND_CWR;
}
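
/*
 * Worked example (assuming t_segsz = 1460 and an effective window
 * min(snd_wnd, snd_cwnd) = 29200, i.e. 20 segments):
 *
 *	[New]Reno (betaa/betab = 1/2): win = 29200 * 1 / 2 / 1460 = 10,
 *	    so snd_ssthresh = snd_cwnd = 14600 (window halved).
 *	CUBIC (betaa/betab = 4/5):     win = 29200 * 4 / 5 / 1460 = 16,
 *	    so snd_ssthresh = snd_cwnd = 23360 (window times 0.8).
 */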


/* ------------------------------------------------------------------------ */

/*
 * TCP/Reno congestion control.
 */
static void
tcp_reno_congestion_exp(struct tcpcb *tp)
{

	tcp_common_congestion_exp(tp, RENO_BETAA, RENO_BETAB);
}

static int
tcp_reno_do_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * Dup acks mean that packets have left the
	 * network (they're now cached at the receiver)
	 * so bump cwnd by the amount in the receiver
	 * to keep a constant cwnd's worth of packets
	 * in the network.
	 *
	 * If we are using TCP/SACK, then enter
	 * Fast Recovery if the receiver SACKs
	 * data that is tcprexmtthresh * MSS
	 * bytes past the last ACKed segment,
	 * irrespective of the number of DupAcks.
	 */

	tcp_seq onxt = tp->snd_nxt;

	tp->t_partialacks = 0;
	TCP_TIMER_DISARM(tp, TCPT_REXMT);
	tp->t_rtttime = 0;
	if (TCP_SACK_ENABLED(tp)) {
		tp->t_dupacks = tcprexmtthresh;
		tp->sack_newdata = tp->snd_nxt;
		tp->snd_cwnd = tp->t_segsz;
		(void) tcp_output(tp);
		return 0;
	}
	tp->snd_nxt = th->th_ack;
	tp->snd_cwnd = tp->t_segsz;
	(void) tcp_output(tp);
	tp->snd_cwnd = tp->snd_ssthresh + tp->t_segsz * tp->t_dupacks;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;

	return 0;
}
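
/*
 * Note on the non-SACK path above: snd_cwnd is momentarily forced to a
 * single segment so that tcp_output() retransmits exactly the segment
 * at th_ack, and is then re-inflated to ssthresh + segsz * t_dupacks.
 * For example, with ssthresh = 14600, t_segsz = 1460 and 3 dup acks,
 * cwnd becomes 14600 + 3 * 1460 = 18980, crediting the segments that
 * the dup acks indicate have left the network.
 */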

static int
tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	/*
	 * We know we're losing at the current
	 * window size so do congestion avoidance
	 * (set ssthresh to half the current window
	 * and pull our congestion window back to
	 * the new ssthresh).
	 */

	tcp_reno_congestion_exp(tp);
	return tcp_reno_do_fast_retransmit(tp, th);
}

static void
tcp_reno_slow_retransmit(struct tcpcb *tp)
{
	u_int win;

	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */

	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
	if (win < 2)
		win = 2;
	/* Loss Window MUST be one segment. */
	tp->snd_cwnd = tp->t_segsz;
	tp->snd_ssthresh = win * tp->t_segsz;
	tp->t_partialacks = -1;
	tp->t_dupacks = 0;
	tp->t_bytes_acked = 0;

	if (TCP_ECN_ALLOWED(tp))
		tp->t_flags |= TF_ECN_SND_CWR;
}
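
/*
 * Worked example: with min(snd_wnd, snd_cwnd) = 29200 and t_segsz =
 * 1460, win = 29200 / 2 / 1460 = 10, so snd_ssthresh becomes 14600
 * while snd_cwnd restarts from one segment (1460); slow start then
 * roughly doubles cwnd every RTT until it reaches ssthresh.
 */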

static void
tcp_reno_fast_retransmit_newack(struct tcpcb *tp,
    const struct tcphdr *th)
{
	if (tp->t_partialacks < 0) {
		/*
		 * We were not in fast recovery.  Reset the duplicate ack
		 * counter.
		 */
		tp->t_dupacks = 0;
	} else {
		/*
		 * Clamp the congestion window to the crossover point and
		 * exit fast recovery.
		 */
		if (tp->snd_cwnd > tp->snd_ssthresh)
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_partialacks = -1;
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
			tp->snd_fack = th->th_ack;
	}
}

static void
tcp_reno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * When new data is acked, open the congestion window.
	 */

	u_int cw = tp->snd_cwnd;
	u_int incr = tp->t_segsz;

	if (tcp_do_abc) {

		/*
		 * RFC 3465 Appropriate Byte Counting (ABC)
		 */

		int acked = th->th_ack - tp->snd_una;

		if (cw >= tp->snd_ssthresh) {
			tp->t_bytes_acked += acked;
			if (tp->t_bytes_acked >= cw) {
				/* Time to increase the window. */
				tp->t_bytes_acked -= cw;
			} else {
				/* No need to increase yet. */
				incr = 0;
			}
		} else {
			/*
			 * use 2*SMSS or 1*SMSS for the "L" param,
			 * depending on sysctl setting.
			 *
			 * (See RFC 3465 2.3 Choosing the Limit)
			 */
			u_int abc_lim;

			abc_lim = (tcp_abc_aggressive == 0 ||
			    tp->snd_nxt != tp->snd_max) ? incr : incr * 2;
			incr = min(acked, abc_lim);
		}
	} else {

		/*
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (segsz per packet).
		 * Otherwise open linearly: segsz per window
		 * (segsz^2 / cwnd per packet).
		 */

		if (cw >= tp->snd_ssthresh) {
			incr = incr * incr / cw;
		}
	}

	tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale);
}
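
/*
 * Worked example of the non-ABC congestion avoidance branch: with
 * t_segsz = 1460 and snd_cwnd = 14600 (10 segments), each new ack
 * adds 1460 * 1460 / 14600 = 146 bytes, so a full window of 10 acks
 * grows cwnd by about one segment per RTT.
 */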

const struct tcp_congctl tcp_reno_ctl = {
	.fast_retransmit = tcp_reno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_reno_fast_retransmit_newack,
	.newack = tcp_reno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};

/*
 * TCP/NewReno Congestion control.
 */
static int
tcp_newreno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	if (SEQ_LT(th->th_ack, tp->snd_high)) {
		/*
		 * False fast retransmit after timeout.
		 * Do not enter fast recovery
		 */
		tp->t_dupacks = 0;
		return 1;
	}
	/*
	 * Fast retransmit is the same as in Reno.
	 */
	return tcp_reno_fast_retransmit(tp, th);
}

/*
 * Implement the NewReno response to a new ack, checking for partial acks in
 * fast recovery.
 */
static void
tcp_newreno_fast_retransmit_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	if (tp->t_partialacks < 0) {
		/*
		 * We were not in fast recovery.  Reset the duplicate ack
		 * counter.
		 */
		tp->t_dupacks = 0;
	} else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
		/*
		 * This is a partial ack.  Retransmit the first unacknowledged
		 * segment and deflate the congestion window by the amount of
		 * acknowledged data.  Do not exit fast recovery.
		 */
		tcp_seq onxt = tp->snd_nxt;
		u_long ocwnd = tp->snd_cwnd;
		int sack_num_segs = 1, sack_bytes_rxmt = 0;

		/*
		 * snd_una has not yet been updated and the socket's send
		 * buffer has not yet drained off the ACK'd data, so we
		 * have to leave snd_una as it was to get the correct data
		 * offset in tcp_output().
		 */
		tp->t_partialacks++;
		TCP_TIMER_DISARM(tp, TCPT_REXMT);
		tp->t_rtttime = 0;

		if (TCP_SACK_ENABLED(tp)) {
			/*
			 * Partial ack handling within a sack recovery episode.
			 * Keeping this very simple for now. When a partial ack
			 * is received, force snd_cwnd to a value that will
			 * allow the sender to transmit no more than 2 segments.
			 * If necessary, a fancier scheme can be adopted at a
			 * later point, but for now, the goal is to prevent the
			 * sender from bursting a large amount of data in the
			 * midst of sack recovery.
			 */

			/*
			 * Send one or two segments, based on how much
			 * new data was acked.
			 */
			if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
				sack_num_segs = 2;
			(void)tcp_sack_output(tp, &sack_bytes_rxmt);
			tp->snd_cwnd = sack_bytes_rxmt +
			    (tp->snd_nxt - tp->sack_newdata) +
			    sack_num_segs * tp->t_segsz;
			tp->t_flags |= TF_ACKNOW;
			(void) tcp_output(tp);
		} else {
			tp->snd_nxt = th->th_ack;
			/*
			 * Set snd_cwnd to one segment beyond the ACK'd
			 * offset; snd_una is not yet updated when we're
			 * called.
			 */
			tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
			(void) tcp_output(tp);
			tp->snd_cwnd = ocwnd;
			if (SEQ_GT(onxt, tp->snd_nxt))
				tp->snd_nxt = onxt;
			/*
			 * Partial window deflation.  Relies on the fact that
			 * tp->snd_una is not updated yet.
			 */
			tp->snd_cwnd -= (th->th_ack - tp->snd_una -
			    tp->t_segsz);
		}
	} else {
		/*
		 * Complete ack.  Inflate the congestion window to ssthresh
		 * and exit fast recovery.
		 *
		 * Window inflation should have left us with approx.
		 * snd_ssthresh outstanding data.  But in case we
		 * would be inclined to send a burst, better to do
		 * it via the slow start mechanism.
		 */
		if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
			tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
			    + tp->t_segsz;
		else
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_partialacks = -1;
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
			tp->snd_fack = th->th_ack;
	}
}
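
/*
 * Worked example of the non-SACK partial ack handling above: if the
 * partial ack covers 4 segments (th_ack - snd_una == 4 * t_segsz),
 * the retransmission is sent with cwnd pinned to 5 segments' worth,
 * then cwnd is restored to ocwnd and deflated by 3 * t_segsz: the
 * window shrinks by the newly acked data but keeps one segment of
 * credit for the retransmission now in flight.
 */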

static void
tcp_newreno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * If we are still in fast recovery (meaning we are using
	 * NewReno and we have only received partial acks), do not
	 * inflate the window yet.
	 */
	if (tp->t_partialacks < 0)
		tcp_reno_newack(tp, th);
}


const struct tcp_congctl tcp_newreno_ctl = {
	.fast_retransmit = tcp_newreno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_newreno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};

/*
 * CUBIC - http://tools.ietf.org/html/draft-rhee-tcpm-cubic-02
 */

/* Cubic prototypes */
static void	tcp_cubic_update_ctime(struct tcpcb *tp);
static uint32_t	tcp_cubic_diff_ctime(struct tcpcb *);
static uint32_t	tcp_cubic_cbrt(uint32_t);
static ulong	tcp_cubic_getW(struct tcpcb *, uint32_t, uint32_t);

/* Cubic TIME functions - XXX I don't like using timevals and microuptime */
/*
 * Set congestion timer to now
 */
static void
tcp_cubic_update_ctime(struct tcpcb *tp)
{
	struct timeval now_timeval;

	getmicrouptime(&now_timeval);
	tp->snd_cubic_ctime = now_timeval.tv_sec * 1000 +
	    now_timeval.tv_usec / 1000;
}

/*
 * milliseconds since the last congestion event
 */
static uint32_t
tcp_cubic_diff_ctime(struct tcpcb *tp)
{
	struct timeval now_timeval;

	getmicrouptime(&now_timeval);
	return now_timeval.tv_sec * 1000 + now_timeval.tv_usec / 1000 -
	    tp->snd_cubic_ctime;
}

/*
 * Approximate cubic root
 */
#define CBRT_ROUNDS 30
static uint32_t
tcp_cubic_cbrt(uint32_t v)
{
	int i, rounds = CBRT_ROUNDS;
	uint64_t x = v / 3;

	/* We fail to calculate correctly for small numbers */
	if (v == 0)
		return 0;
	else if (v < 4)
		return 1;

	/*
	 * 2097151 is the largest x for which 2*x^3 + 3*x fits in 64 bits;
	 * past that, avoid overflow at the cost of extra iterations.
	 */
	if (x > 2097151)
		rounds += 10;

	for (i = 0; i < rounds; i++)
		if (rounds == CBRT_ROUNDS)
			x = (v + 2 * x * x * x) / (3 * x * x);
		else
			/* Avoid overflow */
			x = v / (3 * x * x) + 2 * x / 3;

	return (uint32_t)x;
}
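
/*
 * The loop above is Newton's method for f(x) = x^3 - v: the update
 * x' = x - f(x)/f'(x) = (v + 2*x^3) / (3*x^2) is exactly the first
 * branch, and the second branch is the same expression rearranged as
 * v / (3*x^2) + 2*x/3 so that 2*x^3 never overflows 64 bits for
 * large x.  E.g. v = 27 converges to x = 3 within a few rounds.
 */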

/* Draft Rhee Section 3.1 - get W(t+rtt) - Eq. 1 */
static ulong
tcp_cubic_getW(struct tcpcb *tp, uint32_t ms_elapsed, uint32_t rtt)
{
	uint32_t K;
	long tK3;

	/* Section 3.1 Eq. 2 */
	K = tcp_cubic_cbrt(tp->snd_cubic_wmax / CUBIC_BETAB *
	    CUBIC_CB / CUBIC_CA);
	/* (t-K)^3; it is not clear why the unit of measure matters */
	tK3 = (long)(ms_elapsed + rtt) - (long)K;
	tK3 = tK3 * tK3 * tK3;

	return CUBIC_CA * tK3 / CUBIC_CB + tp->snd_cubic_wmax;
}
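
/*
 * In the draft's notation this computes W(t) = C*(t-K)^3 + Wmax, with
 * C = CUBIC_CA/CUBIC_CB = 0.4 and K = cbrt(Wmax * beta / C), where
 * beta = 1 - CUBIC_BETAA/CUBIC_BETAB = 0.2 is the multiplicative
 * decrease: wmax / BETAB * CB / CA above is Wmax * 0.2 / 0.4 = Wmax/2,
 * which is what gets fed to the cube root.
 */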

static void
tcp_cubic_congestion_exp(struct tcpcb *tp)
{

	/*
	 * Congestion - Set WMax and shrink cwnd
	 */
	tcp_cubic_update_ctime(tp);

	/* Section 3.6 - Fast Convergence */
	if (tp->snd_cubic_wmax < tp->snd_cubic_wmax_last) {
		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
		tp->snd_cubic_wmax = tp->snd_cubic_wmax / 2 +
		    tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB / 2;
	} else {
		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
		tp->snd_cubic_wmax = tp->snd_cwnd;
	}

	tp->snd_cubic_wmax = max(tp->t_segsz, tp->snd_cubic_wmax);

	/* Shrink CWND */
	tcp_common_congestion_exp(tp, CUBIC_BETAA, CUBIC_BETAB);
}

static int
tcp_cubic_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	if (SEQ_LT(th->th_ack, tp->snd_high)) {
		/* See newreno */
		tp->t_dupacks = 0;
		return 1;
	}

	/*
	 * mark WMax
	 */
	tcp_cubic_congestion_exp(tp);

	/* Do fast retransmit */
	return tcp_reno_do_fast_retransmit(tp, th);
}

static void
tcp_cubic_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	uint32_t ms_elapsed, rtt;
	u_long w_tcp;

	/* Congestion avoidance and not in fast recovery and usable rtt */
	if (tp->snd_cwnd > tp->snd_ssthresh && tp->t_partialacks < 0 &&
	    /*
	     * t_srtt is in units of 1/32 slow ticks;
	     * converting it to ms is approximately
	     * (t_srtt >> 5) * 1000 / PR_SLOWHZ ~= (t_srtt << 5) / PR_SLOWHZ
	     */
	    (rtt = (tp->t_srtt << 5) / PR_SLOWHZ) > 0) {
		ms_elapsed = tcp_cubic_diff_ctime(tp);

		/* Compute W_tcp(t) */
		w_tcp = tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB +
		    ms_elapsed / rtt / 3;

		if (tp->snd_cwnd > w_tcp) {
			/* Not in TCP friendly mode */
			tp->snd_cwnd += (tcp_cubic_getW(tp, ms_elapsed, rtt) -
			    tp->snd_cwnd) / tp->snd_cwnd;
		} else {
			/* friendly TCP mode */
			tp->snd_cwnd = w_tcp;
		}

		/* Make sure we are within limits */
		tp->snd_cwnd = max(tp->snd_cwnd, tp->t_segsz);
		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
	} else {
		/* Use New Reno */
		tcp_newreno_newack(tp, th);
	}
}
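
/*
 * The w_tcp estimate above follows the draft's TCP-friendly window
 * W_tcp(t) = Wmax*(1-beta) + (3*beta/(2-beta)) * t/RTT: with beta =
 * 0.2 the first term is Wmax * 4/5 and the slope is 3*0.2/1.8 = 1/3,
 * hence the ms_elapsed / rtt / 3 term (note the draft counts windows
 * in segments while snd_cwnd is in bytes, so the slope term here is
 * not scaled by t_segsz).
 */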

static void
tcp_cubic_slow_retransmit(struct tcpcb *tp)
{

	/* Timeout - Mark new congestion */
	tcp_cubic_congestion_exp(tp);

	/* Loss Window MUST be one segment. */
	tp->snd_cwnd = tp->t_segsz;
	tp->t_partialacks = -1;
	tp->t_dupacks = 0;
	tp->t_bytes_acked = 0;

	if (TCP_ECN_ALLOWED(tp))
		tp->t_flags |= TF_ECN_SND_CWR;
}

const struct tcp_congctl tcp_cubic_ctl = {
	.fast_retransmit = tcp_cubic_fast_retransmit,
	.slow_retransmit = tcp_cubic_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_cubic_newack,
	.cong_exp = tcp_cubic_congestion_exp,
};