/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

/*
 * Implemented:
 *
 * RFC 2018 (TCP Selective Acknowledgment Options)
 * RFC 2883 (D-SACK extension to SACK)
 * RFC 3517 (conservative SACK-based loss recovery)
 */

struct sackblock {
	tcp_seq			sblk_start;
	tcp_seq			sblk_end;
	TAILQ_ENTRY(sackblock)	sblk_list;
};

#define	MAXSAVEDBLOCKS	8			/* per connection limit */

static int insert_block(struct scoreboard *scb,
			const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
	struct scoreboard *scb = &tp->scb;

	scb->nblocks = 0;
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
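 *
 * For example, with blocks [10,20) and [30,40): seq 35 yields the
 * [30,40) block and TRUE; seq 25 yields [10,20) and FALSE; seq 5
 * yields NULL and FALSE.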
 */
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
	struct sackblock *hint = scb->lastfound;
	struct sackblock *cur, *last, *prev;

	if (TAILQ_EMPTY(&scb->sackblocks)) {
		*sb = NULL;
		return FALSE;
	}

	if (hint == NULL) {
		/* No hint.  Search from start to end. */
		cur = TAILQ_FIRST(&scb->sackblocks);
		last = NULL;
		prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	} else {
		if (SEQ_GEQ(seq, hint->sblk_start)) {
			/* Search from hint to end of list. */
			cur = hint;
			last = NULL;
			prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
		} else {
			/* Search from front of list to hint. */
			cur = TAILQ_FIRST(&scb->sackblocks);
			last = hint;
			prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
		}
	}

	do {
		if (SEQ_GT(cur->sblk_end, seq)) {
			if (SEQ_GEQ(seq, cur->sblk_start)) {
				*sb = scb->lastfound = cur;
				return TRUE;
			} else {
				*sb = scb->lastfound =
				    TAILQ_PREV(cur, sackblock_list, sblk_list);
				return FALSE;
			}
		}
		cur = TAILQ_NEXT(cur, sblk_list);
	} while (cur != last);

	*sb = scb->lastfound = prev;
	return FALSE;
}

/*
 * Allocate a SACK block.
 */
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
	struct sackblock *sb;

	if (scb->freecache != NULL) {
		sb = scb->freecache;
		scb->freecache = NULL;
		tcpstat.tcps_sacksbfast++;
	} else {
		sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
		if (sb == NULL) {
			tcpstat.tcps_sacksbfailed++;
			return NULL;
		}
	}
	sb->sblk_start = raw_sb->rblk_start;
	sb->sblk_end = raw_sb->rblk_end;
	return sb;
}

static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
	if (scb->nblocks == MAXSAVEDBLOCKS) {
		/*
		 * Should try to kick out older blocks XXX JH
		 * May be able to coalesce with existing block.
		 * Or, go other way and free all blocks if we hit
		 * this limit.
		 */
		tcpstat.tcps_sacksboverflow++;
		return NULL;
	}
	return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
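 *
 * One freed block is parked in the scoreboard's single-slot cache
 * (scb->freecache) so the common free-then-realloc cycle can skip
 * kmalloc/kfree; allocations served from the cache are counted in
 * tcps_sacksbfast.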
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
	if (scb->freecache == NULL) {
		/* YYY Maybe use the latest freed block? */
		scb->freecache = s;
		return;
	}
	kfree(s, M_SACKBLOCK);
}

/*
 * Free up SACK blocks for data that's been acked.
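 *
 * If the cumulative ACK ends inside the first surviving block, the
 * peer may have reneged; clamp that block's start to th_ack.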
 */
static void
tcp_sack_ack_blocks(struct scoreboard *scb, tcp_seq th_ack)
{
	struct sackblock *sb, *nb;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
		nb = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks >= 0,
		    ("SACK block count underflow: %d < 0", scb->nblocks));
		sb = nb;
	}
	if (sb && SEQ_GT(th_ack, sb->sblk_start))
		sb->sblk_start = th_ack;	/* other side reneged? XXX */
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
void
tcp_sack_cleanup(struct scoreboard *scb)
{
	struct sackblock *sb, *nb;

	TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
		free_sackblock(scb, sb);
		--scb->nblocks;
	}
	KASSERT(scb->nblocks == 0,
	    ("SACK block count %d not zero", scb->nblocks));
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Delete the one-slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
	tcp_sack_cleanup(scb);
	if (scb->freecache != NULL) {
		kfree(scb->freecache, M_SACKBLOCK);
		scb->freecache = NULL;
	}
}

/*
 * Clean up the reported SACK block information.
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Returns	0 if not D-SACK block,
 *		1 if D-SACK,
 *		2 if duplicate of out-of-order D-SACK block.
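 *
 * Per RFC 2883, a D-SACK block reports data received more than once:
 * either the first block lies below snd_una, or it is contained
 * within the second block (a duplicate within out-of-order data).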
 */
int
tcp_sack_ndsack_blocks(struct raw_sackblock *blocks, const int numblocks,
		       tcp_seq snd_una)
{
	if (numblocks == 0)
		return 0;

	if (SEQ_LT(blocks[0].rblk_start, snd_una))
		return 1;

	/* block 0 inside block 1 */
	if (numblocks > 1 &&
	    SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
	    SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
		return 2;

	return 0;
}

/*
 * Update scoreboard on new incoming ACK.
 */
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
	const int numblocks = to->to_nsackblocks;
	struct raw_sackblock *blocks = to->to_sackblocks;
	struct scoreboard *scb = &tp->scb;
	int startblock, i;

	if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
		startblock = 1;
	else
		startblock = 0;

	to->to_flags |= TOF_SACK_REDUNDANT;
	for (i = startblock; i < numblocks; i++) {
		struct raw_sackblock *newsackblock = &blocks[i];
		boolean_t update;
		int error;

		/* Guard against ACK reordering */
		if (SEQ_LEQ(newsackblock->rblk_start, tp->snd_una))
			continue;

		/* Don't accept bad SACK blocks */
		if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
			tcpstat.tcps_rcvbadsackopt++;
			break;		/* skip all other blocks */
		}
		tcpstat.tcps_sacksbupdate++;

		error = insert_block(scb, newsackblock, &update);
		if (update)
			to->to_flags &= ~TOF_SACK_REDUNDANT;
		if (error)
			break;
	}
}

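/*
 * Update the scoreboard from an incoming ACK: retire fully acked
 * blocks, record the newly reported ones, and recompute the loss
 * point and rescue state.
 */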
void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
	struct scoreboard *scb = &tp->scb;
	int rexmt_high_update = 0;

	tcp_sack_ack_blocks(scb, tp->snd_una);
	tcp_sack_add_blocks(tp, to);
	tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
	    tp->t_rxtthresh);
	if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
		tp->rexmt_high = tp->snd_una;
		rexmt_high_update = 1;
	}
	if (tp->sack_flags & TSACK_F_SACKRESCUED) {
		if (SEQ_LEQ(tp->rexmt_rescue, tp->snd_una)) {
			tp->sack_flags &= ~TSACK_F_SACKRESCUED;
		} else if (tcp_aggressive_rescuesack && rexmt_high_update &&
		    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
			/* Drag RescueRxt along with HighRxt */
			tp->rexmt_rescue = tp->rexmt_high;
		}
	}
}

/*
 * Insert SACK block into sender's scoreboard.
 */
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
	struct sackblock *sb, *workingblock;
	boolean_t overlap_front;

	*update = TRUE;
	if (TAILQ_EMPTY(&scb->sackblocks)) {
		struct sackblock *newblock;

		KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

		newblock = alloc_sackblock(scb, raw_sb);
		if (newblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
		scb->nblocks = 1;
		return 0;
	}

	KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
	KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
	    ("too many SACK blocks %d", scb->nblocks));

	overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

	if (sb == NULL) {
		workingblock = alloc_sackblock_limit(scb, raw_sb);
		if (workingblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
		++scb->nblocks;
	} else {
		if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
			tcpstat.tcps_sacksbreused++;

			/* Extend old block */
			workingblock = sb;
			if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
				sb->sblk_end = raw_sb->rblk_end;
			} else {
				/* Contained in old block, nothing to consolidate */
				*update = FALSE;
				return 0;
			}
		} else {
			workingblock = alloc_sackblock_limit(scb, raw_sb);
			if (workingblock == NULL)
				return ENOMEM;
			TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
			    sblk_list);
			++scb->nblocks;
		}
	}

	/* Consolidate right-hand side. */
	sb = TAILQ_NEXT(workingblock, sblk_list);
	while (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
		struct sackblock *nextblock;

		nextblock = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		/* Remove completely overlapped block */
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed overlapped block: %d blocks left", scb->nblocks));
		sb = nextblock;
	}
	if (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
		/* Extend new block to cover partially overlapped old block. */
		workingblock->sblk_end = sb->sblk_end;
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed partial right: %d blocks left", scb->nblocks));
	}
	return 0;
}

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
	struct sackblock *sb;

	kprintf("%d blocks:", scb->nblocks);
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
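 *
 * Analogous to IsLost() in RFC 3517: everything below scb->lostseq
 * is treated as lost once rxtthresh discontiguous SACK blocks, or
 * rxtthresh segments' worth of bytes (rxtthresh - 1 segments when
 * tcp_do_rfc3517bis is set), have been SACKed above it.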
 */
void
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
	struct sackblock *sb;
	int nsackblocks = 0;
	int bytes_sacked = 0;
	int rxtthresh_bytes;

	if (tcp_do_rfc3517bis)
		rxtthresh_bytes = (rxtthresh - 1) * maxseg;
	else
		rxtthresh_bytes = rxtthresh * maxseg;

	sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	while (sb != NULL) {
		++nsackblocks;
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (nsackblocks == rxtthresh ||
		    bytes_sacked >= rxtthresh_bytes) {
			scb->lostseq = sb->sblk_start;
			return;
		}
		sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
	}
	scb->lostseq = snd_una;
}

/*
 * Return whether the given sequence number is considered lost.
 */
boolean_t
tcp_sack_islost(struct scoreboard *scb, tcp_seq seqnum)
{
	return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(struct scoreboard *scb, u_int amount)
{
	struct sackblock *sb;
	int bytes_sacked = 0;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (bytes_sacked >= amount)
			return TRUE;
	}
	return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(struct scoreboard *scb, tcp_seq seq)
{
	struct sackblock *sb;
	int bytes_sacked = 0;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_GT(seq, sb->sblk_start)) {
		bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
		sb = TAILQ_NEXT(sb, sblk_list);
	}
	return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
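 *
 * Roughly RFC 3517's "pipe": un-SACKed bytes above the loss point
 * (presumed still in flight) plus retransmitted bytes that have not
 * yet been SACKed or cumulatively acknowledged.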
 */
uint32_t
tcp_sack_compute_pipe(struct tcpcb *tp)
{
	struct scoreboard *scb = &tp->scb;
	struct sackblock *sb;
	int nlost, nretransmitted;
	tcp_seq end;

	nlost = tp->snd_max - scb->lostseq;
	nretransmitted = tp->rexmt_high - tp->snd_una;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
			end = seq_min(sb->sblk_end, tp->rexmt_high);
			nretransmitted -= end - sb->sblk_start;
		}
		if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
			nlost -= sb->sblk_end - sb->sblk_start;
	}

	return (nlost + nretransmitted);
}

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
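 *
 * Selection order: (1) un-SACKed data already marked lost, (2) new
 * data within the send window, (3) un-SACKed data not yet marked
 * lost, (4) a rescue retransmission to keep the ACK clock alive.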
 */
boolean_t
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
	struct scoreboard *scb = &tp->scb;
	struct socket *so = tp->t_inpcb->inp_socket;
	struct sackblock *sb;
	const struct sackblock *lastblock =
	    TAILQ_LAST(&scb->sackblocks, sackblock_list);
	tcp_seq torexmt;
	long len, off;

	/* skip SACKed data */
	tcp_sack_skip_sacked(scb, &tp->rexmt_high);

	/* Look for lost data. */
	torexmt = tp->rexmt_high;
	*rescue = FALSE;
	if (lastblock != NULL) {
		if (SEQ_LT(torexmt, lastblock->sblk_end) &&
		    tcp_sack_islost(scb, torexmt)) {
sendunsacked:
			*nextrexmt = torexmt;
			/* If the left-hand edge has been SACKed, pull it in. */
			if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
				*plen = sb->sblk_start - torexmt;
			else
				*plen = tp->t_maxseg;
			return TRUE;
		}
	}

	/* See if unsent data available within send window. */
	off = tp->snd_max - tp->snd_una;
	len = (long) ulmin(so->so_snd.ssb_cc, tp->snd_wnd) - off;
	if (len > 0) {
		*nextrexmt = tp->snd_max;	/* Send new data. */
		*plen = tp->t_maxseg;
		return TRUE;
	}

	/* We're less certain this data has been lost. */
	if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
		goto sendunsacked;

	/* Rescue retransmission */
	if (tcp_do_rescuesack || tcp_do_rfc3517bis) {
		tcpstat.tcps_sackrescue_try++;
		if (tp->sack_flags & TSACK_F_SACKRESCUED) {
			if (!tcp_aggressive_rescuesack)
				return FALSE;

			/*
			 * Aggressive variant of the rescue retransmission.
			 *
			 * The idea of the rescue retransmission is to sustain
			 * the ACK clock and thus avoid timeout retransmission.
			 *
			 * Under some situations, the conservative approach
			 * suggested in the draft
			 * http://tools.ietf.org/html/
			 * draft-nishida-tcpm-rescue-retransmission-00
			 * cannot sustain the ACK clock, since it only allows
			 * one rescue retransmission before a cumulative ACK
			 * covers the segment transmitted by the rescue
			 * retransmission.
			 *
			 * We try to locate the next unSACKed segment which
			 * follows the previously sent rescue segment.  If
			 * there is no such segment, we loop back to the first
			 * unacknowledged segment.
			 */

			/*
			 * Skip SACKed data, but here we follow
			 * the last transmitted rescue segment.
			 */
			torexmt = tp->rexmt_rescue;
			tcp_sack_skip_sacked(scb, &torexmt);
		}
		if (torexmt == tp->snd_max) {
			/* Nothing left to retransmit; restart */
			torexmt = tp->snd_una;
		}
		*rescue = TRUE;
		goto sendunsacked;
	} else if (tcp_do_smartsack && lastblock == NULL) {
		tcpstat.tcps_sackrescue_try++;
		*rescue = TRUE;
		goto sendunsacked;
	}

	return FALSE;
}

/*
 * Return the next sequence number higher than "*prexmt" that has
 * not been SACKed.
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
	struct sackblock *sb;

	/* skip SACKed data */
	if (sack_block_lookup(scb, *prexmt, &sb))
		*prexmt = sb->sblk_end;
}

/*
 * Length of the first stretch of unSACKed data.
 */
uint32_t
tcp_sack_first_unsacked_len(struct tcpcb *tp)
{
	struct sackblock *sb;

	sb = TAILQ_FIRST(&tp->scb.sackblocks);
	if (sb == NULL)
		return tp->t_maxseg;

	KASSERT(SEQ_LT(tp->snd_una, sb->sblk_start),
	    ("invalid sb start %u, snd_una %u",
	     sb->sblk_start, tp->snd_una));
	return (sb->sblk_start - tp->snd_una);
}

#ifdef later
void
tcp_sack_save_scoreboard(struct scoreboard *scb)
{
	scb->sackblocks_prev = scb->sackblocks;
	TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
			   u_int maxseg)
{
	struct sackblock *sb;

	scb->sackblocks = scb->sackblocks_prev;
	scb->nblocks = 0;
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		++scb->nblocks;
	tcp_sack_ack_blocks(scb, snd_una);
	scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
	int i;
	static int ndumped;

	/* only need a couple of these to debug most problems */
	if (++ndumped > 900)
		return;

	kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
	for (i = 0; i < tp->nsackhistory; ++i)
		kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
		    tp->sackhistory[i].rblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
}
#endif

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
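 *
 * Blocks entirely below rcv_nxt are dropped, a block straddling
 * rcv_nxt is trimmed to start there, and the survivors are
 * compacted toward the front of the array.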
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
	int i, nblocks, openslot;

	tcp_sack_dump_history("before tcp_sack_ack_history", tp);
	nblocks = tp->nsackhistory;
	for (i = openslot = 0; i < nblocks; ++i) {
		if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
			--tp->nsackhistory;
			continue;
		}
		if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
			tp->sackhistory[i].rblk_start = tp->rcv_nxt;
		if (i == openslot)
			++openslot;
		else
			tp->sackhistory[openslot++] = tp->sackhistory[i];
	}
	tcp_sack_dump_history("after tcp_sack_ack_history", tp);
	KASSERT(openslot == tp->nsackhistory,
	    ("tcp_sack_ack_history miscounted: %d != %d",
	    openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
 */
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
	int i, cindex;

	tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
	/*
	 * Six cases:
	 *	0) no overlap
	 *	1) newblock == oldblock
	 *	2) oldblock contains newblock
	 *	3) newblock contains oldblock
	 *	4) tail of oldblock overlaps or abuts start of newblock
	 *	5) tail of newblock overlaps or abuts head of oldblock
	 */
	for (i = cindex = 0; i < tp->nsackhistory; ++i) {
		struct raw_sackblock *oldblock = &tp->sackhistory[i];
		tcp_seq old_start = oldblock->rblk_start;
		tcp_seq old_end = oldblock->rblk_end;

		if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
			/* Case 0:  no overlap.  Copy old block. */
			copy[cindex++] = *oldblock;
			continue;
		}

		if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
			/* Cases 1 & 2.  Move block to front of history. */
			int j;

			start = old_start;
			end = old_end;
			/* no need to check rest of blocks */
			for (j = i + 1; j < tp->nsackhistory; ++j)
				copy[cindex++] = tp->sackhistory[j];
			break;
		}

		if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
			/* Case 4:  extend start of new block. */
			start = old_start;
		} else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
			/* Case 5: extend end of new block */
			end = old_end;
		} else {
			/* Case 3.  Delete old block by not copying it. */
			KASSERT(SEQ_LEQ(start, old_start) &&
				SEQ_GEQ(end, old_end),
			    ("bad logic: old [%u, %u), new [%u, %u)",
			     old_start, old_end, start, end));
		}
	}

	/*
	 * Insert the new block at slot 0 so the history stays ordered
	 * newest-first.
	 */
	tp->sackhistory[0].rblk_start = start;
	tp->sackhistory[0].rblk_end = end;
	cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
	for (i = 0; i < cindex; ++i)
		tp->sackhistory[i + 1] = copy[i];
	tp->nsackhistory = cindex + 1;
	tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}

/*
 * Fill in SACK report to return to data sender.
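 *
 * Layout: an aligned kind/length header followed by 8-byte blocks.
 * The first block reports the segment that triggered this ACK,
 * optionally followed by the enclosing block, then older blocks
 * from the history (newest first) as option space permits; with
 * smartsack left reports, blocks are instead walked off the
 * reassembly queue.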
 */
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
	u_int optlen = *plen;
	uint32_t *lp = (uint32_t *)(opt + optlen);
	uint32_t *olp;
	tcp_seq hstart = tp->rcv_nxt, hend;
	int nblocks;

	KASSERT(TCP_MAXOLEN - optlen >=
	    TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
	    ("no room for SACK header and one block: optlen %d", optlen));

	if (tp->sack_flags & TSACK_F_DUPSEG)
		tcpstat.tcps_snddsackopt++;
	else
		tcpstat.tcps_sndsackopt++;

	olp = lp++;
	optlen += TCPOLEN_SACK_ALIGNED;

	tcp_sack_ack_history(tp);
	if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
		*lp++ = htonl(tp->reportblk.rblk_start);
		*lp++ = htonl(tp->reportblk.rblk_end);
		optlen += TCPOLEN_SACK_BLOCK;
		hstart = tp->reportblk.rblk_start;
		hend = tp->reportblk.rblk_end;
		if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
			KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
			    ("no room for enclosing SACK block: optlen %d",
			    optlen));
			*lp++ = htonl(tp->encloseblk.rblk_start);
			*lp++ = htonl(tp->encloseblk.rblk_end);
			optlen += TCPOLEN_SACK_BLOCK;
			hstart = tp->encloseblk.rblk_start;
			hend = tp->encloseblk.rblk_end;
		}
		if (SEQ_GT(hstart, tp->rcv_nxt))
			tcp_sack_update_reported_history(tp, hstart, hend);
	}
	if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
		/* Fill in from left!  Walk re-assembly queue. */
		struct tseg_qent *q;

		q = TAILQ_FIRST(&tp->t_segq);
		while (q != NULL &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			*lp++ = htonl(q->tqe_th->th_seq);
			*lp++ = htonl(TCP_SACK_BLKEND(
			    q->tqe_th->th_seq + q->tqe_len,
			    q->tqe_th->th_flags));
			optlen += TCPOLEN_SACK_BLOCK;
			q = TAILQ_NEXT(q, tqe_q);
		}
	} else {
		int n = 0;

		/* Fill in SACK blocks from right side. */
		while (n < tp->nsackhistory &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			if (tp->sackhistory[n].rblk_start != hstart) {
				*lp++ = htonl(tp->sackhistory[n].rblk_start);
				*lp++ = htonl(tp->sackhistory[n].rblk_end);
				optlen += TCPOLEN_SACK_BLOCK;
			}
			++n;
		}
	}
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	nblocks = (lp - olp - 1) / 2;
	*olp = htonl(TCPOPT_SACK_ALIGNED |
		     (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
	*plen = optlen;
}
922