/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

/*
 * Implemented:
 *
 * RFC 2018
 * RFC 2883
 * RFC 3517
 */

struct sackblock {
	tcp_seq			sblk_start;
	tcp_seq			sblk_end;
	TAILQ_ENTRY(sackblock)	sblk_list;
};
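
/*
 * Blocks on the sackblocks list are kept in ascending sequence order
 * and are disjoint; insert_block() below merges any overlapping or
 * abutting reports as they arrive.
 */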

#define	MAXSAVEDBLOCKS	8			/* per connection limit */

static int insert_block(struct scoreboard *scb,
			const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
	struct scoreboard *scb = &tp->scb;

	scb->nblocks = 0;
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
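 *
 * Illustrative example (hypothetical sequence numbers): with blocks
 * [10, 20) and [30, 40) on the scoreboard, a lookup of 15 sets *sb to
 * [10, 20) and returns TRUE; a lookup of 25 sets *sb to [10, 20) and
 * returns FALSE; a lookup of 5 sets *sb to NULL and returns FALSE.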
 */
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
	struct sackblock *hint = scb->lastfound;
	struct sackblock *cur, *last, *prev;

	if (TAILQ_EMPTY(&scb->sackblocks)) {
		*sb = NULL;
		return FALSE;
	}

	if (hint == NULL) {
		/* No hint.  Search from start to end. */
		cur = TAILQ_FIRST(&scb->sackblocks);
		last = NULL;
		prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	} else {
		if (SEQ_GEQ(seq, hint->sblk_start)) {
			/* Search from hint to end of list. */
			cur = hint;
			last = NULL;
			prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
		} else {
			/* Search from front of list to hint. */
			cur = TAILQ_FIRST(&scb->sackblocks);
			last = hint;
			prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
		}
	}

	do {
		if (SEQ_GT(cur->sblk_end, seq)) {
			if (SEQ_GEQ(seq, cur->sblk_start)) {
				*sb = scb->lastfound = cur;
				return TRUE;
			} else {
				*sb = scb->lastfound =
				    TAILQ_PREV(cur, sackblock_list, sblk_list);
				return FALSE;
			}
		}
		cur = TAILQ_NEXT(cur, sblk_list);
	} while (cur != last);

	*sb = scb->lastfound = prev;
	return FALSE;
}

/*
 * Allocate a SACK block.
 */
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
	struct sackblock *sb;

	if (scb->freecache != NULL) {
		sb = scb->freecache;
		scb->freecache = NULL;
		tcpstat.tcps_sacksbfast++;
	} else {
		sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
		if (sb == NULL) {
			tcpstat.tcps_sacksbfailed++;
			return NULL;
		}
	}
	sb->sblk_start = raw_sb->rblk_start;
	sb->sblk_end = raw_sb->rblk_end;
	return sb;
}

static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
	if (scb->nblocks == MAXSAVEDBLOCKS) {
		/*
		 * Should try to kick out older blocks XXX JH
		 * May be able to coalesce with existing block.
		 * Or, go other way and free all blocks if we hit
		 * this limit.
		 */
		tcpstat.tcps_sacksboverflow++;
		return NULL;
	}
	return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
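 *
 * The freed block may be parked in the scoreboard's one-slot
 * freecache so that a later allocation can reuse it without another
 * kmalloc()/kfree() round trip; such reuse is counted by
 * tcps_sacksbfast.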
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
	if (scb->freecache == NULL) {
		/* YYY Maybe use the latest freed block? */
		scb->freecache = s;
		return;
	}
	kfree(s, M_SACKBLOCK);
}

/*
 * Free up SACK blocks for data that's been acked.
 */
static void
tcp_sack_ack_blocks(struct scoreboard *scb, tcp_seq th_ack)
{
	struct sackblock *sb, *nb;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
		nb = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks >= 0,
		    ("SACK block count underflow: %d < 0", scb->nblocks));
		sb = nb;
	}
	if (sb && SEQ_GEQ(th_ack, sb->sblk_start)) {
		/* Other side reneged? XXX */
		tcpstat.tcps_sackrenege++;
		tcp_sack_cleanup(scb);
	}
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
void
tcp_sack_cleanup(struct scoreboard *scb)
{
	struct sackblock *sb, *nb;

	TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
		free_sackblock(scb, sb);
		--scb->nblocks;
	}
	KASSERT(scb->nblocks == 0,
	    ("SACK block count %d not zero", scb->nblocks));
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Also free the one-slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
	tcp_sack_cleanup(scb);
	if (scb->freecache != NULL) {
		kfree(scb->freecache, M_SACKBLOCK);
		scb->freecache = NULL;
	}
}

/*
 * Clean up the reported SACK block information.
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Returns	0 if not D-SACK block,
 *		1 if D-SACK,
 *		2 if duplicate of out-of-order D-SACK block.
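 *
 * Per RFC 2883, a D-SACK block reporting duplicate data is placed
 * first in the option; it either lies below the cumulative ACK point
 * (the first test below) or is enclosed by the second SACK block
 * (the second test).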
 */
int
tcp_sack_ndsack_blocks(const struct raw_sackblock *blocks, const int numblocks,
    tcp_seq snd_una)
{
	if (numblocks == 0)
		return 0;

	if (SEQ_LT(blocks[0].rblk_start, snd_una))
		return 1;

	/* block 0 inside block 1 */
	if (numblocks > 1 &&
	    SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
	    SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
		return 2;

	return 0;
}

/*
 * Update scoreboard on new incoming ACK.
 */
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
	const int numblocks = to->to_nsackblocks;
	struct raw_sackblock *blocks = to->to_sackblocks;
	struct scoreboard *scb = &tp->scb;
	int startblock, i;

	if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
		startblock = 1;
	else
		startblock = 0;
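
	/*
	 * Assume the incoming SACK option is redundant; any block that
	 * actually changes the scoreboard clears the flag below.
	 */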
	to->to_flags |= TOF_SACK_REDUNDANT;
	for (i = startblock; i < numblocks; i++) {
		struct raw_sackblock *newsackblock = &blocks[i];
		boolean_t update;
		int error;

		/* Guard against ACK reordering */
		if (SEQ_LEQ(newsackblock->rblk_start, tp->snd_una))
			continue;

		/* Don't accept bad SACK blocks */
		if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
			tcpstat.tcps_rcvbadsackopt++;
			break;		/* skip all other blocks */
		}
		tcpstat.tcps_sacksbupdate++;

		error = insert_block(scb, newsackblock, &update);
		if (update)
			to->to_flags &= ~TOF_SACK_REDUNDANT;
		if (error)
			break;
	}
}

void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
	struct scoreboard *scb = &tp->scb;
	int rexmt_high_update = 0;

	tcp_sack_ack_blocks(scb, tp->snd_una);
	tcp_sack_add_blocks(tp, to);
	tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
	    tp->t_rxtthresh);
	if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
		tp->rexmt_high = tp->snd_una;
		rexmt_high_update = 1;
	}
	if (tp->sack_flags & TSACK_F_SACKRESCUED) {
		if (SEQ_LEQ(tp->rexmt_rescue, tp->snd_una)) {
			tp->sack_flags &= ~TSACK_F_SACKRESCUED;
		} else if (tcp_aggressive_rescuesack && rexmt_high_update &&
		    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
			/* Drag RescueRxt along with HighRxt */
			tp->rexmt_rescue = tp->rexmt_high;
		}
	}
}

/*
 * Insert SACK block into sender's scoreboard.
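 *
 * A sketch of the consolidation, with hypothetical sequence numbers:
 * inserting [15, 35) into a scoreboard holding [10, 20) and [30, 40)
 * first extends the front block to [10, 35), then the right-hand
 * consolidation below absorbs [30, 40), leaving a single [10, 40).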
 */
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
	struct sackblock *sb, *workingblock;
	boolean_t overlap_front;

	*update = TRUE;
	if (TAILQ_EMPTY(&scb->sackblocks)) {
		struct sackblock *newblock;

		KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

		newblock = alloc_sackblock(scb, raw_sb);
		if (newblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
		scb->nblocks = 1;
		return 0;
	}

	KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
	KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
	    ("too many SACK blocks %d", scb->nblocks));

	overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

	if (sb == NULL) {
		workingblock = alloc_sackblock_limit(scb, raw_sb);
		if (workingblock == NULL)
			return ENOMEM;
		TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
		++scb->nblocks;
	} else {
		if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
			tcpstat.tcps_sacksbreused++;

			/* Extend old block */
			workingblock = sb;
			if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
				sb->sblk_end = raw_sb->rblk_end;
			} else {
				/* Exact match, nothing to consolidate */
				*update = FALSE;
				return 0;
			}
		} else {
			workingblock = alloc_sackblock_limit(scb, raw_sb);
			if (workingblock == NULL)
				return ENOMEM;
			TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
			    sblk_list);
			++scb->nblocks;
		}
	}

	/* Consolidate right-hand side. */
	sb = TAILQ_NEXT(workingblock, sblk_list);
	while (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
		struct sackblock *nextblock;

		nextblock = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		/* Remove completely overlapped block */
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed overlapped block: %d blocks left", scb->nblocks));
		sb = nextblock;
	}
	if (sb != NULL &&
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
		/* Extend new block to cover partially overlapped old block. */
		workingblock->sblk_end = sb->sblk_end;
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		--scb->nblocks;
		KASSERT(scb->nblocks > 0,
		    ("removed partial right: %d blocks left", scb->nblocks));
	}
	return 0;
}

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
	const struct sackblock *sb;

	kprintf("%d blocks:", scb->nblocks);
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
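 *
 * This caches the boundary consulted by tcp_sack_islost(), roughly
 * RFC 3517's IsLost(): everything below lostseq has at least
 * rxtthresh SACK blocks, or about rxtthresh segments' worth of
 * SACKed bytes, above it and is therefore considered lost.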
 */
void
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
	struct sackblock *sb;
	int nsackblocks = 0;
	int bytes_sacked = 0;
	int rxtthresh_bytes;

	if (tcp_do_rfc3517bis)
		rxtthresh_bytes = (rxtthresh - 1) * maxseg;
	else
		rxtthresh_bytes = rxtthresh * maxseg;

	sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
	while (sb != NULL) {
		++nsackblocks;
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (nsackblocks == rxtthresh ||
		    bytes_sacked >= rxtthresh_bytes) {
			scb->lostseq = sb->sblk_start;
			return;
		}
		sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
	}
	scb->lostseq = snd_una;
}

/*
 * Return whether the given sequence number is considered lost.
 */
boolean_t
tcp_sack_islost(const struct scoreboard *scb, tcp_seq seqnum)
{
	return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(const struct scoreboard *scb, u_int amount)
{
	const struct sackblock *sb;
	int bytes_sacked = 0;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (bytes_sacked >= amount)
			return TRUE;
	}
	return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(const struct scoreboard *scb, tcp_seq seq)
{
	const struct sackblock *sb;
	int bytes_sacked = 0;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_GT(seq, sb->sblk_start)) {
		bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
		sb = TAILQ_NEXT(sb, sblk_list);
	}
	return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
 */
uint32_t
tcp_sack_compute_pipe(const struct tcpcb *tp)
{
	const struct scoreboard *scb = &tp->scb;
	const struct sackblock *sb;
	int nlost, nretransmitted;
	tcp_seq end;

	nlost = tp->snd_max - scb->lostseq;
	nretransmitted = tp->rexmt_high - tp->snd_una;

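	/*
	 * Despite its name, nlost holds the bytes at or above lostseq
	 * (sent data not presumed lost); the loop below subtracts the
	 * SACKed portions from both counts, so the sum approximates
	 * the un-SACKed data still in the network.
	 */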
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
			end = seq_min(sb->sblk_end, tp->rexmt_high);
			nretransmitted -= end - sb->sblk_start;
		}
		if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
			nlost -= sb->sblk_end - sb->sblk_start;
	}

	return (nlost + nretransmitted);
}

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
 */
boolean_t
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
	struct scoreboard *scb = &tp->scb;
	struct socket *so = tp->t_inpcb->inp_socket;
	struct sackblock *sb;
	const struct sackblock *lastblock =
	    TAILQ_LAST(&scb->sackblocks, sackblock_list);
	tcp_seq torexmt;
	long len, off;

	/* skip SACKed data */
	tcp_sack_skip_sacked(scb, &tp->rexmt_high);

	/* Look for lost data. */
	torexmt = tp->rexmt_high;
	*rescue = FALSE;
	if (lastblock != NULL) {
		if (SEQ_LT(torexmt, lastblock->sblk_end) &&
		    tcp_sack_islost(scb, torexmt)) {
sendunsacked:
			*nextrexmt = torexmt;
			/* If the left-hand edge has been SACKed, pull it in. */
			if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
				*plen = sb->sblk_start - torexmt;
			else
				*plen = tp->t_maxseg;
			return TRUE;
		}
	}

	/* See if unsent data available within send window. */
	off = tp->snd_max - tp->snd_una;
	len = (long) ulmin(so->so_snd.ssb_cc, tp->snd_wnd) - off;
	if (len > 0) {
		*nextrexmt = tp->snd_max;	/* Send new data. */
		*plen = tp->t_maxseg;
		return TRUE;
	}

	/* We're less certain this data has been lost. */
	if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
		goto sendunsacked;

	/* Rescue retransmission */
	if (tcp_do_rescuesack || tcp_do_rfc3517bis) {
		tcpstat.tcps_sackrescue_try++;
		if (tp->sack_flags & TSACK_F_SACKRESCUED) {
			if (!tcp_aggressive_rescuesack)
				return FALSE;

			/*
			 * Aggressive variant of the rescue retransmission.
			 *
			 * The idea of the rescue retransmission is to sustain
			 * the ACK clock and thus avoid a timeout retransmission.
			 *
			 * In some situations, the conservative approach
			 * suggested in the draft
			 * http://tools.ietf.org/html/
			 * draft-nishida-tcpm-rescue-retransmission-00
			 * cannot sustain the ACK clock, since it allows only
			 * one rescue retransmission before a cumulative ACK
			 * covers the segment sent by the rescue
			 * retransmission.
			 *
			 * We try to locate the next unSACKed segment which
			 * follows the previously sent rescue segment.  If
			 * there is no such segment, we loop back to the first
			 * unacknowledged segment.
			 */

			/*
			 * Skip SACKed data, but here we follow
			 * the last transmitted rescue segment.
			 */
			torexmt = tp->rexmt_rescue;
			tcp_sack_skip_sacked(scb, &torexmt);
		}
		if (torexmt == tp->snd_max) {
			/* Nothing left to retransmit; restart */
			torexmt = tp->snd_una;
		}
		*rescue = TRUE;
		goto sendunsacked;
	} else if (tcp_do_smartsack && lastblock == NULL) {
		tcpstat.tcps_sackrescue_try++;
		*rescue = TRUE;
		goto sendunsacked;
	}

	return FALSE;
}

/*
 * Return the next sequence number higher than "*prexmt" that has
 * not been SACKed.
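 *
 * For example, with a SACK block [10, 20) and *prexmt == 12, *prexmt
 * is advanced to 20; if *prexmt does not fall inside any block it is
 * left unchanged.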
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
	struct sackblock *sb;

	/* skip SACKed data */
	if (sack_block_lookup(scb, *prexmt, &sb))
		*prexmt = sb->sblk_end;
}

/*
 * Return the length of the first run of unSACKed data.
 */
uint32_t
tcp_sack_first_unsacked_len(const struct tcpcb *tp)
{
	const struct sackblock *sb;

	sb = TAILQ_FIRST(&tp->scb.sackblocks);
	if (sb == NULL)
		return tp->t_maxseg;

	KASSERT(SEQ_LT(tp->snd_una, sb->sblk_start),
	    ("invalid sb start %u, snd_una %u",
	     sb->sblk_start, tp->snd_una));
	return (sb->sblk_start - tp->snd_una);
}

#ifdef later
void
tcp_sack_save_scoreboard(struct scoreboard *scb)
{
	scb->sackblocks_prev = scb->sackblocks;
	TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
			   u_int maxseg)
{
	struct sackblock *sb;

	scb->sackblocks = scb->sackblocks_prev;
	scb->nblocks = 0;
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		++scb->nblocks;
	tcp_sack_ack_blocks(scb, snd_una);
	scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
	int i;
	static int ndumped;

	/* only need a couple of these to debug most problems */
	if (++ndumped > 900)
		return;

	kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
	for (i = 0; i < tp->nsackhistory; ++i)
		kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
		    tp->sackhistory[i].rblk_end);
	kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
}
#endif

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
	int i, nblocks, openslot;

	tcp_sack_dump_history("before tcp_sack_ack_history", tp);
	nblocks = tp->nsackhistory;
	for (i = openslot = 0; i < nblocks; ++i) {
		if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
			--tp->nsackhistory;
			continue;
		}
		if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
			tp->sackhistory[i].rblk_start = tp->rcv_nxt;
		if (i == openslot)
			++openslot;
		else
			tp->sackhistory[openslot++] = tp->sackhistory[i];
	}
	tcp_sack_dump_history("after tcp_sack_ack_history", tp);
	KASSERT(openslot == tp->nsackhistory,
	    ("tcp_sack_ack_history miscounted: %d != %d",
	    openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
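 *
 * Example (hypothetical sequence numbers): merging [20, 30) into a
 * history of [10, 22) and [40, 50) extends the new block to [10, 30)
 * (case 4 below), copies [40, 50) unchanged (case 0), and places the
 * merged block at the front of the history.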
 */
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
	int i, cindex;

	tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
	/*
	 * Six cases:
	 *	0) no overlap
	 *	1) newblock == oldblock
	 *	2) oldblock contains newblock
	 *	3) newblock contains oldblock
	 *	4) tail of oldblock overlaps or abuts start of newblock
	 *	5) tail of newblock overlaps or abuts head of oldblock
	 */
	for (i = cindex = 0; i < tp->nsackhistory; ++i) {
		struct raw_sackblock *oldblock = &tp->sackhistory[i];
		tcp_seq old_start = oldblock->rblk_start;
		tcp_seq old_end = oldblock->rblk_end;

		if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
			/* Case 0:  no overlap.  Copy old block. */
			copy[cindex++] = *oldblock;
			continue;
		}

		if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
			/* Cases 1 & 2.  Move block to front of history. */
			int j;

			start = old_start;
			end = old_end;
			/* no need to check rest of blocks */
			for (j = i + 1; j < tp->nsackhistory; ++j)
				copy[cindex++] = tp->sackhistory[j];
			break;
		}

		if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
			/* Case 4:  extend start of new block. */
			start = old_start;
		} else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
			/* Case 5: extend end of new block */
			end = old_end;
		} else {
			/* Case 3.  Delete old block by not copying it. */
			KASSERT(SEQ_LEQ(start, old_start) &&
				SEQ_GEQ(end, old_end),
			    ("bad logic: old [%u, %u), new [%u, %u)",
			     old_start, old_end, start, end));
		}
	}

	/* insert new block */
	tp->sackhistory[0].rblk_start = start;
	tp->sackhistory[0].rblk_end = end;
	cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
	for (i = 0; i < cindex; ++i)
		tp->sackhistory[i + 1] = copy[i];
	tp->nsackhistory = cindex + 1;
	tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}

/*
 * Fill in SACK report to return to data sender.
 */
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
	u_int optlen = *plen;
	uint32_t *lp = (uint32_t *)(opt + optlen);
	uint32_t *olp;
	tcp_seq hstart = tp->rcv_nxt, hend;
	int nblocks;

	KASSERT(TCP_MAXOLEN - optlen >=
	    TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
	    ("no room for SACK header and one block: optlen %d", optlen));

	if (tp->sack_flags & TSACK_F_DUPSEG)
		tcpstat.tcps_snddsackopt++;
	else
		tcpstat.tcps_sndsackopt++;

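	/*
	 * Reserve room for the SACK option header; *olp is back-patched
	 * at the bottom of this function once the number of SACK blocks
	 * is known.
	 */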
	olp = lp++;
	optlen += TCPOLEN_SACK_ALIGNED;

	tcp_sack_ack_history(tp);
	if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
		*lp++ = htonl(tp->reportblk.rblk_start);
		*lp++ = htonl(tp->reportblk.rblk_end);
		optlen += TCPOLEN_SACK_BLOCK;
		hstart = tp->reportblk.rblk_start;
		hend = tp->reportblk.rblk_end;
		if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
			KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
			    ("no room for enclosing SACK block: optlen %d",
			    optlen));
			*lp++ = htonl(tp->encloseblk.rblk_start);
			*lp++ = htonl(tp->encloseblk.rblk_end);
			optlen += TCPOLEN_SACK_BLOCK;
			hstart = tp->encloseblk.rblk_start;
			hend = tp->encloseblk.rblk_end;
		}
		if (SEQ_GT(hstart, tp->rcv_nxt))
			tcp_sack_update_reported_history(tp, hstart, hend);
	}
	if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
		/* Fill in from left!  Walk re-assembly queue. */
		struct tseg_qent *q;

		q = TAILQ_FIRST(&tp->t_segq);
		while (q != NULL &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			*lp++ = htonl(q->tqe_th->th_seq);
			*lp++ = htonl(TCP_SACK_BLKEND(
			    q->tqe_th->th_seq + q->tqe_len,
			    q->tqe_th->th_flags));
			optlen += TCPOLEN_SACK_BLOCK;
			q = TAILQ_NEXT(q, tqe_q);
		}
	} else {
		int n = 0;

		/* Fill in SACK blocks from right side. */
		while (n < tp->nsackhistory &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			if (tp->sackhistory[n].rblk_start != hstart) {
				*lp++ = htonl(tp->sackhistory[n].rblk_start);
				*lp++ = htonl(tp->sackhistory[n].rblk_end);
				optlen += TCPOLEN_SACK_BLOCK;
			}
			++n;
		}
	}
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	tp->sack_flags &=
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	nblocks = (lp - olp - 1) / 2;
	*olp = htonl(TCPOPT_SACK_ALIGNED |
		     (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
	*plen = optlen;
}
925