/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

/*
 * Implemented:
 *
 * RFC 2018
 * RFC 2883
 * RFC 3517
 */

struct sackblock {
        tcp_seq                 sblk_start;
        tcp_seq                 sblk_end;
        TAILQ_ENTRY(sackblock)  sblk_list;
};

#define MAXSAVEDBLOCKS  8               /* per connection limit */

static int      insert_block(struct scoreboard *scb,
                    const struct raw_sackblock *raw_sb, boolean_t *update);
static void     update_lostseq(struct scoreboard *scb, tcp_seq snd_una,
                    u_int maxseg, int rxtthresh);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
        struct scoreboard *scb = &tp->scb;

        scb->nblocks = 0;
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}
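
/*
 * For reference: the sender-side scoreboard fields used throughout this
 * file live in struct scoreboard, declared elsewhere (tcp_var.h in this
 * tree).  Sketch only; the real declaration may differ:
 *
 *      nblocks    - number of sackblocks currently saved
 *      sackblocks - TAILQ_HEAD(sackblock_list, sackblock), kept sorted by
 *                   sequence number
 *      lastfound  - hint for sack_block_lookup()
 *      freecache  - one-slot cache of a freed sackblock
 *      lostseq    - data below this sequence number is considered lost
 */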

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
 */
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
        struct sackblock *hint = scb->lastfound;
        struct sackblock *cur, *last, *prev;

        if (TAILQ_EMPTY(&scb->sackblocks)) {
                *sb = NULL;
                return FALSE;
        }

        if (hint == NULL) {
                /* No hint.  Search from start to end. */
                cur = TAILQ_FIRST(&scb->sackblocks);
                last = NULL;
                prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        } else {
                if (SEQ_GEQ(seq, hint->sblk_start)) {
                        /* Search from hint to end of list. */
                        cur = hint;
                        last = NULL;
                        prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
                } else {
                        /* Search from front of list to hint. */
                        cur = TAILQ_FIRST(&scb->sackblocks);
                        last = hint;
                        prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
                }
        }

        do {
                if (SEQ_GT(cur->sblk_end, seq)) {
                        if (SEQ_GEQ(seq, cur->sblk_start)) {
                                *sb = scb->lastfound = cur;
                                return TRUE;
                        } else {
                                *sb = scb->lastfound =
                                    TAILQ_PREV(cur, sackblock_list, sblk_list);
                                return FALSE;
                        }
                }
                cur = TAILQ_NEXT(cur, sblk_list);
        } while (cur != last);

        *sb = scb->lastfound = prev;
        return FALSE;
}

/*
 * Allocate a SACK block.
 */
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
        struct sackblock *sb;

        if (scb->freecache != NULL) {
                sb = scb->freecache;
                scb->freecache = NULL;
                tcpstat.tcps_sacksbfast++;
        } else {
                sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
                if (sb == NULL) {
                        tcpstat.tcps_sacksbfailed++;
                        return NULL;
                }
        }
        sb->sblk_start = raw_sb->rblk_start;
        sb->sblk_end = raw_sb->rblk_end;
        return sb;
}

static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
        if (scb->nblocks == MAXSAVEDBLOCKS) {
                /*
                 * Should try to kick out older blocks XXX JH
                 * May be able to coalesce with existing block.
                 * Or, go other way and free all blocks if we hit
                 * this limit.
                 */
                tcpstat.tcps_sacksboverflow++;
                return NULL;
        }
        return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
        if (scb->freecache == NULL) {
                /* YYY Maybe use the latest freed block? */
                scb->freecache = s;
                return;
        }
        kfree(s, M_SACKBLOCK);
}

/*
 * Free up SACK blocks for data that's been acked.
 */
static void
tcp_sack_ack_blocks(struct scoreboard *scb, tcp_seq th_ack)
{
        struct sackblock *sb, *nb;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
                nb = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks >= 0,
                    ("SACK block count underflow: %d < 0", scb->nblocks));
                sb = nb;
        }
        if (sb && SEQ_GT(th_ack, sb->sblk_start))
                sb->sblk_start = th_ack;        /* other side reneged? XXX */
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
void
tcp_sack_cleanup(struct scoreboard *scb)
{
        struct sackblock *sb, *nb;

        TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
                free_sackblock(scb, sb);
                --scb->nblocks;
        }
        KASSERT(scb->nblocks == 0,
            ("SACK block %d count not zero", scb->nblocks));
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Delete the one slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
        tcp_sack_cleanup(scb);
        if (scb->freecache != NULL) {
                kfree(scb->freecache, M_SACKBLOCK);
                scb->freecache = NULL;
        }
}

/*
 * Clean up the reported SACK block information.
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
        tp->t_flags &= ~(TF_DUPSEG | TF_ENCLOSESEG | TF_SACKLEFT);
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Returns     0 if not D-SACK block,
 *             1 if D-SACK,
 *             2 if duplicate of out-of-order D-SACK block.
 */
int
tcp_sack_ndsack_blocks(struct raw_sackblock *blocks, const int numblocks,
    tcp_seq snd_una)
{
        if (numblocks == 0)
                return 0;

        if (SEQ_LT(blocks[0].rblk_start, snd_una))
                return 1;

        /* block 0 inside block 1 */
        if (numblocks > 1 &&
            SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
            SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
                return 2;

        return 0;
}

/*
 * Update scoreboard on new incoming ACK.
 */
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
        const int numblocks = to->to_nsackblocks;
        struct raw_sackblock *blocks = to->to_sackblocks;
        struct scoreboard *scb = &tp->scb;
        int startblock, i;

        if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
                startblock = 1;
        else
                startblock = 0;

        to->to_flags |= TOF_SACK_REDUNDANT;
        for (i = startblock; i < numblocks; i++) {
                struct raw_sackblock *newsackblock = &blocks[i];
                boolean_t update;
                int error;

                /* don't accept bad SACK blocks */
                if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
                        tcpstat.tcps_rcvbadsackopt++;
                        break;          /* skip all other blocks */
                }
                tcpstat.tcps_sacksbupdate++;

                error = insert_block(scb, newsackblock, &update);
                if (update)
                        to->to_flags &= ~TOF_SACK_REDUNDANT;
                if (error)
                        break;
        }
}

void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
        struct scoreboard *scb = &tp->scb;
        int rexmt_high_update = 0;

        tcp_sack_ack_blocks(scb, tp->snd_una);
        tcp_sack_add_blocks(tp, to);
        update_lostseq(scb, tp->snd_una, tp->t_maxseg, tp->t_rxtthresh);
        if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
                tp->rexmt_high = tp->snd_una;
                rexmt_high_update = 1;
        }
        if (tp->t_flags & TF_SACKRESCUED) {
                if (SEQ_LT(tp->rexmt_rescue, tp->snd_una)) {
                        tp->t_flags &= ~TF_SACKRESCUED;
                } else if (tcp_aggressive_rescuesack && rexmt_high_update &&
                    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
                        /* Drag RescueRxt along with HighRxt */
                        tp->rexmt_rescue = tp->rexmt_high;
                }
        }
}
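
/*
 * Illustrative example of the merging done by insert_block() below (the
 * sequence numbers are made up): starting from the saved blocks
 * [10,20) [30,40) [50,60), a newly reported block [18,52) first extends
 * [10,20) to [10,52); the consolidation pass then removes the fully
 * covered [30,40) and absorbs the partially covered [50,60), leaving the
 * single block [10,60).
 */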

/*
 * Insert SACK block into sender's scoreboard.
 */
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
        struct sackblock *sb, *workingblock;
        boolean_t overlap_front;

        *update = TRUE;
        if (TAILQ_EMPTY(&scb->sackblocks)) {
                struct sackblock *newblock;

                KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

                newblock = alloc_sackblock(scb, raw_sb);
                if (newblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
                scb->nblocks = 1;
                return 0;
        }

        KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
        KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
            ("too many SACK blocks %d", scb->nblocks));

        overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

        if (sb == NULL) {
                workingblock = alloc_sackblock_limit(scb, raw_sb);
                if (workingblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
                ++scb->nblocks;
        } else {
                if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
                        /* Extend old block */
                        workingblock = sb;
                        if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end))
                                sb->sblk_end = raw_sb->rblk_end;
                        else
                                *update = FALSE;
                        tcpstat.tcps_sacksbreused++;
                } else {
                        workingblock = alloc_sackblock_limit(scb, raw_sb);
                        if (workingblock == NULL)
                                return ENOMEM;
                        TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
                            sblk_list);
                        ++scb->nblocks;
                }
        }

        /* Consolidate right-hand side. */
        sb = TAILQ_NEXT(workingblock, sblk_list);
        while (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
                struct sackblock *nextblock;

                nextblock = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                /* Remove completely overlapped block */
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed overlapped block: %d blocks left", scb->nblocks));
                sb = nextblock;
        }
        if (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
                /* Extend new block to cover partially overlapped old block. */
                workingblock->sblk_end = sb->sblk_end;
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed partial right: %d blocks left", scb->nblocks));
        }
        return 0;
}

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
        struct sackblock *sb;

        kprintf("%d blocks:", scb->nblocks);
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
 */
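/*
 * This roughly corresponds to IsLost() from RFC 3517: walking the
 * scoreboard from the highest block downward, a hole is considered lost
 * once at least rxtthresh SACK blocks, or rxtthresh * maxseg SACKed
 * bytes, sit above it.  For example (made-up numbers), with rxtthresh = 3
 * and maxseg = 1000, three separate SACKed segments above a hole, or at
 * least 3000 SACKed bytes above it, push scb->lostseq past that hole.
 */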
static void
update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
        struct sackblock *sb;
        int nsackblocks = 0;
        int bytes_sacked = 0;

        sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        while (sb != NULL) {
                ++nsackblocks;
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (nsackblocks == rxtthresh ||
                    bytes_sacked >= rxtthresh * maxseg) {
                        scb->lostseq = sb->sblk_start;
                        return;
                }
                sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
        }
        scb->lostseq = snd_una;
}

/*
 * Return whether the given sequence number is considered lost.
 */
static boolean_t
scb_islost(struct scoreboard *scb, tcp_seq seqnum)
{
        return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(struct scoreboard *scb, u_int amount)
{
        struct sackblock *sb;
        int bytes_sacked = 0;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (bytes_sacked >= amount)
                        return TRUE;
        }
        return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(struct scoreboard *scb, tcp_seq seq)
{
        struct sackblock *sb;
        int bytes_sacked = 0;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_GT(seq, sb->sblk_start)) {
                bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
                sb = TAILQ_NEXT(sb, sblk_list);
        }
        return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
 */
uint32_t
tcp_sack_compute_pipe(struct tcpcb *tp)
{
        struct scoreboard *scb = &tp->scb;
        struct sackblock *sb;
        int nlost, nretransmitted;
        tcp_seq end;

        nlost = tp->snd_max - scb->lostseq;
        nretransmitted = tp->rexmt_high - tp->snd_una;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
                        end = seq_min(sb->sblk_end, tp->rexmt_high);
                        nretransmitted -= end - sb->sblk_start;
                }
                if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
                        nlost -= sb->sblk_end - sb->sblk_start;
        }

        return (nlost + nretransmitted);
}

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
 */
boolean_t
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
        struct scoreboard *scb = &tp->scb;
        struct socket *so = tp->t_inpcb->inp_socket;
        struct sackblock *sb;
        const struct sackblock *lastblock =
            TAILQ_LAST(&scb->sackblocks, sackblock_list);
        tcp_seq torexmt;
        long len, off;

        /* skip SACKed data */
        tcp_sack_skip_sacked(scb, &tp->rexmt_high);

        /* Look for lost data. */
        torexmt = tp->rexmt_high;
        *rescue = FALSE;
        if (lastblock != NULL) {
                if (SEQ_LT(torexmt, lastblock->sblk_end) &&
                    scb_islost(scb, torexmt)) {
sendunsacked:
                        *nextrexmt = torexmt;
                        /* If the left-hand edge has been SACKed, pull it in. */
                        if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
                                *plen = sb->sblk_start - torexmt;
                        else
                                *plen = tp->t_maxseg;
                        return TRUE;
                }
        }
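
        /*
         * The checks below roughly follow the NextSeg() priority of
         * RFC 3517: lost, unSACKed data first (handled above), then new
         * data if the send window allows, then unSACKed data not yet
         * deemed lost, and finally a rescue retransmission to keep the
         * ACK clock ticking.
         */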

        /* See if unsent data available within send window. */
        off = tp->snd_max - tp->snd_una;
        len = (long) ulmin(so->so_snd.ssb_cc, tp->snd_wnd) - off;
        if (len > 0) {
                *nextrexmt = tp->snd_max;       /* Send new data. */
                *plen = tp->t_maxseg;
                return TRUE;
        }

        /* We're less certain this data has been lost. */
        if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
                goto sendunsacked;

        /* Rescue retransmission */
        if (tcp_do_rescuesack) {
                tcpstat.tcps_sackrescue_try++;
                if (tp->t_flags & TF_SACKRESCUED) {
                        if (!tcp_aggressive_rescuesack)
                                return FALSE;

                        /*
                         * Aggressive variant of the rescue retransmission.
                         *
                         * The idea of the rescue retransmission is to sustain
                         * the ACK clock and thus avoid a timeout
                         * retransmission.
                         *
                         * Under some situations, the conservative approach
                         * suggested in the draft
                         * http://tools.ietf.org/html/
                         * draft-nishida-tcpm-rescue-retransmission-00
                         * cannot sustain the ACK clock, since it only allows
                         * one rescue retransmission before a cumulative ACK
                         * covers the segment transmitted by the rescue
                         * retransmission.
                         *
                         * We try to locate the next unSACKed segment which
                         * follows the previously sent rescue segment.  If
                         * there is no such segment, we loop back to the first
                         * unacknowledged segment.
                         */

                        /*
                         * Skip SACKed data, but here we follow
                         * the last transmitted rescue segment.
                         */
                        torexmt = tp->rexmt_rescue;
                        tcp_sack_skip_sacked(scb, &torexmt);
                        if (torexmt == tp->snd_max) {
                                /* Nothing left to retransmit; restart */
                                torexmt = tp->snd_una;
                        }
                }
                *rescue = TRUE;
                goto sendunsacked;
        } else if (tcp_do_smartsack && lastblock == NULL) {
                tcpstat.tcps_sackrescue_try++;
                *rescue = TRUE;
                goto sendunsacked;
        }

        return FALSE;
}

/*
 * Return the next sequence number higher than "*prexmt" that has
 * not been SACKed.
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
        struct sackblock *sb;

        /* skip SACKed data */
        if (sack_block_lookup(scb, *prexmt, &sb))
                *prexmt = sb->sblk_end;
}

#ifdef later
void
tcp_sack_save_scoreboard(struct scoreboard *scb)
{
        scb->sackblocks_prev = scb->sackblocks;
        TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
    u_int maxseg)
{
        struct sackblock *sb;

        scb->sackblocks = scb->sackblocks_prev;
        scb->nblocks = 0;
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                ++scb->nblocks;
        tcp_sack_ack_blocks(scb, snd_una);
        scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
        int i;
        static int ndumped;

        /* only need a couple of these to debug most problems */
        if (++ndumped > 900)
                return;

        kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
        for (i = 0; i < tp->nsackhistory; ++i)
                kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
                    tp->sackhistory[i].rblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
}
#endif
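
/*
 * Receiver-side SACK reporting.  Because ACKs may themselves be lost,
 * RFC 2018 suggests repeating recently reported, still unacknowledged
 * blocks in later ACKs.  sackhistory[] keeps up to MAX_SACK_REPORT_BLOCKS
 * such blocks, most recently reported first, and is trimmed below as
 * rcv_nxt advances.
 */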

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
        int i, nblocks, openslot;

        tcp_sack_dump_history("before tcp_sack_ack_history", tp);
        nblocks = tp->nsackhistory;
        for (i = openslot = 0; i < nblocks; ++i) {
                if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
                        --tp->nsackhistory;
                        continue;
                }
                if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
                        tp->sackhistory[i].rblk_start = tp->rcv_nxt;
                if (i == openslot)
                        ++openslot;
                else
                        tp->sackhistory[openslot++] = tp->sackhistory[i];
        }
        tcp_sack_dump_history("after tcp_sack_ack_history", tp);
        KASSERT(openslot == tp->nsackhistory,
            ("tcp_sack_ack_history miscounted: %d != %d",
            openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
 */
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
        struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
        int i, cindex;

        tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
        /*
         * Six cases:
         *      0) no overlap
         *      1) newblock == oldblock
         *      2) oldblock contains newblock
         *      3) newblock contains oldblock
         *      4) tail of oldblock overlaps or abuts start of newblock
         *      5) tail of newblock overlaps or abuts head of oldblock
         */
        for (i = cindex = 0; i < tp->nsackhistory; ++i) {
                struct raw_sackblock *oldblock = &tp->sackhistory[i];
                tcp_seq old_start = oldblock->rblk_start;
                tcp_seq old_end = oldblock->rblk_end;

                if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
                        /* Case 0:  no overlap.  Copy old block. */
                        copy[cindex++] = *oldblock;
                        continue;
                }

                if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
                        /* Cases 1 & 2.  Move block to front of history. */
                        int j;

                        start = old_start;
                        end = old_end;
                        /* no need to check rest of blocks */
                        for (j = i + 1; j < tp->nsackhistory; ++j)
                                copy[cindex++] = tp->sackhistory[j];
                        break;
                }

                if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
                        /* Case 4:  extend start of new block. */
                        start = old_start;
                } else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
                        /* Case 5:  extend end of new block */
                        end = old_end;
                } else {
                        /* Case 3.  Delete old block by not copying it. */
                        KASSERT(SEQ_LEQ(start, old_start) &&
                            SEQ_GEQ(end, old_end),
                            ("bad logic: old [%u, %u), new [%u, %u)",
                            old_start, old_end, start, end));
                }
        }

        /* insert new block */
        tp->sackhistory[0].rblk_start = start;
        tp->sackhistory[0].rblk_end = end;
        cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
        for (i = 0; i < cindex; ++i)
                tp->sackhistory[i + 1] = copy[i];
        tp->nsackhistory = cindex + 1;
        tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}
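
/*
 * Layout of the option built below (assuming the usual TCPOPT_SACK_ALIGNED
 * and TCPOLEN_* definitions): the RFC 2018 SACK option, 32-bit aligned by
 * two leading NOPs.  The header word is written last, once the number of
 * blocks (n) is known:
 *
 *      NOP | NOP | kind (5) | length (2 + 8n)
 *      block 0 left edge  (32 bits)
 *      block 0 right edge (32 bits)
 *      ...
 */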

/*
 * Fill in SACK report to return to data sender.
 */
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
        u_int optlen = *plen;
        uint32_t *lp = (uint32_t *)(opt + optlen);
        uint32_t *olp;
        tcp_seq hstart = tp->rcv_nxt, hend;
        int nblocks;

        KASSERT(TCP_MAXOLEN - optlen >=
            TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
            ("no room for SACK header and one block: optlen %d", optlen));

        if (tp->t_flags & TF_DUPSEG)
                tcpstat.tcps_snddsackopt++;
        else
                tcpstat.tcps_sndsackopt++;

        olp = lp++;
        optlen += TCPOLEN_SACK_ALIGNED;

        tcp_sack_ack_history(tp);
        if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
                *lp++ = htonl(tp->reportblk.rblk_start);
                *lp++ = htonl(tp->reportblk.rblk_end);
                optlen += TCPOLEN_SACK_BLOCK;
                hstart = tp->reportblk.rblk_start;
                hend = tp->reportblk.rblk_end;
                if (tp->t_flags & TF_ENCLOSESEG) {
                        KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
                            ("no room for enclosing SACK block: optlen %d",
                            optlen));
                        *lp++ = htonl(tp->encloseblk.rblk_start);
                        *lp++ = htonl(tp->encloseblk.rblk_end);
                        optlen += TCPOLEN_SACK_BLOCK;
                        hstart = tp->encloseblk.rblk_start;
                        hend = tp->encloseblk.rblk_end;
                }
                if (SEQ_GT(hstart, tp->rcv_nxt))
                        tcp_sack_update_reported_history(tp, hstart, hend);
        }
        if (tcp_do_smartsack && (tp->t_flags & TF_SACKLEFT)) {
                /* Fill in from left!  Walk re-assembly queue. */
                struct tseg_qent *q;

                q = LIST_FIRST(&tp->t_segq);
                while (q != NULL &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        *lp++ = htonl(q->tqe_th->th_seq);
                        *lp++ = htonl(TCP_SACK_BLKEND(
                            q->tqe_th->th_seq + q->tqe_len,
                            q->tqe_th->th_flags));
                        optlen += TCPOLEN_SACK_BLOCK;
                        q = LIST_NEXT(q, tqe_q);
                }
        } else {
                int n = 0;

                /* Fill in SACK blocks from right side. */
                while (n < tp->nsackhistory &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        if (tp->sackhistory[n].rblk_start != hstart) {
                                *lp++ = htonl(tp->sackhistory[n].rblk_start);
                                *lp++ = htonl(tp->sackhistory[n].rblk_end);
                                optlen += TCPOLEN_SACK_BLOCK;
                        }
                        ++n;
                }
        }
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
        tp->t_flags &= ~(TF_DUPSEG | TF_ENCLOSESEG | TF_SACKLEFT);
        nblocks = (lp - olp - 1) / 2;
        *olp = htonl(TCPOPT_SACK_ALIGNED |
            (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
        *plen = optlen;
}