1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <sys/proc.h> 38 #include <netinet/sctp_var.h> 39 #include <netinet/sctp_sysctl.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctp_pcb.h> 42 #include <netinet/sctputil.h> 43 #include <netinet/sctp_output.h> 44 #include <netinet/sctp_uio.h> 45 #include <netinet/sctputil.h> 46 #include <netinet/sctp_auth.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_asconf.h> 49 #include <netinet/sctp_indata.h> 50 #include <netinet/sctp_bsd_addr.h> 51 #include <netinet/sctp_input.h> 52 #include <netinet/udp.h> 53 54 55 56 #define SCTP_MAX_GAPS_INARRAY 4 57 struct sack_track { 58 uint8_t right_edge; /* mergable on the right edge */ 59 uint8_t left_edge; /* mergable on the left edge */ 60 uint8_t num_entries; 61 uint8_t spare; 62 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 63 }; 64 65 struct sack_track sack_array[256] = { 66 {0, 0, 0, 0, /* 0x00 */ 67 {{0, 0}, 68 {0, 0}, 69 {0, 0}, 70 {0, 0} 71 } 72 }, 73 {1, 0, 1, 0, /* 0x01 */ 74 {{0, 0}, 75 {0, 0}, 76 {0, 0}, 77 {0, 0} 78 } 79 }, 80 {0, 0, 1, 0, /* 0x02 */ 81 {{1, 1}, 82 {0, 0}, 83 {0, 0}, 84 {0, 0} 85 } 86 }, 87 {1, 0, 1, 0, /* 0x03 */ 88 {{0, 1}, 89 {0, 0}, 90 {0, 0}, 91 {0, 0} 92 } 93 }, 94 {0, 0, 1, 0, /* 0x04 */ 95 {{2, 2}, 96 {0, 0}, 97 {0, 0}, 98 {0, 0} 99 } 100 }, 101 {1, 0, 2, 0, /* 0x05 */ 102 {{0, 0}, 103 {2, 2}, 104 {0, 0}, 105 {0, 0} 106 } 107 }, 108 {0, 0, 1, 0, /* 0x06 */ 109 {{1, 2}, 110 {0, 0}, 111 {0, 0}, 112 {0, 0} 113 } 114 }, 115 {1, 0, 1, 0, /* 0x07 */ 116 {{0, 2}, 117 {0, 0}, 118 {0, 0}, 119 {0, 0} 120 } 121 }, 122 {0, 0, 1, 0, /* 0x08 */ 123 {{3, 3}, 124 {0, 0}, 125 {0, 0}, 126 {0, 0} 127 } 128 }, 129 {1, 0, 2, 0, /* 0x09 */ 130 {{0, 0}, 131 {3, 3}, 132 {0, 0}, 133 {0, 0} 134 } 135 }, 136 {0, 0, 2, 0, /* 0x0a */ 137 {{1, 1}, 138 {3, 3}, 139 {0, 0}, 140 {0, 0} 141 } 142 }, 143 {1, 0, 2, 0, 
/* 0x0b */ 144 {{0, 1}, 145 {3, 3}, 146 {0, 0}, 147 {0, 0} 148 } 149 }, 150 {0, 0, 1, 0, /* 0x0c */ 151 {{2, 3}, 152 {0, 0}, 153 {0, 0}, 154 {0, 0} 155 } 156 }, 157 {1, 0, 2, 0, /* 0x0d */ 158 {{0, 0}, 159 {2, 3}, 160 {0, 0}, 161 {0, 0} 162 } 163 }, 164 {0, 0, 1, 0, /* 0x0e */ 165 {{1, 3}, 166 {0, 0}, 167 {0, 0}, 168 {0, 0} 169 } 170 }, 171 {1, 0, 1, 0, /* 0x0f */ 172 {{0, 3}, 173 {0, 0}, 174 {0, 0}, 175 {0, 0} 176 } 177 }, 178 {0, 0, 1, 0, /* 0x10 */ 179 {{4, 4}, 180 {0, 0}, 181 {0, 0}, 182 {0, 0} 183 } 184 }, 185 {1, 0, 2, 0, /* 0x11 */ 186 {{0, 0}, 187 {4, 4}, 188 {0, 0}, 189 {0, 0} 190 } 191 }, 192 {0, 0, 2, 0, /* 0x12 */ 193 {{1, 1}, 194 {4, 4}, 195 {0, 0}, 196 {0, 0} 197 } 198 }, 199 {1, 0, 2, 0, /* 0x13 */ 200 {{0, 1}, 201 {4, 4}, 202 {0, 0}, 203 {0, 0} 204 } 205 }, 206 {0, 0, 2, 0, /* 0x14 */ 207 {{2, 2}, 208 {4, 4}, 209 {0, 0}, 210 {0, 0} 211 } 212 }, 213 {1, 0, 3, 0, /* 0x15 */ 214 {{0, 0}, 215 {2, 2}, 216 {4, 4}, 217 {0, 0} 218 } 219 }, 220 {0, 0, 2, 0, /* 0x16 */ 221 {{1, 2}, 222 {4, 4}, 223 {0, 0}, 224 {0, 0} 225 } 226 }, 227 {1, 0, 2, 0, /* 0x17 */ 228 {{0, 2}, 229 {4, 4}, 230 {0, 0}, 231 {0, 0} 232 } 233 }, 234 {0, 0, 1, 0, /* 0x18 */ 235 {{3, 4}, 236 {0, 0}, 237 {0, 0}, 238 {0, 0} 239 } 240 }, 241 {1, 0, 2, 0, /* 0x19 */ 242 {{0, 0}, 243 {3, 4}, 244 {0, 0}, 245 {0, 0} 246 } 247 }, 248 {0, 0, 2, 0, /* 0x1a */ 249 {{1, 1}, 250 {3, 4}, 251 {0, 0}, 252 {0, 0} 253 } 254 }, 255 {1, 0, 2, 0, /* 0x1b */ 256 {{0, 1}, 257 {3, 4}, 258 {0, 0}, 259 {0, 0} 260 } 261 }, 262 {0, 0, 1, 0, /* 0x1c */ 263 {{2, 4}, 264 {0, 0}, 265 {0, 0}, 266 {0, 0} 267 } 268 }, 269 {1, 0, 2, 0, /* 0x1d */ 270 {{0, 0}, 271 {2, 4}, 272 {0, 0}, 273 {0, 0} 274 } 275 }, 276 {0, 0, 1, 0, /* 0x1e */ 277 {{1, 4}, 278 {0, 0}, 279 {0, 0}, 280 {0, 0} 281 } 282 }, 283 {1, 0, 1, 0, /* 0x1f */ 284 {{0, 4}, 285 {0, 0}, 286 {0, 0}, 287 {0, 0} 288 } 289 }, 290 {0, 0, 1, 0, /* 0x20 */ 291 {{5, 5}, 292 {0, 0}, 293 {0, 0}, 294 {0, 0} 295 } 296 }, 297 {1, 0, 2, 0, /* 0x21 */ 298 {{0, 0}, 299 {5, 5}, 300 
{0, 0}, 301 {0, 0} 302 } 303 }, 304 {0, 0, 2, 0, /* 0x22 */ 305 {{1, 1}, 306 {5, 5}, 307 {0, 0}, 308 {0, 0} 309 } 310 }, 311 {1, 0, 2, 0, /* 0x23 */ 312 {{0, 1}, 313 {5, 5}, 314 {0, 0}, 315 {0, 0} 316 } 317 }, 318 {0, 0, 2, 0, /* 0x24 */ 319 {{2, 2}, 320 {5, 5}, 321 {0, 0}, 322 {0, 0} 323 } 324 }, 325 {1, 0, 3, 0, /* 0x25 */ 326 {{0, 0}, 327 {2, 2}, 328 {5, 5}, 329 {0, 0} 330 } 331 }, 332 {0, 0, 2, 0, /* 0x26 */ 333 {{1, 2}, 334 {5, 5}, 335 {0, 0}, 336 {0, 0} 337 } 338 }, 339 {1, 0, 2, 0, /* 0x27 */ 340 {{0, 2}, 341 {5, 5}, 342 {0, 0}, 343 {0, 0} 344 } 345 }, 346 {0, 0, 2, 0, /* 0x28 */ 347 {{3, 3}, 348 {5, 5}, 349 {0, 0}, 350 {0, 0} 351 } 352 }, 353 {1, 0, 3, 0, /* 0x29 */ 354 {{0, 0}, 355 {3, 3}, 356 {5, 5}, 357 {0, 0} 358 } 359 }, 360 {0, 0, 3, 0, /* 0x2a */ 361 {{1, 1}, 362 {3, 3}, 363 {5, 5}, 364 {0, 0} 365 } 366 }, 367 {1, 0, 3, 0, /* 0x2b */ 368 {{0, 1}, 369 {3, 3}, 370 {5, 5}, 371 {0, 0} 372 } 373 }, 374 {0, 0, 2, 0, /* 0x2c */ 375 {{2, 3}, 376 {5, 5}, 377 {0, 0}, 378 {0, 0} 379 } 380 }, 381 {1, 0, 3, 0, /* 0x2d */ 382 {{0, 0}, 383 {2, 3}, 384 {5, 5}, 385 {0, 0} 386 } 387 }, 388 {0, 0, 2, 0, /* 0x2e */ 389 {{1, 3}, 390 {5, 5}, 391 {0, 0}, 392 {0, 0} 393 } 394 }, 395 {1, 0, 2, 0, /* 0x2f */ 396 {{0, 3}, 397 {5, 5}, 398 {0, 0}, 399 {0, 0} 400 } 401 }, 402 {0, 0, 1, 0, /* 0x30 */ 403 {{4, 5}, 404 {0, 0}, 405 {0, 0}, 406 {0, 0} 407 } 408 }, 409 {1, 0, 2, 0, /* 0x31 */ 410 {{0, 0}, 411 {4, 5}, 412 {0, 0}, 413 {0, 0} 414 } 415 }, 416 {0, 0, 2, 0, /* 0x32 */ 417 {{1, 1}, 418 {4, 5}, 419 {0, 0}, 420 {0, 0} 421 } 422 }, 423 {1, 0, 2, 0, /* 0x33 */ 424 {{0, 1}, 425 {4, 5}, 426 {0, 0}, 427 {0, 0} 428 } 429 }, 430 {0, 0, 2, 0, /* 0x34 */ 431 {{2, 2}, 432 {4, 5}, 433 {0, 0}, 434 {0, 0} 435 } 436 }, 437 {1, 0, 3, 0, /* 0x35 */ 438 {{0, 0}, 439 {2, 2}, 440 {4, 5}, 441 {0, 0} 442 } 443 }, 444 {0, 0, 2, 0, /* 0x36 */ 445 {{1, 2}, 446 {4, 5}, 447 {0, 0}, 448 {0, 0} 449 } 450 }, 451 {1, 0, 2, 0, /* 0x37 */ 452 {{0, 2}, 453 {4, 5}, 454 {0, 0}, 455 {0, 0} 456 } 457 }, 458 {0, 
0, 1, 0, /* 0x38 */ 459 {{3, 5}, 460 {0, 0}, 461 {0, 0}, 462 {0, 0} 463 } 464 }, 465 {1, 0, 2, 0, /* 0x39 */ 466 {{0, 0}, 467 {3, 5}, 468 {0, 0}, 469 {0, 0} 470 } 471 }, 472 {0, 0, 2, 0, /* 0x3a */ 473 {{1, 1}, 474 {3, 5}, 475 {0, 0}, 476 {0, 0} 477 } 478 }, 479 {1, 0, 2, 0, /* 0x3b */ 480 {{0, 1}, 481 {3, 5}, 482 {0, 0}, 483 {0, 0} 484 } 485 }, 486 {0, 0, 1, 0, /* 0x3c */ 487 {{2, 5}, 488 {0, 0}, 489 {0, 0}, 490 {0, 0} 491 } 492 }, 493 {1, 0, 2, 0, /* 0x3d */ 494 {{0, 0}, 495 {2, 5}, 496 {0, 0}, 497 {0, 0} 498 } 499 }, 500 {0, 0, 1, 0, /* 0x3e */ 501 {{1, 5}, 502 {0, 0}, 503 {0, 0}, 504 {0, 0} 505 } 506 }, 507 {1, 0, 1, 0, /* 0x3f */ 508 {{0, 5}, 509 {0, 0}, 510 {0, 0}, 511 {0, 0} 512 } 513 }, 514 {0, 0, 1, 0, /* 0x40 */ 515 {{6, 6}, 516 {0, 0}, 517 {0, 0}, 518 {0, 0} 519 } 520 }, 521 {1, 0, 2, 0, /* 0x41 */ 522 {{0, 0}, 523 {6, 6}, 524 {0, 0}, 525 {0, 0} 526 } 527 }, 528 {0, 0, 2, 0, /* 0x42 */ 529 {{1, 1}, 530 {6, 6}, 531 {0, 0}, 532 {0, 0} 533 } 534 }, 535 {1, 0, 2, 0, /* 0x43 */ 536 {{0, 1}, 537 {6, 6}, 538 {0, 0}, 539 {0, 0} 540 } 541 }, 542 {0, 0, 2, 0, /* 0x44 */ 543 {{2, 2}, 544 {6, 6}, 545 {0, 0}, 546 {0, 0} 547 } 548 }, 549 {1, 0, 3, 0, /* 0x45 */ 550 {{0, 0}, 551 {2, 2}, 552 {6, 6}, 553 {0, 0} 554 } 555 }, 556 {0, 0, 2, 0, /* 0x46 */ 557 {{1, 2}, 558 {6, 6}, 559 {0, 0}, 560 {0, 0} 561 } 562 }, 563 {1, 0, 2, 0, /* 0x47 */ 564 {{0, 2}, 565 {6, 6}, 566 {0, 0}, 567 {0, 0} 568 } 569 }, 570 {0, 0, 2, 0, /* 0x48 */ 571 {{3, 3}, 572 {6, 6}, 573 {0, 0}, 574 {0, 0} 575 } 576 }, 577 {1, 0, 3, 0, /* 0x49 */ 578 {{0, 0}, 579 {3, 3}, 580 {6, 6}, 581 {0, 0} 582 } 583 }, 584 {0, 0, 3, 0, /* 0x4a */ 585 {{1, 1}, 586 {3, 3}, 587 {6, 6}, 588 {0, 0} 589 } 590 }, 591 {1, 0, 3, 0, /* 0x4b */ 592 {{0, 1}, 593 {3, 3}, 594 {6, 6}, 595 {0, 0} 596 } 597 }, 598 {0, 0, 2, 0, /* 0x4c */ 599 {{2, 3}, 600 {6, 6}, 601 {0, 0}, 602 {0, 0} 603 } 604 }, 605 {1, 0, 3, 0, /* 0x4d */ 606 {{0, 0}, 607 {2, 3}, 608 {6, 6}, 609 {0, 0} 610 } 611 }, 612 {0, 0, 2, 0, /* 0x4e */ 613 {{1, 3}, 614 {6, 
6}, 615 {0, 0}, 616 {0, 0} 617 } 618 }, 619 {1, 0, 2, 0, /* 0x4f */ 620 {{0, 3}, 621 {6, 6}, 622 {0, 0}, 623 {0, 0} 624 } 625 }, 626 {0, 0, 2, 0, /* 0x50 */ 627 {{4, 4}, 628 {6, 6}, 629 {0, 0}, 630 {0, 0} 631 } 632 }, 633 {1, 0, 3, 0, /* 0x51 */ 634 {{0, 0}, 635 {4, 4}, 636 {6, 6}, 637 {0, 0} 638 } 639 }, 640 {0, 0, 3, 0, /* 0x52 */ 641 {{1, 1}, 642 {4, 4}, 643 {6, 6}, 644 {0, 0} 645 } 646 }, 647 {1, 0, 3, 0, /* 0x53 */ 648 {{0, 1}, 649 {4, 4}, 650 {6, 6}, 651 {0, 0} 652 } 653 }, 654 {0, 0, 3, 0, /* 0x54 */ 655 {{2, 2}, 656 {4, 4}, 657 {6, 6}, 658 {0, 0} 659 } 660 }, 661 {1, 0, 4, 0, /* 0x55 */ 662 {{0, 0}, 663 {2, 2}, 664 {4, 4}, 665 {6, 6} 666 } 667 }, 668 {0, 0, 3, 0, /* 0x56 */ 669 {{1, 2}, 670 {4, 4}, 671 {6, 6}, 672 {0, 0} 673 } 674 }, 675 {1, 0, 3, 0, /* 0x57 */ 676 {{0, 2}, 677 {4, 4}, 678 {6, 6}, 679 {0, 0} 680 } 681 }, 682 {0, 0, 2, 0, /* 0x58 */ 683 {{3, 4}, 684 {6, 6}, 685 {0, 0}, 686 {0, 0} 687 } 688 }, 689 {1, 0, 3, 0, /* 0x59 */ 690 {{0, 0}, 691 {3, 4}, 692 {6, 6}, 693 {0, 0} 694 } 695 }, 696 {0, 0, 3, 0, /* 0x5a */ 697 {{1, 1}, 698 {3, 4}, 699 {6, 6}, 700 {0, 0} 701 } 702 }, 703 {1, 0, 3, 0, /* 0x5b */ 704 {{0, 1}, 705 {3, 4}, 706 {6, 6}, 707 {0, 0} 708 } 709 }, 710 {0, 0, 2, 0, /* 0x5c */ 711 {{2, 4}, 712 {6, 6}, 713 {0, 0}, 714 {0, 0} 715 } 716 }, 717 {1, 0, 3, 0, /* 0x5d */ 718 {{0, 0}, 719 {2, 4}, 720 {6, 6}, 721 {0, 0} 722 } 723 }, 724 {0, 0, 2, 0, /* 0x5e */ 725 {{1, 4}, 726 {6, 6}, 727 {0, 0}, 728 {0, 0} 729 } 730 }, 731 {1, 0, 2, 0, /* 0x5f */ 732 {{0, 4}, 733 {6, 6}, 734 {0, 0}, 735 {0, 0} 736 } 737 }, 738 {0, 0, 1, 0, /* 0x60 */ 739 {{5, 6}, 740 {0, 0}, 741 {0, 0}, 742 {0, 0} 743 } 744 }, 745 {1, 0, 2, 0, /* 0x61 */ 746 {{0, 0}, 747 {5, 6}, 748 {0, 0}, 749 {0, 0} 750 } 751 }, 752 {0, 0, 2, 0, /* 0x62 */ 753 {{1, 1}, 754 {5, 6}, 755 {0, 0}, 756 {0, 0} 757 } 758 }, 759 {1, 0, 2, 0, /* 0x63 */ 760 {{0, 1}, 761 {5, 6}, 762 {0, 0}, 763 {0, 0} 764 } 765 }, 766 {0, 0, 2, 0, /* 0x64 */ 767 {{2, 2}, 768 {5, 6}, 769 {0, 0}, 770 {0, 0} 771 } 772 }, 
773 {1, 0, 3, 0, /* 0x65 */ 774 {{0, 0}, 775 {2, 2}, 776 {5, 6}, 777 {0, 0} 778 } 779 }, 780 {0, 0, 2, 0, /* 0x66 */ 781 {{1, 2}, 782 {5, 6}, 783 {0, 0}, 784 {0, 0} 785 } 786 }, 787 {1, 0, 2, 0, /* 0x67 */ 788 {{0, 2}, 789 {5, 6}, 790 {0, 0}, 791 {0, 0} 792 } 793 }, 794 {0, 0, 2, 0, /* 0x68 */ 795 {{3, 3}, 796 {5, 6}, 797 {0, 0}, 798 {0, 0} 799 } 800 }, 801 {1, 0, 3, 0, /* 0x69 */ 802 {{0, 0}, 803 {3, 3}, 804 {5, 6}, 805 {0, 0} 806 } 807 }, 808 {0, 0, 3, 0, /* 0x6a */ 809 {{1, 1}, 810 {3, 3}, 811 {5, 6}, 812 {0, 0} 813 } 814 }, 815 {1, 0, 3, 0, /* 0x6b */ 816 {{0, 1}, 817 {3, 3}, 818 {5, 6}, 819 {0, 0} 820 } 821 }, 822 {0, 0, 2, 0, /* 0x6c */ 823 {{2, 3}, 824 {5, 6}, 825 {0, 0}, 826 {0, 0} 827 } 828 }, 829 {1, 0, 3, 0, /* 0x6d */ 830 {{0, 0}, 831 {2, 3}, 832 {5, 6}, 833 {0, 0} 834 } 835 }, 836 {0, 0, 2, 0, /* 0x6e */ 837 {{1, 3}, 838 {5, 6}, 839 {0, 0}, 840 {0, 0} 841 } 842 }, 843 {1, 0, 2, 0, /* 0x6f */ 844 {{0, 3}, 845 {5, 6}, 846 {0, 0}, 847 {0, 0} 848 } 849 }, 850 {0, 0, 1, 0, /* 0x70 */ 851 {{4, 6}, 852 {0, 0}, 853 {0, 0}, 854 {0, 0} 855 } 856 }, 857 {1, 0, 2, 0, /* 0x71 */ 858 {{0, 0}, 859 {4, 6}, 860 {0, 0}, 861 {0, 0} 862 } 863 }, 864 {0, 0, 2, 0, /* 0x72 */ 865 {{1, 1}, 866 {4, 6}, 867 {0, 0}, 868 {0, 0} 869 } 870 }, 871 {1, 0, 2, 0, /* 0x73 */ 872 {{0, 1}, 873 {4, 6}, 874 {0, 0}, 875 {0, 0} 876 } 877 }, 878 {0, 0, 2, 0, /* 0x74 */ 879 {{2, 2}, 880 {4, 6}, 881 {0, 0}, 882 {0, 0} 883 } 884 }, 885 {1, 0, 3, 0, /* 0x75 */ 886 {{0, 0}, 887 {2, 2}, 888 {4, 6}, 889 {0, 0} 890 } 891 }, 892 {0, 0, 2, 0, /* 0x76 */ 893 {{1, 2}, 894 {4, 6}, 895 {0, 0}, 896 {0, 0} 897 } 898 }, 899 {1, 0, 2, 0, /* 0x77 */ 900 {{0, 2}, 901 {4, 6}, 902 {0, 0}, 903 {0, 0} 904 } 905 }, 906 {0, 0, 1, 0, /* 0x78 */ 907 {{3, 6}, 908 {0, 0}, 909 {0, 0}, 910 {0, 0} 911 } 912 }, 913 {1, 0, 2, 0, /* 0x79 */ 914 {{0, 0}, 915 {3, 6}, 916 {0, 0}, 917 {0, 0} 918 } 919 }, 920 {0, 0, 2, 0, /* 0x7a */ 921 {{1, 1}, 922 {3, 6}, 923 {0, 0}, 924 {0, 0} 925 } 926 }, 927 {1, 0, 2, 0, /* 0x7b */ 928 {{0, 1}, 
929 {3, 6}, 930 {0, 0}, 931 {0, 0} 932 } 933 }, 934 {0, 0, 1, 0, /* 0x7c */ 935 {{2, 6}, 936 {0, 0}, 937 {0, 0}, 938 {0, 0} 939 } 940 }, 941 {1, 0, 2, 0, /* 0x7d */ 942 {{0, 0}, 943 {2, 6}, 944 {0, 0}, 945 {0, 0} 946 } 947 }, 948 {0, 0, 1, 0, /* 0x7e */ 949 {{1, 6}, 950 {0, 0}, 951 {0, 0}, 952 {0, 0} 953 } 954 }, 955 {1, 0, 1, 0, /* 0x7f */ 956 {{0, 6}, 957 {0, 0}, 958 {0, 0}, 959 {0, 0} 960 } 961 }, 962 {0, 1, 1, 0, /* 0x80 */ 963 {{7, 7}, 964 {0, 0}, 965 {0, 0}, 966 {0, 0} 967 } 968 }, 969 {1, 1, 2, 0, /* 0x81 */ 970 {{0, 0}, 971 {7, 7}, 972 {0, 0}, 973 {0, 0} 974 } 975 }, 976 {0, 1, 2, 0, /* 0x82 */ 977 {{1, 1}, 978 {7, 7}, 979 {0, 0}, 980 {0, 0} 981 } 982 }, 983 {1, 1, 2, 0, /* 0x83 */ 984 {{0, 1}, 985 {7, 7}, 986 {0, 0}, 987 {0, 0} 988 } 989 }, 990 {0, 1, 2, 0, /* 0x84 */ 991 {{2, 2}, 992 {7, 7}, 993 {0, 0}, 994 {0, 0} 995 } 996 }, 997 {1, 1, 3, 0, /* 0x85 */ 998 {{0, 0}, 999 {2, 2}, 1000 {7, 7}, 1001 {0, 0} 1002 } 1003 }, 1004 {0, 1, 2, 0, /* 0x86 */ 1005 {{1, 2}, 1006 {7, 7}, 1007 {0, 0}, 1008 {0, 0} 1009 } 1010 }, 1011 {1, 1, 2, 0, /* 0x87 */ 1012 {{0, 2}, 1013 {7, 7}, 1014 {0, 0}, 1015 {0, 0} 1016 } 1017 }, 1018 {0, 1, 2, 0, /* 0x88 */ 1019 {{3, 3}, 1020 {7, 7}, 1021 {0, 0}, 1022 {0, 0} 1023 } 1024 }, 1025 {1, 1, 3, 0, /* 0x89 */ 1026 {{0, 0}, 1027 {3, 3}, 1028 {7, 7}, 1029 {0, 0} 1030 } 1031 }, 1032 {0, 1, 3, 0, /* 0x8a */ 1033 {{1, 1}, 1034 {3, 3}, 1035 {7, 7}, 1036 {0, 0} 1037 } 1038 }, 1039 {1, 1, 3, 0, /* 0x8b */ 1040 {{0, 1}, 1041 {3, 3}, 1042 {7, 7}, 1043 {0, 0} 1044 } 1045 }, 1046 {0, 1, 2, 0, /* 0x8c */ 1047 {{2, 3}, 1048 {7, 7}, 1049 {0, 0}, 1050 {0, 0} 1051 } 1052 }, 1053 {1, 1, 3, 0, /* 0x8d */ 1054 {{0, 0}, 1055 {2, 3}, 1056 {7, 7}, 1057 {0, 0} 1058 } 1059 }, 1060 {0, 1, 2, 0, /* 0x8e */ 1061 {{1, 3}, 1062 {7, 7}, 1063 {0, 0}, 1064 {0, 0} 1065 } 1066 }, 1067 {1, 1, 2, 0, /* 0x8f */ 1068 {{0, 3}, 1069 {7, 7}, 1070 {0, 0}, 1071 {0, 0} 1072 } 1073 }, 1074 {0, 1, 2, 0, /* 0x90 */ 1075 {{4, 4}, 1076 {7, 7}, 1077 {0, 0}, 1078 {0, 0} 1079 } 1080 }, 
1081 {1, 1, 3, 0, /* 0x91 */ 1082 {{0, 0}, 1083 {4, 4}, 1084 {7, 7}, 1085 {0, 0} 1086 } 1087 }, 1088 {0, 1, 3, 0, /* 0x92 */ 1089 {{1, 1}, 1090 {4, 4}, 1091 {7, 7}, 1092 {0, 0} 1093 } 1094 }, 1095 {1, 1, 3, 0, /* 0x93 */ 1096 {{0, 1}, 1097 {4, 4}, 1098 {7, 7}, 1099 {0, 0} 1100 } 1101 }, 1102 {0, 1, 3, 0, /* 0x94 */ 1103 {{2, 2}, 1104 {4, 4}, 1105 {7, 7}, 1106 {0, 0} 1107 } 1108 }, 1109 {1, 1, 4, 0, /* 0x95 */ 1110 {{0, 0}, 1111 {2, 2}, 1112 {4, 4}, 1113 {7, 7} 1114 } 1115 }, 1116 {0, 1, 3, 0, /* 0x96 */ 1117 {{1, 2}, 1118 {4, 4}, 1119 {7, 7}, 1120 {0, 0} 1121 } 1122 }, 1123 {1, 1, 3, 0, /* 0x97 */ 1124 {{0, 2}, 1125 {4, 4}, 1126 {7, 7}, 1127 {0, 0} 1128 } 1129 }, 1130 {0, 1, 2, 0, /* 0x98 */ 1131 {{3, 4}, 1132 {7, 7}, 1133 {0, 0}, 1134 {0, 0} 1135 } 1136 }, 1137 {1, 1, 3, 0, /* 0x99 */ 1138 {{0, 0}, 1139 {3, 4}, 1140 {7, 7}, 1141 {0, 0} 1142 } 1143 }, 1144 {0, 1, 3, 0, /* 0x9a */ 1145 {{1, 1}, 1146 {3, 4}, 1147 {7, 7}, 1148 {0, 0} 1149 } 1150 }, 1151 {1, 1, 3, 0, /* 0x9b */ 1152 {{0, 1}, 1153 {3, 4}, 1154 {7, 7}, 1155 {0, 0} 1156 } 1157 }, 1158 {0, 1, 2, 0, /* 0x9c */ 1159 {{2, 4}, 1160 {7, 7}, 1161 {0, 0}, 1162 {0, 0} 1163 } 1164 }, 1165 {1, 1, 3, 0, /* 0x9d */ 1166 {{0, 0}, 1167 {2, 4}, 1168 {7, 7}, 1169 {0, 0} 1170 } 1171 }, 1172 {0, 1, 2, 0, /* 0x9e */ 1173 {{1, 4}, 1174 {7, 7}, 1175 {0, 0}, 1176 {0, 0} 1177 } 1178 }, 1179 {1, 1, 2, 0, /* 0x9f */ 1180 {{0, 4}, 1181 {7, 7}, 1182 {0, 0}, 1183 {0, 0} 1184 } 1185 }, 1186 {0, 1, 2, 0, /* 0xa0 */ 1187 {{5, 5}, 1188 {7, 7}, 1189 {0, 0}, 1190 {0, 0} 1191 } 1192 }, 1193 {1, 1, 3, 0, /* 0xa1 */ 1194 {{0, 0}, 1195 {5, 5}, 1196 {7, 7}, 1197 {0, 0} 1198 } 1199 }, 1200 {0, 1, 3, 0, /* 0xa2 */ 1201 {{1, 1}, 1202 {5, 5}, 1203 {7, 7}, 1204 {0, 0} 1205 } 1206 }, 1207 {1, 1, 3, 0, /* 0xa3 */ 1208 {{0, 1}, 1209 {5, 5}, 1210 {7, 7}, 1211 {0, 0} 1212 } 1213 }, 1214 {0, 1, 3, 0, /* 0xa4 */ 1215 {{2, 2}, 1216 {5, 5}, 1217 {7, 7}, 1218 {0, 0} 1219 } 1220 }, 1221 {1, 1, 4, 0, /* 0xa5 */ 1222 {{0, 0}, 1223 {2, 2}, 1224 {5, 5}, 1225 {7, 
7} 1226 } 1227 }, 1228 {0, 1, 3, 0, /* 0xa6 */ 1229 {{1, 2}, 1230 {5, 5}, 1231 {7, 7}, 1232 {0, 0} 1233 } 1234 }, 1235 {1, 1, 3, 0, /* 0xa7 */ 1236 {{0, 2}, 1237 {5, 5}, 1238 {7, 7}, 1239 {0, 0} 1240 } 1241 }, 1242 {0, 1, 3, 0, /* 0xa8 */ 1243 {{3, 3}, 1244 {5, 5}, 1245 {7, 7}, 1246 {0, 0} 1247 } 1248 }, 1249 {1, 1, 4, 0, /* 0xa9 */ 1250 {{0, 0}, 1251 {3, 3}, 1252 {5, 5}, 1253 {7, 7} 1254 } 1255 }, 1256 {0, 1, 4, 0, /* 0xaa */ 1257 {{1, 1}, 1258 {3, 3}, 1259 {5, 5}, 1260 {7, 7} 1261 } 1262 }, 1263 {1, 1, 4, 0, /* 0xab */ 1264 {{0, 1}, 1265 {3, 3}, 1266 {5, 5}, 1267 {7, 7} 1268 } 1269 }, 1270 {0, 1, 3, 0, /* 0xac */ 1271 {{2, 3}, 1272 {5, 5}, 1273 {7, 7}, 1274 {0, 0} 1275 } 1276 }, 1277 {1, 1, 4, 0, /* 0xad */ 1278 {{0, 0}, 1279 {2, 3}, 1280 {5, 5}, 1281 {7, 7} 1282 } 1283 }, 1284 {0, 1, 3, 0, /* 0xae */ 1285 {{1, 3}, 1286 {5, 5}, 1287 {7, 7}, 1288 {0, 0} 1289 } 1290 }, 1291 {1, 1, 3, 0, /* 0xaf */ 1292 {{0, 3}, 1293 {5, 5}, 1294 {7, 7}, 1295 {0, 0} 1296 } 1297 }, 1298 {0, 1, 2, 0, /* 0xb0 */ 1299 {{4, 5}, 1300 {7, 7}, 1301 {0, 0}, 1302 {0, 0} 1303 } 1304 }, 1305 {1, 1, 3, 0, /* 0xb1 */ 1306 {{0, 0}, 1307 {4, 5}, 1308 {7, 7}, 1309 {0, 0} 1310 } 1311 }, 1312 {0, 1, 3, 0, /* 0xb2 */ 1313 {{1, 1}, 1314 {4, 5}, 1315 {7, 7}, 1316 {0, 0} 1317 } 1318 }, 1319 {1, 1, 3, 0, /* 0xb3 */ 1320 {{0, 1}, 1321 {4, 5}, 1322 {7, 7}, 1323 {0, 0} 1324 } 1325 }, 1326 {0, 1, 3, 0, /* 0xb4 */ 1327 {{2, 2}, 1328 {4, 5}, 1329 {7, 7}, 1330 {0, 0} 1331 } 1332 }, 1333 {1, 1, 4, 0, /* 0xb5 */ 1334 {{0, 0}, 1335 {2, 2}, 1336 {4, 5}, 1337 {7, 7} 1338 } 1339 }, 1340 {0, 1, 3, 0, /* 0xb6 */ 1341 {{1, 2}, 1342 {4, 5}, 1343 {7, 7}, 1344 {0, 0} 1345 } 1346 }, 1347 {1, 1, 3, 0, /* 0xb7 */ 1348 {{0, 2}, 1349 {4, 5}, 1350 {7, 7}, 1351 {0, 0} 1352 } 1353 }, 1354 {0, 1, 2, 0, /* 0xb8 */ 1355 {{3, 5}, 1356 {7, 7}, 1357 {0, 0}, 1358 {0, 0} 1359 } 1360 }, 1361 {1, 1, 3, 0, /* 0xb9 */ 1362 {{0, 0}, 1363 {3, 5}, 1364 {7, 7}, 1365 {0, 0} 1366 } 1367 }, 1368 {0, 1, 3, 0, /* 0xba */ 1369 {{1, 1}, 1370 {3, 5}, 1371 
{7, 7}, 1372 {0, 0} 1373 } 1374 }, 1375 {1, 1, 3, 0, /* 0xbb */ 1376 {{0, 1}, 1377 {3, 5}, 1378 {7, 7}, 1379 {0, 0} 1380 } 1381 }, 1382 {0, 1, 2, 0, /* 0xbc */ 1383 {{2, 5}, 1384 {7, 7}, 1385 {0, 0}, 1386 {0, 0} 1387 } 1388 }, 1389 {1, 1, 3, 0, /* 0xbd */ 1390 {{0, 0}, 1391 {2, 5}, 1392 {7, 7}, 1393 {0, 0} 1394 } 1395 }, 1396 {0, 1, 2, 0, /* 0xbe */ 1397 {{1, 5}, 1398 {7, 7}, 1399 {0, 0}, 1400 {0, 0} 1401 } 1402 }, 1403 {1, 1, 2, 0, /* 0xbf */ 1404 {{0, 5}, 1405 {7, 7}, 1406 {0, 0}, 1407 {0, 0} 1408 } 1409 }, 1410 {0, 1, 1, 0, /* 0xc0 */ 1411 {{6, 7}, 1412 {0, 0}, 1413 {0, 0}, 1414 {0, 0} 1415 } 1416 }, 1417 {1, 1, 2, 0, /* 0xc1 */ 1418 {{0, 0}, 1419 {6, 7}, 1420 {0, 0}, 1421 {0, 0} 1422 } 1423 }, 1424 {0, 1, 2, 0, /* 0xc2 */ 1425 {{1, 1}, 1426 {6, 7}, 1427 {0, 0}, 1428 {0, 0} 1429 } 1430 }, 1431 {1, 1, 2, 0, /* 0xc3 */ 1432 {{0, 1}, 1433 {6, 7}, 1434 {0, 0}, 1435 {0, 0} 1436 } 1437 }, 1438 {0, 1, 2, 0, /* 0xc4 */ 1439 {{2, 2}, 1440 {6, 7}, 1441 {0, 0}, 1442 {0, 0} 1443 } 1444 }, 1445 {1, 1, 3, 0, /* 0xc5 */ 1446 {{0, 0}, 1447 {2, 2}, 1448 {6, 7}, 1449 {0, 0} 1450 } 1451 }, 1452 {0, 1, 2, 0, /* 0xc6 */ 1453 {{1, 2}, 1454 {6, 7}, 1455 {0, 0}, 1456 {0, 0} 1457 } 1458 }, 1459 {1, 1, 2, 0, /* 0xc7 */ 1460 {{0, 2}, 1461 {6, 7}, 1462 {0, 0}, 1463 {0, 0} 1464 } 1465 }, 1466 {0, 1, 2, 0, /* 0xc8 */ 1467 {{3, 3}, 1468 {6, 7}, 1469 {0, 0}, 1470 {0, 0} 1471 } 1472 }, 1473 {1, 1, 3, 0, /* 0xc9 */ 1474 {{0, 0}, 1475 {3, 3}, 1476 {6, 7}, 1477 {0, 0} 1478 } 1479 }, 1480 {0, 1, 3, 0, /* 0xca */ 1481 {{1, 1}, 1482 {3, 3}, 1483 {6, 7}, 1484 {0, 0} 1485 } 1486 }, 1487 {1, 1, 3, 0, /* 0xcb */ 1488 {{0, 1}, 1489 {3, 3}, 1490 {6, 7}, 1491 {0, 0} 1492 } 1493 }, 1494 {0, 1, 2, 0, /* 0xcc */ 1495 {{2, 3}, 1496 {6, 7}, 1497 {0, 0}, 1498 {0, 0} 1499 } 1500 }, 1501 {1, 1, 3, 0, /* 0xcd */ 1502 {{0, 0}, 1503 {2, 3}, 1504 {6, 7}, 1505 {0, 0} 1506 } 1507 }, 1508 {0, 1, 2, 0, /* 0xce */ 1509 {{1, 3}, 1510 {6, 7}, 1511 {0, 0}, 1512 {0, 0} 1513 } 1514 }, 1515 {1, 1, 2, 0, /* 0xcf */ 1516 {{0, 3}, 
1517 {6, 7}, 1518 {0, 0}, 1519 {0, 0} 1520 } 1521 }, 1522 {0, 1, 2, 0, /* 0xd0 */ 1523 {{4, 4}, 1524 {6, 7}, 1525 {0, 0}, 1526 {0, 0} 1527 } 1528 }, 1529 {1, 1, 3, 0, /* 0xd1 */ 1530 {{0, 0}, 1531 {4, 4}, 1532 {6, 7}, 1533 {0, 0} 1534 } 1535 }, 1536 {0, 1, 3, 0, /* 0xd2 */ 1537 {{1, 1}, 1538 {4, 4}, 1539 {6, 7}, 1540 {0, 0} 1541 } 1542 }, 1543 {1, 1, 3, 0, /* 0xd3 */ 1544 {{0, 1}, 1545 {4, 4}, 1546 {6, 7}, 1547 {0, 0} 1548 } 1549 }, 1550 {0, 1, 3, 0, /* 0xd4 */ 1551 {{2, 2}, 1552 {4, 4}, 1553 {6, 7}, 1554 {0, 0} 1555 } 1556 }, 1557 {1, 1, 4, 0, /* 0xd5 */ 1558 {{0, 0}, 1559 {2, 2}, 1560 {4, 4}, 1561 {6, 7} 1562 } 1563 }, 1564 {0, 1, 3, 0, /* 0xd6 */ 1565 {{1, 2}, 1566 {4, 4}, 1567 {6, 7}, 1568 {0, 0} 1569 } 1570 }, 1571 {1, 1, 3, 0, /* 0xd7 */ 1572 {{0, 2}, 1573 {4, 4}, 1574 {6, 7}, 1575 {0, 0} 1576 } 1577 }, 1578 {0, 1, 2, 0, /* 0xd8 */ 1579 {{3, 4}, 1580 {6, 7}, 1581 {0, 0}, 1582 {0, 0} 1583 } 1584 }, 1585 {1, 1, 3, 0, /* 0xd9 */ 1586 {{0, 0}, 1587 {3, 4}, 1588 {6, 7}, 1589 {0, 0} 1590 } 1591 }, 1592 {0, 1, 3, 0, /* 0xda */ 1593 {{1, 1}, 1594 {3, 4}, 1595 {6, 7}, 1596 {0, 0} 1597 } 1598 }, 1599 {1, 1, 3, 0, /* 0xdb */ 1600 {{0, 1}, 1601 {3, 4}, 1602 {6, 7}, 1603 {0, 0} 1604 } 1605 }, 1606 {0, 1, 2, 0, /* 0xdc */ 1607 {{2, 4}, 1608 {6, 7}, 1609 {0, 0}, 1610 {0, 0} 1611 } 1612 }, 1613 {1, 1, 3, 0, /* 0xdd */ 1614 {{0, 0}, 1615 {2, 4}, 1616 {6, 7}, 1617 {0, 0} 1618 } 1619 }, 1620 {0, 1, 2, 0, /* 0xde */ 1621 {{1, 4}, 1622 {6, 7}, 1623 {0, 0}, 1624 {0, 0} 1625 } 1626 }, 1627 {1, 1, 2, 0, /* 0xdf */ 1628 {{0, 4}, 1629 {6, 7}, 1630 {0, 0}, 1631 {0, 0} 1632 } 1633 }, 1634 {0, 1, 1, 0, /* 0xe0 */ 1635 {{5, 7}, 1636 {0, 0}, 1637 {0, 0}, 1638 {0, 0} 1639 } 1640 }, 1641 {1, 1, 2, 0, /* 0xe1 */ 1642 {{0, 0}, 1643 {5, 7}, 1644 {0, 0}, 1645 {0, 0} 1646 } 1647 }, 1648 {0, 1, 2, 0, /* 0xe2 */ 1649 {{1, 1}, 1650 {5, 7}, 1651 {0, 0}, 1652 {0, 0} 1653 } 1654 }, 1655 {1, 1, 2, 0, /* 0xe3 */ 1656 {{0, 1}, 1657 {5, 7}, 1658 {0, 0}, 1659 {0, 0} 1660 } 1661 }, 1662 {0, 1, 2, 0, /* 0xe4 
*/ 1663 {{2, 2}, 1664 {5, 7}, 1665 {0, 0}, 1666 {0, 0} 1667 } 1668 }, 1669 {1, 1, 3, 0, /* 0xe5 */ 1670 {{0, 0}, 1671 {2, 2}, 1672 {5, 7}, 1673 {0, 0} 1674 } 1675 }, 1676 {0, 1, 2, 0, /* 0xe6 */ 1677 {{1, 2}, 1678 {5, 7}, 1679 {0, 0}, 1680 {0, 0} 1681 } 1682 }, 1683 {1, 1, 2, 0, /* 0xe7 */ 1684 {{0, 2}, 1685 {5, 7}, 1686 {0, 0}, 1687 {0, 0} 1688 } 1689 }, 1690 {0, 1, 2, 0, /* 0xe8 */ 1691 {{3, 3}, 1692 {5, 7}, 1693 {0, 0}, 1694 {0, 0} 1695 } 1696 }, 1697 {1, 1, 3, 0, /* 0xe9 */ 1698 {{0, 0}, 1699 {3, 3}, 1700 {5, 7}, 1701 {0, 0} 1702 } 1703 }, 1704 {0, 1, 3, 0, /* 0xea */ 1705 {{1, 1}, 1706 {3, 3}, 1707 {5, 7}, 1708 {0, 0} 1709 } 1710 }, 1711 {1, 1, 3, 0, /* 0xeb */ 1712 {{0, 1}, 1713 {3, 3}, 1714 {5, 7}, 1715 {0, 0} 1716 } 1717 }, 1718 {0, 1, 2, 0, /* 0xec */ 1719 {{2, 3}, 1720 {5, 7}, 1721 {0, 0}, 1722 {0, 0} 1723 } 1724 }, 1725 {1, 1, 3, 0, /* 0xed */ 1726 {{0, 0}, 1727 {2, 3}, 1728 {5, 7}, 1729 {0, 0} 1730 } 1731 }, 1732 {0, 1, 2, 0, /* 0xee */ 1733 {{1, 3}, 1734 {5, 7}, 1735 {0, 0}, 1736 {0, 0} 1737 } 1738 }, 1739 {1, 1, 2, 0, /* 0xef */ 1740 {{0, 3}, 1741 {5, 7}, 1742 {0, 0}, 1743 {0, 0} 1744 } 1745 }, 1746 {0, 1, 1, 0, /* 0xf0 */ 1747 {{4, 7}, 1748 {0, 0}, 1749 {0, 0}, 1750 {0, 0} 1751 } 1752 }, 1753 {1, 1, 2, 0, /* 0xf1 */ 1754 {{0, 0}, 1755 {4, 7}, 1756 {0, 0}, 1757 {0, 0} 1758 } 1759 }, 1760 {0, 1, 2, 0, /* 0xf2 */ 1761 {{1, 1}, 1762 {4, 7}, 1763 {0, 0}, 1764 {0, 0} 1765 } 1766 }, 1767 {1, 1, 2, 0, /* 0xf3 */ 1768 {{0, 1}, 1769 {4, 7}, 1770 {0, 0}, 1771 {0, 0} 1772 } 1773 }, 1774 {0, 1, 2, 0, /* 0xf4 */ 1775 {{2, 2}, 1776 {4, 7}, 1777 {0, 0}, 1778 {0, 0} 1779 } 1780 }, 1781 {1, 1, 3, 0, /* 0xf5 */ 1782 {{0, 0}, 1783 {2, 2}, 1784 {4, 7}, 1785 {0, 0} 1786 } 1787 }, 1788 {0, 1, 2, 0, /* 0xf6 */ 1789 {{1, 2}, 1790 {4, 7}, 1791 {0, 0}, 1792 {0, 0} 1793 } 1794 }, 1795 {1, 1, 2, 0, /* 0xf7 */ 1796 {{0, 2}, 1797 {4, 7}, 1798 {0, 0}, 1799 {0, 0} 1800 } 1801 }, 1802 {0, 1, 1, 0, /* 0xf8 */ 1803 {{3, 7}, 1804 {0, 0}, 1805 {0, 0}, 1806 {0, 0} 1807 } 1808 }, 1809 {1, 
1, 2, 0,	/* 0xf9 */
    {{0, 0},
    {3, 7},
    {0, 0},
    {0, 0}
    }
},
{0, 1, 2, 0,	/* 0xfa */
    {{1, 1},
    {3, 7},
    {0, 0},
    {0, 0}
    }
},
{1, 1, 2, 0,	/* 0xfb */
    {{0, 1},
    {3, 7},
    {0, 0},
    {0, 0}
    }
},
{0, 1, 1, 0,	/* 0xfc */
    {{2, 7},
    {0, 0},
    {0, 0},
    {0, 0}
    }
},
{1, 1, 2, 0,	/* 0xfd */
    {{0, 0},
    {2, 7},
    {0, 0},
    {0, 0}
    }
},
{0, 1, 1, 0,	/* 0xfe */
    {{1, 7},
    {0, 0},
    {0, 0},
    {0, 0}
    }
},
{1, 1, 1, 0,	/* 0xff */
    {{0, 7},
    {0, 0},
    {0, 0},
    {0, 0}
    }
}
};
/*
 * End of sack_array: each entry describes, for one byte value of a SACK
 * mapping array, the runs of set bits expressed as gap-ack blocks (e.g.
 * 0x05 -> {0,0},{2,2}: bits 0 and 2 set).  right_edge/left_edge mark
 * whether bit 0 / bit 7 is set, i.e. whether a run may merge with the
 * neighboring byte.
 */


/*
 * Decide whether the address 'ifa' may be used/listed under the given
 * scoping flags.  Returns 1 if the address is in scope, 0 if not.
 *
 * Rules visible below:
 *  - loopback interfaces pass only when loopback_scope is set;
 *  - IPv4: requires ipv4_addr_legal; the unspecified address (0.0.0.0)
 *    never passes; private addresses additionally require ipv4_local_scope;
 *  - IPv6: requires ipv6_addr_legal; unusable, unspecified and link-local
 *    addresses never pass; site-local addresses require site_scope;
 *  - do_update != 0 refreshes ifa->localifa_flags from the kernel (IPv6
 *    path only) before the SCTP_ADDR_IFA_UNUSEABLE check;
 *  - any other address family is rejected.
 *
 * NOTE(review): the local_scope parameter is currently unused -- the
 * link-local rejection below is unconditional (its scope test is
 * commented out).
 */
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
    if ((loopback_scope == 0) &&
        (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
        /*
         * skip loopback if not in scope *
         */
        return (0);
    }
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        if (ipv4_addr_legal) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            if (sin->sin_addr.s_addr == 0) {
                /* not in scope , unspecified */
                return (0);
            }
            if ((ipv4_local_scope == 0) &&
                (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                /* private address not in scope */
                return (0);
            }
        } else {
            return (0);
        }
        break;
#ifdef INET6
    case AF_INET6:
        if (ipv6_addr_legal) {
            struct sockaddr_in6 *sin6;

            /*
             * Must update the flags,  bummer, which means any
             * IFA locks must now be applied HERE <->
             */
            if (do_update) {
                sctp_gather_internal_ifa_flags(ifa);
            }
            if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
                return (0);
            }
            /* ok to use deprecated addresses? */
            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                /* skip unspecifed addresses */
                return (0);
            }
            if ( /* (local_scope == 0) && */
                (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
                return (0);
            }
            if ((site_scope == 0) &&
                (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                return (0);
            }
        } else {
            return (0);
        }
        break;
#endif
    default:
        return (0);
    }
    return (1);
}

/*
 * Append an address parameter TLV (SCTP_IPV4_ADDRESS or SCTP_IPV6_ADDRESS)
 * describing 'ifa' to the mbuf 'm'.  The parameter is written into the
 * trailing space of 'm' when it fits; otherwise a fresh mbuf is allocated
 * and linked to the end of the chain.  Returns the mbuf that now holds the
 * parameter (callers pass that back in to append further parameters); on
 * allocation failure or unknown address family the original 'm' is
 * returned unchanged.
 *
 * For IPv6, any embedded scope id is cleared from the copied address
 * before it goes on the wire.
 *
 * NOTE(review): the trailing-space test and the parmh write target 'm'
 * itself, not the tail of the chain -- callers appear to always pass the
 * current tail as 'm'; confirm before reusing elsewhere.
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
    struct sctp_paramhdr *parmh;
    struct mbuf *mret;
    int len;

    if (ifa->address.sa.sa_family == AF_INET) {
        len = sizeof(struct sctp_ipv4addr_param);
    } else if (ifa->address.sa.sa_family == AF_INET6) {
        len = sizeof(struct sctp_ipv6addr_param);
    } else {
        /* unknown type */
        return (m);
    }
    if (M_TRAILINGSPACE(m) >= len) {
        /* easy side we just drop it on the end */
        parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
        mret = m;
    } else {
        /* Need more space */
        mret = m;
        while (SCTP_BUF_NEXT(mret) != NULL) {
            mret = SCTP_BUF_NEXT(mret);
        }
        SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
        if (SCTP_BUF_NEXT(mret) == NULL) {
            /* We are hosed, can't add more addresses */
            return (m);
        }
        mret = SCTP_BUF_NEXT(mret);
        parmh = mtod(mret, struct sctp_paramhdr *);
    }
    /* now add the parameter */
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        {
            struct sctp_ipv4addr_param *ipv4p;
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            ipv4p = (struct sctp_ipv4addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV4_ADDRESS);
            parmh->param_length = htons(len);
            ipv4p->addr = sin->sin_addr.s_addr;
            SCTP_BUF_LEN(mret) += len;
            break;
        }
#ifdef INET6
    case AF_INET6:
        {
            struct sctp_ipv6addr_param *ipv6p;
            struct sockaddr_in6 *sin6;

            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            ipv6p = (struct sctp_ipv6addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV6_ADDRESS);
            parmh->param_length = htons(len);
            memcpy(ipv6p->addr, &sin6->sin6_addr,
                sizeof(ipv6p->addr));
            /* clear embedded scope in the address */
            in6_clearscope((struct in6_addr *)ipv6p->addr);
            SCTP_BUF_LEN(mret) += len;
            break;
        }
#endif
    default:
        return (m);
    }
    return (mret);
}


/*
 * Append this endpoint's usable addresses to the INIT/INIT-ACK mbuf chain
 * ending at 'm_at'; returns the (possibly new) tail mbuf.
 *
 * Two cases, both under SCTP_IPI_ADDR_RLOCK:
 *  - bound-all endpoint: walk every ifn/ifa in the endpoint's VRF, filter
 *    through sctp_is_address_in_scope(), and append via
 *    sctp_add_addr_to_mbuf().  If the VRF holds more than SCTP_COUNT_LIMIT
 *    addresses, limiting kicks in: at most two addresses per interface,
 *    SCTP_ADDRESS_LIMIT overall.
 *  - explicitly bound endpoint: walk inp->sctp_addr_list, skipping
 *    addresses being deleted (system-wide SCTP_BEING_DELETED or per-ep
 *    SCTP_DEL_IP_ADDRESS) and out-of-scope ones.
 *
 * In both cases addresses are only listed when more than one would be
 * included (cnt > 1): with a single bound address we stay silent and let
 * the source address of the INIT dictate our address (helps NAT
 * traversal -- see comment below).  'cnt_inits_to' pre-seeds the count.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
    struct sctp_vrf *vrf = NULL;
    int cnt, limit_out = 0, total_count;
    uint32_t vrf_id;

    vrf_id = inp->def_vrf_id;
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTP_IPI_ADDR_RUNLOCK();
        return (m_at);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        struct sctp_ifa *sctp_ifap;
        struct sctp_ifn *sctp_ifnp;

        /* First pass: count in-scope addresses (do_update=1). */
        cnt = cnt_inits_to;
        if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
            limit_out = 1;
            cnt = SCTP_ADDRESS_LIMIT;
            goto skip_count;
        }
        LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
            if ((scope->loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                /*
                 * Skip loopback devices if loopback_scope
                 * not set
                 */
                continue;
            }
            LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                if (sctp_is_address_in_scope(sctp_ifap,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 1) == 0) {
                    continue;
                }
                cnt++;
                if (cnt > SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
            if (cnt > SCTP_ADDRESS_LIMIT) {
                break;
            }
        }
skip_count:
        /* Second pass: actually emit the parameters (do_update=0). */
        if (cnt > 1) {
            total_count = 0;
            LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
                cnt = 0;	/* per-interface counter */
                if ((scope->loopback_scope == 0) &&
                    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                    /*
                     * Skip loopback devices if
                     * loopback_scope not set
                     */
                    continue;
                }
                LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                    if (sctp_is_address_in_scope(sctp_ifap,
                        scope->ipv4_addr_legal,
                        scope->ipv6_addr_legal,
                        scope->loopback_scope,
                        scope->ipv4_local_scope,
                        scope->local_scope,
                        scope->site_scope, 0) == 0) {
                        continue;
                    }
                    m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
                    if (limit_out) {
                        cnt++;
                        total_count++;
                        if (cnt >= 2) {
                            /*
                             * two from each
                             * address
                             */
                            break;
                        }
                        /*
                         * NOTE(review): this only breaks the
                         * inner loop; the outer LIST_FOREACH
                         * continues with the next ifn, so
                         * total_count can exceed the limit --
                         * confirm intended.
                         */
                        if (total_count > SCTP_ADDRESS_LIMIT) {
                            /* No more addresses */
                            break;
                        }
                    }
                }
            }
        }
    } else {
        struct sctp_laddr *laddr;

        cnt = cnt_inits_to;
        /* First, how many ? */
        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa == NULL) {
                continue;
            }
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                /*
                 * Address being deleted by the system, dont
                 * list.
                 */
                continue;
            if (laddr->action == SCTP_DEL_IP_ADDRESS) {
                /*
                 * Address being deleted on this ep don't
                 * list.
                 */
                continue;
            }
            if (sctp_is_address_in_scope(laddr->ifa,
                scope->ipv4_addr_legal,
                scope->ipv6_addr_legal,
                scope->loopback_scope,
                scope->ipv4_local_scope,
                scope->local_scope,
                scope->site_scope, 1) == 0) {
                continue;
            }
            cnt++;
        }
        if (cnt > SCTP_ADDRESS_LIMIT) {
            limit_out = 1;
        }
        /*
         * To get through a NAT we only list addresses if we have
         * more than one. That way if you just bind a single address
         * we let the source of the init dictate our address.
         */
        if (cnt > 1) {
            LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
                /*
                 * NOTE(review): cnt is reset on every
                 * iteration, so the SCTP_ADDRESS_LIMIT
                 * break below can never fire; the reset
                 * looks like it belongs before the loop --
                 * confirm against upstream.
                 */
                cnt = 0;
                if (laddr->ifa == NULL) {
                    continue;
                }
                if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                    continue;

                if (sctp_is_address_in_scope(laddr->ifa,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 0) == 0) {
                    continue;
                }
                m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
                cnt++;
                if (cnt >= SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (m_at);
}

static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /* dest_is_priv is true if destination is a private address */
    /* dest_is_loop is true if destination is a loopback addresses */

    /*
     * Here we determine if its a preferred address. A preferred address
     * means it is the same scope or higher scope then the destination.
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *    src    |  dest  |  result
	 * -----------------------------------------
	 *     L     |    L   |  yes
	 * -----------------------------------------
	 *     P     |    L   |  yes-v4 no-v6
	 * -----------------------------------------
	 *     G     |    L   |  yes-v4 no-v6
	 * -----------------------------------------
	 *     L     |    P   |  no
	 * -----------------------------------------
	 *     P     |    P   |  yes
	 * -----------------------------------------
	 *     G     |    P   |  no
	 * -----------------------------------------
	 *     L     |    G   |  no
	 * -----------------------------------------
	 *     P     |    G   |  no
	 * -----------------------------------------
	 *     G     |    G   |  yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no lets not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		/* a v6 link-local source is never preferred for loopback */
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		/* a v6 global source is never preferred for loopback */
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* its a preferred address */
	return (ifa);
}

/*
 * Like sctp_is_ifa_addr_preferred() but with a looser policy: an
 * acceptable address may be of a lower scope than the destination when
 * that combination can still work through a NAT (private src to global
 * dest).  Returns 'ifa' if acceptable, NULL otherwise.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;


	/*
	 * Here we determine if its a acceptable address. A acceptable
	 * address means it is the same scope or higher scope but we can
	 * allow for NAT which means its ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *    src    |  dest  |  result
	 * -----------------------------------------
	 *     L     |    L   |  yes
	 * -----------------------------------------
	 *     P     |    L   |  yes-v4 no-v6
	 * -----------------------------------------
	 *     G     |    L   |  yes
	 * -----------------------------------------
	 *     L     |    P   |  no
	 * -----------------------------------------
	 *     P     |    P   |  yes
	 * -----------------------------------------
	 *     G     |    P   |  yes - May not work
	 * -----------------------------------------
	 *     L     |    G   |  no
	 * -----------------------------------------
	 *     P     |    G   |  yes - May not work
	 * -----------------------------------------
	 *     G     |    G   |  yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget non matching family */
		return (NULL);
	}
	/* Ok the address may be ok */
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		dest_is_global = 1;
	}
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop)
				return (NULL);
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		return (NULL);
	}
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		return (NULL);
	}
	/* its an acceptable address */
	return (ifa);
}

/*
 * Return 1 if 'ifa' is on the association's restricted-address list
 * (addresses we must not source from), 0 otherwise.  A NULL stcb means
 * no association and therefore no restrictions.
 */
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if (laddr->ifa == ifa) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}


/*
 * Return 1 if 'ifa' is bound to endpoint 'inp' and not pending any
 * add/delete action (laddr->action == 0), 0 otherwise.
 */
int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (ifa == NULL)
		return (0);
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if ((laddr->ifa == ifa) && laddr->action == 0)
			/* same pointer */
			return (1);
	}
	return (0);
}



/*
 * Source-address selection for a subset-bound endpoint with no
 * association (no stcb).  Prefers an address on the emit interface,
 * then rotates through the endpoint's bound list (via inp->next_addr_touse)
 * looking first for a preferred, then an acceptable address.  The
 * returned ifa has its refcount bumped; NULL if nothing usable.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t * ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a preferred
	 * address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses. We
	 * can't get one on the emitting interface so let's find first a
	 * preferred one. If not that an acceptable one otherwise... we
	 * return NULL.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		/* wrapped; restart the rotation at the head of the list */
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		/* haven't scanned from the head yet; do the second half */
		inp->next_addr_touse = NULL;
		goto once_again;
	}
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}
	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}



/*
 * Source-address selection for a subset-bound endpoint WITH an
 * association.  Like sctp_choose_boundspecific_inp() but additionally
 * honors the association's restricted/pending address lists and rotates
 * via stcb->asoc.last_used_address.  The returned ifa has its refcount
 * bumped; NULL if nothing usable.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list?  If so,
	 * we want that one. First we look for a preferred. Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}

	}
	/*
	 * if we can't find one like that then we must look at all addresses
	 * bound to pick one at first preferable then secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		/* wrap around and scan from the list head */
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}

/*
 * For a bound-all endpoint: return the 'addr_wanted'-th (0-based)
 * preferred address on interface 'ifn' that passes scope, link-local
 * scope-id, mobility next-hop and restriction checks.  NOTE: the
 * returned ifa's refcount is NOT bumped here; callers do that.
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t * ro
)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

#ifdef INET6
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
		(void)sa6_recoverscope(&sin6);
	}
#endif				/* INET6 */
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
#ifdef INET6
		if (fam == AF_INET6 &&
		    dest_is_loop &&
		    sifa->src_is_loop && sifa->src_is_priv) {
			/*
			 * don't allow fe80::1 to be a src on loop ::1, we
			 * don't list it to the peer so we will get an
			 * abort.
			 */
			continue;
		}
		if (fam == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
			/*
			 * link-local <-> link-local must belong to the same
			 * scope.
			 */
			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
			(void)sa6_recoverscope(&lsa6);
			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
				continue;
			}
		}
#endif				/* INET6 */

		/*
		 * Check if the IPv6 address matches to next-hop. In the
		 * mobile case, old IPv6 address may be not deleted from the
		 * interface. Then, the interface has previous and new
		 * addresses. We should use one corresponding to the
		 * next-hop. (by micchie)
		 */
#ifdef INET6
		if (stcb && fam == AF_INET6 &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
			    == 0) {
				continue;
			}
		}
#endif
		/* Avoid topologically incorrect IPv4 address */
		if (stcb && fam == AF_INET &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
				continue;
			}
		}
		if (stcb) {
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			/* this is the nth eligible address - take it */
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}


/*
 * Count how many preferred, non-restricted addresses interface 'ifn'
 * offers for this destination scope/family.  Used by
 * sctp_choose_boundall() to size its round-robin rotation.
 */
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0)) {
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL) {
			continue;
		}
		if (stcb) {
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		num_eligible_addr++;
	}
	return (num_eligible_addr);
}

/*
 * Source-address selection for a bound-all endpoint.  Tries, in order:
 *   Plan A: a preferred address on the emit interface (round-robin via
 *           net->indx_of_eligible_next_to_use);
 *   Plan B: a preferred address on any other interface in the VRF;
 *   Plan C: an acceptable address on the emit interface;
 *   Plan D: an acceptable address on any interface.
 * The returned ifa has its refcount bumped; NULL means no usable source.
 */
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	int cur_addr_num = 0, num_preferred = 0;
	void *ifn;
	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint32_t ifn_index;
	struct sctp_vrf *vrf;

	/*-
	 * For boundall we can use any address in the association.
	 * If non_asoc_addr_ok is set we can use any address (at least in
	 * theory). So we look for preferred addresses first. If we find one,
	 * we use it. Otherwise we next try to get an address on the
	 * interface, which we should be able to do (unless non_asoc_addr_ok
	 * is false and we are routed out that way). In these cases where we
	 * can't use the address of the interface we go through all the
	 * ifn's looking for an address we can use and fill that in. Punting
	 * means we send back address 0, which will probably cause problems
	 * actually since then IP will fill in the address of the route ifn,
	 * which means we probably already rejected it.. i.e. here comes an
	 * abort :-<.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	if (sctp_ifn == NULL) {
		/* ?? We don't have this guy ?? */
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
		goto bound_all_plan_b;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
	    ifn_index, sctp_ifn->ifn_name);

	if (net) {
		cur_addr_num = net->indx_of_eligible_next_to_use;
	}
	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
	    stcb,
	    non_asoc_addr_ok,
	    dest_is_loop,
	    dest_is_priv, fam);
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
	    num_preferred, sctp_ifn->ifn_name);
	if (num_preferred == 0) {
		/*
		 * no eligible addresses, we must use some other interface
		 * address if we can find one.
		 */
		goto bound_all_plan_b;
	}
	/*
	 * Ok we have num_eligible_addr set with how many we can use, this
	 * may vary from call to call due to addresses being deprecated
	 * etc..
	 */
	if (cur_addr_num >= num_preferred) {
		cur_addr_num = 0;
	}
	/*
	 * select the nth address from the list (where cur_addr_num is the
	 * nth) and 0 is the first one, 1 is the second one etc...
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);

	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
	    dest_is_priv, cur_addr_num, fam, ro);

	/* if sctp_ifa is NULL something changed??, fall to plan b. */
	if (sctp_ifa) {
		atomic_add_int(&sctp_ifa->refcount, 1);
		if (net) {
			/* save off where the next one we will want */
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
		}
		return (sctp_ifa);
	}
	/*
	 * plan_b: Look at all interfaces and find a preferred address. If
	 * no preferred fall through to plan_c.
	 */
bound_all_plan_b:
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
		    sctp_ifn->ifn_name);
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at) {
			/* already looked at this guy */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
			continue;
		}
		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
		    dest_is_loop, dest_is_priv, fam);
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "Found ifn:%p %d preferred source addresses\n",
		    ifn, num_preferred);
		if (num_preferred == 0) {
			/* None on this interface. */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
			continue;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "num preferred:%d on interface:%p cur_addr_num:%d\n",
		    num_preferred, sctp_ifn, cur_addr_num);

		/*
		 * Ok we have num_eligible_addr set with how many we can
		 * use, this may vary from call to call due to addresses
		 * being deprecated etc..
		 */
		if (cur_addr_num >= num_preferred) {
			cur_addr_num = 0;
		}
		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
		    dest_is_priv, cur_addr_num, fam, ro);
		if (sifa == NULL)
			continue;
		if (net) {
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
			    cur_addr_num);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);

	}

	/* plan_c: do we have an acceptable address on the emit interface */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
	if (emit_ifn == NULL) {
		goto plan_d;
	}
	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (stcb) {
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
plan_d:
	/*
	 * plan_d: We are in trouble. No preferred address on the emit
	 * interface. And not even a preferred address on all interfaces. Go
	 * out and see if we can find an acceptable address somewhere
	 * amongst all interfaces.
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at)
			/* already looked at this guy */
			continue;

		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (stcb) {
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/*
					 * It is restricted for some
					 * reason.. probably not yet added.
					 */
					continue;
				}
			}
			atomic_add_int(&sifa->refcount, 1);
			return (sifa);
		}
	}
	/*
	 * Ok we can find NO address to source from that is not on our
	 * restricted list and non_asoc_address is NOT ok, or it is on our
	 * restricted list. We can't source to it :-(
	 */
	return (NULL);
}



/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t * ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;

#ifdef INET6
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;

#endif
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;

	/*-
	 * Rules: - Find the route if needed, cache if I can. - Look at
	 * interface address in route, Is it in the bound list. If so we
	 * have the best source. - If not we must rotate amongst the
	 * addresses.
	 *
	 * Cavets and issues
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V4
	 *------------------------------------------
	 *      source     *      dest  *  result
	 * -----------------------------------------
	 * <a>  Private    *    Global  *  NAT
	 * -----------------------------------------
	 * <b>  Private    *    Private *  No problem
	 * -----------------------------------------
	 * <c>  Global     *    Private *  Huh, How will this work?
3062 * ----------------------------------------- 3063 * <d> Global * Global * No Problem 3064 *------------------------------------------ 3065 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3066 * For V6 3067 *------------------------------------------ 3068 * source * dest * result 3069 * ----------------------------------------- 3070 * <a> Linklocal * Global * 3071 * ----------------------------------------- 3072 * <b> Linklocal * Linklocal * No problem 3073 * ----------------------------------------- 3074 * <c> Global * Linklocal * Huh, How will this work? 3075 * ----------------------------------------- 3076 * <d> Global * Global * No Problem 3077 *------------------------------------------ 3078 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3079 * 3080 * And then we add to that what happens if there are multiple addresses 3081 * assigned to an interface. Remember the ifa on a ifn is a linked 3082 * list of addresses. So one interface can have more than one IP 3083 * address. What happens if we have both a private and a global 3084 * address? Do we then use context of destination to sort out which 3085 * one is best? And what about NAT's sending P->G may get you a NAT 3086 * translation, or should you select the G thats on the interface in 3087 * preference. 3088 * 3089 * Decisions: 3090 * 3091 * - count the number of addresses on the interface. 3092 * - if it is one, no problem except case <c>. 3093 * For <a> we will assume a NAT out there. 3094 * - if there are more than one, then we need to worry about scope P 3095 * or G. We should prefer G -> G and P -> P if possible. 3096 * Then as a secondary fall back to mixed types G->P being a last 3097 * ditch one. 3098 * - The above all works for bound all, but bound specific we need to 3099 * use the same concept but instead only consider the bound 3100 * addresses. If the bound set is NOT assigned to the interface then 3101 * we must use rotation amongst the bound addresses.. 
3102 */ 3103 if (ro->ro_rt == NULL) { 3104 /* 3105 * Need a route to cache. 3106 */ 3107 SCTP_RTALLOC(ro, vrf_id); 3108 } 3109 if (ro->ro_rt == NULL) { 3110 return (NULL); 3111 } 3112 fam = to->sin_family; 3113 dest_is_priv = dest_is_loop = 0; 3114 /* Setup our scopes for the destination */ 3115 switch (fam) { 3116 case AF_INET: 3117 /* Scope based on outbound address */ 3118 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 3119 dest_is_loop = 1; 3120 if (net != NULL) { 3121 /* mark it as local */ 3122 net->addr_is_local = 1; 3123 } 3124 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 3125 dest_is_priv = 1; 3126 } 3127 break; 3128 #ifdef INET6 3129 case AF_INET6: 3130 /* Scope based on outbound address */ 3131 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || 3132 SCTP_ROUTE_IS_REAL_LOOP(ro)) { 3133 /* 3134 * If the address is a loopback address, which 3135 * consists of "::1" OR "fe80::1%lo0", we are 3136 * loopback scope. But we don't use dest_is_priv 3137 * (link local addresses). 3138 */ 3139 dest_is_loop = 1; 3140 if (net != NULL) { 3141 /* mark it as local */ 3142 net->addr_is_local = 1; 3143 } 3144 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 3145 dest_is_priv = 1; 3146 } 3147 break; 3148 #endif 3149 } 3150 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:"); 3151 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to); 3152 SCTP_IPI_ADDR_RLOCK(); 3153 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3154 /* 3155 * Bound all case 3156 */ 3157 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id, 3158 dest_is_priv, dest_is_loop, 3159 non_asoc_addr_ok, fam); 3160 SCTP_IPI_ADDR_RUNLOCK(); 3161 return (answer); 3162 } 3163 /* 3164 * Subset bound case 3165 */ 3166 if (stcb) { 3167 answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro, 3168 vrf_id, dest_is_priv, 3169 dest_is_loop, 3170 non_asoc_addr_ok, fam); 3171 } else { 3172 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, 3173 non_asoc_addr_ok, 3174 dest_is_priv, 3175 dest_is_loop, fam); 
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}

/*
 * Scan the control-message mbuf chain for an IPPROTO_SCTP cmsg of type
 * c_type and copy its first cpsize payload bytes into 'data'.
 *
 * Returns 1 when a matching cmsg with at least cpsize payload bytes was
 * found and copied out, 0 otherwise (not present, truncated, or too
 * small).  The chain is walked by byte offset with m_copydata(), so it
 * works regardless of how the control data is split across mbufs.
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more header - we are done */
			return (0);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (((int)cmh.cmsg_len + at) > tlen) {
			/*
			 * cmsg_len claims more data than the control chain
			 * actually holds; the input is malformed, give up.
			 */
			return (0);
		}
		/*
		 * NOTE(review): cmsg_len is not checked to be at least
		 * sizeof(struct cmsghdr) before it is used below; a
		 * too-small cmsg_len would make the payload-size check
		 * underflow / mis-advance 'at'.  Presumably callers only
		 * hand us kernel-built control data - verify.
		 */
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			at += CMSG_ALIGN(cmh.cmsg_len);
			if (cmh.cmsg_len == 0) {
				/* zero-length cmsg: stop, or we would spin forever */
				break;
			}
		}
	}
	/* not found */
	return (0);
}

/*
 * Build the STATE-COOKIE parameter used in an INIT-ACK.
 *
 * Layout of the returned mbuf chain:
 *   [param header | state cookie (stc_in)] -> copy of the received INIT
 *   -> copy of our INIT-ACK -> signature mbuf (zeroed).
 *
 * '*signature' is pointed at the zeroed signature bytes so the caller
 * can compute and store the HMAC afterwards; ph->param_length is set to
 * the total cookie size at the end.  Returns NULL on any allocation
 * failure (all partial allocations are freed).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = NULL;
	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	/* Duplicate the peer's INIT chunk starting at init_offset. */
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_init;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	/* Duplicate our INIT-ACK chunk starting at initack_offset. */
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_initack;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	m_at = mret;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}

	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}

	/* walk to the tail; m_at is left pointing at the last mbuf */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	/*
	 * NOTE(review): the mbuf is sized with SCTP_SECRET_SIZE but the
	 * zeroing/accounting below uses SCTP_SIGNATURE_SIZE - presumably
	 * SCTP_SECRET_SIZE >= SCTP_SIGNATURE_SIZE; confirm in sctp_constants.h.
	 */
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	/* hand the (still zero) signature area back to the caller */
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}


/*
 * Select the ECT codepoint for an outgoing chunk.
 *
 * Returns 0 when ECN is disabled; SCTP_ECT0_BIT when ECN-nonce is off,
 * the peer lacks nonce support, or no chunk is supplied.  Otherwise a
 * buffered pseudo-random bit stream (asoc.hb_random_values, refilled
 * from sctp_select_initial_TSN and consumed one bit at a time via
 * hb_random_idx/hb_ect_randombit) picks ECT0 or ECT1, recording the
 * nonce bit in the chunk.
 */
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	uint8_t this_random;

	/* Huh? */
	if (sctp_ecn_enable == 0)
		return (0);

	if (sctp_ecn_nonce == 0)
		/* no nonce, always return ECT0 */
		return (SCTP_ECT0_BIT);

	if (stcb->asoc.peer_supports_ecn_nonce == 0) {
		/* Peer does NOT support it, so we send a ECT0 only */
		return (SCTP_ECT0_BIT);
	}
	if (chk == NULL)
		return (SCTP_ECT0_BIT);

	/* random pool exhausted (4 bytes x 8 bits consumed)?  refill. */
	if ((stcb->asoc.hb_random_idx > 3) ||
	    ((stcb->asoc.hb_random_idx == 3) &&
	    (stcb->asoc.hb_ect_randombit > 7))) {
		uint32_t rndval;

warp_drive_sa:
		rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		memcpy(stcb->asoc.hb_random_values, &rndval,
		    sizeof(stcb->asoc.hb_random_values));
		this_random = stcb->asoc.hb_random_values[0];
		stcb->asoc.hb_random_idx = 0;
		stcb->asoc.hb_ect_randombit = 0;
	} else {
		if (stcb->asoc.hb_ect_randombit > 7) {
			/* current byte exhausted; move to the next one */
			stcb->asoc.hb_ect_randombit = 0;
			stcb->asoc.hb_random_idx++;
			if (stcb->asoc.hb_random_idx > 3) {
				goto warp_drive_sa;
			}
		}
		this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
	}
	if ((this_random
	    >> stcb->asoc.hb_ect_randombit) & 0x01) {
		if (chk != NULL)
			/* ECN Nonce stuff: remember which nonce bit we sent */
			chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT1_BIT);
	} else {
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT0_BIT);
	}
}

/*
 * Low-level transmit for one SCTP packet.
 *
 * 'm' holds the packet starting at the SCTP common header (no IP
 * header).  stcb/net may be NULL (OOTB replies).  'port' non-zero
 * selects UDP encapsulation of the tunneled SCTP packet.  Returns 0 or
 * an errno; on every error path 'm' is freed here.
 */
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok,
    uint16_t port,
    int so_locked,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    union sctp_sockstore *over_addr
)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/*
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
	 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
	 * structure: - fill in the HMAC digest of any AUTH chunk in the
	 * packet. - calculate and fill in the SCTP checksum. - prepend an
	 * IP address header. - if boundall use INADDR_ANY. - if
	 * boundspecific do source address selection. - set fragmentation
	 * option for ipV4. - On return from IP output, check/adjust mtu
	 * size of output interface and smallest_mtu size as well.
	 */
	/* Will need ifdefs around this */
	struct mbuf *o_pak;
	struct mbuf *newm;
	struct sctphdr *sctphdr;
	int packet_length;
	uint32_t csum;
	int ret;
	uint32_t vrf_id;
	sctp_route_t *ro = NULL;
	struct udphdr *udp;

	/* never send to a destination we have scoped out */
	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		sctp_m_freem(m);
		return (EFAULT);
	}
	if (stcb) {
		vrf_id = stcb->asoc.vrf_id;
	} else {
		vrf_id = inp->def_vrf_id;
	}

	/* fill in the HMAC digest for any AUTH chunk in the packet */
	if ((auth != NULL) && (stcb != NULL)) {
		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
	}
	/* Calculate the csum and fill in the length of the packet */
	sctphdr = mtod(m, struct sctphdr *);
	if (sctp_no_csum_on_loopback &&
	    (stcb) &&
	    (to->sa_family == AF_INET) &&
	    (stcb->asoc.loopback_scope)) {
		/* loopback shortcut: skip the checksum, just get the length */
		sctphdr->checksum = 0;
		/*
		 * This can probably now be taken out since my audit shows
		 * no more bad pktlen's coming in. But we will wait a while
		 * yet.
		 */
		packet_length = sctp_calculate_len(m);
	} else {
		sctphdr->checksum = 0;
		csum = sctp_calculate_sum(m, &packet_length, 0);
		sctphdr->checksum = csum;
	}

	if (to->sa_family == AF_INET) {
		struct ip *ip = NULL;
		sctp_route_t iproute;
		uint8_t tos_value;

		/* prepend room for the IP (and optionally UDP) header */
		if (port) {
			newm = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
		} else {
			newm = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
		}
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		if (port) {
			SCTP_ALIGN_TO_END(newm, sizeof(struct ip) + sizeof(struct udphdr));
			SCTP_BUF_LEN(newm) = sizeof(struct ip) + sizeof(struct udphdr);
			packet_length += sizeof(struct ip) + sizeof(struct udphdr);
		} else {
			SCTP_ALIGN_TO_END(newm, sizeof(struct ip));
			SCTP_BUF_LEN(newm) = sizeof(struct ip);
			packet_length += sizeof(struct ip);
		}
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		if (net) {
			tos_value = net->tos_flowlabel & 0x000000ff;
		} else {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		/* DF only for plain SCTP; never for UDP-encapsulated packets */
		if ((nofragment_flag) && (port == 0)) {
#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
			ip->ip_off = IP_DF;
#else
			ip->ip_off = htons(IP_DF);
#endif
		} else
			ip->ip_off = 0;

		/* FreeBSD has a function for ip_id's */
		ip->ip_id = ip_newid();

		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
		ip->ip_len = packet_length;
		if (stcb) {
			if ((stcb->asoc.ecn_allowed) && ecn_ok) {
				/* Enable ECN: low 2 TOS bits carry the ECT codepoint */
				ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
			} else {
				/* No ECN */
				ip->ip_tos = (u_char)(tos_value & 0xfc);
			}
		} else {
			/* no association at all */
			ip->ip_tos = (tos_value & 0xfc);
		}
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		ip->ip_sum = 0;
		if (net == NULL) {
			/* no cached route available: use a stack-local one */
			ro = &iproute;
			memset(&iproute, 0, sizeof(iproute));
			memcpy(&ro->ro_dst, to, to->sa_len);
		} else {
			ro = (sctp_route_t *) & net->ro;
		}
		/* Now the address selection part */
		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

		/* call the routine to select the src address */
		if (net && out_of_asoc_ok == 0) {
			/* drop a cached source ifa that has become unusable */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
			}
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp, stcb,
				    ro, net, 0,
				    vrf_id);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				/* No route to host */
				net->src_addr_selected = 0;
				goto no_route;
			}
			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
		} else {
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (_lsrc == NULL) {
					goto no_route;
				}
				ip->ip_src = _lsrc->address.sin.sin_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* caller forced the source address */
				ip->ip_src = over_addr->sin.sin_addr;
				SCTP_RTALLOC((&ro->ro_rt), vrf_id);
			}
		}
		if (port) {
			/* UDP encapsulation header sits right after the IP header */
			udp = (struct udphdr *)(ip + 1);
			udp->uh_sport = htons(sctp_udp_tunneling_port);
			udp->uh_dport = port;
			udp->uh_ulen = htons(packet_length - sizeof(struct ip));
			udp->uh_sum = 0;
		}
		/*
		 * If source address selection fails and we find no route
		 * then the ip_output should fail as well with a
		 * NO_ROUTE_TO_HOST type error. We probably should catch
		 * that somewhere and abort the association right away
		 * (assuming this is an INIT being sent).
		 */
		if ((ro->ro_rt == NULL)) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here (yet)!  NOTE: this label is also the error
			 * target for the IPv6 branch below.
			 */
	no_route:
			SCTPDBG(SCTP_DEBUG_OUTPUT1,
			    "%s: dropped packet - no valid source addr\n",
			    __FUNCTION__);
			if (net) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1,
				    "Destination was ");
				SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
				    &net->ro._l_addr.sa);
				if (net->dest_state & SCTP_ADDR_CONFIRMED) {
					if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
						/* first failure: mark the net down and tell the ULP */
						SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
						    stcb,
						    SCTP_FAILED_THRESHOLD,
						    (void *)net,
						    so_locked);
						net->dest_state &= ~SCTP_ADDR_REACHABLE;
						net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
						/*
						 * JRS 5/14/07 - If a
						 * destination is
						 * unreachable, the PF bit
						 * is turned off. This
						 * allows an unambiguous use
						 * of the PF bit for
						 * destinations that are
						 * reachable but potentially
						 * failed. If the
						 * destination is set to the
						 * unreachable state, also
						 * set the destination to
						 * the PF state.
						 */
						/*
						 * Add debug message here if
						 * destination is not in PF
						 * state.
						 */
						/*
						 * Stop any running T3
						 * timers here?
						 */
						if (sctp_cmt_on_off && sctp_cmt_pf) {
							net->dest_state &= ~SCTP_ADDR_PF;
							SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
							    net);
						}
					}
				}
				if (stcb) {
					if (net == stcb->asoc.primary_destination) {
						/* need a new primary */
						struct sctp_nets *alt;

						alt = sctp_find_alternate_net(stcb, net, 0);
						if (alt != net) {
							if (sctp_set_primary_addr(stcb,
							    (struct sockaddr *)NULL,
							    alt) == 0) {
								net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
								if (net->ro._s_addr) {
									sctp_free_ifa(net->ro._s_addr);
									net->ro._s_addr = NULL;
								}
								net->src_addr_selected = 0;
							}
						}
					}
				}
			}
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
			sctp_m_freem(m);
			return (EHOSTUNREACH);
		}
		if (ro != &iproute) {
			memcpy(&iproute, ro, sizeof(*ro));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
		    (uint32_t) (ntohl(ip->ip_src.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
		    (uint32_t) (ntohl(ip->ip_dst.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
		    ro->ro_rt);

		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			sctp_m_freem(m);
			return (ENOMEM);
		}
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(m, packet_length);
#endif
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);

		/* send it out.
 table id is taken from stcb */
		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);

		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret)
			SCTP_STAT_INCR(sctps_senderrors);

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
		if (net == NULL) {
			/* free tempy routes */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if ((ro->ro_rt != NULL) &&
			    (net->ro._s_addr)) {
				uint32_t mtu;

				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
				if (mtu &&
				    (stcb->asoc.smallest_mtu > mtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
					SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n",
					    mtu);
#endif
					/* route reports a smaller MTU: shrink the association */
					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
					net->mtu = mtu;
				}
			} else if (ro->ro_rt == NULL) {
				/* route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
		}
		return (ret);
	}
#ifdef INET6
	else if (to->sa_family == AF_INET6) {
		uint32_t flowlabel;
		struct ip6_hdr *ip6h;
		struct route_in6 ip6route;
		struct ifnet *ifp;
		u_char flowTop;
		uint16_t flowBottom;
		u_char tosBottom, tosTop;
		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
		int prev_scope = 0;
		struct sockaddr_in6 lsa6_storage;
		int error;
		u_short prev_port = 0;

		if (net != NULL) {
			flowlabel = net->tos_flowlabel;
		} else {
			flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
		}

		/* prepend room for the IPv6 (and optionally UDP) header */
		if (port) {
			newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
		} else {
			newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
		}
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		if (port) {
			SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr) + sizeof(struct udphdr);
			packet_length += sizeof(struct ip6_hdr) + sizeof(struct udphdr);
		} else {
			SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr));
			SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr);
			packet_length += sizeof(struct ip6_hdr);
		}
		SCTP_BUF_NEXT(newm) = m;
		m = newm;

		ip6h = mtod(m, struct ip6_hdr *);
		/*
		 * We assume here that inp_flow is in host byte order within
		 * the TCB!
		 */
		flowBottom = flowlabel & 0x0000ffff;
		flowTop = ((flowlabel & 0x000f0000) >> 16);
		tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
		/* protect *sin6 from overwrite */
		sin6 = (struct sockaddr_in6 *)to;
		tmp = *sin6;
		sin6 = &tmp;

		/* KAME hack: embed scopeid */
		if (sa6_embedscope(sin6, ip6_use_defzone) != 0) {
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
			return (EINVAL);
		}
		if (net == NULL) {
			/* no cached route: use a stack-local one */
			memset(&ip6route, 0, sizeof(ip6route));
			ro = (sctp_route_t *) & ip6route;
			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
		} else {
			ro = (sctp_route_t *) & net->ro;
		}
		if (stcb != NULL) {
			if ((stcb->asoc.ecn_allowed) && ecn_ok) {
				/* Enable ECN */
				tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
			} else {
				/* No ECN */
				tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
			}
		} else {
			/* we could get no asoc if it is a O-O-T-B packet */
			tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
		}
		ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
		if (port) {
			ip6h->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6h->ip6_nxt = IPPROTO_SCTP;
		}
		/*
		 * NOTE(review): ip6_plen is stored without htons() here,
		 * unlike udp->uh_ulen below - presumably the output macro
		 * or ip6_output fixes the byte order; verify.
		 */
		ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
		ip6h->ip6_dst = sin6->sin6_addr;

		/*
		 * Add SRC address selection here: we can only reuse to a
		 * limited degree the kame src-addr-sel, since we can try
		 * their selection but it may not be bound.
		 */
		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
		lsa6_tmp.sin6_family = AF_INET6;
		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
		lsa6 = &lsa6_tmp;
		if (net && out_of_asoc_ok == 0) {
			/* drop a cached source ifa that has become unusable */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
			}
			if (net->src_addr_selected == 0) {
				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
				/* KAME hack: embed scopeid */
				if (sa6_embedscope(sin6, ip6_use_defzone) != 0) {
					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
					return (EINVAL);
				}
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    ro,
				    net,
				    0,
				    vrf_id);
				(void)sa6_recoverscope(sin6);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
				net->src_addr_selected = 0;
				/* no_route label lives in the IPv4 branch above */
				goto no_route;
			}
			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
		} else {
			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
			/* KAME hack: embed scopeid */
			if (sa6_embedscope(sin6, ip6_use_defzone) != 0) {
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
				return (EINVAL);
			}
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (_lsrc == NULL) {
					goto no_route;
				}
				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* caller forced the source address */
				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
				SCTP_RTALLOC((&ro->ro_rt), vrf_id);
			}
			(void)sa6_recoverscope(sin6);
		}
		lsa6->sin6_port = inp->sctp_lport;

		if (ro->ro_rt == NULL) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here!
			 */
			goto no_route;
		}
		/*
		 * XXX: sa6 may not have a valid sin6_scope_id in the
		 * non-SCOPEDROUTING case.
		 */
		bzero(&lsa6_storage, sizeof(lsa6_storage));
		lsa6_storage.sin6_family = AF_INET6;
		lsa6_storage.sin6_len = sizeof(lsa6_storage);
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
			sctp_m_freem(m);
			return (error);
		}
		/* XXX */
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		lsa6_storage.sin6_port = inp->sctp_lport;
		lsa6 = &lsa6_storage;
		ip6h->ip6_src = lsa6->sin6_addr;

		if (port) {
			/* UDP encapsulation header sits right after the IPv6 header */
			udp = (struct udphdr *)(ip6h + 1);
			udp->uh_sport = htons(sctp_udp_tunneling_port);
			udp->uh_dport = port;
			udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
			udp->uh_sum = 0;
		}
		/*
		 * We set the hop limit now since there is a good chance
		 * that our ro pointer is now filled
		 */
		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);

#ifdef SCTP_DEBUG
		/* Copy to be sure something bad is not happening */
		sin6->sin6_addr = ip6h->ip6_dst;
		lsa6->sin6_addr = ip6h->ip6_src;
#endif

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
		if (net) {
			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
			/* preserve the port and scope for link local send */
			prev_scope = sin6->sin6_scope_id;
			prev_port = sin6->sin6_port;
		}
		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(m, packet_length);
#endif
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);

		/* send it out. table id is taken from stcb */
		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp,
		    stcb, vrf_id);

		if (net) {
			/* for link local this must be done */
			sin6->sin6_scope_id = prev_scope;
			sin6->sin6_port = prev_port;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret) {
			SCTP_STAT_INCR(sctps_senderrors);
		}
		if (net == NULL) {
			/* Now if we had a temp route free it */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if (ro->ro_rt == NULL) {
				/* Route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
			if ((ro->ro_rt != NULL) &&
			    (net->ro._s_addr)) {
				uint32_t mtu;

				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
				if (mtu &&
				    (stcb->asoc.smallest_mtu > mtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
					SCTP_PRINTF("sctp_mtu_size_reset called after ip6_output mtu-change:%d\n",
					    mtu);
#endif
					/* route reports a smaller MTU: shrink the association */
					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
					net->mtu = mtu;
				}
			} else if (ifp) {
				/* fall back to the interface link MTU */
				if (ND_IFINFO(ifp)->linkmtu &&
				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
					SCTP_PRINTF("sctp_mtu_size_reset called via ifp ND_IFINFO() linkmtu:%d\n",
					    ND_IFINFO(ifp)->linkmtu);
#endif
					sctp_mtu_size_reset(inp,
					    &stcb->asoc,
					    ND_IFINFO(ifp)->linkmtu);
				}
			}
		}
		return (ret);
	}
#endif
	else {
		/* neither AF_INET nor AF_INET6: "this should not happen" */
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		    ((struct sockaddr *)to)->sa_family);
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
}


/*
 * Build and transmit an INIT chunk for this (new) association.  The
 * INIT is sent to the primary destination (selecting and confirming one
 * if none is set) and the INIT retransmission timer is started.
 * (Function continues beyond this view.)
 */
void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m, *m_at, *mp_last;
	struct sctp_nets *net;
	struct sctp_init_msg *initm;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_ecn_supported_param *ecn;
	struct sctp_prsctp_supported_param *prsctp;
	struct sctp_ecn_nonce_supported_param *ecn_nonce;
	struct sctp_supported_chunk_types_param *pr_supported;
	int cnt_inits_to = 0;
	int padval, ret;
	int num_ext;
	int p_len;

	/* INIT's always go to the primary (and usually ONLY address) */
	mp_last = NULL;
	net = stcb->asoc.primary_destination;
	if (net == NULL) {
		/* no primary yet: fall back to the first net in the list */
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net == NULL) {
			/* TSNH */
			return;
		}
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		(void)sctp_set_primary_addr(stcb, NULL, net);
	} else {
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
	}
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); 4086 #ifdef INET6 4087 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) { 4088 /* 4089 * special hook, if we are sending to link local it will not 4090 * show up in our private address count. 4091 */ 4092 struct sockaddr_in6 *sin6l; 4093 4094 sin6l = &net->ro._l_addr.sin6; 4095 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr)) 4096 cnt_inits_to = 1; 4097 } 4098 #endif 4099 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4100 /* This case should not happen */ 4101 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); 4102 return; 4103 } 4104 /* start the INIT timer */ 4105 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 4106 4107 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA); 4108 if (m == NULL) { 4109 /* No memory, INIT timer will re-attempt. */ 4110 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); 4111 return; 4112 } 4113 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 4114 /* 4115 * assume peer supports asconf in order to be able to queue local 4116 * address changes while an INIT is in flight and before the assoc 4117 * is established. 4118 */ 4119 stcb->asoc.peer_supports_asconf = 1; 4120 /* Now lets put the SCTP header in place */ 4121 initm = mtod(m, struct sctp_init_msg *); 4122 initm->sh.src_port = inp->sctp_lport; 4123 initm->sh.dest_port = stcb->rport; 4124 initm->sh.v_tag = 0; 4125 initm->sh.checksum = 0; /* calculate later */ 4126 /* now the chunk header */ 4127 initm->msg.ch.chunk_type = SCTP_INITIATION; 4128 initm->msg.ch.chunk_flags = 0; 4129 /* fill in later from mbuf we build */ 4130 initm->msg.ch.chunk_length = 0; 4131 /* place in my tag */ 4132 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag); 4133 /* set up some of the credits. 
*/ 4134 initm->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), 4135 SCTP_MINIMAL_RWND)); 4136 4137 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 4138 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 4139 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number); 4140 /* now the address restriction */ 4141 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm + 4142 sizeof(*initm)); 4143 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 4144 #ifdef INET6 4145 /* we support 2 types: IPv6/IPv4 */ 4146 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t)); 4147 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 4148 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS); 4149 #else 4150 /* we support 1 type: IPv4 */ 4151 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t)); 4152 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 4153 sup_addr->addr_type[1] = htons(0); /* this is the padding */ 4154 #endif 4155 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t); 4156 4157 if (inp->sctp_ep.adaptation_layer_indicator) { 4158 struct sctp_adaptation_layer_indication *ali; 4159 4160 ali = (struct sctp_adaptation_layer_indication *)( 4161 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t)); 4162 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 4163 ali->ph.param_length = htons(sizeof(*ali)); 4164 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 4165 SCTP_BUF_LEN(m) += sizeof(*ali); 4166 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 4167 sizeof(*ali)); 4168 } else { 4169 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr + 4170 sizeof(*sup_addr) + sizeof(uint16_t)); 4171 } 4172 4173 /* now any cookie time extensions */ 4174 if (stcb->asoc.cookie_preserve_req) { 4175 struct sctp_cookie_perserve_param *cookie_preserve; 4176 4177 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn); 4178 
cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 4179 cookie_preserve->ph.param_length = htons( 4180 sizeof(*cookie_preserve)); 4181 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 4182 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve); 4183 ecn = (struct sctp_ecn_supported_param *)( 4184 (caddr_t)cookie_preserve + sizeof(*cookie_preserve)); 4185 stcb->asoc.cookie_preserve_req = 0; 4186 } 4187 /* ECN parameter */ 4188 if (sctp_ecn_enable == 1) { 4189 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 4190 ecn->ph.param_length = htons(sizeof(*ecn)); 4191 SCTP_BUF_LEN(m) += sizeof(*ecn); 4192 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 4193 sizeof(*ecn)); 4194 } else { 4195 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 4196 } 4197 /* And now tell the peer we do pr-sctp */ 4198 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 4199 prsctp->ph.param_length = htons(sizeof(*prsctp)); 4200 SCTP_BUF_LEN(m) += sizeof(*prsctp); 4201 4202 /* And now tell the peer we do all the extensions */ 4203 pr_supported = (struct sctp_supported_chunk_types_param *) 4204 ((caddr_t)prsctp + sizeof(*prsctp)); 4205 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 4206 num_ext = 0; 4207 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 4208 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 4209 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 4210 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 4211 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 4212 if (!sctp_auth_disable) 4213 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 4214 p_len = sizeof(*pr_supported) + num_ext; 4215 pr_supported->ph.param_length = htons(p_len); 4216 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 4217 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4218 4219 /* ECN nonce: And now tell the peer we support ECN nonce */ 4220 if (sctp_ecn_nonce) { 4221 ecn_nonce = (struct 
sctp_ecn_nonce_supported_param *) 4222 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 4223 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 4224 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 4225 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 4226 } 4227 /* add authentication parameters */ 4228 if (!sctp_auth_disable) { 4229 struct sctp_auth_random *randp; 4230 struct sctp_auth_hmac_algo *hmacs; 4231 struct sctp_auth_chunk_list *chunks; 4232 4233 /* attach RANDOM parameter, if available */ 4234 if (stcb->asoc.authinfo.random != NULL) { 4235 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4236 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len; 4237 #ifdef SCTP_AUTH_DRAFT_04 4238 randp->ph.param_type = htons(SCTP_RANDOM); 4239 randp->ph.param_length = htons(p_len); 4240 bcopy(stcb->asoc.authinfo.random->key, 4241 randp->random_data, 4242 stcb->asoc.authinfo.random_len); 4243 #else 4244 /* random key already contains the header */ 4245 bcopy(stcb->asoc.authinfo.random->key, randp, p_len); 4246 #endif 4247 /* zero out any padding required */ 4248 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); 4249 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4250 } 4251 /* add HMAC_ALGO parameter */ 4252 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4253 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs, 4254 (uint8_t *) hmacs->hmac_ids); 4255 if (p_len > 0) { 4256 p_len += sizeof(*hmacs); 4257 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 4258 hmacs->ph.param_length = htons(p_len); 4259 /* zero out any padding required */ 4260 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 4261 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4262 } 4263 /* add CHUNKS parameter */ 4264 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4265 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, 4266 chunks->chunk_types); 4267 if (p_len > 0) { 4268 p_len += sizeof(*chunks); 4269 
chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 4270 chunks->ph.param_length = htons(p_len); 4271 /* zero out any padding required */ 4272 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 4273 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4274 } 4275 } 4276 m_at = m; 4277 /* now the addresses */ 4278 { 4279 struct sctp_scoping scp; 4280 4281 /* 4282 * To optimize this we could put the scoping stuff into a 4283 * structure and remove the individual uint8's from the 4284 * assoc structure. Then we could just sifa in the address 4285 * within the stcb.. but for now this is a quick hack to get 4286 * the address stuff teased apart. 4287 */ 4288 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal; 4289 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal; 4290 scp.loopback_scope = stcb->asoc.loopback_scope; 4291 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope; 4292 scp.local_scope = stcb->asoc.local_scope; 4293 scp.site_scope = stcb->asoc.site_scope; 4294 4295 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 4296 } 4297 4298 /* calulate the size and update pkt header and chunk header */ 4299 p_len = 0; 4300 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 4301 if (SCTP_BUF_NEXT(m_at) == NULL) 4302 mp_last = m_at; 4303 p_len += SCTP_BUF_LEN(m_at); 4304 } 4305 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr))); 4306 /* 4307 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 4308 * here since the timer will drive a retranmission. 4309 */ 4310 4311 /* I don't expect this to execute but we will be safe here */ 4312 padval = p_len % 4; 4313 if ((padval) && (mp_last)) { 4314 /* 4315 * The compiler worries that mp_last may not be set even 4316 * though I think it is impossible :-> however we add 4317 * mp_last here just in case. 
4318 */ 4319 ret = sctp_add_pad_tombuf(mp_last, (4 - padval)); 4320 if (ret) { 4321 /* Houston we have a problem, no space */ 4322 sctp_m_freem(m); 4323 return; 4324 } 4325 p_len += padval; 4326 } 4327 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); 4328 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 4329 (struct sockaddr *)&net->ro._l_addr, 4330 m, 0, NULL, 0, 0, NULL, 0, net->port, so_locked, NULL); 4331 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret); 4332 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 4333 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 4334 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 4335 } 4336 4337 struct mbuf * 4338 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 4339 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp) 4340 { 4341 /* 4342 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 4343 * being equal to the beginning of the params i.e. (iphlen + 4344 * sizeof(struct sctp_init_msg) parse through the parameters to the 4345 * end of the mbuf verifying that all parameters are known. 4346 * 4347 * For unknown parameters build and return a mbuf with 4348 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 4349 * processing this chunk stop, and set *abort_processing to 1. 4350 * 4351 * By having param_offset be pre-set to where parameters begin it is 4352 * hoped that this routine may be reused in the future by new 4353 * features. 
4354 */ 4355 struct sctp_paramhdr *phdr, params; 4356 4357 struct mbuf *mat, *op_err; 4358 char tempbuf[SCTP_PARAM_BUFFER_SIZE]; 4359 int at, limit, pad_needed; 4360 uint16_t ptype, plen, padded_size; 4361 int err_at; 4362 4363 *abort_processing = 0; 4364 mat = in_initpkt; 4365 err_at = 0; 4366 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 4367 at = param_offset; 4368 op_err = NULL; 4369 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); 4370 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4371 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 4372 ptype = ntohs(phdr->param_type); 4373 plen = ntohs(phdr->param_length); 4374 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 4375 /* wacked parameter */ 4376 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); 4377 goto invalid_size; 4378 } 4379 limit -= SCTP_SIZE32(plen); 4380 /*- 4381 * All parameters for all chunks that we know/understand are 4382 * listed here. We process them other places and make 4383 * appropriate stop actions per the upper bits. However this 4384 * is the generic routine processor's can call to get back 4385 * an operr.. to either incorporate (init-ack) or send. 
4386 */ 4387 padded_size = SCTP_SIZE32(plen); 4388 switch (ptype) { 4389 /* Param's with variable size */ 4390 case SCTP_HEARTBEAT_INFO: 4391 case SCTP_STATE_COOKIE: 4392 case SCTP_UNRECOG_PARAM: 4393 case SCTP_ERROR_CAUSE_IND: 4394 /* ok skip fwd */ 4395 at += padded_size; 4396 break; 4397 /* Param's with variable size within a range */ 4398 case SCTP_CHUNK_LIST: 4399 case SCTP_SUPPORTED_CHUNK_EXT: 4400 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 4401 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); 4402 goto invalid_size; 4403 } 4404 at += padded_size; 4405 break; 4406 case SCTP_SUPPORTED_ADDRTYPE: 4407 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 4408 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); 4409 goto invalid_size; 4410 } 4411 at += padded_size; 4412 break; 4413 case SCTP_RANDOM: 4414 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 4415 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); 4416 goto invalid_size; 4417 } 4418 at += padded_size; 4419 break; 4420 case SCTP_SET_PRIM_ADDR: 4421 case SCTP_DEL_IP_ADDRESS: 4422 case SCTP_ADD_IP_ADDRESS: 4423 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 4424 (padded_size != sizeof(struct sctp_asconf_addr_param))) { 4425 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); 4426 goto invalid_size; 4427 } 4428 at += padded_size; 4429 break; 4430 /* Param's with a fixed size */ 4431 case SCTP_IPV4_ADDRESS: 4432 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 4433 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); 4434 goto invalid_size; 4435 } 4436 at += padded_size; 4437 break; 4438 case SCTP_IPV6_ADDRESS: 4439 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 4440 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); 4441 goto invalid_size; 4442 } 4443 
at += padded_size; 4444 break; 4445 case SCTP_COOKIE_PRESERVE: 4446 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 4447 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); 4448 goto invalid_size; 4449 } 4450 at += padded_size; 4451 break; 4452 case SCTP_ECN_NONCE_SUPPORTED: 4453 case SCTP_PRSCTP_SUPPORTED: 4454 if (padded_size != sizeof(struct sctp_paramhdr)) { 4455 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp %d\n", plen); 4456 goto invalid_size; 4457 } 4458 at += padded_size; 4459 break; 4460 case SCTP_ECN_CAPABLE: 4461 if (padded_size != sizeof(struct sctp_ecn_supported_param)) { 4462 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); 4463 goto invalid_size; 4464 } 4465 at += padded_size; 4466 break; 4467 case SCTP_ULP_ADAPTATION: 4468 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 4469 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); 4470 goto invalid_size; 4471 } 4472 at += padded_size; 4473 break; 4474 case SCTP_SUCCESS_REPORT: 4475 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 4476 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); 4477 goto invalid_size; 4478 } 4479 at += padded_size; 4480 break; 4481 case SCTP_HOSTNAME_ADDRESS: 4482 { 4483 /* We can NOT handle HOST NAME addresses!! */ 4484 int l_len; 4485 4486 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. 
abort processing\n"); 4487 *abort_processing = 1; 4488 if (op_err == NULL) { 4489 /* Ok need to try to get a mbuf */ 4490 #ifdef INET6 4491 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4492 #else 4493 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4494 #endif 4495 l_len += plen; 4496 l_len += sizeof(struct sctp_paramhdr); 4497 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4498 if (op_err) { 4499 SCTP_BUF_LEN(op_err) = 0; 4500 /* 4501 * pre-reserve space for ip 4502 * and sctp header and 4503 * chunk hdr 4504 */ 4505 #ifdef INET6 4506 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4507 #else 4508 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4509 #endif 4510 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4511 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4512 } 4513 } 4514 if (op_err) { 4515 /* If we have space */ 4516 struct sctp_paramhdr s; 4517 4518 if (err_at % 4) { 4519 uint32_t cpthis = 0; 4520 4521 pad_needed = 4 - (err_at % 4); 4522 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4523 err_at += pad_needed; 4524 } 4525 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 4526 s.param_length = htons(sizeof(s) + plen); 4527 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4528 err_at += sizeof(s); 4529 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4530 if (phdr == NULL) { 4531 sctp_m_freem(op_err); 4532 /* 4533 * we are out of memory but 4534 * we still need to have a 4535 * look at what to do (the 4536 * system is in trouble 4537 * though). 4538 */ 4539 return (NULL); 4540 } 4541 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4542 err_at += plen; 4543 } 4544 return (op_err); 4545 break; 4546 } 4547 default: 4548 /* 4549 * we do not recognize the parameter figure out what 4550 * we do. 
4551 */ 4552 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); 4553 if ((ptype & 0x4000) == 0x4000) { 4554 /* Report bit is set?? */ 4555 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); 4556 if (op_err == NULL) { 4557 int l_len; 4558 4559 /* Ok need to try to get an mbuf */ 4560 #ifdef INET6 4561 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4562 #else 4563 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4564 #endif 4565 l_len += plen; 4566 l_len += sizeof(struct sctp_paramhdr); 4567 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4568 if (op_err) { 4569 SCTP_BUF_LEN(op_err) = 0; 4570 #ifdef INET6 4571 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4572 #else 4573 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4574 #endif 4575 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4576 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4577 } 4578 } 4579 if (op_err) { 4580 /* If we have space */ 4581 struct sctp_paramhdr s; 4582 4583 if (err_at % 4) { 4584 uint32_t cpthis = 0; 4585 4586 pad_needed = 4 - (err_at % 4); 4587 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4588 err_at += pad_needed; 4589 } 4590 s.param_type = htons(SCTP_UNRECOG_PARAM); 4591 s.param_length = htons(sizeof(s) + plen); 4592 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4593 err_at += sizeof(s); 4594 if (plen > sizeof(tempbuf)) { 4595 plen = sizeof(tempbuf); 4596 } 4597 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4598 if (phdr == NULL) { 4599 sctp_m_freem(op_err); 4600 /* 4601 * we are out of memory but 4602 * we still need to have a 4603 * look at what to do (the 4604 * system is in trouble 4605 * though). 
4606 */ 4607 op_err = NULL; 4608 goto more_processing; 4609 } 4610 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4611 err_at += plen; 4612 } 4613 } 4614 more_processing: 4615 if ((ptype & 0x8000) == 0x0000) { 4616 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); 4617 return (op_err); 4618 } else { 4619 /* skip this chunk and continue processing */ 4620 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); 4621 at += SCTP_SIZE32(plen); 4622 } 4623 break; 4624 4625 } 4626 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4627 } 4628 return (op_err); 4629 invalid_size: 4630 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); 4631 *abort_processing = 1; 4632 if ((op_err == NULL) && phdr) { 4633 int l_len; 4634 4635 #ifdef INET6 4636 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4637 #else 4638 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4639 #endif 4640 l_len += (2 * sizeof(struct sctp_paramhdr)); 4641 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4642 if (op_err) { 4643 SCTP_BUF_LEN(op_err) = 0; 4644 #ifdef INET6 4645 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4646 #else 4647 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4648 #endif 4649 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4650 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4651 } 4652 } 4653 if ((op_err) && phdr) { 4654 struct sctp_paramhdr s; 4655 4656 if (err_at % 4) { 4657 uint32_t cpthis = 0; 4658 4659 pad_needed = 4 - (err_at % 4); 4660 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4661 err_at += pad_needed; 4662 } 4663 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4664 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); 4665 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4666 err_at += sizeof(s); 4667 /* Only copy back the p-hdr that caused the issue */ 4668 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); 4669 } 4670 return 
(op_err); 4671 } 4672 4673 static int 4674 sctp_are_there_new_addresses(struct sctp_association *asoc, 4675 struct mbuf *in_initpkt, int iphlen, int offset) 4676 { 4677 /* 4678 * Given a INIT packet, look through the packet to verify that there 4679 * are NO new addresses. As we go through the parameters add reports 4680 * of any un-understood parameters that require an error. Also we 4681 * must return (1) to drop the packet if we see a un-understood 4682 * parameter that tells us to drop the chunk. 4683 */ 4684 struct sockaddr_in sin4, *sa4; 4685 4686 #ifdef INET6 4687 struct sockaddr_in6 sin6, *sa6; 4688 4689 #endif 4690 struct sockaddr *sa_touse; 4691 struct sockaddr *sa; 4692 struct sctp_paramhdr *phdr, params; 4693 struct ip *iph; 4694 4695 #ifdef INET6 4696 struct ip6_hdr *ip6h; 4697 4698 #endif 4699 struct mbuf *mat; 4700 uint16_t ptype, plen; 4701 int err_at; 4702 uint8_t fnd; 4703 struct sctp_nets *net; 4704 4705 memset(&sin4, 0, sizeof(sin4)); 4706 #ifdef INET6 4707 memset(&sin6, 0, sizeof(sin6)); 4708 #endif 4709 sin4.sin_family = AF_INET; 4710 sin4.sin_len = sizeof(sin4); 4711 #ifdef INET6 4712 sin6.sin6_family = AF_INET6; 4713 sin6.sin6_len = sizeof(sin6); 4714 #endif 4715 sa_touse = NULL; 4716 /* First what about the src address of the pkt ? 
*/ 4717 iph = mtod(in_initpkt, struct ip *); 4718 switch (iph->ip_v) { 4719 case IPVERSION: 4720 /* source addr is IPv4 */ 4721 sin4.sin_addr = iph->ip_src; 4722 sa_touse = (struct sockaddr *)&sin4; 4723 break; 4724 #ifdef INET6 4725 case IPV6_VERSION >> 4: 4726 /* source addr is IPv6 */ 4727 ip6h = mtod(in_initpkt, struct ip6_hdr *); 4728 sin6.sin6_addr = ip6h->ip6_src; 4729 sa_touse = (struct sockaddr *)&sin6; 4730 break; 4731 #endif 4732 default: 4733 return (1); 4734 } 4735 4736 fnd = 0; 4737 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4738 sa = (struct sockaddr *)&net->ro._l_addr; 4739 if (sa->sa_family == sa_touse->sa_family) { 4740 if (sa->sa_family == AF_INET) { 4741 sa4 = (struct sockaddr_in *)sa; 4742 if (sa4->sin_addr.s_addr == 4743 sin4.sin_addr.s_addr) { 4744 fnd = 1; 4745 break; 4746 } 4747 } 4748 #ifdef INET6 4749 if (sa->sa_family == AF_INET6) { 4750 sa6 = (struct sockaddr_in6 *)sa; 4751 if (SCTP6_ARE_ADDR_EQUAL(sa6, 4752 &sin6)) { 4753 fnd = 1; 4754 break; 4755 } 4756 } 4757 #endif 4758 } 4759 } 4760 if (fnd == 0) { 4761 /* New address added! no need to look futher. 
*/ 4762 return (1); 4763 } 4764 /* Ok so far lets munge through the rest of the packet */ 4765 mat = in_initpkt; 4766 err_at = 0; 4767 sa_touse = NULL; 4768 offset += sizeof(struct sctp_init_chunk); 4769 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4770 while (phdr) { 4771 ptype = ntohs(phdr->param_type); 4772 plen = ntohs(phdr->param_length); 4773 if (ptype == SCTP_IPV4_ADDRESS) { 4774 struct sctp_ipv4addr_param *p4, p4_buf; 4775 4776 phdr = sctp_get_next_param(mat, offset, 4777 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 4778 if (plen != sizeof(struct sctp_ipv4addr_param) || 4779 phdr == NULL) { 4780 return (1); 4781 } 4782 p4 = (struct sctp_ipv4addr_param *)phdr; 4783 sin4.sin_addr.s_addr = p4->addr; 4784 sa_touse = (struct sockaddr *)&sin4; 4785 } else if (ptype == SCTP_IPV6_ADDRESS) { 4786 struct sctp_ipv6addr_param *p6, p6_buf; 4787 4788 phdr = sctp_get_next_param(mat, offset, 4789 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 4790 if (plen != sizeof(struct sctp_ipv6addr_param) || 4791 phdr == NULL) { 4792 return (1); 4793 } 4794 p6 = (struct sctp_ipv6addr_param *)phdr; 4795 #ifdef INET6 4796 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 4797 sizeof(p6->addr)); 4798 #endif 4799 sa_touse = (struct sockaddr *)&sin4; 4800 } 4801 if (sa_touse) { 4802 /* ok, sa_touse points to one to check */ 4803 fnd = 0; 4804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4805 sa = (struct sockaddr *)&net->ro._l_addr; 4806 if (sa->sa_family != sa_touse->sa_family) { 4807 continue; 4808 } 4809 if (sa->sa_family == AF_INET) { 4810 sa4 = (struct sockaddr_in *)sa; 4811 if (sa4->sin_addr.s_addr == 4812 sin4.sin_addr.s_addr) { 4813 fnd = 1; 4814 break; 4815 } 4816 } 4817 #ifdef INET6 4818 if (sa->sa_family == AF_INET6) { 4819 sa6 = (struct sockaddr_in6 *)sa; 4820 if (SCTP6_ARE_ADDR_EQUAL( 4821 sa6, &sin6)) { 4822 fnd = 1; 4823 break; 4824 } 4825 } 4826 #endif 4827 } 4828 if (!fnd) { 4829 /* New addr added! 
no need to look further */ 4830 return (1); 4831 } 4832 } 4833 offset += SCTP_SIZE32(plen); 4834 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4835 } 4836 return (0); 4837 } 4838 4839 /* 4840 * Given a MBUF chain that was sent into us containing an INIT. Build a 4841 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 4842 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 4843 * message (i.e. the struct sctp_init_msg). 4844 */ 4845 void 4846 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4847 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 4848 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock) 4849 { 4850 struct sctp_association *asoc; 4851 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last; 4852 struct sctp_init_msg *initackm_out; 4853 struct sctp_ecn_supported_param *ecn; 4854 struct sctp_prsctp_supported_param *prsctp; 4855 struct sctp_ecn_nonce_supported_param *ecn_nonce; 4856 struct sctp_supported_chunk_types_param *pr_supported; 4857 union sctp_sockstore store, store1, *over_addr; 4858 struct sockaddr_in *sin, *to_sin; 4859 4860 #ifdef INET6 4861 struct sockaddr_in6 *sin6, *to_sin6; 4862 4863 #endif 4864 struct ip *iph; 4865 4866 #ifdef INET6 4867 struct ip6_hdr *ip6; 4868 4869 #endif 4870 struct sockaddr *to; 4871 struct sctp_state_cookie stc; 4872 struct sctp_nets *net = NULL; 4873 uint8_t *signature = NULL; 4874 int cnt_inits_to = 0; 4875 uint16_t his_limit, i_want; 4876 int abort_flag, padval; 4877 int num_ext; 4878 int p_len; 4879 struct socket *so; 4880 4881 if (stcb) 4882 asoc = &stcb->asoc; 4883 else 4884 asoc = NULL; 4885 mp_last = NULL; 4886 if ((asoc != NULL) && 4887 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 4888 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 4889 /* new addresses, out of here in non-cookie-wait states */ 4890 /* 4891 * Send a ABORT, we don't add the new 
address error clause 4892 * though we even set the T bit and copy in the 0 tag.. this 4893 * looks no different than if no listener was present. 4894 */ 4895 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port); 4896 return; 4897 } 4898 abort_flag = 0; 4899 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 4900 (offset + sizeof(struct sctp_init_chunk)), 4901 &abort_flag, (struct sctp_chunkhdr *)init_chk); 4902 if (abort_flag) { 4903 do_a_abort: 4904 sctp_send_abort(init_pkt, iphlen, sh, 4905 init_chk->init.initiate_tag, op_err, vrf_id, port); 4906 return; 4907 } 4908 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 4909 if (m == NULL) { 4910 /* No memory, INIT timer will re-attempt. */ 4911 if (op_err) 4912 sctp_m_freem(op_err); 4913 return; 4914 } 4915 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 4916 4917 /* the time I built cookie */ 4918 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); 4919 4920 /* populate any tie tags */ 4921 if (asoc != NULL) { 4922 /* unlock before tag selections */ 4923 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 4924 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 4925 stc.cookie_life = asoc->cookie_life; 4926 net = asoc->primary_destination; 4927 } else { 4928 stc.tie_tag_my_vtag = 0; 4929 stc.tie_tag_peer_vtag = 0; 4930 /* life I will award this cookie */ 4931 stc.cookie_life = inp->sctp_ep.def_cookie_life; 4932 } 4933 4934 /* copy in the ports for later check */ 4935 stc.myport = sh->dest_port; 4936 stc.peerport = sh->src_port; 4937 4938 /* 4939 * If we wanted to honor cookie life extentions, we would add to 4940 * stc.cookie_life. 
For now we should NOT honor any extension 4941 */ 4942 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 4943 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4944 struct inpcb *in_inp; 4945 4946 /* Its a V6 socket */ 4947 in_inp = (struct inpcb *)inp; 4948 stc.ipv6_addr_legal = 1; 4949 /* Now look at the binding flag to see if V4 will be legal */ 4950 if (SCTP_IPV6_V6ONLY(in_inp) == 0) { 4951 stc.ipv4_addr_legal = 1; 4952 } else { 4953 /* V4 addresses are NOT legal on the association */ 4954 stc.ipv4_addr_legal = 0; 4955 } 4956 } else { 4957 /* Its a V4 socket, no - V6 */ 4958 stc.ipv4_addr_legal = 1; 4959 stc.ipv6_addr_legal = 0; 4960 } 4961 4962 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 4963 stc.ipv4_scope = 1; 4964 #else 4965 stc.ipv4_scope = 0; 4966 #endif 4967 /* now for scope setup */ 4968 memset((caddr_t)&store, 0, sizeof(store)); 4969 memset((caddr_t)&store1, 0, sizeof(store1)); 4970 sin = &store.sin; 4971 to_sin = &store1.sin; 4972 #ifdef INET6 4973 sin6 = &store.sin6; 4974 to_sin6 = &store1.sin6; 4975 #endif 4976 iph = mtod(init_pkt, struct ip *); 4977 /* establish the to_addr's */ 4978 switch (iph->ip_v) { 4979 case IPVERSION: 4980 to_sin->sin_port = sh->dest_port; 4981 to_sin->sin_family = AF_INET; 4982 to_sin->sin_len = sizeof(struct sockaddr_in); 4983 to_sin->sin_addr = iph->ip_dst; 4984 break; 4985 #ifdef INET6 4986 case IPV6_VERSION >> 4: 4987 ip6 = mtod(init_pkt, struct ip6_hdr *); 4988 to_sin6->sin6_addr = ip6->ip6_dst; 4989 to_sin6->sin6_scope_id = 0; 4990 to_sin6->sin6_port = sh->dest_port; 4991 to_sin6->sin6_family = AF_INET6; 4992 to_sin6->sin6_len = sizeof(struct sockaddr_in6); 4993 break; 4994 #endif 4995 default: 4996 goto do_a_abort; 4997 break; 4998 }; 4999 5000 if (net == NULL) { 5001 to = (struct sockaddr *)&store; 5002 switch (iph->ip_v) { 5003 case IPVERSION: 5004 { 5005 sin->sin_family = AF_INET; 5006 sin->sin_len = sizeof(struct sockaddr_in); 5007 sin->sin_port = sh->src_port; 5008 sin->sin_addr = iph->ip_src; 5009 /* lookup 
address */ 5010 stc.address[0] = sin->sin_addr.s_addr; 5011 stc.address[1] = 0; 5012 stc.address[2] = 0; 5013 stc.address[3] = 0; 5014 stc.addr_type = SCTP_IPV4_ADDRESS; 5015 /* local from address */ 5016 stc.laddress[0] = to_sin->sin_addr.s_addr; 5017 stc.laddress[1] = 0; 5018 stc.laddress[2] = 0; 5019 stc.laddress[3] = 0; 5020 stc.laddr_type = SCTP_IPV4_ADDRESS; 5021 /* scope_id is only for v6 */ 5022 stc.scope_id = 0; 5023 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 5024 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 5025 stc.ipv4_scope = 1; 5026 } 5027 #else 5028 stc.ipv4_scope = 1; 5029 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 5030 /* Must use the address in this case */ 5031 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) { 5032 stc.loopback_scope = 1; 5033 stc.ipv4_scope = 1; 5034 stc.site_scope = 1; 5035 stc.local_scope = 0; 5036 } 5037 break; 5038 } 5039 #ifdef INET6 5040 case IPV6_VERSION >> 4: 5041 { 5042 ip6 = mtod(init_pkt, struct ip6_hdr *); 5043 sin6->sin6_family = AF_INET6; 5044 sin6->sin6_len = sizeof(struct sockaddr_in6); 5045 sin6->sin6_port = sh->src_port; 5046 sin6->sin6_addr = ip6->ip6_src; 5047 /* lookup address */ 5048 memcpy(&stc.address, &sin6->sin6_addr, 5049 sizeof(struct in6_addr)); 5050 sin6->sin6_scope_id = 0; 5051 stc.addr_type = SCTP_IPV6_ADDRESS; 5052 stc.scope_id = 0; 5053 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) { 5054 /* 5055 * FIX ME: does this have scope from 5056 * rcvif? 5057 */ 5058 (void)sa6_recoverscope(sin6); 5059 stc.scope_id = sin6->sin6_scope_id; 5060 sa6_embedscope(sin6, ip6_use_defzone); 5061 stc.loopback_scope = 1; 5062 stc.local_scope = 0; 5063 stc.site_scope = 1; 5064 stc.ipv4_scope = 1; 5065 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 5066 /* 5067 * If the new destination is a 5068 * LINK_LOCAL we must have common 5069 * both site and local scope. Don't 5070 * set local scope though since we 5071 * must depend on the source to be 5072 * added implicitly. 
We cannot 5073 * assure just because we share one 5074 * link that all links are common. 5075 */ 5076 stc.local_scope = 0; 5077 stc.site_scope = 1; 5078 stc.ipv4_scope = 1; 5079 /* 5080 * we start counting for the private 5081 * address stuff at 1. since the 5082 * link local we source from won't 5083 * show up in our scoped count. 5084 */ 5085 cnt_inits_to = 1; 5086 /* 5087 * pull out the scope_id from 5088 * incoming pkt 5089 */ 5090 /* 5091 * FIX ME: does this have scope from 5092 * rcvif? 5093 */ 5094 (void)sa6_recoverscope(sin6); 5095 stc.scope_id = sin6->sin6_scope_id; 5096 sa6_embedscope(sin6, ip6_use_defzone); 5097 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 5098 /* 5099 * If the new destination is 5100 * SITE_LOCAL then we must have site 5101 * scope in common. 5102 */ 5103 stc.site_scope = 1; 5104 } 5105 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr)); 5106 stc.laddr_type = SCTP_IPV6_ADDRESS; 5107 break; 5108 } 5109 #endif 5110 default: 5111 /* TSNH */ 5112 goto do_a_abort; 5113 break; 5114 } 5115 } else { 5116 /* set the scope per the existing tcb */ 5117 5118 #ifdef INET6 5119 struct sctp_nets *lnet; 5120 5121 #endif 5122 5123 stc.loopback_scope = asoc->loopback_scope; 5124 stc.ipv4_scope = asoc->ipv4_local_scope; 5125 stc.site_scope = asoc->site_scope; 5126 stc.local_scope = asoc->local_scope; 5127 #ifdef INET6 5128 /* Why do we not consider IPv4 LL addresses? */ 5129 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 5130 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 5131 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 5132 /* 5133 * if we have a LL address, start 5134 * counting at 1. 
5135 */ 5136 cnt_inits_to = 1; 5137 } 5138 } 5139 } 5140 #endif 5141 /* use the net pointer */ 5142 to = (struct sockaddr *)&net->ro._l_addr; 5143 switch (to->sa_family) { 5144 case AF_INET: 5145 sin = (struct sockaddr_in *)to; 5146 stc.address[0] = sin->sin_addr.s_addr; 5147 stc.address[1] = 0; 5148 stc.address[2] = 0; 5149 stc.address[3] = 0; 5150 stc.addr_type = SCTP_IPV4_ADDRESS; 5151 if (net->src_addr_selected == 0) { 5152 /* 5153 * strange case here, the INIT should have 5154 * did the selection. 5155 */ 5156 net->ro._s_addr = sctp_source_address_selection(inp, 5157 stcb, (sctp_route_t *) & net->ro, 5158 net, 0, vrf_id); 5159 if (net->ro._s_addr == NULL) 5160 return; 5161 5162 net->src_addr_selected = 1; 5163 5164 } 5165 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 5166 stc.laddress[1] = 0; 5167 stc.laddress[2] = 0; 5168 stc.laddress[3] = 0; 5169 stc.laddr_type = SCTP_IPV4_ADDRESS; 5170 break; 5171 #ifdef INET6 5172 case AF_INET6: 5173 sin6 = (struct sockaddr_in6 *)to; 5174 memcpy(&stc.address, &sin6->sin6_addr, 5175 sizeof(struct in6_addr)); 5176 stc.addr_type = SCTP_IPV6_ADDRESS; 5177 if (net->src_addr_selected == 0) { 5178 /* 5179 * strange case here, the INIT should have 5180 * did the selection. 
5181 */ 5182 net->ro._s_addr = sctp_source_address_selection(inp, 5183 stcb, (sctp_route_t *) & net->ro, 5184 net, 0, vrf_id); 5185 if (net->ro._s_addr == NULL) 5186 return; 5187 5188 net->src_addr_selected = 1; 5189 } 5190 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 5191 sizeof(struct in6_addr)); 5192 stc.laddr_type = SCTP_IPV6_ADDRESS; 5193 break; 5194 #endif 5195 } 5196 } 5197 /* Now lets put the SCTP header in place */ 5198 initackm_out = mtod(m, struct sctp_init_msg *); 5199 initackm_out->sh.src_port = inp->sctp_lport; 5200 initackm_out->sh.dest_port = sh->src_port; 5201 initackm_out->sh.v_tag = init_chk->init.initiate_tag; 5202 /* Save it off for quick ref */ 5203 stc.peers_vtag = init_chk->init.initiate_tag; 5204 initackm_out->sh.checksum = 0; /* calculate later */ 5205 /* who are we */ 5206 memcpy(stc.identification, SCTP_VERSION_STRING, 5207 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 5208 /* now the chunk header */ 5209 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK; 5210 initackm_out->msg.ch.chunk_flags = 0; 5211 /* fill in later from mbuf we build */ 5212 initackm_out->msg.ch.chunk_length = 0; 5213 /* place in my tag */ 5214 if ((asoc != NULL) && 5215 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 5216 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 5217 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 5218 /* re-use the v-tags and init-seq here */ 5219 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag); 5220 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number); 5221 } else { 5222 uint32_t vtag, itsn; 5223 5224 if (hold_inp_lock) { 5225 SCTP_INP_INCR_REF(inp); 5226 SCTP_INP_RUNLOCK(inp); 5227 } 5228 if (asoc) { 5229 atomic_add_int(&asoc->refcnt, 1); 5230 SCTP_TCB_UNLOCK(stcb); 5231 vtag = sctp_select_a_tag(inp, 1); 5232 initackm_out->msg.init.initiate_tag = htonl(vtag); 5233 /* get a TSN to use too */ 5234 itsn = sctp_select_initial_TSN(&inp->sctp_ep); 5235 
initackm_out->msg.init.initial_tsn = htonl(itsn); 5236 SCTP_TCB_LOCK(stcb); 5237 atomic_add_int(&asoc->refcnt, -1); 5238 } else { 5239 vtag = sctp_select_a_tag(inp, 1); 5240 initackm_out->msg.init.initiate_tag = htonl(vtag); 5241 /* get a TSN to use too */ 5242 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 5243 } 5244 if (hold_inp_lock) { 5245 SCTP_INP_RLOCK(inp); 5246 SCTP_INP_DECR_REF(inp); 5247 } 5248 } 5249 /* save away my tag to */ 5250 stc.my_vtag = initackm_out->msg.init.initiate_tag; 5251 5252 /* set up some of the credits. */ 5253 so = inp->sctp_socket; 5254 if (so == NULL) { 5255 /* memory problem */ 5256 sctp_m_freem(m); 5257 return; 5258 } else { 5259 initackm_out->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); 5260 } 5261 /* set what I want */ 5262 his_limit = ntohs(init_chk->init.num_inbound_streams); 5263 /* choose what I want */ 5264 if (asoc != NULL) { 5265 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 5266 i_want = asoc->streamoutcnt; 5267 } else { 5268 i_want = inp->sctp_ep.pre_open_stream_count; 5269 } 5270 } else { 5271 i_want = inp->sctp_ep.pre_open_stream_count; 5272 } 5273 if (his_limit < i_want) { 5274 /* I Want more :< */ 5275 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams; 5276 } else { 5277 /* I can have what I want :> */ 5278 initackm_out->msg.init.num_outbound_streams = htons(i_want); 5279 } 5280 /* tell him his limt. 
*/ 5281 initackm_out->msg.init.num_inbound_streams = 5282 htons(inp->sctp_ep.max_open_streams_intome); 5283 /* setup the ECN pointer */ 5284 5285 if (inp->sctp_ep.adaptation_layer_indicator) { 5286 struct sctp_adaptation_layer_indication *ali; 5287 5288 ali = (struct sctp_adaptation_layer_indication *)( 5289 (caddr_t)initackm_out + sizeof(*initackm_out)); 5290 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 5291 ali->ph.param_length = htons(sizeof(*ali)); 5292 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 5293 SCTP_BUF_LEN(m) += sizeof(*ali); 5294 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 5295 sizeof(*ali)); 5296 } else { 5297 ecn = (struct sctp_ecn_supported_param *)( 5298 (caddr_t)initackm_out + sizeof(*initackm_out)); 5299 } 5300 5301 /* ECN parameter */ 5302 if (sctp_ecn_enable == 1) { 5303 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 5304 ecn->ph.param_length = htons(sizeof(*ecn)); 5305 SCTP_BUF_LEN(m) += sizeof(*ecn); 5306 5307 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 5308 sizeof(*ecn)); 5309 } else { 5310 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 5311 } 5312 /* And now tell the peer we do pr-sctp */ 5313 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 5314 prsctp->ph.param_length = htons(sizeof(*prsctp)); 5315 SCTP_BUF_LEN(m) += sizeof(*prsctp); 5316 5317 /* And now tell the peer we do all the extensions */ 5318 pr_supported = (struct sctp_supported_chunk_types_param *) 5319 ((caddr_t)prsctp + sizeof(*prsctp)); 5320 5321 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 5322 num_ext = 0; 5323 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 5324 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 5325 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 5326 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 5327 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 5328 if (!sctp_auth_disable) 5329 
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 5330 p_len = sizeof(*pr_supported) + num_ext; 5331 pr_supported->ph.param_length = htons(p_len); 5332 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 5333 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5334 5335 /* ECN nonce: And now tell the peer we support ECN nonce */ 5336 if (sctp_ecn_nonce) { 5337 ecn_nonce = (struct sctp_ecn_nonce_supported_param *) 5338 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 5339 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 5340 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 5341 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 5342 } 5343 /* add authentication parameters */ 5344 if (!sctp_auth_disable) { 5345 struct sctp_auth_random *randp; 5346 struct sctp_auth_hmac_algo *hmacs; 5347 struct sctp_auth_chunk_list *chunks; 5348 uint16_t random_len; 5349 5350 /* generate and add RANDOM parameter */ 5351 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; 5352 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5353 randp->ph.param_type = htons(SCTP_RANDOM); 5354 p_len = sizeof(*randp) + random_len; 5355 randp->ph.param_length = htons(p_len); 5356 SCTP_READ_RANDOM(randp->random_data, random_len); 5357 /* zero out any padding required */ 5358 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); 5359 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5360 5361 /* add HMAC_ALGO parameter */ 5362 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5363 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 5364 (uint8_t *) hmacs->hmac_ids); 5365 if (p_len > 0) { 5366 p_len += sizeof(*hmacs); 5367 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 5368 hmacs->ph.param_length = htons(p_len); 5369 /* zero out any padding required */ 5370 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 5371 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5372 } 5373 /* add CHUNKS parameter */ 5374 chunks = (struct sctp_auth_chunk_list *)(mtod(m, 
caddr_t)+SCTP_BUF_LEN(m)); 5375 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 5376 chunks->chunk_types); 5377 if (p_len > 0) { 5378 p_len += sizeof(*chunks); 5379 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 5380 chunks->ph.param_length = htons(p_len); 5381 /* zero out any padding required */ 5382 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 5383 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5384 } 5385 } 5386 m_at = m; 5387 /* now the addresses */ 5388 { 5389 struct sctp_scoping scp; 5390 5391 /* 5392 * To optimize this we could put the scoping stuff into a 5393 * structure and remove the individual uint8's from the stc 5394 * structure. Then we could just sifa in the address within 5395 * the stc.. but for now this is a quick hack to get the 5396 * address stuff teased apart. 5397 */ 5398 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 5399 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 5400 scp.loopback_scope = stc.loopback_scope; 5401 scp.ipv4_local_scope = stc.ipv4_scope; 5402 scp.local_scope = stc.local_scope; 5403 scp.site_scope = stc.site_scope; 5404 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 5405 } 5406 5407 /* tack on the operational error if present */ 5408 if (op_err) { 5409 struct mbuf *ol; 5410 int llen; 5411 5412 llen = 0; 5413 ol = op_err; 5414 while (ol) { 5415 llen += SCTP_BUF_LEN(ol); 5416 ol = SCTP_BUF_NEXT(ol); 5417 } 5418 if (llen % 4) { 5419 /* must add a pad to the param */ 5420 uint32_t cpthis = 0; 5421 int padlen; 5422 5423 padlen = 4 - (llen % 4); 5424 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); 5425 } 5426 while (SCTP_BUF_NEXT(m_at) != NULL) { 5427 m_at = SCTP_BUF_NEXT(m_at); 5428 } 5429 SCTP_BUF_NEXT(m_at) = op_err; 5430 while (SCTP_BUF_NEXT(m_at) != NULL) { 5431 m_at = SCTP_BUF_NEXT(m_at); 5432 } 5433 } 5434 /* pre-calulate the size and update pkt header and chunk header */ 5435 p_len = 0; 5436 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5437 p_len += SCTP_BUF_LEN(m_tmp); 
5438 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5439 /* m_tmp should now point to last one */ 5440 break; 5441 } 5442 } 5443 5444 /* Now we must build a cookie */ 5445 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 5446 sizeof(struct sctphdr), &stc, &signature); 5447 if (m_cookie == NULL) { 5448 /* memory problem */ 5449 sctp_m_freem(m); 5450 return; 5451 } 5452 /* Now append the cookie to the end and update the space/size */ 5453 SCTP_BUF_NEXT(m_tmp) = m_cookie; 5454 5455 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5456 p_len += SCTP_BUF_LEN(m_tmp); 5457 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5458 /* m_tmp should now point to last one */ 5459 mp_last = m_tmp; 5460 break; 5461 } 5462 } 5463 /* 5464 * Place in the size, but we don't include the last pad (if any) in 5465 * the INIT-ACK. 5466 */ 5467 initackm_out->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr))); 5468 5469 /* 5470 * Time to sign the cookie, we don't sign over the cookie signature 5471 * though thus we set trailer. 5472 */ 5473 (void)sctp_hmac_m(SCTP_HMAC, 5474 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 5475 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), 5476 (uint8_t *) signature, SCTP_SIGNATURE_SIZE); 5477 /* 5478 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 5479 * here since the timer will drive a retranmission. 
 */
	padval = p_len % 4;
	if ((padval) && (mp_last)) {
		/* see my previous comments on mp_last */
		int ret;

		ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
		if (ret) {
			/* Houston we have a problem, no space */
			sctp_m_freem(m);
			return;
		}
		p_len += padval;
	}
	if (stc.loopback_scope) {
		over_addr = &store1;
	} else {
		over_addr = NULL;

	}

	/*
	 * Ignore the return value here; a send failure is recovered by the
	 * peer retransmitting its INIT.
	 */
	(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
	    NULL, 0, port, SCTP_SO_NOT_LOCKED, over_addr);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}


/*
 * Insert stream queue strq onto the association's output "wheel", keeping
 * the wheel sorted by ascending stream number.  A stream already on the
 * wheel (either tqe pointer non-NULL) is left untouched, as is a duplicate
 * stream number (should not happen).  Takes/drops the TCB send lock unless
 * the caller indicates via holds_lock that it already owns it.
 */
void
sctp_insert_on_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq, int holds_lock)
{
	struct sctp_stream_out *stre, *strn;

	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	if ((strq->next_spoke.tqe_next) ||
	    (strq->next_spoke.tqe_prev)) {
		/* already on wheel */
		goto outof_here;
	}
	stre = TAILQ_FIRST(&asoc->out_wheel);
	if (stre == NULL) {
		/* wheel is empty; this becomes the only entry */
		TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
		goto outof_here;
	}
	for (; stre; stre = strn) {
		strn = TAILQ_NEXT(stre, next_spoke);
		if (stre->stream_no > strq->stream_no) {
			/* first larger entry found: insert in front of it */
			TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
			goto outof_here;
		} else if (stre->stream_no == strq->stream_no) {
			/* huh, should not happen */
			goto outof_here;
		} else if (strn == NULL) {
			/* reached the tail: append after the last entry */
			TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
			    next_spoke);
		}
	}
outof_here:
	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Remove stream queue strq from the output wheel and NULL its tqe pointers
 * so sctp_insert_on_wheel() knows it is off the wheel.  Bails out without
 * removing if data was queued on the stream in the meantime.
 */
static void
sctp_remove_from_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq)
{
	/* take off and then setup so we know it is not on the wheel */
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_FIRST(&strq->outqueue)) {
		/* more was added while we waited for the lock; keep it on */
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	/* NULL tqe pointers mark the stream as "not on wheel" */
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}

/*
 * PR-SCTP buffer-space pruning: try to free at least 'dataout' bytes of
 * queued data by dropping older PR-SCTP chunks of equal or lower priority
 * (higher tv_sec value), first from the sent queue, then from the send
 * queue.  Caller must hold the TCB lock.
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((asoc->peer_supports_prsctp) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority so if the one we are
					 * looking at has a larger or equal
					 * priority we want to drop the data
					 * and NOT retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						int cause;

						/* notify SENT vs UNSENT depending on how far the chunk got */
						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
						else
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    cause,
						    &asoc->sent_queue, SCTP_SO_LOCKED);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							/* freed enough room for the new message */
							return;
						}
					}	/* if chunk was present */
				}	/* if of sufficient priority */
			}	/* if chunk has enabled */
		}		/* tailqforeach */

		/* second pass: not-yet-sent chunks on the send queue */
		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
						    &asoc->send_queue, SCTP_SO_LOCKED);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* end if chk->data */
				}	/* end if right class */
			}	/* end if chk pr-sctp */
			chk = nchk;
		}		/* end while (chk) */
	}			/* if enabled in asoc */
}

/*
 * Compute the largest DATA payload that fits one path MTU for this
 * association, accounting for IP/SCTP overhead, a possible AUTH chunk,
 * and 4-byte alignment.
 */
int
sctp_get_frag_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	int siz, ovh;

	/*
	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the ipv6 header, for those that are only dealing with V4
	 * we use a larger frag point.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MED_OVERHEAD;
	} else {
		ovh = SCTP_MED_V4_OVERHEAD;
	}

	/* never exceed the smallest path MTU seen on the association */
	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
		siz = asoc->smallest_mtu - ovh;
	else
		siz = (stcb->asoc.sctp_frag_point - ovh);
	/*
	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
	 */
	/* A data chunk MUST fit in a cluster */
	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
	/* } */

	/* adjust for an AUTH chunk if DATA requires auth */
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);

	if (siz % 4) {
		/* make it an even word boundary please */
		siz -= (siz % 4);
	}
	return (siz);
}

/*
 * Record the PR-SCTP policy requested in sp->sinfo_flags on the pending
 * stream-queue entry, translating sp->timetolive into the per-policy
 * meaning of sp->ts (priority, absolute expiry time, or retransmit limit).
 * No-op unless the peer supports PR-SCTP and a policy was requested.
 */
static void
sctp_set_prsctp_policy(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp)
{
	sp->pr_sctp_on = 0;
	if (stcb->asoc.peer_supports_prsctp) {
		/*
		 * We assume that the user wants PR_SCTP_TTL if the user
		 * provides a positive lifetime but does not specify any
		 * PR_SCTP policy. This is a BAD assumption and causes
		 * problems at least with the U-Vancouver MPI folks. I will
		 * change this to be no policy means NO PR-SCTP.
		 */
		if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
			sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
			sp->pr_sctp_on = 1;
		} else {
			return;
		}
		switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
		case CHUNK_FLAGS_PR_SCTP_BUF:
			/*
			 * Time to live is a priority stored in tv_sec when
			 * doing the buffer drop thing.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		case CHUNK_FLAGS_PR_SCTP_TTL:
			{
				struct timeval tv;

				/* timetolive is in milliseconds; ts becomes now + ttl */
				(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
				tv.tv_sec = sp->timetolive / 1000;
				tv.tv_usec = (sp->timetolive * 1000) % 1000000;
				timevaladd(&sp->ts, &tv);
			}
			break;
		case CHUNK_FLAGS_PR_SCTP_RTX:
			/*
			 * Time to live is the number of retransmissions
			 * stored in tv_sec.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		default:
			SCTPDBG(SCTP_DEBUG_USRREQ1,
			    "Unknown PR_SCTP policy %u.\n",
			    PR_SCTP_POLICY(sp->sinfo_flags));
			break;
		}
	}
}

/*
 * Queue mbuf chain m on the association's stream send queue described by
 * srcv and put the stream on the output wheel.  On success the chain is
 * owned by the queue entry; on failure it is freed here and an errno
 * value is returned (EINVAL for a bad/locked stream, ECONNRESET while
 * shutting down, ENOMEM on allocation failure).
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0, holds_lock;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/*
	 * Given an mbuf chain, put it into the association send queue and
	 * place it on the wheel
	 */
	holds_lock = hold_stcb_lock;
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		/* an explicit-EOR partial message holds a different stream */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this?
	 */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	/* copy the send-info the caller supplied onto the queue entry */
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* caller pinned a specific destination address */
		sp->net = net;
		sp->addr_over = 1;
	} else {
		sp->net = stcb->asoc.primary_destination;
		sp->addr_over = 0;
	}
	atomic_add_int(&sp->net->ref_count, 1);
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sp->length = 0;
	at = m;
	sctp_set_prsctp_policy(stcb, sp);
	/*
	 * We could in theory (for sendall) sifa the length in, but we would
	 * still have to hunt through the chain since we need to setup the
	 * tail_mbuf
	 */
	while (at) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	SCTP_TCB_SEND_LOCK(stcb);
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
		/* ordered delivery: consume the stream sequence number */
		sp->strseq = strm->next_sequence_sent;
		strm->next_sequence_sent++;
	}
	if ((strm->next_spoke.tqe_next ==
	    NULL) &&
	    (strm->next_spoke.tqe_prev == NULL)) {
		/* Not on wheel, insert */
		sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
	}
	/* ownership of m passed to the queue entry; don't free it below */
	m = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
out_now:
	if (m) {
		/* error path: the chain was never queued */
		sctp_m_freem(m);
	}
	return (error);
}


/*
 * Append the data in clonechain onto outchain (or start a new chain),
 * either by taking the mbufs directly (can_take_mbuf), by byte-copying
 * small messages into the tail mbuf(s), or by reference-copying via
 * SCTP_M_COPYM.  *endofchain tracks the last mbuf of the result so
 * repeated appends avoid rewalking the chain.  Returns the head of the
 * resulting chain, or NULL on failure (outchain is freed in that case).
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* we may consume the clone chain itself */
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
		    (sizeofcpy <= (int)((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
				} else {
					/*
					 * We really should not get a NULL
					 * in endofchain
					 */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/*
						 * huh, TSNH XXX maybe we
						 * should panic
						 */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end?
				 */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				/* copy the remainder into the fresh mbuf */
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
			if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
				struct mbuf *mat;

				mat = appendchain;
				while (mat) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
					}
					mat = SCTP_BUF_NEXT(mat);
				}
			}
#endif
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain position
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain position */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}

/* forward declaration; defined later in this file */
int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int *cwnd_full, int from_where,
    struct timeval *now, int *now_filled, int frag_point, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
);

/*
 * Per-association callback for SCTP_SENDALL: copy the user message held
 * in the sctp_copy_all descriptor (ptr) onto this association's send
 * queue, honoring SCTP_ABORT and SCTP_EOF flags, and kick off output.
 * Invoked by the PCB iterator with the TCB locked.
 */
static void
sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
    uint32_t val)
{
	struct sctp_copy_all *ca;
	struct mbuf *m;
	int ret = 0;
	int added_control = 0;
	int un_sent, do_chunk_output = 1;
	struct sctp_association *asoc;

	ca = (struct sctp_copy_all *)ptr;
	if (ca->m == NULL) {
		return;
	}
	if (ca->inp != inp) {
		/* TSNH */
		return;
	}
	if ((ca->m) && ca->sndlen) {
		/* each association gets its own copy of the message */
		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
		if (m == NULL) {
			/* can't copy so we are done */
			ca->cnt_failed++;
			return;
		}
#ifdef SCTP_MBUF_LOGGING
		if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = m;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		m = NULL;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
		/* Abort this assoc with m as the user defined reason */
		if (m) {
			struct sctp_paramhdr *ph;

			/* prepend the user-initiated-abort cause header */
			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
			if (m) {
				ph = mtod(m, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
				ph->param_length = htons(ca->sndlen);
			}
			/*
			 * We add one here to keep the assoc from
			 * dis-appearing on us.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			sctp_abort_an_association(inp, stcb,
			    SCTP_RESPONSE_TO_USER_REQ,
			    m, SCTP_SO_NOT_LOCKED);
			/*
			 * sctp_abort_an_association calls sctp_free_asoc()
			 * free association will NOT free it since we
			 * incremented the refcnt .. we do this to prevent
			 * it being freed and things getting tricky since we
			 * could end up (from free_asoc) calling inpcb_free
			 * which would get a recursive lock call to the
			 * iterator lock.. But as a consequence of that the
			 * stcb will return to us un-locked.. since
			 * free_asoc returns with either no TCB or the TCB
			 * unlocked, we must relock.. to unlock in the
			 * iterator timer :-0
			 */
			SCTP_TCB_LOCK(stcb);
			atomic_add_int(&stcb->asoc.refcnt, -1);
			goto no_chunk_output;
		}
	} else {
		if (m) {
			/* normal case: queue the copy on this association */
			ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
			    &ca->sndrcv, 1);
		}
		asoc = &stcb->asoc;
		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
			/* shutdown this assoc */
			int cnt;

			cnt = sctp_is_there_unsent_data(stcb);

			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (cnt == 0)) {
				if (asoc->locked_on_sending) {
					/* a partial message is still pending */
					goto abort_anyway;
				}
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/*
					 * only send SHUTDOWN the first time
					 * through
					 */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					/* arm retransmit and guard timers for the SHUTDOWN */
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					added_control = 1;
					do_chunk_output = 0;
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data.
				 * currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					if (asoc->locked_on_sending) {
						/*
						 * Locked to send out the
						 * data
						 */
						struct sctp_stream_queue_pending *sp;

						sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
						if (sp) {
							/* an empty, incomplete message was left behind */
							if ((sp->length == 0) && (sp->msg_is_complete == 0))
								asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
						}
					}
					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
					if (TAILQ_EMPTY(&asoc->send_queue) &&
					    TAILQ_EMPTY(&asoc->sent_queue) &&
					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				abort_anyway:
						atomic_add_int(&stcb->asoc.refcnt, 1);
						sctp_abort_an_association(stcb->sctp_ep, stcb,
						    SCTP_RESPONSE_TO_USER_REQ,
						    NULL, SCTP_SO_NOT_LOCKED);
						atomic_add_int(&stcb->asoc.refcnt, -1);
						goto no_chunk_output;
					}
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}

		}
	}
	/* bytes queued but not yet in flight (plus per-chunk header cost) */
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));

	/* Nagle-style hold-back: defer output of a small residual message */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
	    ) {
		do_chunk_output = 0;
	}
	if (do_chunk_output)
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
	else if (added_control) {
		int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
		struct timeval now;
		int frag_point;

		/* push out just the control (SHUTDOWN) chunk */
		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc,
&num_out, 6207 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED); 6208 } 6209 no_chunk_output: 6210 if (ret) { 6211 ca->cnt_failed++; 6212 } else { 6213 ca->cnt_sent++; 6214 } 6215 } 6216 6217 static void 6218 sctp_sendall_completes(void *ptr, uint32_t val) 6219 { 6220 struct sctp_copy_all *ca; 6221 6222 ca = (struct sctp_copy_all *)ptr; 6223 /* 6224 * Do a notify here? Kacheong suggests that the notify be done at 6225 * the send time.. so you would push up a notification if any send 6226 * failed. Don't know if this is feasable since the only failures we 6227 * have is "memory" related and if you cannot get an mbuf to send 6228 * the data you surely can't get an mbuf to send up to notify the 6229 * user you can't send the data :-> 6230 */ 6231 6232 /* now free everything */ 6233 sctp_m_freem(ca->m); 6234 SCTP_FREE(ca, SCTP_M_COPYAL); 6235 } 6236 6237 6238 #define MC_ALIGN(m, len) do { \ 6239 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 6240 } while (0) 6241 6242 6243 6244 static struct mbuf * 6245 sctp_copy_out_all(struct uio *uio, int len) 6246 { 6247 struct mbuf *ret, *at; 6248 int left, willcpy, cancpy, error; 6249 6250 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 6251 if (ret == NULL) { 6252 /* TSNH */ 6253 return (NULL); 6254 } 6255 left = len; 6256 SCTP_BUF_LEN(ret) = 0; 6257 /* save space for the data chunk header */ 6258 cancpy = M_TRAILINGSPACE(ret); 6259 willcpy = min(cancpy, left); 6260 at = ret; 6261 while (left > 0) { 6262 /* Align data to the end */ 6263 error = uiomove(mtod(at, caddr_t), willcpy, uio); 6264 if (error) { 6265 err_out_now: 6266 sctp_m_freem(at); 6267 return (NULL); 6268 } 6269 SCTP_BUF_LEN(at) = willcpy; 6270 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 6271 left -= willcpy; 6272 if (left > 0) { 6273 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 6274 if (SCTP_BUF_NEXT(at) == NULL) { 6275 goto err_out_now; 6276 } 6277 at = SCTP_BUF_NEXT(at); 
			SCTP_BUF_LEN(at) = 0;
			cancpy = M_TRAILINGSPACE(at);
			willcpy = min(cancpy, left);
		}
	}
	return (ret);
}

/*
 * Implement SCTP_SENDALL: snapshot the user message (from uio or mbuf
 * chain m) into a sctp_copy_all descriptor and start a PCB iterator that
 * queues a copy on every association of this endpoint.  Returns 0 or an
 * errno (ENOMEM on allocation failure, EFAULT if the iterator cannot be
 * started).
 */
static int
sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
    struct sctp_sndrcvinfo *srcv)
{
	int ret;
	struct sctp_copy_all *ca;

	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
	    SCTP_M_COPYAL);
	if (ca == NULL) {
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	memset(ca, 0, sizeof(struct sctp_copy_all));

	ca->inp = inp;
	memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
	/*
	 * take off the sendall flag, it would be bad if we failed to do
	 * this :-0
	 */
	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
	/* get length and mbuf chain */
	if (uio) {
		ca->sndlen = uio->uio_resid;
		ca->m = sctp_copy_out_all(uio, ca->sndlen);
		if (ca->m == NULL) {
			SCTP_FREE(ca, SCTP_M_COPYAL);
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
	} else {
		/* Gather the length of the send */
		struct mbuf *mat;

		mat = m;
		ca->sndlen = 0;
		while (m) {
			ca->sndlen += SCTP_BUF_LEN(m);
			m = SCTP_BUF_NEXT(m);
		}
		/* mat still points at the head of the caller's chain */
		ca->m = mat;
	}
	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
	    SCTP_ASOC_ANY_STATE,
	    (void *)ca, 0,
	    sctp_sendall_completes, inp, 1);
	if (ret) {
		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
		SCTP_FREE(ca, SCTP_M_COPYAL);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	return (0);
}


/*
 * Drop any COOKIE-ECHO chunks still sitting on the control send queue;
 * they are obsolete once a newer cookie is produced.
 */
void
sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

chk = TAILQ_FIRST(&asoc->control_send_queue); 6351 while (chk) { 6352 nchk = TAILQ_NEXT(chk, sctp_next); 6353 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 6354 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 6355 if (chk->data) { 6356 sctp_m_freem(chk->data); 6357 chk->data = NULL; 6358 } 6359 asoc->ctrl_queue_cnt--; 6360 sctp_free_a_chunk(stcb, chk); 6361 } 6362 chk = nchk; 6363 } 6364 } 6365 6366 void 6367 sctp_toss_old_asconf(struct sctp_tcb *stcb) 6368 { 6369 struct sctp_association *asoc; 6370 struct sctp_tmit_chunk *chk, *chk_tmp; 6371 struct sctp_asconf_chunk *acp; 6372 6373 asoc = &stcb->asoc; 6374 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL; 6375 chk = chk_tmp) { 6376 /* get next chk */ 6377 chk_tmp = TAILQ_NEXT(chk, sctp_next); 6378 /* find SCTP_ASCONF chunk in queue */ 6379 if (chk->rec.chunk_id.id == SCTP_ASCONF) { 6380 if (chk->data) { 6381 acp = mtod(chk->data, struct sctp_asconf_chunk *); 6382 if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) { 6383 /* Not Acked yet */ 6384 break; 6385 } 6386 } 6387 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); 6388 if (chk->data) { 6389 sctp_m_freem(chk->data); 6390 chk->data = NULL; 6391 } 6392 asoc->ctrl_queue_cnt--; 6393 sctp_free_a_chunk(stcb, chk); 6394 } 6395 } 6396 } 6397 6398 6399 static void 6400 sctp_clean_up_datalist(struct sctp_tcb *stcb, 6401 6402 struct sctp_association *asoc, 6403 struct sctp_tmit_chunk **data_list, 6404 int bundle_at, 6405 struct sctp_nets *net) 6406 { 6407 int i; 6408 struct sctp_tmit_chunk *tp1; 6409 6410 for (i = 0; i < bundle_at; i++) { 6411 /* off of the send queue */ 6412 if (i) { 6413 /* 6414 * Any chunk NOT 0 you zap the time chunk 0 gets 6415 * zapped or set based on if a RTO measurment is 6416 * needed. 
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		TAILQ_REMOVE(&asoc->send_queue,
		    data_list[i],
		    sctp_next);
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
		    data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
			struct sctp_tmit_chunk *tpp;

			/*
			 * need to move back: walk backwards from the tail
			 * until we find the chunk this TSN belongs after,
			 * keeping sent_queue sorted by (wrapping) TSN.
			 */
	back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				/* smaller than everything - new head */
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (compare_with_wrap(tp1->rec.data.TSN_seq,
			    data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* common case: monotonically growing TSNs */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		asoc->send_queue_cnt--;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			    data_list[i]->whoTo->flight_size,
			    data_list[i]->book_size,
			    (uintptr_t) data_list[i]->whoTo,
			    data_list[i]->rec.data.TSN_seq);
		}
		/* the chunk now counts toward per-net and total flight */
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
		}
		/* charge the chunk (plus per-chunk overhead) to peer rwnd */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
}

/*
 * sctp_clean_up_ctl() - drop "stray" one-shot control chunks left on the
 * control send queue (SACKs, heartbeats, shutdown-family chunks, errors,
 * cookie-acks, CWR, ASCONF-ACK).  A STREAM_RESET chunk survives only if
 * it is the association's outstanding str_reset request; anything else is
 * unlinked and freed.
 */
static void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	for (chk = TAILQ_FIRST(&asoc->control_send_queue);
	    chk; chk = nchk) {
		nchk = TAILQ_NEXT(chk, sctp_next);
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
	clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if (chk != asoc->str_reset) {
				goto clean_up_anyway;
			}
		}
	}
}


/*
 * Decide how many bytes of an incomplete (still-filling) message may be
 * moved to the send queue now.  Returns the byte count to take, or 0 to
 * wait for more data rather than emit a sub-optimal fragment.
 */
static int
sctp_can_we_split_this(struct sctp_tcb *stcb,
    uint32_t length,
    uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
{
	/*
	 * Make a decision on if I should split a msg into multiple parts.
	 * This is only asked of incomplete messages.
	 */
	if (eeor_on) {
		/*
		 * If we are doing EEOR we need to always send it if its the
		 * entire thing, since it might be all the guy is putting in
		 * the hopper.
		 */
		if (goal_mtu >= length) {
			/*-
			 * If we have data outstanding,
			 * we get another chance when the sack
			 * arrives to transmit - wait for more data
			 */
			if (stcb->asoc.total_flight == 0) {
				/*
				 * If nothing is in flight, we zero the
				 * packet counter.
				 */
				return (length);
			}
			return (0);

		} else {
			/* You can fill the rest */
			return (goal_mtu);
		}
	}
	/*-
	 * For those strange folk that make the send buffer
	 * smaller than our fragmentation point, we can't
	 * get a full msg in so we have to allow splitting.
	 */
	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
		return (length);
	}
	if ((length <= goal_mtu) ||
	    ((length - goal_mtu) < sctp_min_residual)) {
		/* Sub-optimial residual don't split in non-eeor mode. */
		return (0);
	}
	/*
	 * If we reach here length is larger than the goal_mtu. Do we wish
	 * to split it for the sake of packet putting together?
	 */
	if (goal_mtu >= min(sctp_min_split_point, frag_point)) {
		/* Its ok to split it */
		return (min(goal_mtu, frag_point));
	}
	/* Nope, can't split */
	return (0);

}

/*
 * Move up to goal_mtu bytes of the first pending message on stream
 * 'strq' from the stream queue onto the association's send queue,
 * building a DATA chunk around it (mbuf chain stolen or copied, chunk
 * header prepended, TSN assigned, pad applied).
 *
 * Out-parameters: *locked is set when the caller must stay on this
 * stream (a message was only partially taken), *giveup when the stream
 * has nothing usable right now, *bail on an mbuf/chunk allocation
 * failure.  Returns the number of payload bytes moved (0 if nothing was
 * taken).  Takes the TCB send lock as needed and always drops it at
 * 'out_of' before returning; caller holds the TCB lock.
 */
static uint32_t
sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sctp_stream_out *strq,
    uint32_t goal_mtu,
    uint32_t frag_point,
    int *locked,
    int *giveup,
    int eeor_mode,
    int *bail)
{
	/* Move from the stream to the send_queue keeping track of the total */
	struct sctp_association *asoc;
	struct sctp_stream_queue_pending *sp;
	struct sctp_tmit_chunk *chk;
	struct sctp_data_chunk *dchkh;
	uint32_t to_move, length;
	uint8_t rcv_flags = 0;
	uint8_t some_taken;
	uint8_t send_lock_up = 0;	/* 1 while we hold the send lock */

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
one_more_time:
	/* sa_ignore FREED_MEMORY */
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		/* re-check under the send lock before declaring it empty */
		*locked = 0;
		if (send_lock_up == 0) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp) {
			goto one_more_time;
		}
		if (strq->last_msg_incomplete) {
			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
			    strq->stream_no,
			    strq->last_msg_incomplete);
			strq->last_msg_incomplete = 0;
		}
		to_move = 0;
		if (send_lock_up) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			send_lock_up = 0;
		}
		goto out_of;
	}
	if ((sp->msg_is_complete) && (sp->length == 0)) {
		if (sp->sender_all_done) {
			/*
			 * We are doing differed cleanup. Last time through
			 * when we took all the data the sender_all_done was
			 * not set.
6640 */ 6641 if (sp->put_last_out == 0) { 6642 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 6643 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n", 6644 sp->sender_all_done, 6645 sp->length, 6646 sp->msg_is_complete, 6647 sp->put_last_out, 6648 send_lock_up); 6649 } 6650 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) { 6651 SCTP_TCB_SEND_LOCK(stcb); 6652 send_lock_up = 1; 6653 } 6654 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 6655 TAILQ_REMOVE(&strq->outqueue, sp, next); 6656 sctp_free_remote_addr(sp->net); 6657 if (sp->data) { 6658 sctp_m_freem(sp->data); 6659 sp->data = NULL; 6660 } 6661 sctp_free_a_strmoq(stcb, sp); 6662 6663 /* we can't be locked to it */ 6664 *locked = 0; 6665 stcb->asoc.locked_on_sending = NULL; 6666 if (send_lock_up) { 6667 SCTP_TCB_SEND_UNLOCK(stcb); 6668 send_lock_up = 0; 6669 } 6670 /* back to get the next msg */ 6671 goto one_more_time; 6672 } else { 6673 /* 6674 * sender just finished this but still holds a 6675 * reference 6676 */ 6677 *locked = 1; 6678 *giveup = 1; 6679 to_move = 0; 6680 goto out_of; 6681 } 6682 } else { 6683 /* is there some to get */ 6684 if (sp->length == 0) { 6685 /* no */ 6686 *locked = 1; 6687 *giveup = 1; 6688 to_move = 0; 6689 goto out_of; 6690 } 6691 } 6692 some_taken = sp->some_taken; 6693 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 6694 sp->msg_is_complete = 1; 6695 } 6696 re_look: 6697 length = sp->length; 6698 if (sp->msg_is_complete) { 6699 /* The message is complete */ 6700 to_move = min(length, frag_point); 6701 if (to_move == length) { 6702 /* All of it fits in the MTU */ 6703 if (sp->some_taken) { 6704 rcv_flags |= SCTP_DATA_LAST_FRAG; 6705 sp->put_last_out = 1; 6706 } else { 6707 rcv_flags |= SCTP_DATA_NOT_FRAG; 6708 sp->put_last_out = 1; 6709 } 6710 } else { 6711 /* Not all of it fits, we fragment */ 6712 if (sp->some_taken == 0) { 6713 rcv_flags |= SCTP_DATA_FIRST_FRAG; 6714 } 6715 sp->some_taken = 1; 6716 } 6717 } else { 6718 
to_move = sctp_can_we_split_this(stcb, length, goal_mtu, 6719 frag_point, eeor_mode); 6720 if (to_move) { 6721 /*- 6722 * We use a snapshot of length in case it 6723 * is expanding during the compare. 6724 */ 6725 uint32_t llen; 6726 6727 llen = length; 6728 if (to_move >= llen) { 6729 to_move = llen; 6730 if (send_lock_up == 0) { 6731 /*- 6732 * We are taking all of an incomplete msg 6733 * thus we need a send lock. 6734 */ 6735 SCTP_TCB_SEND_LOCK(stcb); 6736 send_lock_up = 1; 6737 if (sp->msg_is_complete) { 6738 /* 6739 * the sender finished the 6740 * msg 6741 */ 6742 goto re_look; 6743 } 6744 } 6745 } 6746 if (sp->some_taken == 0) { 6747 rcv_flags |= SCTP_DATA_FIRST_FRAG; 6748 sp->some_taken = 1; 6749 } 6750 } else { 6751 /* Nothing to take. */ 6752 if (sp->some_taken) { 6753 *locked = 1; 6754 } 6755 *giveup = 1; 6756 to_move = 0; 6757 goto out_of; 6758 } 6759 } 6760 6761 /* If we reach here, we can copy out a chunk */ 6762 sctp_alloc_a_chunk(stcb, chk); 6763 if (chk == NULL) { 6764 /* No chunk memory */ 6765 *giveup = 1; 6766 to_move = 0; 6767 goto out_of; 6768 } 6769 /* 6770 * Setup for unordered if needed by looking at the user sent info 6771 * flags. 
6772 */ 6773 if (sp->sinfo_flags & SCTP_UNORDERED) { 6774 rcv_flags |= SCTP_DATA_UNORDERED; 6775 } 6776 /* clear out the chunk before setting up */ 6777 memset(chk, 0, sizeof(*chk)); 6778 chk->rec.data.rcv_flags = rcv_flags; 6779 6780 if (to_move >= length) { 6781 /* we think we can steal the whole thing */ 6782 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) { 6783 SCTP_TCB_SEND_LOCK(stcb); 6784 send_lock_up = 1; 6785 } 6786 if (to_move < sp->length) { 6787 /* bail, it changed */ 6788 goto dont_do_it; 6789 } 6790 chk->data = sp->data; 6791 chk->last_mbuf = sp->tail_mbuf; 6792 /* register the stealing */ 6793 sp->data = sp->tail_mbuf = NULL; 6794 } else { 6795 struct mbuf *m; 6796 6797 dont_do_it: 6798 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT); 6799 chk->last_mbuf = NULL; 6800 if (chk->data == NULL) { 6801 sp->some_taken = some_taken; 6802 sctp_free_a_chunk(stcb, chk); 6803 *bail = 1; 6804 to_move = 0; 6805 goto out_of; 6806 } 6807 #ifdef SCTP_MBUF_LOGGING 6808 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 6809 struct mbuf *mat; 6810 6811 mat = chk->data; 6812 while (mat) { 6813 if (SCTP_BUF_IS_EXTENDED(mat)) { 6814 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 6815 } 6816 mat = SCTP_BUF_NEXT(mat); 6817 } 6818 } 6819 #endif 6820 /* Pull off the data */ 6821 m_adj(sp->data, to_move); 6822 /* Now lets work our way down and compact it */ 6823 m = sp->data; 6824 while (m && (SCTP_BUF_LEN(m) == 0)) { 6825 sp->data = SCTP_BUF_NEXT(m); 6826 SCTP_BUF_NEXT(m) = NULL; 6827 if (sp->tail_mbuf == m) { 6828 /*- 6829 * Freeing tail? TSNH since 6830 * we supposedly were taking less 6831 * than the sp->length. 6832 */ 6833 #ifdef INVARIANTS 6834 panic("Huh, freing tail? - TSNH"); 6835 #else 6836 SCTP_PRINTF("Huh, freeing tail? 
- TSNH\n"); 6837 sp->tail_mbuf = sp->data = NULL; 6838 sp->length = 0; 6839 #endif 6840 6841 } 6842 sctp_m_free(m); 6843 m = sp->data; 6844 } 6845 } 6846 if (SCTP_BUF_IS_EXTENDED(chk->data)) { 6847 chk->copy_by_ref = 1; 6848 } else { 6849 chk->copy_by_ref = 0; 6850 } 6851 /* 6852 * get last_mbuf and counts of mb useage This is ugly but hopefully 6853 * its only one mbuf. 6854 */ 6855 if (chk->last_mbuf == NULL) { 6856 chk->last_mbuf = chk->data; 6857 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 6858 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 6859 } 6860 } 6861 if (to_move > length) { 6862 /*- This should not happen either 6863 * since we always lower to_move to the size 6864 * of sp->length if its larger. 6865 */ 6866 #ifdef INVARIANTS 6867 panic("Huh, how can to_move be larger?"); 6868 #else 6869 SCTP_PRINTF("Huh, how can to_move be larger?\n"); 6870 sp->length = 0; 6871 #endif 6872 } else { 6873 atomic_subtract_int(&sp->length, to_move); 6874 } 6875 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) { 6876 /* Not enough room for a chunk header, get some */ 6877 struct mbuf *m; 6878 6879 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA); 6880 if (m == NULL) { 6881 /* 6882 * we're in trouble here. _PREPEND below will free 6883 * all the data if there is no leading space, so we 6884 * must put the data back and restore. 
6885 */ 6886 if (send_lock_up == 0) { 6887 SCTP_TCB_SEND_LOCK(stcb); 6888 send_lock_up = 1; 6889 } 6890 if (chk->data == NULL) { 6891 /* unsteal the data */ 6892 sp->data = chk->data; 6893 sp->tail_mbuf = chk->last_mbuf; 6894 } else { 6895 struct mbuf *m_tmp; 6896 6897 /* reassemble the data */ 6898 m_tmp = sp->data; 6899 sp->data = chk->data; 6900 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp; 6901 } 6902 sp->some_taken = some_taken; 6903 atomic_add_int(&sp->length, to_move); 6904 chk->data = NULL; 6905 *bail = 1; 6906 sctp_free_a_chunk(stcb, chk); 6907 to_move = 0; 6908 goto out_of; 6909 } else { 6910 SCTP_BUF_LEN(m) = 0; 6911 SCTP_BUF_NEXT(m) = chk->data; 6912 chk->data = m; 6913 M_ALIGN(chk->data, 4); 6914 } 6915 } 6916 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT); 6917 if (chk->data == NULL) { 6918 /* HELP, TSNH since we assured it would not above? */ 6919 #ifdef INVARIANTS 6920 panic("prepend failes HELP?"); 6921 #else 6922 SCTP_PRINTF("prepend fails HELP?\n"); 6923 sctp_free_a_chunk(stcb, chk); 6924 #endif 6925 *bail = 1; 6926 to_move = 0; 6927 goto out_of; 6928 } 6929 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 6930 chk->book_size = chk->send_size = (to_move + 6931 sizeof(struct sctp_data_chunk)); 6932 chk->book_size_scale = 0; 6933 chk->sent = SCTP_DATAGRAM_UNSENT; 6934 6935 chk->flags = 0; 6936 chk->asoc = &stcb->asoc; 6937 chk->pad_inplace = 0; 6938 chk->no_fr_allowed = 0; 6939 chk->rec.data.stream_seq = sp->strseq; 6940 chk->rec.data.stream_number = sp->stream; 6941 chk->rec.data.payloadtype = sp->ppid; 6942 chk->rec.data.context = sp->context; 6943 chk->rec.data.doing_fast_retransmit = 0; 6944 chk->rec.data.ect_nonce = 0; /* ECN Nonce */ 6945 6946 chk->rec.data.timetodrop = sp->ts; 6947 chk->flags = sp->act_flags; 6948 chk->addr_over = sp->addr_over; 6949 6950 chk->whoTo = net; 6951 atomic_add_int(&chk->whoTo->ref_count, 1); 6952 6953 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 6954 if 
(sctp_logging_level & SCTP_LOG_AT_SEND_2_OUTQ) { 6955 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 6956 (uintptr_t) stcb, sp->length, 6957 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 6958 chk->rec.data.TSN_seq); 6959 } 6960 dchkh = mtod(chk->data, struct sctp_data_chunk *); 6961 /* 6962 * Put the rest of the things in place now. Size was done earlier in 6963 * previous loop prior to padding. 6964 */ 6965 6966 #ifdef SCTP_ASOCLOG_OF_TSNS 6967 SCTP_TCB_LOCK_ASSERT(stcb); 6968 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 6969 asoc->tsn_out_at = 0; 6970 asoc->tsn_out_wrapped = 1; 6971 } 6972 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq; 6973 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number; 6974 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq; 6975 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size; 6976 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags; 6977 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb; 6978 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at; 6979 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2; 6980 asoc->tsn_out_at++; 6981 #endif 6982 6983 dchkh->ch.chunk_type = SCTP_DATA; 6984 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 6985 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 6986 dchkh->dp.stream_id = htons(strq->stream_no); 6987 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 6988 dchkh->dp.protocol_id = chk->rec.data.payloadtype; 6989 dchkh->ch.chunk_length = htons(chk->send_size); 6990 /* Now advance the chk->send_size by the actual pad needed. 
*/ 6991 if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 6992 /* need a pad */ 6993 struct mbuf *lm; 6994 int pads; 6995 6996 pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 6997 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 6998 chk->pad_inplace = 1; 6999 } 7000 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 7001 /* pad added an mbuf */ 7002 chk->last_mbuf = lm; 7003 } 7004 chk->send_size += pads; 7005 } 7006 /* We only re-set the policy if it is on */ 7007 if (sp->pr_sctp_on) { 7008 sctp_set_prsctp_policy(stcb, sp); 7009 asoc->pr_sctp_cnt++; 7010 chk->pr_sctp_on = 1; 7011 } else { 7012 chk->pr_sctp_on = 0; 7013 } 7014 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) { 7015 /* All done pull and kill the message */ 7016 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 7017 if (sp->put_last_out == 0) { 7018 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n"); 7019 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n", 7020 sp->sender_all_done, 7021 sp->length, 7022 sp->msg_is_complete, 7023 sp->put_last_out, 7024 send_lock_up); 7025 } 7026 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) { 7027 SCTP_TCB_SEND_LOCK(stcb); 7028 send_lock_up = 1; 7029 } 7030 TAILQ_REMOVE(&strq->outqueue, sp, next); 7031 sctp_free_remote_addr(sp->net); 7032 if (sp->data) { 7033 sctp_m_freem(sp->data); 7034 sp->data = NULL; 7035 } 7036 sctp_free_a_strmoq(stcb, sp); 7037 7038 /* we can't be locked to it */ 7039 *locked = 0; 7040 stcb->asoc.locked_on_sending = NULL; 7041 } else { 7042 /* more to go, we are locked */ 7043 *locked = 1; 7044 } 7045 asoc->chunks_on_out_queue++; 7046 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 7047 asoc->send_queue_cnt++; 7048 out_of: 7049 if (send_lock_up) { 7050 SCTP_TCB_SEND_UNLOCK(stcb); 7051 send_lock_up = 0; 7052 } 7053 return (to_move); 7054 } 7055 7056 7057 static struct sctp_stream_out * 7058 sctp_select_a_stream(struct sctp_tcb *stcb, struct 
sctp_association *asoc)
{
	struct sctp_stream_out *strq;

	/* Find the next stream to use */
	if (asoc->last_out_stream == NULL) {
		strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
		if (asoc->last_out_stream == NULL) {
			/* huh nothing on the wheel, TSNH */
			return (NULL);
		}
		goto done_it;
	}
	strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
done_it:
	if (strq == NULL) {
		/* wrap back to the front of the wheel */
		strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
	}
	/* Save off the last stream */
	asoc->last_out_stream = strq;
	return (strq);

}


/*
 * Pull roughly one MTU's worth of user data destined for 'net' out of
 * the stream queues onto the association's send queue.  Honours a stream
 * we are locked onto (locked_on_sending, message mid-flight) and
 * otherwise round-robins across the out-wheel via sctp_select_a_stream().
 * Streams drained empty are removed from the wheel.  *quit_now is set
 * when a move bailed (memory failure) so the caller stops filling.
 */
static void
sctp_fill_outqueue(struct sctp_tcb *stcb,
    struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *strq, *strqn, *strqt;
	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
	int locked, giveup;
	struct sctp_stream_queue_pending *sp;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
#ifdef INET6
	if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
		goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
	} else {
		/* ?? not sure what else to do */
		goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
	}
#else
	goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
#endif
	/* Need an allowance for the data chunk header too */
	goal_mtu -= sizeof(struct sctp_data_chunk);

	/* must make even word boundary */
	goal_mtu &= 0xfffffffc;
	if (asoc->locked_on_sending) {
		/* We are stuck on one stream until the message completes. */
		strqn = strq = asoc->locked_on_sending;
		locked = 1;
	} else {
		strqn = strq = sctp_select_a_stream(stcb, asoc);
		locked = 0;
	}

	/* strqn marks where the round-robin scan started (circle detect) */
	while ((goal_mtu > 0) && strq) {
		sp = TAILQ_FIRST(&strq->outqueue);
		/*
		 * If CMT is off, we must validate that the stream in
		 * question has the first item pointed towards are network
		 * destionation requested by the caller. Note that if we
		 * turn out to be locked to a stream (assigning TSN's then
		 * we must stop, since we cannot look for another stream
		 * with data to send to that destination). In CMT's case, by
		 * skipping this check, we will send one data packet towards
		 * the requested net.
		 */
		if (sp == NULL) {
			break;
		}
		if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
			/* none for this network */
			if (locked) {
				break;
			} else {
				strq = sctp_select_a_stream(stcb, asoc);
				if (strq == NULL)
					/* none left */
					break;
				if (strqn == strq) {
					/* I have circled */
					break;
				}
				continue;
			}
		}
		giveup = 0;
		bail = 0;
		moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
		    &giveup, eeor_mode, &bail);
		asoc->last_out_stream = strq;
		if (locked) {
			asoc->locked_on_sending = strq;
			if ((moved_how_much == 0) || (giveup) || bail)
				/* no more to move for now */
				break;
		} else {
			asoc->locked_on_sending = NULL;
			/* pick the follow-on stream before possibly
			 * removing the drained one from the wheel */
			strqt = sctp_select_a_stream(stcb, asoc);
			if (TAILQ_FIRST(&strq->outqueue) == NULL) {
				if (strq == strqn) {
					/* Must move start to next one */
					strqn = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
					if (strqn == NULL) {
						strqn = TAILQ_FIRST(&asoc->out_wheel);
						if (strqn == NULL) {
							break;
						}
					}
				}
				sctp_remove_from_wheel(stcb, asoc, strq);
			}
			if ((giveup) || bail) {
				break;
			}
			strq = strqt;
			if (strq == NULL) {
				break;
			}
		}
		total_moved += moved_how_much;
		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
		goal_mtu &= 0xfffffffc;
	}
	if (bail)
		*quit_now = 1;

	if (total_moved == 0) {
		if ((sctp_cmt_on_off == 0) &&
		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for primary network net */
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (sctp_cmt_on_off) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Mark every queued ECN_ECHO control chunk as unsent so it will be
 * (re)bundled on the next output pass.
 */
void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}

/*
 * Re-home queued DATA: find an alternate destination for 'net' (using
 * the CMT-PF selector when CMT PF is enabled) and, if a distinct
 * reachable alternate exists, move every send-queue chunk addressed to
 * 'net' over to it.
 */
static void
sctp_move_to_an_alt(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *a_net;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/*
	 * JRS 5/14/07 - If CMT PF is turned on, find an alternate
	 * destination using the PF algorithm for finding alternate
	 * destinations.
	 */
	if (sctp_cmt_on_off && sctp_cmt_pf) {
		a_net = sctp_find_alternate_net(stcb, net, 2);
	} else {
		a_net = sctp_find_alternate_net(stcb, net, 0);
	}
	if ((a_net != net) &&
	    ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
		/*
		 * We only proceed if a valid alternate is found that is not
		 * this one and is reachable. Here we must move all chunks
		 * queued in the send queue off of the destination address
		 * to our alternate.
7241 */ 7242 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 7243 if (chk->whoTo == net) { 7244 /* Move the chunk to our alternate */ 7245 sctp_free_remote_addr(chk->whoTo); 7246 chk->whoTo = a_net; 7247 atomic_add_int(&a_net->ref_count, 1); 7248 } 7249 } 7250 } 7251 } 7252 7253 int 7254 sctp_med_chunk_output(struct sctp_inpcb *inp, 7255 struct sctp_tcb *stcb, 7256 struct sctp_association *asoc, 7257 int *num_out, 7258 int *reason_code, 7259 int control_only, int *cwnd_full, int from_where, 7260 struct timeval *now, int *now_filled, int frag_point, int so_locked 7261 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 7262 SCTP_UNUSED 7263 #endif 7264 ) 7265 { 7266 /* 7267 * Ok this is the generic chunk service queue. we must do the 7268 * following: - Service the stream queue that is next, moving any 7269 * message (note I must get a complete message i.e. FIRST/MIDDLE and 7270 * LAST to the out queue in one pass) and assigning TSN's - Check to 7271 * see if the cwnd/rwnd allows any output, if so we go ahead and 7272 * fomulate and send the low level chunks. Making sure to combine 7273 * any control in the control chunk queue also. 7274 */ 7275 struct sctp_nets *net; 7276 struct mbuf *outchain, *endoutchain; 7277 struct sctp_tmit_chunk *chk, *nchk; 7278 struct sctphdr *shdr; 7279 7280 /* temp arrays for unlinking */ 7281 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 7282 int no_fragmentflg, error; 7283 int one_chunk, hbflag, skip_data_for_this_net; 7284 int asconf, cookie, no_out_cnt; 7285 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode; 7286 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 7287 struct sctp_nets *start_at, *old_startat = NULL, *send_start_at; 7288 int tsns_sent = 0; 7289 uint32_t auth_offset = 0; 7290 struct sctp_auth_chunk *auth = NULL; 7291 7292 /* 7293 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the 7294 * destination. 
7295 */ 7296 int pf_hbflag = 0; 7297 int quit_now = 0; 7298 7299 *num_out = 0; 7300 cwnd_full_ind = 0; 7301 7302 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 7303 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || 7304 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 7305 eeor_mode = 1; 7306 } else { 7307 eeor_mode = 0; 7308 } 7309 ctl_cnt = no_out_cnt = asconf = cookie = 0; 7310 /* 7311 * First lets prime the pump. For each destination, if there is room 7312 * in the flight size, attempt to pull an MTU's worth out of the 7313 * stream queues into the general send_queue 7314 */ 7315 #ifdef SCTP_AUDITING_ENABLED 7316 sctp_audit_log(0xC2, 2); 7317 #endif 7318 SCTP_TCB_LOCK_ASSERT(stcb); 7319 hbflag = 0; 7320 if ((control_only) || (asoc->stream_reset_outstanding)) 7321 no_data_chunks = 1; 7322 else 7323 no_data_chunks = 0; 7324 7325 /* Nothing to possible to send? */ 7326 if (TAILQ_EMPTY(&asoc->control_send_queue) && 7327 TAILQ_EMPTY(&asoc->asconf_send_queue) && 7328 TAILQ_EMPTY(&asoc->send_queue) && 7329 TAILQ_EMPTY(&asoc->out_wheel)) { 7330 *reason_code = 9; 7331 return (0); 7332 } 7333 if (asoc->peers_rwnd == 0) { 7334 /* No room in peers rwnd */ 7335 *cwnd_full = 1; 7336 *reason_code = 1; 7337 if (asoc->total_flight > 0) { 7338 /* we are allowed one chunk in flight */ 7339 no_data_chunks = 1; 7340 } 7341 } 7342 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) { 7343 if (sctp_cmt_on_off) { 7344 /* 7345 * for CMT we start at the next one past the one we 7346 * last added data to. 7347 */ 7348 if (TAILQ_FIRST(&asoc->send_queue) != NULL) { 7349 goto skip_the_fill_from_streams; 7350 } 7351 if (asoc->last_net_data_came_from) { 7352 net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next); 7353 if (net == NULL) { 7354 net = TAILQ_FIRST(&asoc->nets); 7355 } 7356 } else { 7357 /* back to start */ 7358 net = TAILQ_FIRST(&asoc->nets); 7359 } 7360 7361 /* 7362 * JRI-TODO: CMT-MPI. 
Simply set the first 7363 * destination (net) to be optimized for the next 7364 * message to be pulled out of the outwheel. 1. peek 7365 * at outwheel 2. If large message, set net = 7366 * highest_cwnd 3. If small message, set net = 7367 * lowest rtt 7368 */ 7369 } else { 7370 net = asoc->primary_destination; 7371 if (net == NULL) { 7372 /* TSNH */ 7373 net = TAILQ_FIRST(&asoc->nets); 7374 } 7375 } 7376 start_at = net; 7377 7378 one_more_time: 7379 for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 7380 net->window_probe = 0; 7381 if (old_startat && (old_startat == net)) { 7382 break; 7383 } 7384 /* 7385 * JRI: if dest is unreachable or unconfirmed, do 7386 * not send data to it 7387 */ 7388 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) || (net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 7389 continue; 7390 } 7391 /* 7392 * JRI: if dest is in PF state, do not send data to 7393 * it 7394 */ 7395 if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF)) { 7396 continue; 7397 } 7398 if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) { 7399 /* nothing can be in queue for this guy */ 7400 continue; 7401 } 7402 if (net->flight_size >= net->cwnd) { 7403 /* skip this network, no room */ 7404 cwnd_full_ind++; 7405 continue; 7406 } 7407 /* 7408 * JRI : this for loop we are in takes in each net, 7409 * if its's got space in cwnd and has data sent to 7410 * it (when CMT is off) then it calls 7411 * sctp_fill_outqueue for the net. This gets data on 7412 * the send queue for that network. 7413 * 7414 * In sctp_fill_outqueue TSN's are assigned and data is 7415 * copied out of the stream buffers. Note mostly 7416 * copy by reference (we hope). 
7417 */ 7418 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 7419 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 7420 } 7421 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now); 7422 if (quit_now) { 7423 /* memory alloc failure */ 7424 no_data_chunks = 1; 7425 goto skip_the_fill_from_streams; 7426 } 7427 } 7428 if (start_at != TAILQ_FIRST(&asoc->nets)) { 7429 /* got to pick up the beginning stuff. */ 7430 old_startat = start_at; 7431 start_at = net = TAILQ_FIRST(&asoc->nets); 7432 if (old_startat) 7433 goto one_more_time; 7434 } 7435 } 7436 skip_the_fill_from_streams: 7437 *cwnd_full = cwnd_full_ind; 7438 7439 /* now service each destination and send out what we can for it */ 7440 /* Nothing to send? */ 7441 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) && 7442 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) && 7443 (TAILQ_FIRST(&asoc->send_queue) == NULL)) { 7444 *reason_code = 8; 7445 return (0); 7446 } 7447 if (no_data_chunks) { 7448 chk = TAILQ_FIRST(&asoc->asconf_send_queue); 7449 if (chk == NULL) 7450 chk = TAILQ_FIRST(&asoc->control_send_queue); 7451 } else { 7452 chk = TAILQ_FIRST(&asoc->send_queue); 7453 } 7454 if (chk) { 7455 send_start_at = chk->whoTo; 7456 } else { 7457 send_start_at = TAILQ_FIRST(&asoc->nets); 7458 } 7459 old_startat = NULL; 7460 again_one_more_time: 7461 for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 7462 /* how much can we send? */ 7463 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 7464 if (old_startat && (old_startat == net)) { 7465 /* through list ocmpletely. */ 7466 break; 7467 } 7468 tsns_sent = 0; 7469 if (net->ref_count < 2) { 7470 /* 7471 * Ref-count of 1 so we cannot have data or control 7472 * queued to this address. Skip it. 
7473 */ 7474 continue; 7475 } 7476 ctl_cnt = bundle_at = 0; 7477 endoutchain = outchain = NULL; 7478 no_fragmentflg = 1; 7479 one_chunk = 0; 7480 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 7481 skip_data_for_this_net = 1; 7482 } else { 7483 skip_data_for_this_net = 0; 7484 } 7485 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { 7486 /* 7487 * if we have a route and an ifp check to see if we 7488 * have room to send to this guy 7489 */ 7490 struct ifnet *ifp; 7491 7492 ifp = net->ro.ro_rt->rt_ifp; 7493 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { 7494 SCTP_STAT_INCR(sctps_ifnomemqueued); 7495 if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) { 7496 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); 7497 } 7498 continue; 7499 } 7500 } 7501 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 7502 case AF_INET: 7503 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 7504 break; 7505 #ifdef INET6 7506 case AF_INET6: 7507 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 7508 break; 7509 #endif 7510 default: 7511 /* TSNH */ 7512 mtu = net->mtu; 7513 break; 7514 } 7515 mx_mtu = mtu; 7516 to_out = 0; 7517 if (mtu > asoc->peers_rwnd) { 7518 if (asoc->total_flight > 0) { 7519 /* We have a packet in flight somewhere */ 7520 r_mtu = asoc->peers_rwnd; 7521 } else { 7522 /* We are always allowed to send one MTU out */ 7523 one_chunk = 1; 7524 r_mtu = mtu; 7525 } 7526 } else { 7527 r_mtu = mtu; 7528 } 7529 /************************/ 7530 /* ASCONF transmission */ 7531 /************************/ 7532 /* Now first lets go through the asconf queue */ 7533 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); 7534 chk; chk = nchk) { 7535 nchk = TAILQ_NEXT(chk, sctp_next); 7536 if (chk->rec.chunk_id.id != SCTP_ASCONF) { 7537 continue; 7538 } 7539 if (chk->whoTo != net) { 7540 /* 7541 * No, not sent to the network we are 7542 * looking at 7543 */ 7544 break; 7545 } 7546 if (chk->data == 
NULL) { 7547 break; 7548 } 7549 if (chk->sent != SCTP_DATAGRAM_UNSENT && 7550 chk->sent != SCTP_DATAGRAM_RESEND) { 7551 break; 7552 } 7553 /* 7554 * if no AUTH is yet included and this chunk 7555 * requires it, make sure to account for it. We 7556 * don't apply the size until the AUTH chunk is 7557 * actually added below in case there is no room for 7558 * this chunk. NOTE: we overload the use of "omtu" 7559 * here 7560 */ 7561 if ((auth == NULL) && 7562 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7563 stcb->asoc.peer_auth_chunks)) { 7564 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7565 } else 7566 omtu = 0; 7567 /* Here we do NOT factor the r_mtu */ 7568 if ((chk->send_size < (int)(mtu - omtu)) || 7569 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 7570 /* 7571 * We probably should glom the mbuf chain 7572 * from the chk->data for control but the 7573 * problem is it becomes yet one more level 7574 * of tracking to do if for some reason 7575 * output fails. Then I have got to 7576 * reconstruct the merged control chain.. el 7577 * yucko.. 
for now we take the easy way and 7578 * do the copy 7579 */ 7580 /* 7581 * Add an AUTH chunk, if chunk requires it 7582 * save the offset into the chain for AUTH 7583 */ 7584 if ((auth == NULL) && 7585 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7586 stcb->asoc.peer_auth_chunks))) { 7587 outchain = sctp_add_auth_chunk(outchain, 7588 &endoutchain, 7589 &auth, 7590 &auth_offset, 7591 stcb, 7592 chk->rec.chunk_id.id); 7593 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7594 } 7595 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 7596 (int)chk->rec.chunk_id.can_take_data, 7597 chk->send_size, chk->copy_by_ref); 7598 if (outchain == NULL) { 7599 *reason_code = 8; 7600 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 7601 return (ENOMEM); 7602 } 7603 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7604 /* update our MTU size */ 7605 if (mtu > (chk->send_size + omtu)) 7606 mtu -= (chk->send_size + omtu); 7607 else 7608 mtu = 0; 7609 to_out += (chk->send_size + omtu); 7610 /* Do clear IP_DF ? */ 7611 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7612 no_fragmentflg = 0; 7613 } 7614 if (chk->rec.chunk_id.can_take_data) 7615 chk->data = NULL; 7616 /* 7617 * set hb flag since we can use these for 7618 * RTO 7619 */ 7620 hbflag = 1; 7621 asconf = 1; 7622 /* 7623 * should sysctl this: don't bundle data 7624 * with ASCONF since it requires AUTH 7625 */ 7626 no_data_chunks = 1; 7627 chk->sent = SCTP_DATAGRAM_SENT; 7628 chk->snd_count++; 7629 if (mtu == 0) { 7630 /* 7631 * Ok we are out of room but we can 7632 * output without effecting the 7633 * flight size since this little guy 7634 * is a control only packet. 7635 */ 7636 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7637 /* 7638 * do NOT clear the asconf flag as 7639 * it is used to do appropriate 7640 * source address selection. 
7641 */ 7642 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT); 7643 if (outchain == NULL) { 7644 /* no memory */ 7645 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); 7646 error = ENOBUFS; 7647 *reason_code = 7; 7648 continue; 7649 } 7650 shdr = mtod(outchain, struct sctphdr *); 7651 shdr->src_port = inp->sctp_lport; 7652 shdr->dest_port = stcb->rport; 7653 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 7654 shdr->checksum = 0; 7655 auth_offset += sizeof(struct sctphdr); 7656 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 7657 (struct sockaddr *)&net->ro._l_addr, 7658 outchain, auth_offset, auth, 7659 no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) { 7660 if (error == ENOBUFS) { 7661 asoc->ifp_had_enobuf = 1; 7662 SCTP_STAT_INCR(sctps_lowlevelerr); 7663 } 7664 if (from_where == 0) { 7665 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7666 } 7667 if (*now_filled == 0) { 7668 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7669 *now_filled = 1; 7670 *now = net->last_sent_time; 7671 } else { 7672 net->last_sent_time = *now; 7673 } 7674 hbflag = 0; 7675 /* error, could not output */ 7676 if (error == EHOSTUNREACH) { 7677 /* 7678 * Destination went 7679 * unreachable 7680 * during this send 7681 */ 7682 sctp_move_to_an_alt(stcb, asoc, net); 7683 } 7684 *reason_code = 7; 7685 continue; 7686 } else 7687 asoc->ifp_had_enobuf = 0; 7688 if (*now_filled == 0) { 7689 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7690 *now_filled = 1; 7691 *now = net->last_sent_time; 7692 } else { 7693 net->last_sent_time = *now; 7694 } 7695 hbflag = 0; 7696 /* 7697 * increase the number we sent, if a 7698 * cookie is sent we don't tell them 7699 * any was sent out. 
7700 */ 7701 outchain = endoutchain = NULL; 7702 auth = NULL; 7703 auth_offset = 0; 7704 if (!no_out_cnt) 7705 *num_out += ctl_cnt; 7706 /* recalc a clean slate and setup */ 7707 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7708 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 7709 } else { 7710 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD); 7711 } 7712 to_out = 0; 7713 no_fragmentflg = 1; 7714 } 7715 } 7716 } 7717 /************************/ 7718 /* Control transmission */ 7719 /************************/ 7720 /* Now first lets go through the control queue */ 7721 for (chk = TAILQ_FIRST(&asoc->control_send_queue); 7722 chk; chk = nchk) { 7723 nchk = TAILQ_NEXT(chk, sctp_next); 7724 if (chk->whoTo != net) { 7725 /* 7726 * No, not sent to the network we are 7727 * looking at 7728 */ 7729 continue; 7730 } 7731 if (chk->data == NULL) { 7732 continue; 7733 } 7734 if (chk->sent != SCTP_DATAGRAM_UNSENT) { 7735 /* 7736 * It must be unsent. Cookies and ASCONF's 7737 * hang around but there timers will force 7738 * when marked for resend. 7739 */ 7740 continue; 7741 } 7742 /* 7743 * if no AUTH is yet included and this chunk 7744 * requires it, make sure to account for it. We 7745 * don't apply the size until the AUTH chunk is 7746 * actually added below in case there is no room for 7747 * this chunk. NOTE: we overload the use of "omtu" 7748 * here 7749 */ 7750 if ((auth == NULL) && 7751 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7752 stcb->asoc.peer_auth_chunks)) { 7753 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7754 } else 7755 omtu = 0; 7756 /* Here we do NOT factor the r_mtu */ 7757 if ((chk->send_size < (int)(mtu - omtu)) || 7758 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 7759 /* 7760 * We probably should glom the mbuf chain 7761 * from the chk->data for control but the 7762 * problem is it becomes yet one more level 7763 * of tracking to do if for some reason 7764 * output fails. Then I have got to 7765 * reconstruct the merged control chain.. 
el 7766 * yucko.. for now we take the easy way and 7767 * do the copy 7768 */ 7769 /* 7770 * Add an AUTH chunk, if chunk requires it 7771 * save the offset into the chain for AUTH 7772 */ 7773 if ((auth == NULL) && 7774 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7775 stcb->asoc.peer_auth_chunks))) { 7776 outchain = sctp_add_auth_chunk(outchain, 7777 &endoutchain, 7778 &auth, 7779 &auth_offset, 7780 stcb, 7781 chk->rec.chunk_id.id); 7782 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7783 } 7784 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 7785 (int)chk->rec.chunk_id.can_take_data, 7786 chk->send_size, chk->copy_by_ref); 7787 if (outchain == NULL) { 7788 *reason_code = 8; 7789 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 7790 return (ENOMEM); 7791 } 7792 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7793 /* update our MTU size */ 7794 if (mtu > (chk->send_size + omtu)) 7795 mtu -= (chk->send_size + omtu); 7796 else 7797 mtu = 0; 7798 to_out += (chk->send_size + omtu); 7799 /* Do clear IP_DF ? */ 7800 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7801 no_fragmentflg = 0; 7802 } 7803 if (chk->rec.chunk_id.can_take_data) 7804 chk->data = NULL; 7805 /* Mark things to be removed, if needed */ 7806 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 7807 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 7808 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 7809 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 7810 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 7811 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 7812 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 7813 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 7814 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 7815 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 7816 7817 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { 7818 hbflag = 1; 7819 /* 7820 * JRS 5/14/07 - Set the 7821 * flag to say a heartbeat 7822 * is being sent. 
7823 */ 7824 pf_hbflag = 1; 7825 } 7826 /* remove these chunks at the end */ 7827 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 7828 /* turn off the timer */ 7829 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 7830 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 7831 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); 7832 } 7833 } 7834 ctl_cnt++; 7835 } else { 7836 /* 7837 * Other chunks, since they have 7838 * timers running (i.e. COOKIE) we 7839 * just "trust" that it gets sent or 7840 * retransmitted. 7841 */ 7842 ctl_cnt++; 7843 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 7844 cookie = 1; 7845 no_out_cnt = 1; 7846 } 7847 chk->sent = SCTP_DATAGRAM_SENT; 7848 chk->snd_count++; 7849 } 7850 if (mtu == 0) { 7851 /* 7852 * Ok we are out of room but we can 7853 * output without effecting the 7854 * flight size since this little guy 7855 * is a control only packet. 7856 */ 7857 if (asconf) { 7858 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7859 /* 7860 * do NOT clear the asconf 7861 * flag as it is used to do 7862 * appropriate source 7863 * address selection. 
7864 */ 7865 } 7866 if (cookie) { 7867 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 7868 cookie = 0; 7869 } 7870 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT); 7871 if (outchain == NULL) { 7872 /* no memory */ 7873 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); 7874 error = ENOBUFS; 7875 goto error_out_again; 7876 } 7877 shdr = mtod(outchain, struct sctphdr *); 7878 shdr->src_port = inp->sctp_lport; 7879 shdr->dest_port = stcb->rport; 7880 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 7881 shdr->checksum = 0; 7882 auth_offset += sizeof(struct sctphdr); 7883 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 7884 (struct sockaddr *)&net->ro._l_addr, 7885 outchain, auth_offset, auth, 7886 no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) { 7887 if (error == ENOBUFS) { 7888 asoc->ifp_had_enobuf = 1; 7889 SCTP_STAT_INCR(sctps_lowlevelerr); 7890 } 7891 if (from_where == 0) { 7892 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7893 } 7894 error_out_again: 7895 /* error, could not output */ 7896 if (hbflag) { 7897 if (*now_filled == 0) { 7898 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7899 *now_filled = 1; 7900 *now = net->last_sent_time; 7901 } else { 7902 net->last_sent_time = *now; 7903 } 7904 hbflag = 0; 7905 } 7906 if (error == EHOSTUNREACH) { 7907 /* 7908 * Destination went 7909 * unreachable 7910 * during this send 7911 */ 7912 sctp_move_to_an_alt(stcb, asoc, net); 7913 } 7914 *reason_code = 7; 7915 continue; 7916 } else 7917 asoc->ifp_had_enobuf = 0; 7918 /* Only HB or ASCONF advances time */ 7919 if (hbflag) { 7920 if (*now_filled == 0) { 7921 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7922 *now_filled = 1; 7923 *now = net->last_sent_time; 7924 } else { 7925 net->last_sent_time = *now; 7926 } 7927 hbflag = 0; 7928 } 7929 /* 7930 * increase the number we sent, if a 7931 * cookie is sent we don't tell them 7932 * any was sent out. 
7933 */ 7934 outchain = endoutchain = NULL; 7935 auth = NULL; 7936 auth_offset = 0; 7937 if (!no_out_cnt) 7938 *num_out += ctl_cnt; 7939 /* recalc a clean slate and setup */ 7940 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7941 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 7942 } else { 7943 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD); 7944 } 7945 to_out = 0; 7946 no_fragmentflg = 1; 7947 } 7948 } 7949 } 7950 /*********************/ 7951 /* Data transmission */ 7952 /*********************/ 7953 /* 7954 * if AUTH for DATA is required and no AUTH has been added 7955 * yet, account for this in the mtu now... if no data can be 7956 * bundled, this adjustment won't matter anyways since the 7957 * packet will be going out... 7958 */ 7959 if ((auth == NULL) && 7960 sctp_auth_is_required_chunk(SCTP_DATA, 7961 stcb->asoc.peer_auth_chunks)) { 7962 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7963 } 7964 /* now lets add any data within the MTU constraints */ 7965 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 7966 case AF_INET: 7967 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) 7968 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 7969 else 7970 omtu = 0; 7971 break; 7972 #ifdef INET6 7973 case AF_INET6: 7974 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) 7975 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 7976 else 7977 omtu = 0; 7978 break; 7979 #endif 7980 default: 7981 /* TSNH */ 7982 omtu = 0; 7983 break; 7984 } 7985 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && (skip_data_for_this_net == 0)) || 7986 (cookie)) { 7987 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) { 7988 if (no_data_chunks) { 7989 /* let only control go out */ 7990 *reason_code = 1; 7991 break; 7992 } 7993 if (net->flight_size >= net->cwnd) { 7994 /* skip this net, no room for data */ 7995 *reason_code = 2; 7996 break; 7997 } 7998 nchk = TAILQ_NEXT(chk, sctp_next); 7999 if 
(chk->whoTo != net) { 8000 /* No, not sent to this net */ 8001 continue; 8002 } 8003 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 8004 /*- 8005 * strange, we have a chunk that is 8006 * to big for its destination and 8007 * yet no fragment ok flag. 8008 * Something went wrong when the 8009 * PMTU changed...we did not mark 8010 * this chunk for some reason?? I 8011 * will fix it here by letting IP 8012 * fragment it for now and printing 8013 * a warning. This really should not 8014 * happen ... 8015 */ 8016 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 8017 chk->send_size, mtu); 8018 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 8019 } 8020 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 8021 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 8022 /* ok we will add this one */ 8023 8024 /* 8025 * Add an AUTH chunk, if chunk 8026 * requires it, save the offset into 8027 * the chain for AUTH 8028 */ 8029 if ((auth == NULL) && 8030 (sctp_auth_is_required_chunk(SCTP_DATA, 8031 stcb->asoc.peer_auth_chunks))) { 8032 8033 outchain = sctp_add_auth_chunk(outchain, 8034 &endoutchain, 8035 &auth, 8036 &auth_offset, 8037 stcb, 8038 SCTP_DATA); 8039 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8040 } 8041 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 8042 chk->send_size, chk->copy_by_ref); 8043 if (outchain == NULL) { 8044 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 8045 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 8046 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8047 } 8048 *reason_code = 3; 8049 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8050 return (ENOMEM); 8051 } 8052 /* upate our MTU size */ 8053 /* Do clear IP_DF ? 
*/ 8054 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8055 no_fragmentflg = 0; 8056 } 8057 /* unsigned subtraction of mtu */ 8058 if (mtu > chk->send_size) 8059 mtu -= chk->send_size; 8060 else 8061 mtu = 0; 8062 /* unsigned subtraction of r_mtu */ 8063 if (r_mtu > chk->send_size) 8064 r_mtu -= chk->send_size; 8065 else 8066 r_mtu = 0; 8067 8068 to_out += chk->send_size; 8069 if ((to_out > mx_mtu) && no_fragmentflg) { 8070 #ifdef INVARIANTS 8071 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 8072 #else 8073 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 8074 mx_mtu, to_out); 8075 #endif 8076 } 8077 chk->window_probe = 0; 8078 data_list[bundle_at++] = chk; 8079 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 8080 mtu = 0; 8081 break; 8082 } 8083 if (chk->sent == SCTP_DATAGRAM_UNSENT) { 8084 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 8085 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 8086 } else { 8087 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 8088 } 8089 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 8090 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 8091 /* 8092 * Count number of 8093 * user msg's that 8094 * were fragmented 8095 * we do this by 8096 * counting when we 8097 * see a LAST 8098 * fragment only. 8099 */ 8100 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 8101 } 8102 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 8103 if (one_chunk) { 8104 data_list[0]->window_probe = 1; 8105 net->window_probe = 1; 8106 } 8107 break; 8108 } 8109 } else { 8110 /* 8111 * Must be sent in order of the 8112 * TSN's (on a network) 8113 */ 8114 break; 8115 } 8116 } /* for (chunk gather loop for this net) */ 8117 } /* if asoc.state OPEN */ 8118 /* Is there something to send for this destination? 
*/ 8119 if (outchain) { 8120 /* We may need to start a control timer or two */ 8121 if (asconf) { 8122 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, 8123 stcb, net); 8124 /* 8125 * do NOT clear the asconf flag as it is 8126 * used to do appropriate source address 8127 * selection. 8128 */ 8129 } 8130 if (cookie) { 8131 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 8132 cookie = 0; 8133 } 8134 /* must start a send timer if data is being sent */ 8135 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 8136 /* 8137 * no timer running on this destination 8138 * restart it. 8139 */ 8140 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8141 } else if (sctp_cmt_on_off && sctp_cmt_pf && pf_hbflag && ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) 8142 && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 8143 /* 8144 * JRS 5/14/07 - If a HB has been sent to a 8145 * PF destination and no T3 timer is 8146 * currently running, start the T3 timer to 8147 * track the HBs that were sent. 
8148 */ 8149 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8150 } 8151 /* Now send it, if there is anything to send :> */ 8152 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT); 8153 if (outchain == NULL) { 8154 /* out of mbufs */ 8155 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); 8156 error = ENOBUFS; 8157 goto errored_send; 8158 } 8159 shdr = mtod(outchain, struct sctphdr *); 8160 shdr->src_port = inp->sctp_lport; 8161 shdr->dest_port = stcb->rport; 8162 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 8163 shdr->checksum = 0; 8164 auth_offset += sizeof(struct sctphdr); 8165 if ((error = sctp_lowlevel_chunk_output(inp, 8166 stcb, 8167 net, 8168 (struct sockaddr *)&net->ro._l_addr, 8169 outchain, 8170 auth_offset, 8171 auth, 8172 no_fragmentflg, 8173 bundle_at, 8174 data_list[0], 8175 asconf, net->port, so_locked, NULL))) { 8176 /* error, we could not output */ 8177 if (error == ENOBUFS) { 8178 SCTP_STAT_INCR(sctps_lowlevelerr); 8179 asoc->ifp_had_enobuf = 1; 8180 } 8181 if (from_where == 0) { 8182 SCTP_STAT_INCR(sctps_lowlevelerrusr); 8183 } 8184 errored_send: 8185 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 8186 if (hbflag) { 8187 if (*now_filled == 0) { 8188 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8189 *now_filled = 1; 8190 *now = net->last_sent_time; 8191 } else { 8192 net->last_sent_time = *now; 8193 } 8194 hbflag = 0; 8195 } 8196 if (error == EHOSTUNREACH) { 8197 /* 8198 * Destination went unreachable 8199 * during this send 8200 */ 8201 sctp_move_to_an_alt(stcb, asoc, net); 8202 } 8203 *reason_code = 6; 8204 /*- 8205 * I add this line to be paranoid. As far as 8206 * I can tell the continue, takes us back to 8207 * the top of the for, but just to make sure 8208 * I will reset these again here. 8209 */ 8210 ctl_cnt = bundle_at = 0; 8211 continue; /* This takes us back to the 8212 * for() for the nets. 
*/ 8213 } else { 8214 asoc->ifp_had_enobuf = 0; 8215 } 8216 outchain = endoutchain = NULL; 8217 auth = NULL; 8218 auth_offset = 0; 8219 if (bundle_at || hbflag) { 8220 /* For data/asconf and hb set time */ 8221 if (*now_filled == 0) { 8222 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8223 *now_filled = 1; 8224 *now = net->last_sent_time; 8225 } else { 8226 net->last_sent_time = *now; 8227 } 8228 } 8229 if (!no_out_cnt) { 8230 *num_out += (ctl_cnt + bundle_at); 8231 } 8232 if (bundle_at) { 8233 /* setup for a RTO measurement */ 8234 tsns_sent = data_list[0]->rec.data.TSN_seq; 8235 /* fill time if not already filled */ 8236 if (*now_filled == 0) { 8237 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 8238 *now_filled = 1; 8239 *now = asoc->time_last_sent; 8240 } else { 8241 asoc->time_last_sent = *now; 8242 } 8243 data_list[0]->do_rtt = 1; 8244 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); 8245 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); 8246 if (sctp_early_fr) { 8247 if (net->flight_size < net->cwnd) { 8248 /* start or restart it */ 8249 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 8250 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 8251 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); 8252 } 8253 SCTP_STAT_INCR(sctps_earlyfrstrout); 8254 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net); 8255 } else { 8256 /* stop it if its running */ 8257 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 8258 SCTP_STAT_INCR(sctps_earlyfrstpout); 8259 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 8260 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); 8261 } 8262 } 8263 } 8264 } 8265 if (one_chunk) { 8266 break; 8267 } 8268 } 8269 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 8270 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); 8271 } 8272 } 8273 if (old_startat == NULL) { 8274 old_startat = send_start_at; 8275 send_start_at = TAILQ_FIRST(&asoc->nets); 8276 if (old_startat) 8277 goto again_one_more_time; 8278 } 8279 /* 8280 * 
At the end there should be no NON timed chunks hanging on this 8281 * queue. 8282 */ 8283 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 8284 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); 8285 } 8286 if ((*num_out == 0) && (*reason_code == 0)) { 8287 *reason_code = 4; 8288 } else { 8289 *reason_code = 5; 8290 } 8291 sctp_clean_up_ctl(stcb, asoc); 8292 return (0); 8293 } 8294 8295 void 8296 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) 8297 { 8298 /*- 8299 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of 8300 * the control chunk queue. 8301 */ 8302 struct sctp_chunkhdr *hdr; 8303 struct sctp_tmit_chunk *chk; 8304 struct mbuf *mat; 8305 8306 SCTP_TCB_LOCK_ASSERT(stcb); 8307 sctp_alloc_a_chunk(stcb, chk); 8308 if (chk == NULL) { 8309 /* no memory */ 8310 sctp_m_freem(op_err); 8311 return; 8312 } 8313 chk->copy_by_ref = 0; 8314 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT); 8315 if (op_err == NULL) { 8316 sctp_free_a_chunk(stcb, chk); 8317 return; 8318 } 8319 chk->send_size = 0; 8320 mat = op_err; 8321 while (mat != NULL) { 8322 chk->send_size += SCTP_BUF_LEN(mat); 8323 mat = SCTP_BUF_NEXT(mat); 8324 } 8325 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; 8326 chk->rec.chunk_id.can_take_data = 1; 8327 chk->sent = SCTP_DATAGRAM_UNSENT; 8328 chk->snd_count = 0; 8329 chk->flags = 0; 8330 chk->asoc = &stcb->asoc; 8331 chk->data = op_err; 8332 chk->whoTo = chk->asoc->primary_destination; 8333 atomic_add_int(&chk->whoTo->ref_count, 1); 8334 hdr = mtod(op_err, struct sctp_chunkhdr *); 8335 hdr->chunk_type = SCTP_OPERATION_ERROR; 8336 hdr->chunk_flags = 0; 8337 hdr->chunk_length = htons(chk->send_size); 8338 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, 8339 chk, 8340 sctp_next); 8341 chk->asoc->ctrl_queue_cnt++; 8342 } 8343 8344 int 8345 sctp_send_cookie_echo(struct mbuf *m, 8346 int offset, 8347 struct sctp_tcb *stcb, 8348 struct sctp_nets *net) 8349 { 8350 /*- 8351 * pull out the cookie and put it at 
the front of the control chunk 8352 * queue. 8353 */ 8354 int at; 8355 struct mbuf *cookie; 8356 struct sctp_paramhdr parm, *phdr; 8357 struct sctp_chunkhdr *hdr; 8358 struct sctp_tmit_chunk *chk; 8359 uint16_t ptype, plen; 8360 8361 /* First find the cookie in the param area */ 8362 cookie = NULL; 8363 at = offset + sizeof(struct sctp_init_chunk); 8364 8365 SCTP_TCB_LOCK_ASSERT(stcb); 8366 do { 8367 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm)); 8368 if (phdr == NULL) { 8369 return (-3); 8370 } 8371 ptype = ntohs(phdr->param_type); 8372 plen = ntohs(phdr->param_length); 8373 if (ptype == SCTP_STATE_COOKIE) { 8374 int pad; 8375 8376 /* found the cookie */ 8377 if ((pad = (plen % 4))) { 8378 plen += 4 - pad; 8379 } 8380 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT); 8381 if (cookie == NULL) { 8382 /* No memory */ 8383 return (-2); 8384 } 8385 #ifdef SCTP_MBUF_LOGGING 8386 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 8387 struct mbuf *mat; 8388 8389 mat = cookie; 8390 while (mat) { 8391 if (SCTP_BUF_IS_EXTENDED(mat)) { 8392 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 8393 } 8394 mat = SCTP_BUF_NEXT(mat); 8395 } 8396 } 8397 #endif 8398 break; 8399 } 8400 at += SCTP_SIZE32(plen); 8401 } while (phdr); 8402 if (cookie == NULL) { 8403 /* Did not find the cookie */ 8404 return (-3); 8405 } 8406 /* ok, we got the cookie lets change it into a cookie echo chunk */ 8407 8408 /* first the change from param to cookie */ 8409 hdr = mtod(cookie, struct sctp_chunkhdr *); 8410 hdr->chunk_type = SCTP_COOKIE_ECHO; 8411 hdr->chunk_flags = 0; 8412 /* get the chunk stuff now and place it in the FRONT of the queue */ 8413 sctp_alloc_a_chunk(stcb, chk); 8414 if (chk == NULL) { 8415 /* no memory */ 8416 sctp_m_freem(cookie); 8417 return (-5); 8418 } 8419 chk->copy_by_ref = 0; 8420 chk->send_size = plen; 8421 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; 8422 chk->rec.chunk_id.can_take_data = 0; 8423 chk->sent = SCTP_DATAGRAM_UNSENT; 8424 chk->snd_count = 0; 8425 chk->flags = 
CHUNK_FLAGS_FRAGMENT_OK; 8426 chk->asoc = &stcb->asoc; 8427 chk->data = cookie; 8428 chk->whoTo = chk->asoc->primary_destination; 8429 atomic_add_int(&chk->whoTo->ref_count, 1); 8430 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); 8431 chk->asoc->ctrl_queue_cnt++; 8432 return (0); 8433 } 8434 8435 void 8436 sctp_send_heartbeat_ack(struct sctp_tcb *stcb, 8437 struct mbuf *m, 8438 int offset, 8439 int chk_length, 8440 struct sctp_nets *net) 8441 { 8442 /* 8443 * take a HB request and make it into a HB ack and send it. 8444 */ 8445 struct mbuf *outchain; 8446 struct sctp_chunkhdr *chdr; 8447 struct sctp_tmit_chunk *chk; 8448 8449 8450 if (net == NULL) 8451 /* must have a net pointer */ 8452 return; 8453 8454 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT); 8455 if (outchain == NULL) { 8456 /* gak out of memory */ 8457 return; 8458 } 8459 #ifdef SCTP_MBUF_LOGGING 8460 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 8461 struct mbuf *mat; 8462 8463 mat = outchain; 8464 while (mat) { 8465 if (SCTP_BUF_IS_EXTENDED(mat)) { 8466 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 8467 } 8468 mat = SCTP_BUF_NEXT(mat); 8469 } 8470 } 8471 #endif 8472 chdr = mtod(outchain, struct sctp_chunkhdr *); 8473 chdr->chunk_type = SCTP_HEARTBEAT_ACK; 8474 chdr->chunk_flags = 0; 8475 if (chk_length % 4) { 8476 /* need pad */ 8477 uint32_t cpthis = 0; 8478 int padlen; 8479 8480 padlen = 4 - (chk_length % 4); 8481 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis); 8482 } 8483 sctp_alloc_a_chunk(stcb, chk); 8484 if (chk == NULL) { 8485 /* no memory */ 8486 sctp_m_freem(outchain); 8487 return; 8488 } 8489 chk->copy_by_ref = 0; 8490 chk->send_size = chk_length; 8491 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; 8492 chk->rec.chunk_id.can_take_data = 1; 8493 chk->sent = SCTP_DATAGRAM_UNSENT; 8494 chk->snd_count = 0; 8495 chk->flags = 0; 8496 chk->asoc = &stcb->asoc; 8497 chk->data = outchain; 8498 chk->whoTo = net; 8499 atomic_add_int(&chk->whoTo->ref_count, 1); 8500 
TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8501 chk->asoc->ctrl_queue_cnt++; 8502 } 8503 8504 void 8505 sctp_send_cookie_ack(struct sctp_tcb *stcb) 8506 { 8507 /* formulate and queue a cookie-ack back to sender */ 8508 struct mbuf *cookie_ack; 8509 struct sctp_chunkhdr *hdr; 8510 struct sctp_tmit_chunk *chk; 8511 8512 cookie_ack = NULL; 8513 SCTP_TCB_LOCK_ASSERT(stcb); 8514 8515 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER); 8516 if (cookie_ack == NULL) { 8517 /* no mbuf's */ 8518 return; 8519 } 8520 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 8521 sctp_alloc_a_chunk(stcb, chk); 8522 if (chk == NULL) { 8523 /* no memory */ 8524 sctp_m_freem(cookie_ack); 8525 return; 8526 } 8527 chk->copy_by_ref = 0; 8528 chk->send_size = sizeof(struct sctp_chunkhdr); 8529 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 8530 chk->rec.chunk_id.can_take_data = 1; 8531 chk->sent = SCTP_DATAGRAM_UNSENT; 8532 chk->snd_count = 0; 8533 chk->flags = 0; 8534 chk->asoc = &stcb->asoc; 8535 chk->data = cookie_ack; 8536 if (chk->asoc->last_control_chunk_from != NULL) { 8537 chk->whoTo = chk->asoc->last_control_chunk_from; 8538 } else { 8539 chk->whoTo = chk->asoc->primary_destination; 8540 } 8541 atomic_add_int(&chk->whoTo->ref_count, 1); 8542 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 8543 hdr->chunk_type = SCTP_COOKIE_ACK; 8544 hdr->chunk_flags = 0; 8545 hdr->chunk_length = htons(chk->send_size); 8546 SCTP_BUF_LEN(cookie_ack) = chk->send_size; 8547 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8548 chk->asoc->ctrl_queue_cnt++; 8549 return; 8550 } 8551 8552 8553 void 8554 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 8555 { 8556 /* formulate and queue a SHUTDOWN-ACK back to the sender */ 8557 struct mbuf *m_shutdown_ack; 8558 struct sctp_shutdown_ack_chunk *ack_cp; 8559 struct sctp_tmit_chunk *chk; 8560 8561 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct 
    sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown_ack == NULL) {
		/* no mbuf's */
		return;
	}
	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown_ack);
		return;
	}
	chk->copy_by_ref = 0;
	/*
	 * NOTE(review): send_size is sizeof(struct sctp_chunkhdr) while the
	 * mbuf was sized for sctp_shutdown_ack_chunk; a SHUTDOWN-ACK carries
	 * no payload so these are presumably the same size — confirm against
	 * sctp_header.h.
	 */
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown_ack;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Formulate and queue a SHUTDOWN chunk to 'net', carrying our current
 * cumulative TSN ack.  Drops silently on allocation failure.
 */
void
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN to the sender */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
		/* no mbuf's */
		return;
	}
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc =
&stcb->asoc;
	chk->data = m_shutdown;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	/* SHUTDOWN carries our current cumulative TSN ack */
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Formulate and queue an ASCONF to the peer from the pending ASCONF
 * parameters on the association.  Unless MULTIPLE_ASCONFS is enabled,
 * at most one ASCONF may be outstanding at a time.  addr_locked is
 * passed through to sctp_compose_asconf().  Caller holds the TCB lock.
 */
void
sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
{
	/*
	 * formulate and queue an ASCONF to the peer. ASCONF parameters
	 * should be queued on the assoc queue.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;
	int len;

	SCTP_TCB_LOCK_ASSERT(stcb);

	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
		/* can't send a new one if there is one in flight already */
		return;
	}
	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
	if (m_asconf == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_asconf);
		return;
	}
	chk->copy_by_ref = 0;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->asoc = &stcb->asoc;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* ASCONFs live on their own queue, not the control queue */
	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

void
sctp_send_asconf_ack(struct
    sctp_tcb *stcb)
{
	/*
	 * formulate and queue a asconf-ack back to sender. the asconf-ack
	 * must be stored in the tcb.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_asconf_ack *ack, *latest_ack;
	struct mbuf *m_ack, *m;
	struct sctp_nets *net = NULL;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Get the latest ASCONF-ACK; nothing cached means nothing to send. */
	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
	if (latest_ack == NULL) {
		return;
	}
	if (latest_ack->last_sent_to != NULL &&
	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
		/*
		 * we're doing a retransmission: the peer re-sent its ASCONF
		 * on the same path, so try an alternate destination.
		 */
		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
		if (net == NULL) {
			/* no alternate */
			if (stcb->asoc.last_control_chunk_from == NULL)
				net = stcb->asoc.primary_destination;
			else
				net = stcb->asoc.last_control_chunk_from;
		}
	} else {
		/* normal case */
		if (stcb->asoc.last_control_chunk_from == NULL)
			net = stcb->asoc.primary_destination;
		else
			net = stcb->asoc.last_control_chunk_from;
	}
	latest_ack->last_sent_to = net;

	/* Queue a copy of every cached ASCONF-ACK that still has data. */
	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
		if (ack->data == NULL) {
			continue;
		}
		/* copy the asconf_ack */
		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
		if (m_ack == NULL) {
			/* couldn't copy it */
			return;
		}
#ifdef SCTP_MBUF_LOGGING
		if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = m_ack;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif

		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* no memory */
			/* NOTE(review): m_ack is known non-NULL here, the
			 * guard below is redundant (harmless). */
			if (m_ack)
				sctp_m_freem(m_ack);
			return;
		}
		chk->copy_by_ref = 0;

		chk->whoTo = net;
		chk->data = m_ack;
chk->send_size = 0; 8757 /* Get size */ 8758 m = m_ack; 8759 chk->send_size = ack->len; 8760 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 8761 chk->rec.chunk_id.can_take_data = 1; 8762 chk->sent = SCTP_DATAGRAM_UNSENT; 8763 chk->snd_count = 0; 8764 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */ 8765 chk->asoc = &stcb->asoc; 8766 atomic_add_int(&chk->whoTo->ref_count, 1); 8767 8768 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8769 chk->asoc->ctrl_queue_cnt++; 8770 } 8771 return; 8772 } 8773 8774 8775 static int 8776 sctp_chunk_retransmission(struct sctp_inpcb *inp, 8777 struct sctp_tcb *stcb, 8778 struct sctp_association *asoc, 8779 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked 8780 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 8781 SCTP_UNUSED 8782 #endif 8783 ) 8784 { 8785 /*- 8786 * send out one MTU of retransmission. If fast_retransmit is 8787 * happening we ignore the cwnd. Otherwise we obey the cwnd and 8788 * rwnd. For a Cookie or Asconf in the control chunk queue we 8789 * retransmit them by themselves. 8790 * 8791 * For data chunks we will pick out the lowest TSN's in the sent_queue 8792 * marked for resend and bundle them all together (up to a MTU of 8793 * destination). The address to send to should have been 8794 * selected/changed where the retransmission was marked (i.e. in FR 8795 * or t3-timeout routines). 
	 */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	struct sctp_tmit_chunk *chk, *fwd;
	struct mbuf *m, *endofchain;
	struct sctphdr *shdr;
	struct sctp_nets *net = NULL;
	uint32_t tsns_sent = 0;
	int no_fragmentflg, bundle_at, cnt_thru;
	unsigned int mtu;
	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
	struct sctp_auth_chunk *auth = NULL;
	uint32_t auth_offset = 0;
	uint32_t dmtu = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	tmr_started = ctl_cnt = bundle_at = error = 0;
	no_fragmentflg = 1;
	fwd_tsn = 0;
	*cnt_out = 0;
	fwd = NULL;
	endofchain = m = NULL;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC3, 1);
#endif
	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
		/* retran count was stale: reset and fall back to normal tx */
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
		    asoc->sent_queue_retran_cnt);
		asoc->sent_queue_cnt = 0;
		asoc->sent_queue_cnt_removeable = 0;
		/* send back 0/0 so we enter normal transmission */
		*cnt_out = 0;
		return (0);
	}
	/*
	 * First pass: look for a retransmittable control chunk
	 * (COOKIE-ECHO, our own STREAM-RESET, or FWD-TSN); at most one is
	 * taken per call (note the break below).
	 */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retran if its
					 * not ours
					 */
					continue;
				}
			}
			ctl_cnt++;
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
				fwd_tsn = 1;
				fwd = chk;
			}
			/*
			 * Add an AUTH chunk, if chunk requires it save the
			 * offset into the chain for AUTH
			 */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb,
				    chk->rec.chunk_id.id);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			break;
		}
	}
	one_chunk = 0;
	cnt_thru = 0;
	/* do we have control chunks to retransmit? */
	if (m != NULL) {
		/* Start a timer no matter if we suceed or fail */
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
			/*
			 * NOTE(review): the selection loop above only picks
			 * COOKIE_ECHO/STREAM_RESET/FWD-TSN, so this ASCONF
			 * branch looks unreachable from here — confirm
			 * before relying on it.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);

		/* prepend the common SCTP header and fill it in */
		SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
		if (m == NULL) {
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
			return (ENOBUFS);
		}
		shdr = mtod(m, struct sctphdr *);
		shdr->src_port = inp->sctp_lport;
		shdr->dest_port = stcb->rport;
		shdr->v_tag = htonl(stcb->asoc.peer_vtag);
		shdr->checksum = 0;
		auth_offset += sizeof(struct sctphdr);
		chk->snd_count++;	/* update our count */

		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
		    (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
		    auth, no_fragmentflg, 0, NULL, 0, chk->whoTo->port, so_locked, NULL))) {
			SCTP_STAT_INCR(sctps_lowlevelerr);
			return (error);
		}
		m = endofchain = NULL;
		auth = NULL;
		auth_offset = 0;
		/*
		 * We don't want to mark the net->sent time here since this
		 * we use this for HB and retrans cannot measure RTT
		 */
		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
		*cnt_out += 1;
		chk->sent = SCTP_DATAGRAM_SENT;
		sctp_ucount_decr(asoc->sent_queue_retran_cnt);
		if (fwd_tsn == 0) {
			return (0);
		} else {
			/* Clean up the fwd-tsn list */
			sctp_clean_up_ctl(stcb, asoc);
			return (0);
		}
	}
	/*
	 * Ok, it is just data retransmission we need to do or that and a
	 * fwd-tsn with it all.
	 */
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		return (SCTP_RETRAN_DONE);
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
		/* not yet open, resend the cookie and that is it */
		return (1);
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(20, inp, stcb, NULL);
#endif
	/* Walk the sent queue for chunks marked for retransmission. */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent != SCTP_DATAGRAM_RESEND) {
			/* No, not sent to this net or not ready for rtx */
			continue;
		}
		if ((sctp_max_retran_chunk) && (chk->snd_count >= sctp_max_retran_chunk)) {
			/* Gak, we have exceeded max unlucky retran, abort! */
			SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
			    chk->snd_count,
			    sctp_max_retran_chunk);
			/* hold a ref across the abort, which may drop the lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (SCTP_RETRAN_EXIT);
		}
		/* pick up the net */
		net = chk->whoTo;
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			mtu = (net->mtu - SCTP_MIN_OVERHEAD);
		} else {
			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
		}

		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
			/* No room in peers rwnd */
			uint32_t tsn;

			tsn = asoc->last_acked_seq + 1;
			if (tsn == chk->rec.data.TSN_seq) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk.. which is probably what
				 * is holding up the rwnd.
				 */
				goto one_chunk_around;
			}
			return (1);
		}
one_chunk_around:
	/* rwnd smaller than an MTU: send a single chunk (window probe). */
		if (asoc->peers_rwnd < mtu) {
			one_chunk = 1;
			if ((asoc->peers_rwnd == 0) &&
			    (asoc->total_flight == 0)) {
				chk->window_probe = 1;
				chk->whoTo->window_probe = 1;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC3, 2);
#endif
		bundle_at = 0;
		m = NULL;
		net->fast_retran_ip = 0;
		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR in progress skip destination that have
			 * flight_size > cwnd.
			 */
			if (net->flight_size >= net->cwnd) {
				continue;
			}
		} else {
			/*
			 * Mark the destination net to have FR recovery
			 * limits put on it.
			 */
			*fr_done = 1;
			net->fast_retran_ip = 1;
		}

		/*
		 * if no AUTH is yet included and this chunk requires it,
		 * make sure to account for it. We don't apply the size
		 * until the AUTH chunk is actually added below in case
		 * there is no room for this chunk.
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		} else
			dmtu = 0;

		if ((chk->send_size <= (mtu - dmtu)) ||
		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
			/* ok we will add this one */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(SCTP_DATA,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb, SCTP_DATA);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			if (m == NULL) {
				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
				return (ENOMEM);
			}
			/* Do clear IP_DF ?
			 */
			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
				no_fragmentflg = 0;
			}
			/* upate our MTU size */
			if (mtu > (chk->send_size + dmtu))
				mtu -= (chk->send_size + dmtu);
			else
				mtu = 0;
			data_list[bundle_at++] = chk;
			if (one_chunk && (asoc->total_flight <= 0)) {
				SCTP_STAT_INCR(sctps_windowprobed);
			}
		}
		if (one_chunk == 0) {
			/*
			 * now are there anymore forward from chk to pick
			 * up? Bundle further RESEND chunks for the same
			 * net while they fit in the remaining MTU budget.
			 */
			fwd = TAILQ_NEXT(chk, sctp_next);
			while (fwd) {
				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
					/* Nope, not for retran */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if (fwd->whoTo != net) {
					/* Nope, not the net in question */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if ((auth == NULL) &&
				    sctp_auth_is_required_chunk(SCTP_DATA,
				    stcb->asoc.peer_auth_chunks)) {
					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
				} else
					dmtu = 0;
				if (fwd->send_size <= (mtu - dmtu)) {
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {
						m = sctp_add_auth_chunk(m,
						    &endofchain,
						    &auth, &auth_offset,
						    stcb,
						    SCTP_DATA);
					}
					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
					if (m == NULL) {
						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
						return (ENOMEM);
					}
					/* Do clear IP_DF ? */
					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* upate our MTU size */
					if (mtu > (fwd->send_size + dmtu))
						mtu -= (fwd->send_size + dmtu);
					else
						mtu = 0;
					data_list[bundle_at++] = fwd;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						break;
					}
					fwd = TAILQ_NEXT(fwd, sctp_next);
				} else {
					/* can't fit so we are done */
					break;
				}
			}
		}
		/* Is there something to send for this destination?
		 */
		if (m) {
			/*
			 * No matter if we fail/or suceed we should start a
			 * timer. A failure is like a lost IP packet :-)
			 */
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
				tmr_started = 1;
			}
			/* prepend and fill the common SCTP header */
			SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
			if (m == NULL) {
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
				return (ENOBUFS);
			}
			shdr = mtod(m, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			/* Now lets send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
			    auth, no_fragmentflg, 0, NULL, 0, net->port, so_locked, NULL))) {
				/* error, we could not output */
				SCTP_STAT_INCR(sctps_lowlevelerr);
				return (error);
			}
			m = endofchain = NULL;
			auth = NULL;
			auth_offset = 0;
			/* For HB's */
			/*
			 * We don't want to mark the net->sent time here
			 * since this we use this for HB and retrans cannot
			 * measure RTT
			 */
			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */

			/* For auto-close */
			cnt_thru++;
			if (*now_filled == 0) {
				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
				*now = asoc->time_last_sent;
				*now_filled = 1;
			} else {
				asoc->time_last_sent = *now;
			}
			*cnt_out += bundle_at;
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xC4, bundle_at);
#endif
			if (bundle_at) {
				tsns_sent = data_list[0]->rec.data.TSN_seq;
			}
			/* per-chunk bookkeeping for everything just sent */
			for (i = 0; i < bundle_at; i++) {
				SCTP_STAT_INCR(sctps_sendretransdata);
				data_list[i]->sent =
				    SCTP_DATAGRAM_SENT;
				/*
				 * When we have a revoked data, and we
				 * retransmit it, then we clear the revoked
				 * flag since this flag dictates if we
				 * subtracted from the fs
				 */
				if (data_list[i]->rec.data.chunk_was_revoked) {
					/* Deflate the cwnd */
					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
					data_list[i]->rec.data.chunk_was_revoked = 0;
				}
				data_list[i]->snd_count++;
				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				/* record the time */
				data_list[i]->sent_rcv_time = asoc->time_last_sent;
				if (data_list[i]->book_size_scale) {
					/*
					 * need to double the book size on
					 * this one
					 */
					data_list[i]->book_size_scale = 0;
					/*
					 * Since we double the booksize, we
					 * must also double the output queue
					 * size, since this get shrunk when
					 * we free by this amount.
					 */
					atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
					data_list[i]->book_size *= 2;


				} else {
					if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
						    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
					}
					/* charge the retransmit against the peer's rwnd */
					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
					    (uint32_t) (data_list[i]->send_size +
					    sctp_peer_chunk_oh));
				}
				if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
					    data_list[i]->whoTo->flight_size,
					    data_list[i]->book_size,
					    (uintptr_t) data_list[i]->whoTo,
					    data_list[i]->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(data_list[i]);
				sctp_total_flight_increase(stcb, data_list[i]);
				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					asoc->peers_rwnd = 0;
				}
				if ((i == 0) &&
				    (data_list[i]->rec.data.doing_fast_retransmit)) {
					SCTP_STAT_INCR(sctps_sendfastretrans);
					if ((data_list[i] ==
				    TAILQ_FIRST(&asoc->sent_queue)) &&
					    (tmr_started == 0)) {
						/*-
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without a
						 * t3-expiring.
						 */
						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
						    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
					}
				}
			}
			if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(21, inp, stcb, NULL);
#endif
		} else {
			/* None will fit */
			return (1);
		}
		if (asoc->sent_queue_retran_cnt <= 0) {
			/* all done we have no more to retran */
			asoc->sent_queue_retran_cnt = 0;
			break;
		}
		if (one_chunk) {
			/* No more room in rwnd */
			return (1);
		}
		/* stop the for loop here.
		   we sent out a packet */
		break;
	}
	return (0);
}


/*
 * Sanity helper for the retransmission path: make sure at least one
 * destination still has a T3-rxt timer pending; if none does, start one
 * on the primary destination so the association cannot stall.
 * Returns 'ret' unchanged in every case.
 */
static int
sctp_timer_validation(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int ret)
{
	struct sctp_nets *net;

	/* Validate that a timer is running somewhere */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			/* Here is a timer */
			return (ret);
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Gak, we did not have a timer somewhere */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
	return (ret);
}

void
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*-
	 * Ok this is the generic chunk service queue. we must do the
	 * following:
	 * - See if there are retransmits pending, if so we must
	 * do these first.
	 * - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e.
	 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * TSN's
	 * - Check to see if the cwnd/rwnd allows any output, if so we
	 * go ahead and fomulate and send the low level chunks. Making sure
	 * to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
	    burst_cnt = 0, burst_limit = 0;
	struct timeval now;
	int now_filled = 0;
	int cwnd_full = 0;
	int nagle_on = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	int fr_done, tot_frs = 0;

	asoc = &stcb->asoc;
	/* Nagle applies only to user-initiated sends, and only if NODELAY is off. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return;
	}
	/*
	 * Do we have something to send, data or control AND a sack timer
	 * running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb);
		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*-
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*-
			 * Special hook for handling cookiess discarded
			 * by peer that carried data. Send cookie-ack only
			 * and then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point, so_locked);
			return;
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
			if (fr_done) {
				/* count fast-retransmit rounds toward the FR burst limit */
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*-
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point, so_locked);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			/* make sure some timer is left running before we bail */
			(void)sctp_timer_validation(inp, stcb, asoc, ret);
			return;
		}
		if (ret < 0) {
			/*-
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			if (ret == SCTP_RETRAN_EXIT) {
				/* association was aborted in the retran path */
				return;
			}
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
			    &now, &now_filled, frag_point, so_locked);
			return;
		}
		if (tot_frs > asoc->max_burst) {
			/* Hit FR burst limit */
			return;
		}
		if ((num_out == 0) && (ret == 0)) {

			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	burst_limit = asoc->max_burst;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
		    SCTP_ADDR_NOT_REACHABLE) {
			/*-
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
		    SCTP_ADDR_PF)) {
			/*
			 * JRS 5/14/07 - If CMT PF is on and the current
			 * destination is in PF state, move all queued data
			 * to an alternate desination.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else {
			/*-
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (sctp_use_cwnd_based_maxburst) {
				if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
					/*
					 * JRS - Use the congestion control
					 * given in the congestion control
					 * module
					 */
					asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
					if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
						sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
					}
					SCTP_STAT_INCR(sctps_maxburstqueued);
				}
				net->fast_retran_ip = 0;
			} else {
				if (net->flight_size == 0) {
					/* Should be decaying the cwnd here */
					;
				}
			}
		}

	}
	burst_cnt = 0;
	cwnd_full = 0;
	/* Main output loop: keep calling the medium-level output routine
	 * until nothing goes out or a burst/nagle limit stops us. */
	do {
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, &cwnd_full, from_where,
		    &now, &now_filled, frag_point, so_locked);
		if (error) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
			if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
			}
			if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
			}
			break;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);

		tot_out += num_out;
		burst_cnt++;
		if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
			if (num_out == 0) {
				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
			}
		}
		if (nagle_on) {
			/*-
			 * When nagle is on, we look at
			 * how much is un_sent, then
			 * if its smaller than an MTU and we have data in
			 * flight we stop.
			 */
			/* un_sent includes per-chunk DATA header overhead */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
			    * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->out_wheel)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out && (sctp_use_cwnd_based_maxburst ||
	    (burst_cnt < burst_limit)));

	if (sctp_use_cwnd_based_maxburst == 0) {
		if (burst_cnt >= burst_limit) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
			if (sctp_logging_level & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
			}
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
	if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
	    tot_out);

	/*-
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
9552 */ 9553 if (stcb->asoc.ecn_echo_cnt_onq) 9554 sctp_fix_ecn_echo(asoc); 9555 return; 9556 } 9557 9558 9559 int 9560 sctp_output(inp, m, addr, control, p, flags) 9561 struct sctp_inpcb *inp; 9562 struct mbuf *m; 9563 struct sockaddr *addr; 9564 struct mbuf *control; 9565 struct thread *p; 9566 int flags; 9567 { 9568 if (inp == NULL) { 9569 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 9570 return (EINVAL); 9571 } 9572 if (inp->sctp_socket == NULL) { 9573 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 9574 return (EINVAL); 9575 } 9576 return (sctp_sosend(inp->sctp_socket, 9577 addr, 9578 (struct uio *)NULL, 9579 m, 9580 control, 9581 flags, p 9582 )); 9583 } 9584 9585 void 9586 send_forward_tsn(struct sctp_tcb *stcb, 9587 struct sctp_association *asoc) 9588 { 9589 struct sctp_tmit_chunk *chk; 9590 struct sctp_forward_tsn_chunk *fwdtsn; 9591 9592 SCTP_TCB_LOCK_ASSERT(stcb); 9593 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9594 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 9595 /* mark it to unsent */ 9596 chk->sent = SCTP_DATAGRAM_UNSENT; 9597 chk->snd_count = 0; 9598 /* Do we correct its output location? 
*/ 9599 if (chk->whoTo != asoc->primary_destination) { 9600 sctp_free_remote_addr(chk->whoTo); 9601 chk->whoTo = asoc->primary_destination; 9602 atomic_add_int(&chk->whoTo->ref_count, 1); 9603 } 9604 goto sctp_fill_in_rest; 9605 } 9606 } 9607 /* Ok if we reach here we must build one */ 9608 sctp_alloc_a_chunk(stcb, chk); 9609 if (chk == NULL) { 9610 return; 9611 } 9612 chk->copy_by_ref = 0; 9613 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 9614 chk->rec.chunk_id.can_take_data = 0; 9615 chk->asoc = asoc; 9616 chk->whoTo = NULL; 9617 9618 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 9619 if (chk->data == NULL) { 9620 sctp_free_a_chunk(stcb, chk); 9621 return; 9622 } 9623 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9624 chk->sent = SCTP_DATAGRAM_UNSENT; 9625 chk->snd_count = 0; 9626 chk->whoTo = asoc->primary_destination; 9627 atomic_add_int(&chk->whoTo->ref_count, 1); 9628 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 9629 asoc->ctrl_queue_cnt++; 9630 sctp_fill_in_rest: 9631 /*- 9632 * Here we go through and fill out the part that deals with 9633 * stream/seq of the ones we skip. 
9634 */ 9635 SCTP_BUF_LEN(chk->data) = 0; 9636 { 9637 struct sctp_tmit_chunk *at, *tp1, *last; 9638 struct sctp_strseq *strseq; 9639 unsigned int cnt_of_space, i, ovh; 9640 unsigned int space_needed; 9641 unsigned int cnt_of_skipped = 0; 9642 9643 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 9644 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 9645 /* no more to look at */ 9646 break; 9647 } 9648 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 9649 /* We don't report these */ 9650 continue; 9651 } 9652 cnt_of_skipped++; 9653 } 9654 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 9655 (cnt_of_skipped * sizeof(struct sctp_strseq))); 9656 9657 cnt_of_space = M_TRAILINGSPACE(chk->data); 9658 9659 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 9660 ovh = SCTP_MIN_OVERHEAD; 9661 } else { 9662 ovh = SCTP_MIN_V4_OVERHEAD; 9663 } 9664 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 9665 /* trim to a mtu size */ 9666 cnt_of_space = asoc->smallest_mtu - ovh; 9667 } 9668 if (cnt_of_space < space_needed) { 9669 /*- 9670 * ok we must trim down the chunk by lowering the 9671 * advance peer ack point. 9672 */ 9673 cnt_of_skipped = (cnt_of_space - 9674 ((sizeof(struct sctp_forward_tsn_chunk)) / 9675 sizeof(struct sctp_strseq))); 9676 /*- 9677 * Go through and find the TSN that will be the one 9678 * we report. 
9679 */ 9680 at = TAILQ_FIRST(&asoc->sent_queue); 9681 for (i = 0; i < cnt_of_skipped; i++) { 9682 tp1 = TAILQ_NEXT(at, sctp_next); 9683 at = tp1; 9684 } 9685 last = at; 9686 /*- 9687 * last now points to last one I can report, update 9688 * peer ack point 9689 */ 9690 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq; 9691 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq)); 9692 } 9693 chk->send_size = space_needed; 9694 /* Setup the chunk */ 9695 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 9696 fwdtsn->ch.chunk_length = htons(chk->send_size); 9697 fwdtsn->ch.chunk_flags = 0; 9698 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 9699 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point); 9700 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) + 9701 (cnt_of_skipped * sizeof(struct sctp_strseq))); 9702 SCTP_BUF_LEN(chk->data) = chk->send_size; 9703 fwdtsn++; 9704 /*- 9705 * Move pointer to after the fwdtsn and transfer to the 9706 * strseq pointer. 9707 */ 9708 strseq = (struct sctp_strseq *)fwdtsn; 9709 /*- 9710 * Now populate the strseq list. This is done blindly 9711 * without pulling out duplicate stream info. This is 9712 * inefficent but won't harm the process since the peer will 9713 * look at these in sequence and will thus release anything. 9714 * It could mean we exceed the PMTU and chop off some that 9715 * we could have included.. but this is unlikely (aka 1432/4 9716 * would mean 300+ stream seq's would have to be reported in 9717 * one FWD-TSN. With a bit of work we can later FIX this to 9718 * optimize and pull out duplcates.. but it does add more 9719 * overhead. So for now... not! 
9720 */ 9721 at = TAILQ_FIRST(&asoc->sent_queue); 9722 for (i = 0; i < cnt_of_skipped; i++) { 9723 tp1 = TAILQ_NEXT(at, sctp_next); 9724 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 9725 /* We don't report these */ 9726 i--; 9727 at = tp1; 9728 continue; 9729 } 9730 strseq->stream = ntohs(at->rec.data.stream_number); 9731 strseq->sequence = ntohs(at->rec.data.stream_seq); 9732 strseq++; 9733 at = tp1; 9734 } 9735 } 9736 return; 9737 9738 } 9739 9740 void 9741 sctp_send_sack(struct sctp_tcb *stcb) 9742 { 9743 /*- 9744 * Queue up a SACK in the control queue. We must first check to see 9745 * if a SACK is somehow on the control queue. If so, we will take 9746 * and and remove the old one. 9747 */ 9748 struct sctp_association *asoc; 9749 struct sctp_tmit_chunk *chk, *a_chk; 9750 struct sctp_sack_chunk *sack; 9751 struct sctp_gap_ack_block *gap_descriptor; 9752 struct sack_track *selector; 9753 int mergeable = 0; 9754 int offset; 9755 caddr_t limit; 9756 uint32_t *dup; 9757 int limit_reached = 0; 9758 unsigned int i, jstart, siz, j; 9759 unsigned int num_gap_blocks = 0, space; 9760 int num_dups = 0; 9761 int space_req; 9762 9763 a_chk = NULL; 9764 asoc = &stcb->asoc; 9765 SCTP_TCB_LOCK_ASSERT(stcb); 9766 if (asoc->last_data_chunk_from == NULL) { 9767 /* Hmm we never received anything */ 9768 return; 9769 } 9770 sctp_set_rwnd(stcb, asoc); 9771 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9772 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 9773 /* Hmm, found a sack already on queue, remove it */ 9774 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 9775 asoc->ctrl_queue_cnt++; 9776 a_chk = chk; 9777 if (a_chk->data) { 9778 sctp_m_freem(a_chk->data); 9779 a_chk->data = NULL; 9780 } 9781 sctp_free_remote_addr(a_chk->whoTo); 9782 a_chk->whoTo = NULL; 9783 break; 9784 } 9785 } 9786 if (a_chk == NULL) { 9787 sctp_alloc_a_chunk(stcb, a_chk); 9788 if (a_chk == NULL) { 9789 /* No memory so we drop the idea, and set a timer */ 9790 if 
(stcb->asoc.delayed_ack) {
				/* Delayed-ack in use: restart the RECV timer and retry later. */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/* No delayed-ack: remember that a SACK is owed. */
				stcb->asoc.send_sack = 1;
			}
			return;
		}
		a_chk->copy_by_ref = 0;
		/* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
		a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
		a_chk->rec.chunk_id.can_take_data = 1;
	}
	/* Clear our pkt counts */
	asoc->data_pkts_seen = 0;

	a_chk->asoc = asoc;
	a_chk->snd_count = 0;
	a_chk->send_size = 0;	/* fill in later */
	a_chk->sent = SCTP_DATAGRAM_UNSENT;
	a_chk->whoTo = NULL;

	if ((asoc->numduptsns) ||
	    (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
	    ) {
		/*-
		 * Ok, we have some duplicates or the destination for the
		 * sack is unreachable, lets see if we can select an
		 * alternate than asoc->last_data_chunk_from
		 */
		if ((!(asoc->last_data_chunk_from->dest_state &
		    SCTP_ADDR_NOT_REACHABLE)) &&
		    (asoc->used_alt_onsack > asoc->numnets)) {
			/* We used an alt last time, don't this time */
			a_chk->whoTo = NULL;
		} else {
			asoc->used_alt_onsack++;
			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
		}
		if (a_chk->whoTo == NULL) {
			/* Nope, no alternate */
			a_chk->whoTo = asoc->last_data_chunk_from;
			asoc->used_alt_onsack = 0;
		}
	} else {
		/*
		 * No duplicates so we use the last place we received data
		 * from.
		 */
		asoc->used_alt_onsack = 0;
		a_chk->whoTo = asoc->last_data_chunk_from;
	}
	if (a_chk->whoTo) {
		atomic_add_int(&a_chk->whoTo->ref_count, 1);
	}
	if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
		/* no gaps */
		space_req = sizeof(struct sctp_sack_chunk);
	} else {
		/* gaps get a cluster */
		space_req = MCLBYTES;
	}
	/* Ok now lets formulate a MBUF with our sack */
	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
	if ((a_chk->data == NULL) ||
	    (a_chk->whoTo == NULL)) {
		/* rats, no mbuf memory */
		if (a_chk->data) {
			/* was a problem with the destination */
			sctp_m_freem(a_chk->data);
			a_chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, a_chk);
		/* sa_ignore NO_NULL_CHK */
		if (stcb->asoc.delayed_ack) {
			/* Same fallback as above: arm the RECV timer and retry. */
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
		} else {
			stcb->asoc.send_sack = 1;
		}
		return;
	}
	/* ok, lets go through and fill it in */
	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
	space = M_TRAILINGSPACE(a_chk->data);
	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
	}
	/* 'limit' marks the first byte we may NOT write past in the mbuf. */
	limit = mtod(a_chk->data, caddr_t);
	limit += space;

	sack = mtod(a_chk->data, struct sctp_sack_chunk *);
	sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
	/* 0x01 is used by nonce for ecn */
	if ((sctp_ecn_enable) &&
	    (sctp_ecn_nonce) &&
	    (asoc->peer_supports_ecn_nonce))
		sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
	else
		sack->ch.chunk_flags = 0;

	if (sctp_cmt_on_off && sctp_cmt_use_dac) {
		/*-
		 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
		 * received, then set high bit to 1, else 0. Reset
		 * pkts_rcvd.
		 */
		sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
		asoc->cmt_dac_pkts_rcvd = 0;
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
	stcb->asoc.cumack_log_atsnt++;
	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_atsnt = 0;
	}
#endif
	sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
	sack->sack.a_rwnd = htonl(asoc->my_rwnd);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;

	/* reset the readers interpretation */
	stcb->freed_by_sorcv_sincelast = 0;

	/* Gap-ack blocks start right after the fixed SACK header. */
	gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));

	/* siz = number of mapping-array bytes covering base..highest TSN. */
	siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
	if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
		offset = 1;
		/*-
		 * cum-ack behind the mapping array, so we start and use all
		 * entries.
		 */
		jstart = 0;
	} else {
		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
		/*-
		 * we skip the first one when the cum-ack is at or above the
		 * mapping array base. Note this only works if
		 */
		jstart = 1;
	}
	if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
		/* we have a gap .. maybe */
		for (i = 0; i < siz; i++) {
			/*
			 * Each mapping-array byte indexes a precomputed
			 * sack_track entry describing its gap runs and
			 * whether they can merge across byte boundaries.
			 */
			selector = &sack_array[asoc->mapping_array[i]];
			if (mergeable && selector->right_edge) {
				/*
				 * Backup, left and right edges were ok to
				 * merge.
				 */
				num_gap_blocks--;
				gap_descriptor--;
			}
			if (selector->num_entries == 0)
				mergeable = 0;
			else {
				for (j = jstart; j < selector->num_entries; j++) {
					if (mergeable && selector->right_edge) {
						/*
						 * do a merge by NOT setting
						 * the left side
						 */
						mergeable = 0;
					} else {
						/*
						 * no merge, set the left
						 * side
						 */
						mergeable = 0;
						gap_descriptor->start = htons((selector->gaps[j].start + offset));
					}
					gap_descriptor->end = htons((selector->gaps[j].end + offset));
					num_gap_blocks++;
					gap_descriptor++;
					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
						/* no more room */
						limit_reached = 1;
						break;
					}
				}
				if (selector->left_edge) {
					mergeable = 1;
				}
			}
			if (limit_reached) {
				/* Reached the limit stop */
				break;
			}
			jstart = 0;
			offset += 8;
		}
		if (num_gap_blocks == 0) {
			/*
			 * slide not yet happened, and somehow we got called
			 * to send a sack. Cumack needs to move up.
			 */
			int abort_flag = 0;

			asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
			sctp_sack_check(stcb, 0, 0, &abort_flag);
		}
	}
	/* now we must add any dups we are going to report. */
	if ((limit_reached == 0) && (asoc->numduptsns)) {
		dup = (uint32_t *) gap_descriptor;
		for (i = 0; i < asoc->numduptsns; i++) {
			*dup = htonl(asoc->dup_tsns[i]);
			dup++;
			num_dups++;
			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
				/* no more room */
				break;
			}
		}
		asoc->numduptsns = 0;
	}
	/*
	 * now that the chunk is prepared queue it to the control chunk
	 * queue.
	 */
	a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
	    (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
	    (num_dups * sizeof(int32_t)));
	SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
	sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
	sack->sack.num_dup_tsns = htons(num_dups);
	sack->ch.chunk_length = htons(a_chk->send_size);
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}


/*
 * sctp_send_abort_tcb() - build and transmit an ABORT chunk for an existing
 * association, optionally preceded by an AUTH chunk and carrying the error
 * cause chain 'operr' (ownership of which is taken by the outgoing mbuf
 * chain).  Continues on the following lines.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_abort;
	struct mbuf *m_out = NULL, *m_end = NULL;
	struct sctp_abort_chunk *abort = NULL;
	int sz;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;
	struct sctphdr *shdr;

	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* no mbuf's */
		if (m_out)
			sctp_m_freem(m_out);
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	sz = 0;
	if (operr) {
		struct mbuf *n;

		/* Total the error-cause bytes for the chunk length below. */
		n = operr;
		while (n) {
			sz += SCTP_BUF_LEN(n);
			n = SCTP_BUF_NEXT(n);
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(*abort);
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}

	/* fill in the ABORT chunk */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	abort->ch.chunk_flags = 0;
	/* chunk length covers the header plus the linked error causes. */
	abort->ch.chunk_length = htons(sizeof(*abort) + sz);

	/* prepend and fill in the SCTP header */
	SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
	if (m_out == NULL) {
		/* TSNH: no memory */
		return;
	}
	shdr = mtod(m_out, struct sctphdr *);
	shdr->src_port = stcb->sctp_ep->sctp_lport;
	shdr->dest_port = stcb->rport;
	shdr->v_tag = htonl(stcb->asoc.peer_vtag);
	shdr->checksum = 0;
	auth_offset += sizeof(struct sctphdr);

	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
	    stcb->asoc.primary_destination,
	    (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
	    m_out, auth_offset, auth, 1, 0, NULL, 0, stcb->asoc.primary_destination->port, so_locked, NULL);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * sctp_send_shutdown_complete() - build a SHUTDOWN-COMPLETE message for an
 * existing association and push it out via sctp_lowlevel_chunk_output().
 */
void
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *m_shutdown_comp;
	struct sctp_shutdown_complete_msg *comp_cp;

	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown_comp == NULL) {
		/* no mbuf's */
		return;
	}
	comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
	comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	comp_cp->shut_cmp.ch.chunk_flags = 0;
	comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
	comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
	comp_cp->sh.dest_port = stcb->rport;
	comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
	comp_cp->sh.checksum = 0;

	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, net->port, SCTP_SO_NOT_LOCKED, NULL);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}

/*
 * sctp_send_shutdown_complete2() - reply with a SHUTDOWN-COMPLETE when no
 * TCB exists (out-of-the-blue SHUTDOWN-ACK).  Builds the full IPv4 or IPv6
 * packet by hand, swapping the source/destination of the incoming packet
 * 'm'/'sh', optionally inside a UDP tunnel when 'port' is non-zero, and
 * sends it directly via SCTP_IP_OUTPUT/SCTP_IP6_OUTPUT.
 */
void
sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
    uint32_t vrf_id, uint16_t port)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct ip *iph, *iph_out;
	struct udphdr *udp;

#ifdef INET6
	struct ip6_hdr *ip6, *ip6_out;

#endif
	int offset_out, len, mlen;
	struct sctp_shutdown_complete_msg *comp_cp;

	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
		break;
#endif
	default:
		return;
	}
	if (port) {
		/* UDP encapsulation adds a UDP header between IP and SCTP. */
		len += sizeof(struct udphdr);
	}
	mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
	if (mout == NULL) {
		return;
	}
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = NULL;
	iph_out = NULL;
#ifdef INET6
	ip6_out = NULL;
#endif
	offset_out = 0;

	switch (iph->ip_v) {
	case IPVERSION:
		iph_out = mtod(mout, struct ip *);

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		if (port) {
			iph_out->ip_p = IPPROTO_UDP;
		} else {
			iph_out->ip_p = IPPROTO_SCTP;
		}
		/* Reply goes back the way the offending packet came. */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;

		/* let IP layer calculate this */
		iph_out->ip_sum = 0;
		offset_out += sizeof(*iph_out);
		comp_cp = (struct sctp_shutdown_complete_msg *)(
		    (caddr_t)iph_out + offset_out);
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);

		/* Fill in the IPv6 header for the ABORT */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = ip6_defhlim;
		if (port) {
			ip6_out->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6_out->ip6_nxt = IPPROTO_SCTP;
		}
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;
		/*
		 * ?? The old code had both the iph len + payload, I think
		 * this is wrong and would never have worked
		 */
		/*
		 * NOTE(review): no htons() on ip6_plen here — presumably the
		 * output path fixes the payload length up; confirm against
		 * this stack's ip6_output behavior.
		 */
		ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
		offset_out += sizeof(*ip6_out);
		comp_cp = (struct sctp_shutdown_complete_msg *)(
		    (caddr_t)ip6_out + offset_out);
		break;
#endif
	default:
		/* Currently not supported. */
		/*
		 * NOTE(review): unreachable in practice (the first switch
		 * already returned for unknown versions), but if it were
		 * reached 'mout' would leak here.
		 */
		return;
	}
	if (port) {
		/* Build the encapsulating UDP header in front of the SCTP msg. */
		udp = (struct udphdr *)comp_cp;
		udp->uh_sport = htons(sctp_udp_tunneling_port);
		udp->uh_dport = port;
		udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
		udp->uh_sum = 0;
		offset_out += sizeof(struct udphdr);
		comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
	}
	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* no mbuf's */
		sctp_m_freem(mout);
		return;
	}
	/* Now copy in and fill in the ABORT tags etc. */
	comp_cp->sh.src_port = sh->dest_port;
	comp_cp->sh.dest_port = sh->src_port;
	comp_cp->sh.checksum = 0;
	comp_cp->sh.v_tag = sh->v_tag;
	comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
	comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));

	/* add checksum */
	comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
	if (iph_out != NULL) {
		sctp_route_t ro;
		int ret;
		struct sctp_tcb *stcb = NULL;

		mlen = SCTP_BUF_LEN(mout);
		bzero(&ro, sizeof ro);
		/* set IPv4 length */
		iph_out->ip_len = mlen;
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, mlen);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, mlen);

		/* out it goes */
		SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#ifdef INET6
	if (ip6_out != NULL) {
		struct route_in6 ro;
		int ret;
		struct sctp_tcb *stcb = NULL;
		struct ifnet *ifp = NULL;

		bzero(&ro, sizeof(ro));
		mlen = SCTP_BUF_LEN(mout);
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, mlen);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;

}

/*
 * sctp_select_hb_destination() - pick the net that most needs a heartbeat
 * (longest time since last send, or unconfirmed).  Continues on the
 * following lines.  Also stores the current time through 'now'.
 */
static struct sctp_nets *
sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
{
	struct sctp_nets *net, *hnet;
	int ms_goneby, highest_ms, state_overide = 0;

	(void)SCTP_GETTIME_TIMEVAL(now);
	highest_ms = 0;
	hnet = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (
		    ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
		    (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
		    ) {
			/*
			 * Skip this guy from consideration if HB is off AND
			 * its confirmed
			 */
			continue;
		}
		if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
			/* skip this dest net from consideration */
			continue;
		}
		if (net->last_sent_time.tv_sec) {
			/* Sent to so we subtract */
			ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
		} else
			/* Never been sent to */
			ms_goneby = 0x7fffffff;
		/*-
		 * When the address state is unconfirmed but still
		 * considered reachable, we HB at a higher rate. Once it
		 * goes confirmed OR reaches the "unreachable" state, thenw
		 * we cut it back to HB at a more normal pace.
		 */
		if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
			state_overide = 1;
		} else {
			state_overide = 0;
		}

		/* Candidate must be past its RTO (or unconfirmed) and idle longest. */
		if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
		    (ms_goneby > highest_ms)) {
			highest_ms = ms_goneby;
			hnet = net;
		}
	}
	if (hnet &&
	    ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
		state_overide = 1;
	} else {
		state_overide = 0;
	}

	if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
		/*-
		 * Found the one with longest delay bounds OR it is
		 * unconfirmed and still not marked unreachable.
		 */
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
#ifdef SCTP_DEBUG
		if (hnet) {
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
			    (struct sockaddr *)&hnet->ro._l_addr);
		} else {
			SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
		}
#endif
		/* update the timer now */
		hnet->last_sent_time = *now;
		return (hnet);
	}
	/* Nothing to HB */
	return (NULL);
}

/*
 * sctp_send_hb() - queue a HEARTBEAT chunk.  When user_req is 0 the
 * destination is chosen by sctp_select_hb_destination(); otherwise 'u_net'
 * is used.  Returns 1 when a heartbeat was queued, 0 when there was nothing
 * to do, and -1 when threshold management tore the association down.
 */
int
sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *net;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (user_req == 0) {
		/* Also fills in 'now' via SCTP_GETTIME_TIMEVAL. */
		net = sctp_select_hb_destination(stcb, &now);
		if (net == NULL) {
			/*-
			 * All our busy none to send to, just start the
			 * timer again.
			 */
			if (stcb->asoc.state == 0) {
				return (0);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep,
			    stcb,
			    net);
			return (0);
		}
	} else {
		net = u_net;
		if (net == NULL) {
			return (0);
		}
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	sin = (struct sockaddr_in *)&net->ro._l_addr;
	if (sin->sin_family != AF_INET) {
		if (sin->sin_family != AF_INET6) {
			/* huh */
			return (0);
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
		return (0);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (0);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* Timestamp is opaque to the peer; echoed back in the HB-ACK. */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.user_req = user_req;
	hb->heartbeat.hb_info.addr_family = sin->sin_family;
	hb->heartbeat.hb_info.addr_len = sin->sin_len;
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	if (sin->sin_family == AF_INET) {
		memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
	} else if (sin->sin_family == AF_INET6) {
		/* We leave the scope the way it is in our lookup table. */
		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
		memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
	} else {
		/* huh compiler bug */
		/*
		 * NOTE(review): unreachable (family was validated above);
		 * if it could be reached, chk and its mbuf would leak here.
		 */
		return (0);
	}

	/*
	 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
	 * PF-heartbeats. Because of this, threshold management is done by
	 * the t3 timer handler, and does not need to be done upon the send
	 * of a PF-heartbeat. If CMT PF is on and the destination to which a
	 * heartbeat is being sent is in PF state, do NOT do threshold
	 * management.
	 */
	if ((sctp_cmt_pf == 0) || ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
		/* ok we have a destination that needs a beat */
		/* lets do the theshold management Qiaobing style */
		if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
		    stcb->asoc.max_send_times)) {
			/*-
			 * we have lost the association, in a way this is
			 * quite bad since we really are one less time since
			 * we really did not send yet. This is the down side
			 * to the Q's style as defined in the RFC and not my
			 * alternate style defined in the RFC.
			 */
			if (chk->data != NULL) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/*
			 * Here we do NOT use the macro since the
			 * association is now gone.
			 */
			if (chk->whoTo) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = NULL;
			}
			sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
			return (-1);
		}
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	/*-
	 * Call directly med level routine to put out the chunk. It will
	 * always tumble out control chunks aka HB but it may even tumble
	 * out data too.
 */
	return (1);
}

/*
 * sctp_send_ecn_echo() - queue an ECN-ECHO chunk reporting 'high_tsn'.  If
 * an ECN-ECHO is already on the control queue its TSN is simply updated in
 * place; otherwise a new chunk is allocated, filled in, and queued, and the
 * association's count of queued ECN-ECHOs is bumped.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			/* found a previous ECN_ECHO update it if needed */
			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ecne->tsn = htonl(high_tsn);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_sendecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Remember one more ECN-ECHO is on the queue (cleared on CWR). */
	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * sctp_send_packet_dropped() - report a dropped packet to the peer via a
 * PKT-DROP chunk (body continues past this view).
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
10597 struct sctp_pktdrop_chunk *drp; 10598 struct sctp_tmit_chunk *chk; 10599 uint8_t *datap; 10600 int len; 10601 int was_trunc = 0; 10602 struct ip *iph; 10603 10604 #ifdef INET6 10605 struct ip6_hdr *ip6h; 10606 10607 #endif 10608 int fullsz = 0, extra = 0; 10609 long spc; 10610 int offset; 10611 struct sctp_chunkhdr *ch, chunk_buf; 10612 unsigned int chk_length; 10613 10614 if (!stcb) { 10615 return; 10616 } 10617 asoc = &stcb->asoc; 10618 SCTP_TCB_LOCK_ASSERT(stcb); 10619 if (asoc->peer_supports_pktdrop == 0) { 10620 /*- 10621 * peer must declare support before I send one. 10622 */ 10623 return; 10624 } 10625 if (stcb->sctp_socket == NULL) { 10626 return; 10627 } 10628 sctp_alloc_a_chunk(stcb, chk); 10629 if (chk == NULL) { 10630 return; 10631 } 10632 chk->copy_by_ref = 0; 10633 iph = mtod(m, struct ip *); 10634 if (iph == NULL) { 10635 sctp_free_a_chunk(stcb, chk); 10636 return; 10637 } 10638 switch (iph->ip_v) { 10639 case IPVERSION: 10640 /* IPv4 */ 10641 len = chk->send_size = iph->ip_len; 10642 break; 10643 #ifdef INET6 10644 case IPV6_VERSION >> 4: 10645 /* IPv6 */ 10646 ip6h = mtod(m, struct ip6_hdr *); 10647 len = chk->send_size = htons(ip6h->ip6_plen); 10648 break; 10649 #endif 10650 default: 10651 return; 10652 } 10653 /* Validate that we do not have an ABORT in here. 
*/ 10654 offset = iphlen + sizeof(struct sctphdr); 10655 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 10656 sizeof(*ch), (uint8_t *) & chunk_buf); 10657 while (ch != NULL) { 10658 chk_length = ntohs(ch->chunk_length); 10659 if (chk_length < sizeof(*ch)) { 10660 /* break to abort land */ 10661 break; 10662 } 10663 switch (ch->chunk_type) { 10664 case SCTP_PACKET_DROPPED: 10665 case SCTP_ABORT_ASSOCIATION: 10666 /*- 10667 * we don't respond with an PKT-DROP to an ABORT 10668 * or PKT-DROP 10669 */ 10670 sctp_free_a_chunk(stcb, chk); 10671 return; 10672 default: 10673 break; 10674 } 10675 offset += SCTP_SIZE32(chk_length); 10676 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 10677 sizeof(*ch), (uint8_t *) & chunk_buf); 10678 } 10679 10680 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) > 10681 min(stcb->asoc.smallest_mtu, MCLBYTES)) { 10682 /* 10683 * only send 1 mtu worth, trim off the excess on the end. 10684 */ 10685 fullsz = len - extra; 10686 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD; 10687 was_trunc = 1; 10688 } 10689 chk->asoc = &stcb->asoc; 10690 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 10691 if (chk->data == NULL) { 10692 jump_out: 10693 sctp_free_a_chunk(stcb, chk); 10694 return; 10695 } 10696 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 10697 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 10698 if (drp == NULL) { 10699 sctp_m_freem(chk->data); 10700 chk->data = NULL; 10701 goto jump_out; 10702 } 10703 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) + 10704 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 10705 chk->book_size_scale = 0; 10706 if (was_trunc) { 10707 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 10708 drp->trunc_len = htons(fullsz); 10709 /* 10710 * Len is already adjusted to size minus overhead above take 10711 * out the pkt_drop chunk itself from it. 
10712 */ 10713 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk); 10714 len = chk->send_size; 10715 } else { 10716 /* no truncation needed */ 10717 drp->ch.chunk_flags = 0; 10718 drp->trunc_len = htons(0); 10719 } 10720 if (bad_crc) { 10721 drp->ch.chunk_flags |= SCTP_BADCRC; 10722 } 10723 chk->send_size += sizeof(struct sctp_pktdrop_chunk); 10724 SCTP_BUF_LEN(chk->data) = chk->send_size; 10725 chk->sent = SCTP_DATAGRAM_UNSENT; 10726 chk->snd_count = 0; 10727 if (net) { 10728 /* we should hit here */ 10729 chk->whoTo = net; 10730 } else { 10731 chk->whoTo = asoc->primary_destination; 10732 } 10733 atomic_add_int(&chk->whoTo->ref_count, 1); 10734 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 10735 chk->rec.chunk_id.can_take_data = 1; 10736 drp->ch.chunk_type = SCTP_PACKET_DROPPED; 10737 drp->ch.chunk_length = htons(chk->send_size); 10738 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); 10739 if (spc < 0) { 10740 spc = 0; 10741 } 10742 drp->bottle_bw = htonl(spc); 10743 if (asoc->my_rwnd) { 10744 drp->current_onq = htonl(asoc->size_on_reasm_queue + 10745 asoc->size_on_all_streams + 10746 asoc->my_rwnd_control_len + 10747 stcb->sctp_socket->so_rcv.sb_cc); 10748 } else { 10749 /*- 10750 * If my rwnd is 0, possibly from mbuf depletion as well as 10751 * space used, tell the peer there is NO space aka onq == bw 10752 */ 10753 drp->current_onq = htonl(spc); 10754 } 10755 drp->reserved = 0; 10756 datap = drp->data; 10757 m_copydata(m, iphlen, len, (caddr_t)datap); 10758 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 10759 asoc->ctrl_queue_cnt++; 10760 } 10761 10762 void 10763 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn) 10764 { 10765 struct sctp_association *asoc; 10766 struct sctp_cwr_chunk *cwr; 10767 struct sctp_tmit_chunk *chk; 10768 10769 asoc = &stcb->asoc; 10770 SCTP_TCB_LOCK_ASSERT(stcb); 10771 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 10772 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) { 10773 /* 
/*
 * Queue (or update) an ECN-CWR chunk acknowledging that we reduced our
 * congestion window up to 'high_tsn'.  An already-queued CWR is only
 * advanced, never moved backwards (compare_with_wrap guards TSN wrap).
 * Requires the TCB lock.
 */
void
sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
			/* found a previous ECN_CWR update it if needed */
			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
			    MAX_TSN)) {
				cwr->tsn = htonl(high_tsn);
			}
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	/* leave room for the headers prepended at transmit time */
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = 0;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Append an Outgoing-SSN-Reset-Request parameter to an in-progress
 * STREAM-RESET chunk.  'list' holds 'number_entries' stream ids (host
 * order); an empty list means "all streams".  Updates the chunk header
 * length and the tmit-chunk bookkeeping to the 4-byte padded size.
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);

	/* current (already padded) chunk length = offset of the new param */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append an Incoming-SSN-Reset-Request parameter to an in-progress
 * STREAM-RESET chunk; same list/padding conventions as the _out variant.
 */
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq)
{
	int len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);

	/* current (already padded) chunk length = offset of the new param */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
*/ 10918 len = sizeof(struct sctp_stream_reset_tsn_request); 10919 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 10920 req_tsn->ph.param_length = htons(len); 10921 req_tsn->request_seq = htonl(seq); 10922 10923 /* now fix the chunk length */ 10924 ch->chunk_length = htons(len + old_len); 10925 chk->send_size = len + old_len; 10926 chk->book_size = SCTP_SIZE32(chk->send_size); 10927 chk->book_size_scale = 0; 10928 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 10929 return; 10930 } 10931 10932 void 10933 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 10934 uint32_t resp_seq, uint32_t result) 10935 { 10936 int len, old_len; 10937 struct sctp_stream_reset_response *resp; 10938 struct sctp_chunkhdr *ch; 10939 10940 ch = mtod(chk->data, struct sctp_chunkhdr *); 10941 10942 10943 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 10944 10945 /* get to new offset for the param. */ 10946 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 10947 /* now how long will this param be? 
*/ 10948 len = sizeof(struct sctp_stream_reset_response); 10949 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 10950 resp->ph.param_length = htons(len); 10951 resp->response_seq = htonl(resp_seq); 10952 resp->result = ntohl(result); 10953 10954 /* now fix the chunk length */ 10955 ch->chunk_length = htons(len + old_len); 10956 chk->book_size = len + old_len; 10957 chk->book_size_scale = 0; 10958 chk->send_size = SCTP_SIZE32(chk->book_size); 10959 SCTP_BUF_LEN(chk->data) = chk->send_size; 10960 return; 10961 10962 } 10963 10964 10965 void 10966 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 10967 uint32_t resp_seq, uint32_t result, 10968 uint32_t send_una, uint32_t recv_next) 10969 { 10970 int len, old_len; 10971 struct sctp_stream_reset_response_tsn *resp; 10972 struct sctp_chunkhdr *ch; 10973 10974 ch = mtod(chk->data, struct sctp_chunkhdr *); 10975 10976 10977 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 10978 10979 /* get to new offset for the param. */ 10980 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 10981 /* now how long will this param be? 
/*
 * Build and queue a STREAM-RESET chunk containing the requested
 * combination of parameters:
 *   send_out_req - reset our outgoing SSNs (may carry a stream list)
 *   send_in_req  - ask the peer to reset its outgoing SSNs
 *   send_tsn_req - full SSN/TSN reset (mutually exclusive with the above)
 * Returns 0 on success or an errno: EBUSY if a reset is already
 * outstanding, EINVAL on a bad combination, ENOMEM on allocation failure.
 * Starts the STRRESET retransmission timer on success.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list,
    uint8_t send_out_req, uint32_t resp_seq,
    uint8_t send_in_req,
    uint8_t send_tsn_req)
{

	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*-
		 * Already one pending, must get ACK back to clear the flag.
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* start with a bare chunk header; params are appended below */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	/* a full cluster: params grow the chunk in place */
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* each request parameter consumes one sequence number */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	/* remember the pending chunk so the ACK handler can find it */
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
/*
 * Formulate an ABORT for the (out-of-the-blue or errored) packet in 'm'
 * and send it straight back to the originator, swapping the IP/SCTP
 * addresses and ports.  'vtag' == 0 means "no TCB" (echo the sender's
 * tag and set SCTP_HAD_NO_TCB); 'err_cause' is an optional mbuf chain of
 * error-cause TLVs that is appended and consumed; nonzero 'port' wraps
 * the packet in a UDP tunnel header.  Never called with an ABORT as
 * input (checked first).
 */
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
    struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
{
	/*-
	 * Formulate the abort message, and send it back down.
	 */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct sctp_abort_msg *abm;
	struct ip *iph, *iph_out;
	struct udphdr *udp;

#ifdef INET6
	struct ip6_hdr *ip6, *ip6_out;

#endif
	int iphlen_out, len;

	/* don't respond to ABORT with ABORT */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
		break;
#endif
	default:
		/* NOTE(review): err_cause is not freed on this path — TODO confirm */
		return;
	}
	if (port) {
		len += sizeof(struct udphdr);
	}
	mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
	if (mout == NULL) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	SCTP_BUF_LEN(mout) = len;
	/* err_cause chain (if any) rides behind the header mbuf */
	SCTP_BUF_NEXT(mout) = err_cause;
	iph_out = NULL;
#ifdef INET6
	ip6_out = NULL;
#endif
	switch (iph->ip_v) {
	case IPVERSION:
		iph_out = mtod(mout, struct ip *);

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		if (port) {
			iph_out->ip_p = IPPROTO_UDP;
		} else {
			iph_out->ip_p = IPPROTO_SCTP;
		}
		/* reply goes back the way it came: swap src/dst */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
		/* let IP layer calculate this */
		iph_out->ip_sum = 0;

		iphlen_out = sizeof(*iph_out);
		abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);

		/* Fill in the IP6 header for the ABORT */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = ip6_defhlim;
		if (port) {
			ip6_out->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6_out->ip6_nxt = IPPROTO_SCTP;
		}
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;

		iphlen_out = sizeof(*ip6_out);
		abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
		break;
#endif
	default:
		/* Currently not supported */
		if (err_cause)
			sctp_m_freem(err_cause);
		sctp_m_freem(mout);
		return;
	}

	/* optional UDP tunneling header sits between IP and SCTP */
	udp = (struct udphdr *)abm;
	if (port) {
		udp->uh_sport = htons(sctp_udp_tunneling_port);
		udp->uh_dport = port;
		/* set udp->uh_ulen later */
		udp->uh_sum = 0;
		iphlen_out += sizeof(struct udphdr);
		abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
	}
	abm->sh.src_port = sh->dest_port;
	abm->sh.dest_port = sh->src_port;
	abm->sh.checksum = 0;
	if (vtag == 0) {
		/* no TCB: echo the peer's tag and flag it (T-bit) */
		abm->sh.v_tag = sh->v_tag;
		abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		abm->sh.v_tag = htonl(vtag);
		abm->msg.ch.chunk_flags = 0;
	}
	abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;

	if (err_cause) {
		struct mbuf *m_tmp = err_cause;
		int err_len = 0;

		/* get length of the err_cause chain */
		while (m_tmp != NULL) {
			err_len += SCTP_BUF_LEN(m_tmp);
			m_tmp = SCTP_BUF_NEXT(m_tmp);
		}
		len = SCTP_BUF_LEN(mout) + err_len;
		if (err_len % 4) {
			/* need pad at end of chunk */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (len % 4);
			m_copyback(mout, len, padlen, (caddr_t)&cpthis);
			len += padlen;
		}
		/* chunk length excludes the pad, per RFC */
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
	} else {
		len = SCTP_BUF_LEN(mout);
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
	}

	/* add checksum */
	abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* no mbuf's */
		sctp_m_freem(mout);
		return;
	}
	if (iph_out != NULL) {
		sctp_route_t ro;
		struct sctp_tcb *stcb = NULL;
		int ret;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof ro);
		if (port) {
			udp->uh_ulen = htons(len - sizeof(struct ip));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
		SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
		/* set IPv4 length */
		iph_out->ip_len = len;
		/* out it goes */
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#ifdef INET6
	if (ip6_out != NULL) {
		struct route_in6 ro;
		int ret;
		struct sctp_tcb *stcb = NULL;
		struct ifnet *ifp = NULL;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(ro));
		if (port) {
			udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
		SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
		ip6_out->ip6_plen = len - sizeof(*ip6_out);
#ifdef SCTP_PACKET_LOGGING
		if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
}
/*
 * Send an OPERATION-ERROR chunk back to the source of packet 'm'.
 * 'scm' is an mbuf chain of error-cause TLVs; an SCTP common header and
 * chunk header are prepended to it here, so the caller must not touch it
 * afterwards (consumed on all paths).  'vtag' is already in network
 * order; nonzero 'port' adds a UDP tunneling header.
 */
void
sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
    uint32_t vrf_id, uint16_t port)
{
	struct mbuf *o_pak;
	struct sctphdr *ihdr;
	int retcode;
	struct sctphdr *ohdr;
	struct sctp_chunkhdr *ophdr;
	struct ip *iph;
	struct udphdr *udp;
	struct mbuf *mout;

#ifdef SCTP_DEBUG
	struct sockaddr_in6 lsa6, fsa6;

#endif
	uint32_t val;
	struct mbuf *at;
	int len;

	iph = mtod(m, struct ip *);
	ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);

	/* prepend SCTP common header + chunk header onto the cause chain */
	SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
	if (scm == NULL) {
		/* can't send because we can't add a mbuf */
		return;
	}
	ohdr = mtod(scm, struct sctphdr *);
	/* reflect ports back to the sender */
	ohdr->src_port = ihdr->dest_port;
	ohdr->dest_port = ihdr->src_port;
	ohdr->v_tag = vtag;
	ohdr->checksum = 0;
	ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
	ophdr->chunk_type = SCTP_OPERATION_ERROR;
	ophdr->chunk_flags = 0;
	/* total the chain to size the chunk */
	len = 0;
	at = scm;
	while (at) {
		len += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
	if (len % 4) {
		/* need padding */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (len % 4);
		m_copyback(scm, len, padlen, (caddr_t)&cpthis);
		len += padlen;
	}
	val = sctp_calculate_sum(scm, NULL, 0);
	/* header mbuf sized for the larger (v6) case when INET6 is built */
#ifdef INET6
	if (port) {
		mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
	} else {
		mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
	}
#else
	if (port) {
		mout = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
	} else {
		mout = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
	}
#endif
	if (mout == NULL) {
		sctp_m_freem(scm);
		return;
	}
	SCTP_BUF_NEXT(mout) = scm;
	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* frees scm too, via the chain */
		sctp_m_freem(mout);
		return;
	}
	ohdr->checksum = val;
	switch (iph->ip_v) {
	case IPVERSION:
		{
			/* V4 */
			struct ip *out;
			sctp_route_t ro;
			struct sctp_tcb *stcb = NULL;

			SCTP_BUF_LEN(mout) = sizeof(struct ip);
			len += sizeof(struct ip);
			if (port) {
				SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
				len += sizeof(struct udphdr);
			}
			bzero(&ro, sizeof ro);
			out = mtod(mout, struct ip *);
			out->ip_v = iph->ip_v;
			out->ip_hl = (sizeof(struct ip) / 4);
			out->ip_tos = iph->ip_tos;
			out->ip_id = iph->ip_id;
			out->ip_off = 0;
			out->ip_ttl = MAXTTL;
			if (port) {
				out->ip_p = IPPROTO_UDP;
			} else {
				out->ip_p = IPPROTO_SCTP;
			}
			out->ip_sum = 0;
			/* swap addresses: reply to the sender */
			out->ip_src = iph->ip_dst;
			out->ip_dst = iph->ip_src;
			out->ip_len = len;
			if (port) {
				udp = (struct udphdr *)(out + 1);
				udp->uh_sport = htons(sctp_udp_tunneling_port);
				udp->uh_dport = port;
				udp->uh_ulen = htons(len - sizeof(struct ip));
				udp->uh_sum = 0;
			}
#ifdef SCTP_PACKET_LOGGING
			if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
				sctp_packet_log(mout, len);
#endif
			SCTP_ATTACH_CHAIN(o_pak, mout, len);

			SCTP_IP_OUTPUT(retcode, o_pak, &ro, stcb, vrf_id);

			SCTP_STAT_INCR(sctps_sendpackets);
			SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
			/* Free the route if we got one back */
			if (ro.ro_rt)
				RTFREE(ro.ro_rt);
			break;
		}
#ifdef INET6
	case IPV6_VERSION >> 4:
		{
			/* V6 */
			struct route_in6 ro;
			int ret;
			struct sctp_tcb *stcb = NULL;
			struct ifnet *ifp = NULL;
			struct ip6_hdr *out6, *in6;

			SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr);
			len += sizeof(struct ip6_hdr);
			bzero(&ro, sizeof ro);
			if (port) {
				SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
				len += sizeof(struct udphdr);
			}
			in6 = mtod(m, struct ip6_hdr *);
			out6 = mtod(mout, struct ip6_hdr *);
			out6->ip6_flow = in6->ip6_flow;
			out6->ip6_hlim = ip6_defhlim;
			if (port) {
				out6->ip6_nxt = IPPROTO_UDP;
			} else {
				out6->ip6_nxt = IPPROTO_SCTP;
			}
			out6->ip6_src = in6->ip6_dst;
			out6->ip6_dst = in6->ip6_src;
			out6->ip6_plen = len - sizeof(struct ip6_hdr);
			if (port) {
				udp = (struct udphdr *)(out6 + 1);
				udp->uh_sport = htons(sctp_udp_tunneling_port);
				udp->uh_dport = port;
				udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
				udp->uh_sum = 0;
			}
#ifdef SCTP_DEBUG
			/* sockaddrs built purely for debug printing below */
			bzero(&lsa6, sizeof(lsa6));
			lsa6.sin6_len = sizeof(lsa6);
			lsa6.sin6_family = AF_INET6;
			lsa6.sin6_addr = out6->ip6_src;
			bzero(&fsa6, sizeof(fsa6));
			fsa6.sin6_len = sizeof(fsa6);
			fsa6.sin6_family = AF_INET6;
			fsa6.sin6_addr = out6->ip6_dst;
#endif
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_operr_to calling ipv6 output:\n");
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "src: ");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&lsa6);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "dst ");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&fsa6);

#ifdef SCTP_PACKET_LOGGING
			if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
				sctp_packet_log(mout, len);
#endif
			SCTP_ATTACH_CHAIN(o_pak, mout, len);
			SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);

			SCTP_STAT_INCR(sctps_sendpackets);
			SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
			/* Free the route if we got one back */
			if (ro.ro_rt)
				RTFREE(ro.ro_rt);
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
}
M_EOR : 0))); 11523 if (m == NULL) { 11524 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11525 *error = ENOMEM; 11526 } else { 11527 *sndout = m_length(m, NULL); 11528 *new_tail = m_last(m); 11529 } 11530 return (m); 11531 } 11532 11533 static int 11534 sctp_copy_one(struct sctp_stream_queue_pending *sp, 11535 struct uio *uio, 11536 int resv_upfront) 11537 { 11538 int left; 11539 11540 left = sp->length; 11541 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, 11542 resv_upfront, 0); 11543 if (sp->data == NULL) { 11544 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11545 return (ENOMEM); 11546 } 11547 sp->tail_mbuf = m_last(sp->data); 11548 return (0); 11549 } 11550 11551 11552 11553 static struct sctp_stream_queue_pending * 11554 sctp_copy_it_in(struct sctp_tcb *stcb, 11555 struct sctp_association *asoc, 11556 struct sctp_sndrcvinfo *srcv, 11557 struct uio *uio, 11558 struct sctp_nets *net, 11559 int max_send_len, 11560 int user_marks_eor, 11561 int *error, 11562 int non_blocking) 11563 { 11564 /*- 11565 * This routine must be very careful in its work. Protocol 11566 * processing is up and running so care must be taken to spl...() 11567 * when you need to do something that may effect the stcb/asoc. The 11568 * sb is locked however. When data is copied the protocol processing 11569 * should be enabled since this is a slower operation... 11570 */ 11571 struct sctp_stream_queue_pending *sp = NULL; 11572 int resv_in_first; 11573 11574 *error = 0; 11575 /* Now can we send this? 
*/ 11576 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 11577 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 11578 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 11579 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 11580 /* got data while shutting down */ 11581 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 11582 *error = ECONNRESET; 11583 goto out_now; 11584 } 11585 sctp_alloc_a_strmoq(stcb, sp); 11586 if (sp == NULL) { 11587 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11588 *error = ENOMEM; 11589 goto out_now; 11590 } 11591 sp->act_flags = 0; 11592 sp->sender_all_done = 0; 11593 sp->sinfo_flags = srcv->sinfo_flags; 11594 sp->timetolive = srcv->sinfo_timetolive; 11595 sp->ppid = srcv->sinfo_ppid; 11596 sp->context = srcv->sinfo_context; 11597 sp->strseq = 0; 11598 (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 11599 11600 sp->stream = srcv->sinfo_stream; 11601 sp->length = min(uio->uio_resid, max_send_len); 11602 if ((sp->length == (uint32_t) uio->uio_resid) && 11603 ((user_marks_eor == 0) || 11604 (srcv->sinfo_flags & SCTP_EOF) || 11605 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 11606 sp->msg_is_complete = 1; 11607 } else { 11608 sp->msg_is_complete = 0; 11609 } 11610 sp->sender_all_done = 0; 11611 sp->some_taken = 0; 11612 sp->put_last_out = 0; 11613 resv_in_first = sizeof(struct sctp_data_chunk); 11614 sp->data = sp->tail_mbuf = NULL; 11615 *error = sctp_copy_one(sp, uio, resv_in_first); 11616 if (*error) { 11617 sctp_free_a_strmoq(stcb, sp); 11618 sp = NULL; 11619 } else { 11620 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 11621 sp->net = net; 11622 sp->addr_over = 1; 11623 } else { 11624 sp->net = asoc->primary_destination; 11625 sp->addr_over = 0; 11626 } 11627 atomic_add_int(&sp->net->ref_count, 1); 11628 sctp_set_prsctp_policy(stcb, sp); 11629 } 11630 out_now: 11631 return (sp); 11632 } 11633 11634 11635 int 11636 sctp_sosend(struct socket *so, 11637 struct sockaddr *addr, 
11638 struct uio *uio, 11639 struct mbuf *top, 11640 struct mbuf *control, 11641 int flags, 11642 struct thread *p 11643 ) 11644 { 11645 struct sctp_inpcb *inp; 11646 int error, use_rcvinfo = 0; 11647 struct sctp_sndrcvinfo srcv; 11648 11649 inp = (struct sctp_inpcb *)so->so_pcb; 11650 if (control) { 11651 /* process cmsg snd/rcv info (maybe a assoc-id) */ 11652 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control, 11653 sizeof(srcv))) { 11654 /* got one */ 11655 use_rcvinfo = 1; 11656 } 11657 } 11658 error = sctp_lower_sosend(so, addr, uio, top, 11659 control, 11660 flags, 11661 use_rcvinfo, &srcv 11662 ,p 11663 ); 11664 return (error); 11665 } 11666 11667 11668 int 11669 sctp_lower_sosend(struct socket *so, 11670 struct sockaddr *addr, 11671 struct uio *uio, 11672 struct mbuf *i_pak, 11673 struct mbuf *control, 11674 int flags, 11675 int use_rcvinfo, 11676 struct sctp_sndrcvinfo *srcv 11677 , 11678 struct thread *p 11679 ) 11680 { 11681 unsigned int sndlen = 0, max_len; 11682 int error, len; 11683 struct mbuf *top = NULL; 11684 int queue_only = 0, queue_only_for_init = 0; 11685 int free_cnt_applied = 0; 11686 int un_sent = 0; 11687 int now_filled = 0; 11688 unsigned int inqueue_bytes = 0; 11689 struct sctp_block_entry be; 11690 struct sctp_inpcb *inp; 11691 struct sctp_tcb *stcb = NULL; 11692 struct timeval now; 11693 struct sctp_nets *net; 11694 struct sctp_association *asoc; 11695 struct sctp_inpcb *t_inp; 11696 int user_marks_eor; 11697 int create_lock_applied = 0; 11698 int nagle_applies = 0; 11699 int some_on_control = 0; 11700 int got_all_of_the_send = 0; 11701 int hold_tcblock = 0; 11702 int non_blocking = 0; 11703 int temp_flags = 0; 11704 uint32_t local_add_more, local_soresv = 0; 11705 11706 error = 0; 11707 net = NULL; 11708 stcb = NULL; 11709 asoc = NULL; 11710 11711 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 11712 if (inp == NULL) { 11713 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 11714 error = EFAULT; 11715 if 
(i_pak) { 11716 SCTP_RELEASE_PKT(i_pak); 11717 } 11718 return (error); 11719 } 11720 if ((uio == NULL) && (i_pak == NULL)) { 11721 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11722 return (EINVAL); 11723 } 11724 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 11725 atomic_add_int(&inp->total_sends, 1); 11726 if (uio) { 11727 if (uio->uio_resid < 0) { 11728 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11729 return (EINVAL); 11730 } 11731 sndlen = uio->uio_resid; 11732 } else { 11733 top = SCTP_HEADER_TO_CHAIN(i_pak); 11734 sndlen = SCTP_HEADER_LEN(i_pak); 11735 } 11736 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n", 11737 addr, 11738 sndlen); 11739 /*- 11740 * Pre-screen address, if one is given the sin-len 11741 * must be set correctly! 11742 */ 11743 if (addr) { 11744 if ((addr->sa_family == AF_INET) && 11745 (addr->sa_len != sizeof(struct sockaddr_in))) { 11746 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11747 error = EINVAL; 11748 goto out_unlocked; 11749 } else if ((addr->sa_family == AF_INET6) && 11750 (addr->sa_len != sizeof(struct sockaddr_in6))) { 11751 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11752 error = EINVAL; 11753 goto out_unlocked; 11754 } 11755 } 11756 hold_tcblock = 0; 11757 11758 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 11759 (inp->sctp_socket->so_qlimit)) { 11760 /* The listener can NOT send */ 11761 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 11762 error = EFAULT; 11763 goto out_unlocked; 11764 } 11765 if ((use_rcvinfo) && srcv) { 11766 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || 11767 PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) { 11768 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11769 error = EINVAL; 11770 goto out_unlocked; 11771 } 11772 if (srcv->sinfo_flags) 11773 SCTP_STAT_INCR(sctps_sends_with_flags); 11774 11775 if (srcv->sinfo_flags & 
SCTP_SENDALL) { 11776 /* its a sendall */ 11777 error = sctp_sendall(inp, uio, top, srcv); 11778 top = NULL; 11779 goto out_unlocked; 11780 } 11781 } 11782 /* now we must find the assoc */ 11783 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 11784 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 11785 SCTP_INP_RLOCK(inp); 11786 stcb = LIST_FIRST(&inp->sctp_asoc_list); 11787 if (stcb == NULL) { 11788 SCTP_INP_RUNLOCK(inp); 11789 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 11790 error = ENOTCONN; 11791 goto out_unlocked; 11792 } 11793 hold_tcblock = 0; 11794 SCTP_INP_RUNLOCK(inp); 11795 if (addr) { 11796 /* Must locate the net structure if addr given */ 11797 net = sctp_findnet(stcb, addr); 11798 if (net) { 11799 /* validate port was 0 or correct */ 11800 struct sockaddr_in *sin; 11801 11802 sin = (struct sockaddr_in *)addr; 11803 if ((sin->sin_port != 0) && 11804 (sin->sin_port != stcb->rport)) { 11805 net = NULL; 11806 } 11807 } 11808 temp_flags |= SCTP_ADDR_OVER; 11809 } else 11810 net = stcb->asoc.primary_destination; 11811 if (addr && (net == NULL)) { 11812 /* Could not find address, was it legal */ 11813 if (addr->sa_family == AF_INET) { 11814 struct sockaddr_in *sin; 11815 11816 sin = (struct sockaddr_in *)addr; 11817 if (sin->sin_addr.s_addr == 0) { 11818 if ((sin->sin_port == 0) || 11819 (sin->sin_port == stcb->rport)) { 11820 net = stcb->asoc.primary_destination; 11821 } 11822 } 11823 } else { 11824 struct sockaddr_in6 *sin6; 11825 11826 sin6 = (struct sockaddr_in6 *)addr; 11827 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 11828 if ((sin6->sin6_port == 0) || 11829 (sin6->sin6_port == stcb->rport)) { 11830 net = stcb->asoc.primary_destination; 11831 } 11832 } 11833 } 11834 } 11835 if (net == NULL) { 11836 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11837 error = EINVAL; 11838 goto out_unlocked; 11839 } 11840 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) { 11841 stcb = 
sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0); 11842 if (stcb) { 11843 if (addr) 11844 /* 11845 * Must locate the net structure if addr 11846 * given 11847 */ 11848 net = sctp_findnet(stcb, addr); 11849 else 11850 net = stcb->asoc.primary_destination; 11851 if ((srcv->sinfo_flags & SCTP_ADDR_OVER) && 11852 ((net == NULL) || (addr == NULL))) { 11853 struct sockaddr_in *sin; 11854 11855 if (addr == NULL) { 11856 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11857 error = EINVAL; 11858 goto out_unlocked; 11859 } 11860 sin = (struct sockaddr_in *)addr; 11861 /* Validate port is 0 or correct */ 11862 if ((sin->sin_port != 0) && 11863 (sin->sin_port != stcb->rport)) { 11864 net = NULL; 11865 } 11866 } 11867 } 11868 hold_tcblock = 0; 11869 } else if (addr) { 11870 /*- 11871 * Since we did not use findep we must 11872 * increment it, and if we don't find a tcb 11873 * decrement it. 11874 */ 11875 SCTP_INP_WLOCK(inp); 11876 SCTP_INP_INCR_REF(inp); 11877 SCTP_INP_WUNLOCK(inp); 11878 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 11879 if (stcb == NULL) { 11880 SCTP_INP_WLOCK(inp); 11881 SCTP_INP_DECR_REF(inp); 11882 SCTP_INP_WUNLOCK(inp); 11883 } else { 11884 hold_tcblock = 1; 11885 } 11886 } 11887 if ((stcb == NULL) && (addr)) { 11888 /* Possible implicit send? */ 11889 SCTP_ASOC_CREATE_LOCK(inp); 11890 create_lock_applied = 1; 11891 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 11892 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 11893 /* Should I really unlock ? 
*/ 11894 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 11895 error = EFAULT; 11896 goto out_unlocked; 11897 11898 } 11899 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 11900 (addr->sa_family == AF_INET6)) { 11901 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 11902 error = EINVAL; 11903 goto out_unlocked; 11904 } 11905 SCTP_INP_WLOCK(inp); 11906 SCTP_INP_INCR_REF(inp); 11907 SCTP_INP_WUNLOCK(inp); 11908 /* With the lock applied look again */ 11909 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 11910 if (stcb == NULL) { 11911 SCTP_INP_WLOCK(inp); 11912 SCTP_INP_DECR_REF(inp); 11913 SCTP_INP_WUNLOCK(inp); 11914 } else { 11915 hold_tcblock = 1; 11916 } 11917 if (t_inp != inp) { 11918 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 11919 error = ENOTCONN; 11920 goto out_unlocked; 11921 } 11922 } 11923 if (stcb == NULL) { 11924 if (addr == NULL) { 11925 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 11926 error = ENOENT; 11927 goto out_unlocked; 11928 } else { 11929 /* 11930 * UDP style, we must go ahead and start the INIT 11931 * process 11932 */ 11933 uint32_t vrf_id; 11934 11935 if ((use_rcvinfo) && (srcv) && 11936 ((srcv->sinfo_flags & SCTP_ABORT) || 11937 ((srcv->sinfo_flags & SCTP_EOF) && 11938 (sndlen == 0)))) { 11939 /*- 11940 * User asks to abort a non-existant assoc, 11941 * or EOF a non-existant assoc with no data 11942 */ 11943 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 11944 error = ENOENT; 11945 goto out_unlocked; 11946 } 11947 /* get an asoc/stcb struct */ 11948 vrf_id = inp->def_vrf_id; 11949 #ifdef INVARIANTS 11950 if (create_lock_applied == 0) { 11951 panic("Error, should hold create lock and I don't?"); 11952 } 11953 #endif 11954 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, 11955 p 11956 ); 11957 if (stcb == NULL) { 11958 /* Error is setup for us in the call */ 11959 goto out_unlocked; 11960 } 
11961 if (create_lock_applied) { 11962 SCTP_ASOC_CREATE_UNLOCK(inp); 11963 create_lock_applied = 0; 11964 } else { 11965 SCTP_PRINTF("Huh-3? create lock should have been on??\n"); 11966 } 11967 /* 11968 * Turn on queue only flag to prevent data from 11969 * being sent 11970 */ 11971 queue_only = 1; 11972 asoc = &stcb->asoc; 11973 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 11974 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 11975 11976 /* initialize authentication params for the assoc */ 11977 sctp_initialize_auth_params(inp, stcb); 11978 11979 if (control) { 11980 /* 11981 * see if a init structure exists in cmsg 11982 * headers 11983 */ 11984 struct sctp_initmsg initm; 11985 int i; 11986 11987 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, 11988 sizeof(initm))) { 11989 /* 11990 * we have an INIT override of the 11991 * default 11992 */ 11993 if (initm.sinit_max_attempts) 11994 asoc->max_init_times = initm.sinit_max_attempts; 11995 if (initm.sinit_num_ostreams) 11996 asoc->pre_open_streams = initm.sinit_num_ostreams; 11997 if (initm.sinit_max_instreams) 11998 asoc->max_inbound_streams = initm.sinit_max_instreams; 11999 if (initm.sinit_max_init_timeo) 12000 asoc->initial_init_rto_max = initm.sinit_max_init_timeo; 12001 if (asoc->streamoutcnt < asoc->pre_open_streams) { 12002 /* Default is NOT correct */ 12003 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n", 12004 asoc->streamoutcnt, asoc->pre_open_streams); 12005 /* 12006 * What happens if this 12007 * fails? we panic ... 
12008 */ 12009 { 12010 struct sctp_stream_out *tmp_str; 12011 int had_lock = 0; 12012 12013 if (hold_tcblock) { 12014 had_lock = 1; 12015 SCTP_TCB_UNLOCK(stcb); 12016 } 12017 SCTP_MALLOC(tmp_str, 12018 struct sctp_stream_out *, 12019 (asoc->pre_open_streams * 12020 sizeof(struct sctp_stream_out)), 12021 SCTP_M_STRMO); 12022 if (had_lock) { 12023 SCTP_TCB_LOCK(stcb); 12024 } 12025 if (tmp_str != NULL) { 12026 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 12027 asoc->strmout = tmp_str; 12028 asoc->streamoutcnt = asoc->pre_open_streams; 12029 } else { 12030 asoc->pre_open_streams = asoc->streamoutcnt; 12031 } 12032 } 12033 for (i = 0; i < asoc->streamoutcnt; i++) { 12034 /*- 12035 * inbound side must be set 12036 * to 0xffff, also NOTE when 12037 * we get the INIT-ACK back 12038 * (for INIT sender) we MUST 12039 * reduce the count 12040 * (streamoutcnt) but first 12041 * check if we sent to any 12042 * of the upper streams that 12043 * were dropped (if some 12044 * were). Those that were 12045 * dropped must be notified 12046 * to the upper layer as 12047 * failed to send. 12048 */ 12049 asoc->strmout[i].next_sequence_sent = 0x0; 12050 TAILQ_INIT(&asoc->strmout[i].outqueue); 12051 asoc->strmout[i].stream_no = i; 12052 asoc->strmout[i].last_msg_incomplete = 0; 12053 asoc->strmout[i].next_spoke.tqe_next = 0; 12054 asoc->strmout[i].next_spoke.tqe_prev = 0; 12055 } 12056 } 12057 } 12058 } 12059 hold_tcblock = 1; 12060 /* out with the INIT */ 12061 queue_only_for_init = 1; 12062 /*- 12063 * we may want to dig in after this call and adjust the MTU 12064 * value. It defaulted to 1500 (constant) but the ro 12065 * structure may now have an update and thus we may need to 12066 * change it BEFORE we append the message. 
12067 */ 12068 net = stcb->asoc.primary_destination; 12069 asoc = &stcb->asoc; 12070 } 12071 } 12072 if ((SCTP_SO_IS_NBIO(so) 12073 || (flags & MSG_NBIO) 12074 )) { 12075 non_blocking = 1; 12076 } 12077 asoc = &stcb->asoc; 12078 12079 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 12080 if (sndlen > asoc->smallest_mtu) { 12081 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12082 error = EMSGSIZE; 12083 goto out_unlocked; 12084 } 12085 } 12086 /* would we block? */ 12087 if (non_blocking) { 12088 if (hold_tcblock == 0) { 12089 SCTP_TCB_LOCK(stcb); 12090 hold_tcblock = 1; 12091 } 12092 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12093 if ((SCTP_SB_LIMIT_SND(so) < 12094 (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) || 12095 (stcb->asoc.chunks_on_out_queue > 12096 sctp_max_chunks_on_queue)) { 12097 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK); 12098 if (sndlen > SCTP_SB_LIMIT_SND(so)) 12099 error = EMSGSIZE; 12100 else 12101 error = EWOULDBLOCK; 12102 goto out_unlocked; 12103 } 12104 stcb->asoc.sb_send_resv += sndlen; 12105 SCTP_TCB_UNLOCK(stcb); 12106 hold_tcblock = 0; 12107 } else { 12108 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); 12109 } 12110 local_soresv = sndlen; 12111 /* Keep the stcb from being freed under our feet */ 12112 if (free_cnt_applied) { 12113 #ifdef INVARIANTS 12114 panic("refcnt already incremented"); 12115 #else 12116 printf("refcnt:1 already incremented?\n"); 12117 #endif 12118 } else { 12119 atomic_add_int(&stcb->asoc.refcnt, 1); 12120 free_cnt_applied = 1; 12121 } 12122 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12123 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12124 error = ECONNRESET; 12125 goto out_unlocked; 12126 } 12127 if (create_lock_applied) { 12128 SCTP_ASOC_CREATE_UNLOCK(inp); 12129 create_lock_applied = 0; 12130 } 12131 if 
(asoc->stream_reset_outstanding) { 12132 /* 12133 * Can't queue any data while stream reset is underway. 12134 */ 12135 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN); 12136 error = EAGAIN; 12137 goto out_unlocked; 12138 } 12139 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 12140 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 12141 queue_only = 1; 12142 } 12143 if ((use_rcvinfo == 0) || (srcv == NULL)) { 12144 /* Grab the default stuff from the asoc */ 12145 srcv = (struct sctp_sndrcvinfo *)&stcb->asoc.def_send; 12146 } 12147 /* we are now done with all control */ 12148 if (control) { 12149 sctp_m_freem(control); 12150 control = NULL; 12151 } 12152 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 12153 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 12154 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 12155 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 12156 if ((use_rcvinfo) && 12157 (srcv->sinfo_flags & SCTP_ABORT)) { 12158 ; 12159 } else { 12160 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12161 error = ECONNRESET; 12162 goto out_unlocked; 12163 } 12164 } 12165 /* Ok, we will attempt a msgsnd :> */ 12166 if (p) { 12167 p->td_ru.ru_msgsnd++; 12168 } 12169 if (stcb) { 12170 if (((srcv->sinfo_flags | temp_flags) & SCTP_ADDR_OVER) == 0) { 12171 net = stcb->asoc.primary_destination; 12172 } 12173 } 12174 if (net == NULL) { 12175 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12176 error = EINVAL; 12177 goto out_unlocked; 12178 } 12179 if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) { 12180 /*- 12181 * CMT: Added check for CMT above. net above is the primary 12182 * dest. If CMT is ON, sender should always attempt to send 12183 * with the output routine sctp_fill_outqueue() that loops 12184 * through all destination addresses. 
Therefore, if CMT is 12185 * ON, queue_only is NOT set to 1 here, so that 12186 * sctp_chunk_output() can be called below. 12187 */ 12188 queue_only = 1; 12189 12190 } else if (asoc->ifp_had_enobuf) { 12191 SCTP_STAT_INCR(sctps_ifnomemqueued); 12192 if (net->flight_size > (net->mtu * 2)) 12193 queue_only = 1; 12194 asoc->ifp_had_enobuf = 0; 12195 } else { 12196 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 12197 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 12198 } 12199 /* Are we aborting? */ 12200 if (srcv->sinfo_flags & SCTP_ABORT) { 12201 struct mbuf *mm; 12202 int tot_demand, tot_out = 0, max_out; 12203 12204 SCTP_STAT_INCR(sctps_sends_with_abort); 12205 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 12206 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 12207 /* It has to be up before we abort */ 12208 /* how big is the user initiated abort? */ 12209 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12210 error = EINVAL; 12211 goto out; 12212 } 12213 if (hold_tcblock) { 12214 SCTP_TCB_UNLOCK(stcb); 12215 hold_tcblock = 0; 12216 } 12217 if (top) { 12218 struct mbuf *cntm = NULL; 12219 12220 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); 12221 if (sndlen != 0) { 12222 cntm = top; 12223 while (cntm) { 12224 tot_out += SCTP_BUF_LEN(cntm); 12225 cntm = SCTP_BUF_NEXT(cntm); 12226 } 12227 } 12228 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 12229 } else { 12230 /* Must fit in a MTU */ 12231 tot_out = sndlen; 12232 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 12233 if (tot_demand > SCTP_DEFAULT_ADD_MORE) { 12234 /* To big */ 12235 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12236 error = EMSGSIZE; 12237 goto out; 12238 } 12239 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA); 12240 } 12241 if (mm == NULL) { 12242 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 
12243 error = ENOMEM; 12244 goto out; 12245 } 12246 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 12247 max_out -= sizeof(struct sctp_abort_msg); 12248 if (tot_out > max_out) { 12249 tot_out = max_out; 12250 } 12251 if (mm) { 12252 struct sctp_paramhdr *ph; 12253 12254 /* now move forward the data pointer */ 12255 ph = mtod(mm, struct sctp_paramhdr *); 12256 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 12257 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out)); 12258 ph++; 12259 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 12260 if (top == NULL) { 12261 error = uiomove((caddr_t)ph, (int)tot_out, uio); 12262 if (error) { 12263 /*- 12264 * Here if we can't get his data we 12265 * still abort we just don't get to 12266 * send the users note :-0 12267 */ 12268 sctp_m_freem(mm); 12269 mm = NULL; 12270 } 12271 } else { 12272 if (sndlen != 0) { 12273 SCTP_BUF_NEXT(mm) = top; 12274 } 12275 } 12276 } 12277 if (hold_tcblock == 0) { 12278 SCTP_TCB_LOCK(stcb); 12279 hold_tcblock = 1; 12280 } 12281 atomic_add_int(&stcb->asoc.refcnt, -1); 12282 free_cnt_applied = 0; 12283 /* release this lock, otherwise we hang on ourselves */ 12284 sctp_abort_an_association(stcb->sctp_ep, stcb, 12285 SCTP_RESPONSE_TO_USER_REQ, 12286 mm, SCTP_SO_LOCKED); 12287 /* now relock the stcb so everything is sane */ 12288 hold_tcblock = 0; 12289 stcb = NULL; 12290 /* 12291 * In this case top is already chained to mm avoid double 12292 * free, since we free it below if top != NULL and driver 12293 * would free it after sending the packet out 12294 */ 12295 if (sndlen != 0) { 12296 top = NULL; 12297 } 12298 goto out_unlocked; 12299 } 12300 /* Calculate the maximum we can send */ 12301 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12302 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 12303 if (non_blocking) { 12304 /* we already checked for non-blocking above. 
*/ 12305 max_len = sndlen; 12306 } else { 12307 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12308 } 12309 } else { 12310 max_len = 0; 12311 } 12312 if (hold_tcblock) { 12313 SCTP_TCB_UNLOCK(stcb); 12314 hold_tcblock = 0; 12315 } 12316 /* Is the stream no. valid? */ 12317 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 12318 /* Invalid stream number */ 12319 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12320 error = EINVAL; 12321 goto out_unlocked; 12322 } 12323 if (asoc->strmout == NULL) { 12324 /* huh? software error */ 12325 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 12326 error = EFAULT; 12327 goto out_unlocked; 12328 } 12329 /* Unless E_EOR mode is on, we must make a send FIT in one call. */ 12330 if ((user_marks_eor == 0) && 12331 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 12332 /* It will NEVER fit */ 12333 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12334 error = EMSGSIZE; 12335 goto out_unlocked; 12336 } 12337 if ((uio == NULL) && user_marks_eor) { 12338 /*- 12339 * We do not support eeor mode for 12340 * sending with mbuf chains (like sendfile). 12341 */ 12342 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12343 error = EINVAL; 12344 goto out_unlocked; 12345 } 12346 if (user_marks_eor) { 12347 local_add_more = sctp_add_more_threshold; 12348 } else { 12349 /*- 12350 * For non-eeor the whole message must fit in 12351 * the socket send buffer. 12352 */ 12353 local_add_more = sndlen; 12354 } 12355 len = 0; 12356 if (non_blocking) { 12357 goto skip_preblock; 12358 } 12359 if (((max_len <= local_add_more) && 12360 (SCTP_SB_LIMIT_SND(so) > local_add_more)) || 12361 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) > sctp_max_chunks_on_queue)) { /* if */ 12362 /* No room right no ! 
*/ 12363 SOCKBUF_LOCK(&so->so_snd); 12364 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12365 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + sctp_add_more_threshold)) || 12366 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) > sctp_max_chunks_on_queue /* while */ )) { 12367 12368 if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) { 12369 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, 12370 so, asoc, sndlen); 12371 } 12372 be.error = 0; 12373 stcb->block_entry = &be; 12374 error = sbwait(&so->so_snd); 12375 stcb->block_entry = NULL; 12376 if (error || so->so_error || be.error) { 12377 if (error == 0) { 12378 if (so->so_error) 12379 error = so->so_error; 12380 if (be.error) { 12381 error = be.error; 12382 } 12383 } 12384 SOCKBUF_UNLOCK(&so->so_snd); 12385 goto out_unlocked; 12386 } 12387 if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) { 12388 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 12389 so, asoc, stcb->asoc.total_output_queue_size); 12390 } 12391 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12392 goto out_unlocked; 12393 } 12394 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12395 } 12396 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12397 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 12398 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12399 } else { 12400 max_len = 0; 12401 } 12402 SOCKBUF_UNLOCK(&so->so_snd); 12403 } 12404 skip_preblock: 12405 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12406 goto out_unlocked; 12407 } 12408 atomic_add_int(&stcb->total_sends, 1); 12409 /* 12410 * sndlen covers for mbuf case uio_resid covers for the non-mbuf 12411 * case NOTE: uio will be null when top/mbuf is passed 12412 */ 12413 if (sndlen == 0) { 12414 if (srcv->sinfo_flags & SCTP_EOF) { 12415 got_all_of_the_send = 1; 12416 
goto dataless_eof; 12417 } else { 12418 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12419 error = EINVAL; 12420 goto out; 12421 } 12422 } 12423 if (top == NULL) { 12424 struct sctp_stream_queue_pending *sp; 12425 struct sctp_stream_out *strm; 12426 uint32_t sndout, initial_out; 12427 12428 initial_out = uio->uio_resid; 12429 12430 SCTP_TCB_SEND_LOCK(stcb); 12431 if ((asoc->stream_locked) && 12432 (asoc->stream_locked_on != srcv->sinfo_stream)) { 12433 SCTP_TCB_SEND_UNLOCK(stcb); 12434 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12435 error = EINVAL; 12436 goto out; 12437 } 12438 SCTP_TCB_SEND_UNLOCK(stcb); 12439 12440 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 12441 if (strm->last_msg_incomplete == 0) { 12442 do_a_copy_in: 12443 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking); 12444 if ((sp == NULL) || (error)) { 12445 goto out; 12446 } 12447 SCTP_TCB_SEND_LOCK(stcb); 12448 if (sp->msg_is_complete) { 12449 strm->last_msg_incomplete = 0; 12450 asoc->stream_locked = 0; 12451 } else { 12452 /* 12453 * Just got locked to this guy in case of an 12454 * interrupt. 
12455 */ 12456 strm->last_msg_incomplete = 1; 12457 asoc->stream_locked = 1; 12458 asoc->stream_locked_on = srcv->sinfo_stream; 12459 sp->sender_all_done = 0; 12460 } 12461 sctp_snd_sb_alloc(stcb, sp->length); 12462 atomic_add_int(&asoc->stream_queue_cnt, 1); 12463 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 12464 sp->strseq = strm->next_sequence_sent; 12465 if (sctp_logging_level & SCTP_LOG_AT_SEND_2_SCTP) { 12466 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN, 12467 (uintptr_t) stcb, sp->length, 12468 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0); 12469 } 12470 strm->next_sequence_sent++; 12471 } else { 12472 SCTP_STAT_INCR(sctps_sends_with_unord); 12473 } 12474 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 12475 if ((strm->next_spoke.tqe_next == NULL) && 12476 (strm->next_spoke.tqe_prev == NULL)) { 12477 /* Not on wheel, insert */ 12478 sctp_insert_on_wheel(stcb, asoc, strm, 1); 12479 } 12480 SCTP_TCB_SEND_UNLOCK(stcb); 12481 } else { 12482 SCTP_TCB_SEND_LOCK(stcb); 12483 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 12484 SCTP_TCB_SEND_UNLOCK(stcb); 12485 if (sp == NULL) { 12486 /* ???? Huh ??? last msg is gone */ 12487 #ifdef INVARIANTS 12488 panic("Warning: Last msg marked incomplete, yet nothing left?"); 12489 #else 12490 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 12491 strm->last_msg_incomplete = 0; 12492 #endif 12493 goto do_a_copy_in; 12494 12495 } 12496 } 12497 while (uio->uio_resid > 0) { 12498 /* How much room do we have? 
*/ 12499 struct mbuf *new_tail, *mm; 12500 12501 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 12502 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 12503 else 12504 max_len = 0; 12505 12506 if ((max_len > sctp_add_more_threshold) || 12507 (max_len && (SCTP_SB_LIMIT_SND(so) < sctp_add_more_threshold)) || 12508 (uio->uio_resid && 12509 (uio->uio_resid <= (int)max_len))) { 12510 sndout = 0; 12511 new_tail = NULL; 12512 if (hold_tcblock) { 12513 SCTP_TCB_UNLOCK(stcb); 12514 hold_tcblock = 0; 12515 } 12516 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail); 12517 if ((mm == NULL) || error) { 12518 if (mm) { 12519 sctp_m_freem(mm); 12520 } 12521 goto out; 12522 } 12523 /* Update the mbuf and count */ 12524 SCTP_TCB_SEND_LOCK(stcb); 12525 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12526 /* 12527 * we need to get out. Peer probably 12528 * aborted. 12529 */ 12530 sctp_m_freem(mm); 12531 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { 12532 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12533 error = ECONNRESET; 12534 } 12535 SCTP_TCB_SEND_UNLOCK(stcb); 12536 goto out; 12537 } 12538 if (sp->tail_mbuf) { 12539 /* tack it to the end */ 12540 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 12541 sp->tail_mbuf = new_tail; 12542 } else { 12543 /* A stolen mbuf */ 12544 sp->data = mm; 12545 sp->tail_mbuf = new_tail; 12546 } 12547 sctp_snd_sb_alloc(stcb, sndout); 12548 atomic_add_int(&sp->length, sndout); 12549 len += sndout; 12550 12551 /* Did we reach EOR? */ 12552 if ((uio->uio_resid == 0) && 12553 ((user_marks_eor == 0) || 12554 (srcv->sinfo_flags & SCTP_EOF) || 12555 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 12556 ) { 12557 sp->msg_is_complete = 1; 12558 } else { 12559 sp->msg_is_complete = 0; 12560 } 12561 SCTP_TCB_SEND_UNLOCK(stcb); 12562 } 12563 if (uio->uio_resid == 0) { 12564 /* got it all? */ 12565 continue; 12566 } 12567 /* PR-SCTP? 
*/ 12568 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 12569 /* 12570 * This is ugly but we must assure locking 12571 * order 12572 */ 12573 if (hold_tcblock == 0) { 12574 SCTP_TCB_LOCK(stcb); 12575 hold_tcblock = 1; 12576 } 12577 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 12578 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12579 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 12580 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12581 else 12582 max_len = 0; 12583 if (max_len > 0) { 12584 continue; 12585 } 12586 SCTP_TCB_UNLOCK(stcb); 12587 hold_tcblock = 0; 12588 } 12589 /* wait for space now */ 12590 if (non_blocking) { 12591 /* Non-blocking io in place out */ 12592 goto skip_out_eof; 12593 } 12594 if ((net->flight_size > net->cwnd) && 12595 (sctp_cmt_on_off == 0)) { 12596 queue_only = 1; 12597 } else if (asoc->ifp_had_enobuf) { 12598 SCTP_STAT_INCR(sctps_ifnomemqueued); 12599 if (net->flight_size > (net->mtu * 2)) { 12600 queue_only = 1; 12601 } else { 12602 queue_only = 0; 12603 } 12604 asoc->ifp_had_enobuf = 0; 12605 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 12606 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 12607 sizeof(struct sctp_data_chunk))); 12608 } else { 12609 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 12610 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 12611 sizeof(struct sctp_data_chunk))); 12612 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) { 12613 queue_only = 1; 12614 SCTP_STAT_INCR(sctps_send_burst_avoid); 12615 } else if (net->flight_size > net->cwnd) { 12616 queue_only = 1; 12617 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 12618 } else { 12619 queue_only = 0; 12620 } 12621 } 12622 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 12623 (stcb->asoc.total_flight > 0) && 12624 
(stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 12625 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 12626 ) { 12627 12628 /*- 12629 * Ok, Nagle is set on and we have data outstanding. 12630 * Don't send anything and let SACKs drive out the 12631 * data unless wen have a "full" segment to send. 12632 */ 12633 if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) { 12634 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 12635 } 12636 SCTP_STAT_INCR(sctps_naglequeued); 12637 nagle_applies = 1; 12638 } else { 12639 if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) { 12640 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 12641 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 12642 } 12643 SCTP_STAT_INCR(sctps_naglesent); 12644 nagle_applies = 0; 12645 } 12646 /* What about the INIT, send it maybe */ 12647 if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) { 12648 12649 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 12650 nagle_applies, un_sent); 12651 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 12652 stcb->asoc.total_flight, 12653 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 12654 } 12655 if (queue_only_for_init) { 12656 if (hold_tcblock == 0) { 12657 SCTP_TCB_LOCK(stcb); 12658 hold_tcblock = 1; 12659 } 12660 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 12661 /* a collision took us forward? */ 12662 queue_only_for_init = 0; 12663 queue_only = 0; 12664 } else { 12665 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 12666 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 12667 queue_only_for_init = 0; 12668 queue_only = 1; 12669 } 12670 } 12671 if ((queue_only == 0) && (nagle_applies == 0) 12672 ) { 12673 /*- 12674 * need to start chunk output 12675 * before blocking.. 
note that if 12676 * a lock is already applied, then 12677 * the input via the net is happening 12678 * and I don't need to start output :-D 12679 */ 12680 if (hold_tcblock == 0) { 12681 if (SCTP_TCB_TRYLOCK(stcb)) { 12682 hold_tcblock = 1; 12683 sctp_chunk_output(inp, 12684 stcb, 12685 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12686 } 12687 } else { 12688 sctp_chunk_output(inp, 12689 stcb, 12690 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12691 } 12692 if (hold_tcblock == 1) { 12693 SCTP_TCB_UNLOCK(stcb); 12694 hold_tcblock = 0; 12695 } 12696 } 12697 SOCKBUF_LOCK(&so->so_snd); 12698 /*- 12699 * This is a bit strange, but I think it will 12700 * work. The total_output_queue_size is locked and 12701 * protected by the TCB_LOCK, which we just released. 12702 * There is a race that can occur between releasing it 12703 * above, and me getting the socket lock, where sacks 12704 * come in but we have not put the SB_WAIT on the 12705 * so_snd buffer to get the wakeup. After the LOCK 12706 * is applied the sack_processing will also need to 12707 * LOCK the so->so_snd to do the actual sowwakeup(). So 12708 * once we have the socket buffer lock if we recheck the 12709 * size we KNOW we will get to sleep safely with the 12710 * wakeup flag in place. 
12711 */ 12712 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size + 12713 min(sctp_add_more_threshold, SCTP_SB_LIMIT_SND(so))) 12714 ) { 12715 if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) { 12716 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 12717 so, asoc, uio->uio_resid); 12718 } 12719 be.error = 0; 12720 stcb->block_entry = &be; 12721 error = sbwait(&so->so_snd); 12722 stcb->block_entry = NULL; 12723 12724 if (error || so->so_error || be.error) { 12725 if (error == 0) { 12726 if (so->so_error) 12727 error = so->so_error; 12728 if (be.error) { 12729 error = be.error; 12730 } 12731 } 12732 SOCKBUF_UNLOCK(&so->so_snd); 12733 goto out_unlocked; 12734 } 12735 if (sctp_logging_level & SCTP_BLK_LOGGING_ENABLE) { 12736 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 12737 so, asoc, stcb->asoc.total_output_queue_size); 12738 } 12739 } 12740 SOCKBUF_UNLOCK(&so->so_snd); 12741 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12742 goto out_unlocked; 12743 } 12744 } 12745 SCTP_TCB_SEND_LOCK(stcb); 12746 if (sp) { 12747 if (sp->msg_is_complete == 0) { 12748 strm->last_msg_incomplete = 1; 12749 asoc->stream_locked = 1; 12750 asoc->stream_locked_on = srcv->sinfo_stream; 12751 } else { 12752 sp->sender_all_done = 1; 12753 strm->last_msg_incomplete = 0; 12754 asoc->stream_locked = 0; 12755 } 12756 } else { 12757 SCTP_PRINTF("Huh no sp TSNH?\n"); 12758 strm->last_msg_incomplete = 0; 12759 asoc->stream_locked = 0; 12760 } 12761 SCTP_TCB_SEND_UNLOCK(stcb); 12762 if (uio->uio_resid == 0) { 12763 got_all_of_the_send = 1; 12764 } 12765 } else if (top) { 12766 /* We send in a 0, since we do NOT have any locks */ 12767 error = sctp_msg_append(stcb, net, top, srcv, 0); 12768 top = NULL; 12769 if (srcv->sinfo_flags & SCTP_EOF) { 12770 /* 12771 * This should only happen for Panda for the mbuf 12772 * send case, which does NOT yet support EEOR mode. 12773 * Thus, we can just set this flag to do the proper 12774 * EOF handling. 
12775 */ 12776 got_all_of_the_send = 1; 12777 } 12778 } 12779 if (error) { 12780 goto out; 12781 } 12782 dataless_eof: 12783 /* EOF thing ? */ 12784 if ((srcv->sinfo_flags & SCTP_EOF) && 12785 (got_all_of_the_send == 1) && 12786 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) 12787 ) { 12788 int cnt; 12789 12790 SCTP_STAT_INCR(sctps_sends_with_eof); 12791 error = 0; 12792 if (hold_tcblock == 0) { 12793 SCTP_TCB_LOCK(stcb); 12794 hold_tcblock = 1; 12795 } 12796 cnt = sctp_is_there_unsent_data(stcb); 12797 if (TAILQ_EMPTY(&asoc->send_queue) && 12798 TAILQ_EMPTY(&asoc->sent_queue) && 12799 (cnt == 0)) { 12800 if (asoc->locked_on_sending) { 12801 goto abort_anyway; 12802 } 12803 /* there is nothing queued to send, so I'm done... */ 12804 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 12805 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 12806 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 12807 /* only send SHUTDOWN the first time through */ 12808 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 12809 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 12810 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 12811 } 12812 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 12813 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 12814 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 12815 asoc->primary_destination); 12816 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 12817 asoc->primary_destination); 12818 } 12819 } else { 12820 /*- 12821 * we still got (or just got) data to send, so set 12822 * SHUTDOWN_PENDING 12823 */ 12824 /*- 12825 * XXX sockets draft says that SCTP_EOF should be 12826 * sent with no data. 
currently, we will allow user 12827 * data to be sent first and move to 12828 * SHUTDOWN-PENDING 12829 */ 12830 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 12831 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 12832 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 12833 if (hold_tcblock == 0) { 12834 SCTP_TCB_LOCK(stcb); 12835 hold_tcblock = 1; 12836 } 12837 if (asoc->locked_on_sending) { 12838 /* Locked to send out the data */ 12839 struct sctp_stream_queue_pending *sp; 12840 12841 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 12842 if (sp) { 12843 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 12844 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 12845 } 12846 } 12847 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 12848 if (TAILQ_EMPTY(&asoc->send_queue) && 12849 TAILQ_EMPTY(&asoc->sent_queue) && 12850 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 12851 abort_anyway: 12852 if (free_cnt_applied) { 12853 atomic_add_int(&stcb->asoc.refcnt, -1); 12854 free_cnt_applied = 0; 12855 } 12856 sctp_abort_an_association(stcb->sctp_ep, stcb, 12857 SCTP_RESPONSE_TO_USER_REQ, 12858 NULL, SCTP_SO_LOCKED); 12859 /* 12860 * now relock the stcb so everything 12861 * is sane 12862 */ 12863 hold_tcblock = 0; 12864 stcb = NULL; 12865 goto out; 12866 } 12867 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 12868 asoc->primary_destination); 12869 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); 12870 } 12871 } 12872 } 12873 skip_out_eof: 12874 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 12875 some_on_control = 1; 12876 } 12877 if ((net->flight_size > net->cwnd) && 12878 (sctp_cmt_on_off == 0)) { 12879 queue_only = 1; 12880 } else if (asoc->ifp_had_enobuf) { 12881 SCTP_STAT_INCR(sctps_ifnomemqueued); 12882 if (net->flight_size > (net->mtu * 2)) { 12883 queue_only = 1; 12884 } else { 12885 queue_only = 0; 12886 } 12887 asoc->ifp_had_enobuf = 0; 12888 un_sent = ((stcb->asoc.total_output_queue_size - 
stcb->asoc.total_flight) + 12889 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 12890 sizeof(struct sctp_data_chunk))); 12891 } else { 12892 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 12893 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 12894 sizeof(struct sctp_data_chunk))); 12895 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) { 12896 queue_only = 1; 12897 SCTP_STAT_INCR(sctps_send_burst_avoid); 12898 } else if (net->flight_size > net->cwnd) { 12899 queue_only = 1; 12900 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 12901 } else { 12902 queue_only = 0; 12903 } 12904 } 12905 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 12906 (stcb->asoc.total_flight > 0) && 12907 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 12908 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 12909 ) { 12910 /*- 12911 * Ok, Nagle is set on and we have data outstanding. 12912 * Don't send anything and let SACKs drive out the 12913 * data unless wen have a "full" segment to send. 12914 */ 12915 if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) { 12916 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 12917 } 12918 SCTP_STAT_INCR(sctps_naglequeued); 12919 nagle_applies = 1; 12920 } else { 12921 if (sctp_logging_level & SCTP_NAGLE_LOGGING_ENABLE) { 12922 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 12923 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 12924 } 12925 SCTP_STAT_INCR(sctps_naglesent); 12926 nagle_applies = 0; 12927 } 12928 if (queue_only_for_init) { 12929 if (hold_tcblock == 0) { 12930 SCTP_TCB_LOCK(stcb); 12931 hold_tcblock = 1; 12932 } 12933 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 12934 /* a collision took us forward? 
*/ 12935 queue_only_for_init = 0; 12936 queue_only = 0; 12937 } else { 12938 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 12939 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 12940 queue_only_for_init = 0; 12941 queue_only = 1; 12942 } 12943 } 12944 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 12945 /* we can attempt to send too. */ 12946 if (hold_tcblock == 0) { 12947 /* 12948 * If there is activity recv'ing sacks no need to 12949 * send 12950 */ 12951 if (SCTP_TCB_TRYLOCK(stcb)) { 12952 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12953 hold_tcblock = 1; 12954 } 12955 } else { 12956 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12957 } 12958 } else if ((queue_only == 0) && 12959 (stcb->asoc.peers_rwnd == 0) && 12960 (stcb->asoc.total_flight == 0)) { 12961 /* We get to have a probe outstanding */ 12962 if (hold_tcblock == 0) { 12963 hold_tcblock = 1; 12964 SCTP_TCB_LOCK(stcb); 12965 } 12966 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12967 } else if (some_on_control) { 12968 int num_out, reason, cwnd_full, frag_point; 12969 12970 /* Here we do control only */ 12971 if (hold_tcblock == 0) { 12972 hold_tcblock = 1; 12973 SCTP_TCB_LOCK(stcb); 12974 } 12975 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 12976 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 12977 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); 12978 } 12979 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d", 12980 queue_only, stcb->asoc.peers_rwnd, un_sent, 12981 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 12982 stcb->asoc.total_output_queue_size, error); 12983 12984 out: 12985 out_unlocked: 12986 12987 if (local_soresv && stcb) { 12988 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); 12989 local_soresv = 0; 12990 } 12991 if (create_lock_applied) { 12992 
	SCTP_ASOC_CREATE_UNLOCK(inp);
		create_lock_applied = 0;
	}
	/* Drop the TCB lock if this exit path still holds it. */
	if ((stcb) && hold_tcblock) {
		SCTP_TCB_UNLOCK(stcb);
	}
	/* Give back the association reference taken earlier in the send path. */
	if (stcb && free_cnt_applied) {
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
#ifdef INVARIANTS
	/* Sanity check: neither TCB mutex may be owned when we return. */
	if (stcb) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Leaving with tcb mtx owned?");
		}
		if (mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
	/* Free any user data / control mbufs we still own on error paths. */
	if (top) {
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * Generate an AUTHentication chunk, if required, and append it to the
 * given mbuf chain.
 *
 * m        - head of the chain being built (may be NULL for a new chain).
 * m_end    - in/out pointer to the tail mbuf of the chain.
 * auth_ret - on success, set to point at the AUTH chunk header in the chain.
 * offset   - on success, set to the byte offset of the AUTH chunk within
 *            the chain (0 when the AUTH chunk starts the chain).
 * stcb     - association the chunk will be sent on.
 * chunk    - type of the chunk that may require authentication.
 *
 * Returns the (possibly new) head of the mbuf chain.  The chain is
 * returned unchanged when auth is disabled via sysctl, not supported by
 * the peer, not required for this chunk type, or when no mbuf could be
 * allocated.  The HMAC key id is set here; the digest itself is computed
 * and filled in later, at actual send time.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (sctp_auth_disable)
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/* chunk length covers the header plus the HMAC digest that follows */
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	if (m != NULL) {
		struct mbuf *cn;

		/* walk the existing chain; the AUTH chunk lands at its end */
		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else
		*offset = 0;

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}

#ifdef INET6
/*
 * Check whether the next hop (rt_gateway) of the cached route is one of
 * the advertising routers for the ND prefix that covers the given IPv6
 * source address.  Returns 1 when a matching prefix router is installed,
 * 0 otherwise.
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
	LIST_FOREACH(pfx, &nd_prefix, ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
	    pfxrtr->pfr_next) {
		/* build a sockaddr_in6 for this advertising router's address */
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
		gw6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		/* does this prefix router match the route's installed gateway? */
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}

#endif

/*
 * Check whether the next hop (rt_gateway) of the cached route lies on the
 * same IPv4 subnet as the given source interface address, using that
 * interface's netmask.  Returns 1 when the masked network parts match,
 * 0 otherwise (including when the route is absent or the address is not
 * AF_INET).
 */
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = (struct sockaddr_in *)&sifa->address.sin;
	/* network part of the source address, per the interface netmask */
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

	/* network part of the gateway, masked with the same (source) netmask */
	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
		return (1);
	}
	return (0);
}