/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc, calc_w_oh;

	/*
	 * This is really set wrong with respect to a one-to-many socket.
	 * Since sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return;

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		asoc->my_rwnd = 0;
		return;
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
		}
	}
}

/* Calculate what the rwnd would be */

uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0, calc_w_oh;

	/*
	 * This is really set wrong with respect to a one-to-many socket.
	 * Since sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
		    SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		calc = 1;
	} else {
		/* SWS threshold */
		if (calc &&
		    (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			calc = 1;
		}
	}
	return (calc);
}
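
/*
 * Illustrative sketch (not part of the original code): how the two rwnd
 * routines above arrive at a value.  The numbers are made up for the
 * example.
 *
 *	space in so_rcv (sctp_sbspace)        = 60000
 *	- size_on_reasm_queue                 =  4000
 *	- size_on_all_streams                 =  6000
 *	---------------------------------------------
 *	calc                                  = 50000
 *
 * If calc minus my_rwnd_control_len (the ancillary-data overhead we still
 * owe the reader) drops to 0, the rwnd is clamped to 1 so the peer is
 * throttled but the association is not starved outright.  Likewise, if calc
 * falls below the receiver SWS threshold (sctp_sws_receiver), 1 is
 * advertised instead; this is silly-window-syndrome avoidance on the
 * receive side.
 */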

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
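
/*
 * Illustrative note (not part of the original code): both constructors above
 * park the DATA chunk flags in the upper byte of sinfo_flags, i.e.
 *
 *	read_queue_e->sinfo_flags = (flags << 8);
 *
 * so, for example, an unordered chunk (SCTP_DATA_UNORDERED, 0x04 in the
 * chunk flags) shows up as 0x0400 in sinfo_flags, leaving the low byte free
 * for the flag bits the socket API itself defines.
 */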

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}
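
/*
 * Illustrative sketch (not part of the original file): how a userland
 * receiver would typically pick up the sctp_sndrcvinfo that the two
 * builders above attach as ancillary data.  It assumes the ordinary
 * recvmsg()/CMSG_FIRSTHDR() socket interface.
 *
 *	struct msghdr msg;
 *	struct cmsghdr *c;
 *	struct sctp_sndrcvinfo *s;
 *
 *	(void)recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == IPPROTO_SCTP &&
 *		    c->cmsg_type == SCTP_SNDRCV) {
 *			s = (struct sctp_sndrcvinfo *)CMSG_DATA(c);
 *			// s->sinfo_stream, s->sinfo_ssn, s->sinfo_ppid ...
 *		}
 *	}
 */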

/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
					panic("This should not happen control_pdapi NULL?");
				}
				/* if we did not panic, it was an EOM */
				panic("Bad chunking ??");
				return;
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
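
/*
 * Illustrative sketch (not part of the original file): the routine above
 * feeds fragments to the socket buffer before the whole message has arrived
 * (the partial delivery API).  From the application side that is visible as
 * a message which comes back from recvmsg() without MSG_EOR set until the
 * final fragment has been appended, roughly:
 *
 *	do {
 *		n = recvmsg(fd, &msg, 0);
 *		// consume data; sinfo_stream/sinfo_ssn identify the message
 *	} while (n > 0 && (msg.msg_flags & MSG_EOR) == 0);
 */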

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order.  One big
 * question still remains: what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe?  What happens when the ssn wraps?  If we are getting
	 * all the data in one stream this could happen quite rapidly.  One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur.  Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0.  You have a situation where the TSN
	 * has wrapped but not in the stream.  Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1?  If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation.  So for now I am undecided and will leave the sort by
	 * SSN alone.  Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;

	}
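	/*
	 * Illustrative layout (not part of the original code) of the
	 * operational error built just above and reused throughout this
	 * file: a parameter header followed by three 32-bit words.
	 *
	 *	+---------------------------------+
	 *	| SCTP_CAUSE_PROTOCOL_VIOLATION   |  param_type
	 *	| param_length = 4 + 12 = 16      |
	 *	+---------------------------------+
	 *	| SCTP_FROM_SCTP_INDATA + SCTP_LOC_n (where in this file)
	 *	| TSN of the offending chunk
	 *	| (stream number << 16) | stream sequence
	 *	+---------------------------------+
	 */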
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue.  And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too?  FIX ME MAYBE?  Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
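
/*
 * Illustrative note (not part of the original code): compare_with_wrap(a, b,
 * MAX_SEQ) is serial-number arithmetic, i.e. "a is logically after b" even
 * across a 16-bit wrap.  For example, with MAX_SEQ == 0xffff:
 *
 *	compare_with_wrap(5, 0xfffe, MAX_SEQ)  -> true  (5 is "newer")
 *	compare_with_wrap(0xfffe, 5, MAX_SEQ)  -> false
 *
 * which is why the in-order insert above keeps working while stream
 * sequence numbers wrap.
 */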

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize > stcb->sctp_ep->partial_delivery_point))) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
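
/*
 * Illustrative note (not part of the original code): the decision above is
 * "start the partial delivery API when either the whole message is present
 * or the contiguous prefix already exceeds the endpoint's
 * partial_delivery_point".  For example (made-up numbers), with
 * partial_delivery_point at 4096 and three contiguous fragments of 1452
 * bytes queued (4356 bytes, LAST flag not yet seen),
 * sctp_is_all_msg_on_reasm() returns 0 but tsize (4356) exceeds 4096, so
 * fragmented delivery begins anyway.
 */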

/*
 * Dump onto the re-assembly queue, in its proper place.  After dumping on
 * the queue, see if anything can be delivered.  If so pull it off (or as
 * much as we can).  If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too?  FIX ME MAYBE?  Or it COULD be
			 * that the SSN's have wrapped.  Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT.  A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one.  A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;

				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
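
/*
 * Illustrative summary (not part of the original code) of the adjacency
 * rules the audits above enforce when a fragment lands next to an existing
 * one at TSN distance 1:
 *
 *	previous fragment is FIRST or MIDDLE -> new one must be MIDDLE/LAST,
 *	                                        same stream, same stream seq
 *	previous fragment is LAST            -> new one must be a FIRST
 *	next fragment is a FIRST             -> new one must be a LAST
 *	next fragment is MIDDLE or LAST      -> new one must be FIRST/MIDDLE,
 *	                                        same stream, same stream seq
 *
 * Anything else means the peer is generating an impossible fragment train,
 * and the association is aborted with a protocol-violation cause.
 */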

/*
 * This is an unfortunate routine.  It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments.  A broken peer could also
 * do this, but this is doubtful.  It is too bad I must worry about evil
 * crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
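
/*
 * Illustrative example (not part of the original code): with fragments for
 * TSN 100 (FIRST) and 101 (MIDDLE) sitting on the reassembly queue, a new
 * self-contained chunk with TSN 102 "belongs" to that message (the routine
 * returns 1 and the caller treats the peer as broken), while TSN 105 is
 * unrelated and returns 0.  The same logic runs in the other direction for
 * a chunk that would land immediately before a queued fragment.
 */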

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver.  Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room.  If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {

			/* Nope not in the valid range dump it */
			SCTPDBG(SCTP_DEBUG_INDATA1, "My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
			    (u_long)tsn, (u_long)asoc->my_rwnd,
			    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params and this one has
			 * two back-to-back phdrs, one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* we have a new high score */
			asoc->highest_tsn_inside_map = tsn;
			if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
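	/*
	 * Illustrative arithmetic (not part of the original code) for the
	 * gap computed above: with mapping_array_base_tsn = 1000, a chunk
	 * with TSN 1007 gives gap = 7, i.e. bit 7 of the mapping array.
	 * Across a TSN wrap, base = 0xfffffffe and TSN = 1 gives
	 * gap = (MAX_TSN - 0xfffffffe) + 1 + 1 = 3.  A gap that would not
	 * fit even a fully grown mapping array (SCTP_MAPPING_ARRAY times 8
	 * bits) is simply dropped.
	 */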
	/*
	 * Before we continue let's validate that we are not being fooled by
	 * an evil attacker.  We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array 512 * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped.  We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);

		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);
		*abort_flag = 1;
		return (0);
	}
	/************************************
	 * From here down we may find ch-> invalid
	 * so it's a good idea NOT to use it.
	 *************************************/

	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
		if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = dmbuf;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size; hopefully we do not
			 * hit this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			lat = dmbuf;
			while (lat) {
				l_len += SCTP_BUF_LEN(lat);
				lat = SCTP_BUF_NEXT(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it's un-ordered OR ordered and the next
		 * to deliver AND nothing else is stuck on the stream queue,
		 * and there is room for it in the socket buffer.  Let's
		 * just stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
		if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
			    SCTP_STR_LOG_FROM_EXPRS_DEL);
		}
		control = NULL;
		goto finish_express_del;
	}
*/ 1770 goto failed_pdapi_express_del; 1771 } 1772 if (tsn == (control->sinfo_tsn + 1)) { 1773 /* Yep, we can add it on */ 1774 int end = 0; 1775 uint32_t cumack; 1776 1777 if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1778 end = 1; 1779 } 1780 cumack = asoc->cumulative_tsn; 1781 if ((cumack + 1) == tsn) 1782 cumack = tsn; 1783 1784 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1785 tsn, 1786 &stcb->sctp_socket->so_rcv)) { 1787 SCTP_PRINTF("Append fails end:%d\n", end); 1788 goto failed_pdapi_express_del; 1789 } 1790 SCTP_STAT_INCR(sctps_recvexpressm); 1791 control->sinfo_tsn = tsn; 1792 asoc->tsn_last_delivered = tsn; 1793 asoc->fragment_flags = chunk_flags; 1794 asoc->tsn_of_pdapi_last_delivered = tsn; 1795 asoc->last_flags_delivered = chunk_flags; 1796 asoc->last_strm_seq_delivered = strmseq; 1797 asoc->last_strm_no_delivered = strmno; 1798 if (end) { 1799 /* clean up the flags and such */ 1800 asoc->fragmented_delivery_inprogress = 0; 1801 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1802 asoc->strmin[strmno].last_sequence_delivered++; 1803 } 1804 stcb->asoc.control_pdapi = NULL; 1805 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1806 /* 1807 * There could be another message 1808 * ready 1809 */ 1810 need_reasm_check = 1; 1811 } 1812 } 1813 control = NULL; 1814 goto finish_express_del; 1815 } 1816 } 1817 failed_pdapi_express_del: 1818 control = NULL; 1819 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1820 sctp_alloc_a_chunk(stcb, chk); 1821 if (chk == NULL) { 1822 /* No memory so we drop the chunk */ 1823 SCTP_STAT_INCR(sctps_nomem); 1824 if (last_chunk == 0) { 1825 /* we copied it, free the copy */ 1826 sctp_m_freem(dmbuf); 1827 } 1828 return (0); 1829 } 1830 chk->rec.data.TSN_seq = tsn; 1831 chk->no_fr_allowed = 0; 1832 chk->rec.data.stream_seq = strmseq; 1833 chk->rec.data.stream_number = strmno; 1834 chk->rec.data.payloadtype = protocol_id; 1835 chk->rec.data.context = stcb->asoc.context; 1836 chk->rec.data.doing_fast_retransmit = 0; 1837 chk->rec.data.rcv_flags = chunk_flags; 1838 chk->asoc = asoc; 1839 chk->send_size = the_len; 1840 chk->whoTo = net; 1841 atomic_add_int(&net->ref_count, 1); 1842 chk->data = dmbuf; 1843 } else { 1844 sctp_alloc_a_readq(stcb, control); 1845 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1846 protocol_id, 1847 stcb->asoc.context, 1848 strmno, strmseq, 1849 chunk_flags, 1850 dmbuf); 1851 if (control == NULL) { 1852 /* No memory so we drop the chunk */ 1853 SCTP_STAT_INCR(sctps_nomem); 1854 if (last_chunk == 0) { 1855 /* we copied it, free the copy */ 1856 sctp_m_freem(dmbuf); 1857 } 1858 return (0); 1859 } 1860 control->length = the_len; 1861 } 1862 1863 /* Mark it as received */ 1864 /* Now queue it where it belongs */ 1865 if (control != NULL) { 1866 /* First a sanity check */ 1867 if (asoc->fragmented_delivery_inprogress) { 1868 /* 1869 * Ok, we have a fragmented delivery in progress if 1870 * this chunk is next to deliver OR belongs in our 1871 * view to the reassembly, the peer is evil or 1872 * broken. 
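* (control != NULL here means the chunk arrived unfragmented; with a
* PD-API in progress it must neither claim the TSN that should be the
* next fragment - reassembly queue empty and TSN equal to
* tsn_last_delivered + 1 - nor fall inside the span the reassembly
* queue already covers. Either case is treated as a violation below.)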
1873 */ 1874 uint32_t estimate_tsn; 1875 1876 estimate_tsn = asoc->tsn_last_delivered + 1; 1877 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1878 (estimate_tsn == control->sinfo_tsn)) { 1879 /* Evil/Broke peer */ 1880 sctp_m_freem(control->data); 1881 control->data = NULL; 1882 if (control->whoFrom) { 1883 sctp_free_remote_addr(control->whoFrom); 1884 control->whoFrom = NULL; 1885 } 1886 sctp_free_a_readq(stcb, control); 1887 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1888 0, M_DONTWAIT, 1, MT_DATA); 1889 if (oper) { 1890 struct sctp_paramhdr *ph; 1891 uint32_t *ippp; 1892 1893 SCTP_BUF_LEN(oper) = 1894 sizeof(struct sctp_paramhdr) + 1895 (3 * sizeof(uint32_t)); 1896 ph = mtod(oper, struct sctp_paramhdr *); 1897 ph->param_type = 1898 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1899 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1900 ippp = (uint32_t *) (ph + 1); 1901 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1902 ippp++; 1903 *ippp = tsn; 1904 ippp++; 1905 *ippp = ((strmno << 16) | strmseq); 1906 } 1907 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1908 sctp_abort_an_association(stcb->sctp_ep, stcb, 1909 SCTP_PEER_FAULTY, oper); 1910 1911 *abort_flag = 1; 1912 return (0); 1913 } else { 1914 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1915 sctp_m_freem(control->data); 1916 control->data = NULL; 1917 if (control->whoFrom) { 1918 sctp_free_remote_addr(control->whoFrom); 1919 control->whoFrom = NULL; 1920 } 1921 sctp_free_a_readq(stcb, control); 1922 1923 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1924 0, M_DONTWAIT, 1, MT_DATA); 1925 if (oper) { 1926 struct sctp_paramhdr *ph; 1927 uint32_t *ippp; 1928 1929 SCTP_BUF_LEN(oper) = 1930 sizeof(struct sctp_paramhdr) + 1931 (3 * sizeof(uint32_t)); 1932 ph = mtod(oper, 1933 struct sctp_paramhdr *); 1934 ph->param_type = 1935 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1936 ph->param_length = 1937 htons(SCTP_BUF_LEN(oper)); 1938 ippp = (uint32_t *) (ph + 1); 1939 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1940 ippp++; 1941 *ippp = tsn; 1942 ippp++; 1943 *ippp = ((strmno << 16) | strmseq); 1944 } 1945 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1946 sctp_abort_an_association(stcb->sctp_ep, 1947 stcb, SCTP_PEER_FAULTY, oper); 1948 1949 *abort_flag = 1; 1950 return (0); 1951 } 1952 } 1953 } else { 1954 /* No PDAPI running */ 1955 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1956 /* 1957 * Reassembly queue is NOT empty validate 1958 * that this tsn does not need to be in 1959 * reasembly queue. If it does then our peer 1960 * is broken or evil. 
1961 */ 1962 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1963 sctp_m_freem(control->data); 1964 control->data = NULL; 1965 if (control->whoFrom) { 1966 sctp_free_remote_addr(control->whoFrom); 1967 control->whoFrom = NULL; 1968 } 1969 sctp_free_a_readq(stcb, control); 1970 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1971 0, M_DONTWAIT, 1, MT_DATA); 1972 if (oper) { 1973 struct sctp_paramhdr *ph; 1974 uint32_t *ippp; 1975 1976 SCTP_BUF_LEN(oper) = 1977 sizeof(struct sctp_paramhdr) + 1978 (3 * sizeof(uint32_t)); 1979 ph = mtod(oper, 1980 struct sctp_paramhdr *); 1981 ph->param_type = 1982 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1983 ph->param_length = 1984 htons(SCTP_BUF_LEN(oper)); 1985 ippp = (uint32_t *) (ph + 1); 1986 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 1987 ippp++; 1988 *ippp = tsn; 1989 ippp++; 1990 *ippp = ((strmno << 16) | strmseq); 1991 } 1992 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1993 sctp_abort_an_association(stcb->sctp_ep, 1994 stcb, SCTP_PEER_FAULTY, oper); 1995 1996 *abort_flag = 1; 1997 return (0); 1998 } 1999 } 2000 } 2001 /* ok, if we reach here we have passed the sanity checks */ 2002 if (chunk_flags & SCTP_DATA_UNORDERED) { 2003 /* queue directly into socket buffer */ 2004 sctp_add_to_readq(stcb->sctp_ep, stcb, 2005 control, 2006 &stcb->sctp_socket->so_rcv, 1); 2007 } else { 2008 /* 2009 * Special check for when streams are resetting. We 2010 * could be more smart about this and check the 2011 * actual stream to see if it is not being reset.. 2012 * that way we would not create a HOLB when amongst 2013 * streams being reset and those not being reset. 2014 * 2015 * We take complete messages that have a stream reset 2016 * intervening (aka the TSN is after where our 2017 * cum-ack needs to be) off and put them on a 2018 * pending_reply_queue. The reassembly ones we do 2019 * not have to worry about since they are all sorted 2020 * and proceessed by TSN order. It is only the 2021 * singletons I must worry about. 2022 */ 2023 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2024 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN))) 2025 ) { 2026 /* 2027 * yep its past where we need to reset... go 2028 * ahead and queue it. 2029 */ 2030 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2031 /* first one on */ 2032 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2033 } else { 2034 struct sctp_queued_to_read *ctlOn; 2035 unsigned char inserted = 0; 2036 2037 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2038 while (ctlOn) { 2039 if (compare_with_wrap(control->sinfo_tsn, 2040 ctlOn->sinfo_tsn, MAX_TSN)) { 2041 ctlOn = TAILQ_NEXT(ctlOn, next); 2042 } else { 2043 /* found it */ 2044 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2045 inserted = 1; 2046 break; 2047 } 2048 } 2049 if (inserted == 0) { 2050 /* 2051 * must be put at end, use 2052 * prevP (all setup from 2053 * loop) to setup nextP. 2054 */ 2055 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2056 } 2057 } 2058 } else { 2059 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2060 if (*abort_flag) { 2061 return (0); 2062 } 2063 } 2064 } 2065 } else { 2066 /* Into the re-assembly queue */ 2067 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2068 if (*abort_flag) { 2069 /* 2070 * the assoc is now gone and chk was put onto the 2071 * reasm queue, which has all been freed. 
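* Clearing *m below keeps the caller from freeing the mbuf chain a
* second time.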
2072 */ 2073 *m = NULL; 2074 return (0); 2075 } 2076 } 2077 finish_express_del: 2078 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2079 /* we have a new high score */ 2080 asoc->highest_tsn_inside_map = tsn; 2081 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2082 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2083 } 2084 } 2085 if (tsn == (asoc->cumulative_tsn + 1)) { 2086 /* Update cum-ack */ 2087 asoc->cumulative_tsn = tsn; 2088 } 2089 if (last_chunk) { 2090 *m = NULL; 2091 } 2092 if (ordered) { 2093 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2094 } else { 2095 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2096 } 2097 SCTP_STAT_INCR(sctps_recvdata); 2098 /* Set it present please */ 2099 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) { 2100 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2101 } 2102 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2103 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2104 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2105 } 2106 SCTP_TCB_LOCK_ASSERT(stcb); 2107 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2108 /* check the special flag for stream resets */ 2109 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2110 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2111 (asoc->cumulative_tsn == liste->tsn)) 2112 ) { 2113 /* 2114 * we have finished working through the backlogged TSN's now 2115 * time to reset streams. 1: call reset function. 2: free 2116 * pending_reply space 3: distribute any chunks in 2117 * pending_reply_queue. 2118 */ 2119 struct sctp_queued_to_read *ctl; 2120 2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2123 SCTP_FREE(liste, SCTP_M_STRESET); 2124 /* sa_ignore FREED_MEMORY */ 2125 liste = TAILQ_FIRST(&asoc->resetHead); 2126 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2127 if (ctl && (liste == NULL)) { 2128 /* All can be removed */ 2129 while (ctl) { 2130 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2131 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2132 if (*abort_flag) { 2133 return (0); 2134 } 2135 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2136 } 2137 } else if (ctl) { 2138 /* more than one in queue */ 2139 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2140 /* 2141 * if ctl->sinfo_tsn is <= liste->tsn we can 2142 * process it which is the NOT of 2143 * ctl->sinfo_tsn > liste->tsn 2144 */ 2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2147 if (*abort_flag) { 2148 return (0); 2149 } 2150 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2151 } 2152 } 2153 /* 2154 * Now service re-assembly to pick up anything that has been 2155 * held on reassembly queue? 2156 */ 2157 sctp_deliver_reasm_check(stcb, asoc); 2158 need_reasm_check = 0; 2159 } 2160 if (need_reasm_check) { 2161 /* Another one waits ? 
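* need_reasm_check was set above when a PD-API delivery completed with
* data still on the reassembly queue; a further complete message may
* now be deliverable.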
*/ 2162 sctp_deliver_reasm_check(stcb, asoc); 2163 } 2164 return (1); 2165 } 2166 2167 int8_t sctp_map_lookup_tab[256] = { 2168 -1, 0, -1, 1, -1, 0, -1, 2, 2169 -1, 0, -1, 1, -1, 0, -1, 3, 2170 -1, 0, -1, 1, -1, 0, -1, 2, 2171 -1, 0, -1, 1, -1, 0, -1, 4, 2172 -1, 0, -1, 1, -1, 0, -1, 2, 2173 -1, 0, -1, 1, -1, 0, -1, 3, 2174 -1, 0, -1, 1, -1, 0, -1, 2, 2175 -1, 0, -1, 1, -1, 0, -1, 5, 2176 -1, 0, -1, 1, -1, 0, -1, 2, 2177 -1, 0, -1, 1, -1, 0, -1, 3, 2178 -1, 0, -1, 1, -1, 0, -1, 2, 2179 -1, 0, -1, 1, -1, 0, -1, 4, 2180 -1, 0, -1, 1, -1, 0, -1, 2, 2181 -1, 0, -1, 1, -1, 0, -1, 3, 2182 -1, 0, -1, 1, -1, 0, -1, 2, 2183 -1, 0, -1, 1, -1, 0, -1, 6, 2184 -1, 0, -1, 1, -1, 0, -1, 2, 2185 -1, 0, -1, 1, -1, 0, -1, 3, 2186 -1, 0, -1, 1, -1, 0, -1, 2, 2187 -1, 0, -1, 1, -1, 0, -1, 4, 2188 -1, 0, -1, 1, -1, 0, -1, 2, 2189 -1, 0, -1, 1, -1, 0, -1, 3, 2190 -1, 0, -1, 1, -1, 0, -1, 2, 2191 -1, 0, -1, 1, -1, 0, -1, 5, 2192 -1, 0, -1, 1, -1, 0, -1, 2, 2193 -1, 0, -1, 1, -1, 0, -1, 3, 2194 -1, 0, -1, 1, -1, 0, -1, 2, 2195 -1, 0, -1, 1, -1, 0, -1, 4, 2196 -1, 0, -1, 1, -1, 0, -1, 2, 2197 -1, 0, -1, 1, -1, 0, -1, 3, 2198 -1, 0, -1, 1, -1, 0, -1, 2, 2199 -1, 0, -1, 1, -1, 0, -1, 7, 2200 }; 2201 2202 2203 void 2204 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2205 { 2206 /* 2207 * Now we also need to check the mapping array in a couple of ways. 2208 * 1) Did we move the cum-ack point? 2209 */ 2210 struct sctp_association *asoc; 2211 int i, at; 2212 int all_ones, last_all_ones = 0; 2213 int slide_from, slide_end, lgap, distance; 2214 uint32_t old_cumack, old_base, old_highest; 2215 unsigned char aux_array[64]; 2216 2217 2218 asoc = &stcb->asoc; 2219 at = 0; 2220 2221 old_cumack = asoc->cumulative_tsn; 2222 old_base = asoc->mapping_array_base_tsn; 2223 old_highest = asoc->highest_tsn_inside_map; 2224 if (asoc->mapping_array_size < 64) 2225 memcpy(aux_array, asoc->mapping_array, 2226 asoc->mapping_array_size); 2227 else 2228 memcpy(aux_array, asoc->mapping_array, 64); 2229 2230 /* 2231 * We could probably improve this a small bit by calculating the 2232 * offset of the current cum-ack as the starting point. 
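* Note: sctp_map_lookup_tab[b] yields (number of consecutive 1-bits in
* b, counted from bit 0) minus one, e.g. tab[0x07] == 2 and
* tab[0x00] == -1. Whole 0xff bytes contribute 8 TSNs each; the first
* partial byte is resolved through the table. For example, a leading
* byte of 0xff followed by 0x07 gives at = 8 + 2, so the cum-ack sits
* 10 TSNs past mapping_array_base_tsn, and the increment below makes
* 'at' count the 11 TSNs that are present.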
2233 */ 2234 all_ones = 1; 2235 at = 0; 2236 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2237 2238 if (asoc->mapping_array[i] == 0xff) { 2239 at += 8; 2240 last_all_ones = 1; 2241 } else { 2242 /* there is a 0 bit */ 2243 all_ones = 0; 2244 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2245 last_all_ones = 0; 2246 break; 2247 } 2248 } 2249 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2250 /* at is one off, since in the table an embedded -1 is present */ 2251 at++; 2252 2253 if (compare_with_wrap(asoc->cumulative_tsn, 2254 asoc->highest_tsn_inside_map, 2255 MAX_TSN)) { 2256 #ifdef INVARIANTS 2257 panic("huh, cumack greater than high-tsn in map"); 2258 #else 2259 SCTP_PRINTF("huh, cumack greater than high-tsn in map - should panic?\n"); 2260 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2261 #endif 2262 } 2263 if (all_ones || 2264 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2265 /* The complete array was completed by a single FR */ 2266 /* highest becomes the cum-ack */ 2267 int clr; 2268 2269 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2270 /* clear the array */ 2271 if (all_ones) 2272 clr = asoc->mapping_array_size; 2273 else { 2274 clr = (at >> 3) + 1; 2275 /* 2276 * this should be the all-ones case, but just in case 2277 * :> 2278 */ 2279 if (clr > asoc->mapping_array_size) 2280 clr = asoc->mapping_array_size; 2281 } 2282 memset(asoc->mapping_array, 0, clr); 2283 /* base becomes one ahead of the cum-ack */ 2284 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2285 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2286 sctp_log_map(old_base, old_cumack, old_highest, 2287 SCTP_MAP_PREPARE_SLIDE); 2288 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2289 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2290 } 2291 } else if (at >= 8) { 2292 /* we can slide the mapping array down */ 2293 /* Calculate the new byte position we can move down */ 2294 slide_from = at >> 3; 2295 /* 2296 * now calculate the ceiling of the move using our highest 2297 * TSN value 2298 */ 2299 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2300 lgap = asoc->highest_tsn_inside_map - 2301 asoc->mapping_array_base_tsn; 2302 } else { 2303 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2304 asoc->highest_tsn_inside_map + 1; 2305 } 2306 slide_end = lgap >> 3; 2307 if (slide_end < slide_from) { 2308 panic("impossible slide"); 2309 } 2310 distance = (slide_end - slide_from) + 1; 2311 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2312 sctp_log_map(old_base, old_cumack, old_highest, 2313 SCTP_MAP_PREPARE_SLIDE); 2314 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2315 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2316 } 2317 if (distance + slide_from > asoc->mapping_array_size || 2318 distance < 0) { 2319 /* 2320 * Here we do NOT slide forward the array so that 2321 * hopefully when more data comes in to fill it up 2322 * we will be able to slide it forward.
Really I 2323 * don't think this should happen :-0 2324 */ 2325 2326 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2327 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2328 (uint32_t) asoc->mapping_array_size, 2329 SCTP_MAP_SLIDE_NONE); 2330 } 2331 } else { 2332 int ii; 2333 2334 for (ii = 0; ii < distance; ii++) { 2335 asoc->mapping_array[ii] = 2336 asoc->mapping_array[slide_from + ii]; 2337 } 2338 for (ii = distance; ii <= slide_end; ii++) { 2339 asoc->mapping_array[ii] = 0; 2340 } 2341 asoc->mapping_array_base_tsn += (slide_from << 3); 2342 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2343 sctp_log_map(asoc->mapping_array_base_tsn, 2344 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2345 SCTP_MAP_SLIDE_RESULT); 2346 } 2347 } 2348 } 2349 /* 2350 * Now we need to see if we need to queue a sack or just start the 2351 * timer (if allowed). 2352 */ 2353 if (ok_to_sack) { 2354 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2355 /* 2356 * Ok special case, in SHUTDOWN-SENT case. here we 2357 * maker sure SACK timer is off and instead send a 2358 * SHUTDOWN and a SACK 2359 */ 2360 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2361 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2362 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2363 } 2364 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2365 sctp_send_sack(stcb); 2366 } else { 2367 int is_a_gap; 2368 2369 /* is there a gap now ? */ 2370 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2371 stcb->asoc.cumulative_tsn, MAX_TSN); 2372 2373 /* 2374 * CMT DAC algorithm: increase number of packets 2375 * received since last ack 2376 */ 2377 stcb->asoc.cmt_dac_pkts_rcvd++; 2378 2379 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2380 * SACK */ 2381 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2382 * longer is one */ 2383 (stcb->asoc.numduptsns) || /* we have dup's */ 2384 (is_a_gap) || /* is still a gap */ 2385 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2386 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2387 ) { 2388 2389 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2390 (stcb->asoc.send_sack == 0) && 2391 (stcb->asoc.numduptsns == 0) && 2392 (stcb->asoc.delayed_ack) && 2393 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2394 2395 /* 2396 * CMT DAC algorithm: With CMT, 2397 * delay acks even in the face of 2398 * 2399 * reordering. Therefore, if acks that 2400 * do not have to be sent because of 2401 * the above reasons, will be 2402 * delayed. That is, acks that would 2403 * have been sent due to gap reports 2404 * will be delayed with DAC. Start 2405 * the delayed ack timer. 2406 */ 2407 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2408 stcb->sctp_ep, stcb, NULL); 2409 } else { 2410 /* 2411 * Ok we must build a SACK since the 2412 * timer is pending, we got our 2413 * first packet OR there are gaps or 2414 * duplicates. 
2415 */ 2416 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2417 sctp_send_sack(stcb); 2418 } 2419 } else { 2420 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2421 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2422 stcb->sctp_ep, stcb, NULL); 2423 } 2424 } 2425 } 2426 } 2427 } 2428 2429 void 2430 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2431 { 2432 struct sctp_tmit_chunk *chk; 2433 uint32_t tsize; 2434 uint16_t nxt_todel; 2435 2436 if (asoc->fragmented_delivery_inprogress) { 2437 sctp_service_reassembly(stcb, asoc); 2438 } 2439 /* Can we proceed further, i.e. the PD-API is complete */ 2440 if (asoc->fragmented_delivery_inprogress) { 2441 /* no */ 2442 return; 2443 } 2444 /* 2445 * Now is there some other chunk I can deliver from the reassembly 2446 * queue. 2447 */ 2448 doit_again: 2449 chk = TAILQ_FIRST(&asoc->reasmqueue); 2450 if (chk == NULL) { 2451 asoc->size_on_reasm_queue = 0; 2452 asoc->cnt_on_reasm_queue = 0; 2453 return; 2454 } 2455 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2456 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2457 ((nxt_todel == chk->rec.data.stream_seq) || 2458 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2459 /* 2460 * Yep the first one is here. We setup to start reception, 2461 * by backing down the TSN just in case we can't deliver. 2462 */ 2463 2464 /* 2465 * Before we start though either all of the message should 2466 * be here or 1/4 the socket buffer max or nothing on the 2467 * delivery queue and something can be delivered. 2468 */ 2469 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2470 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2471 asoc->fragmented_delivery_inprogress = 1; 2472 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2473 asoc->str_of_pdapi = chk->rec.data.stream_number; 2474 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2475 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2476 asoc->fragment_flags = chk->rec.data.rcv_flags; 2477 sctp_service_reassembly(stcb, asoc); 2478 if (asoc->fragmented_delivery_inprogress == 0) { 2479 goto doit_again; 2480 } 2481 } 2482 } 2483 } 2484 2485 int 2486 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2487 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2488 struct sctp_nets *net, uint32_t * high_tsn) 2489 { 2490 struct sctp_data_chunk *ch, chunk_buf; 2491 struct sctp_association *asoc; 2492 int num_chunks = 0; /* number of control chunks processed */ 2493 int stop_proc = 0; 2494 int chk_length, break_flag, last_chunk; 2495 int abort_flag = 0, was_a_gap = 0; 2496 struct mbuf *m; 2497 2498 /* set the rwnd */ 2499 sctp_set_rwnd(stcb, &stcb->asoc); 2500 2501 m = *mm; 2502 SCTP_TCB_LOCK_ASSERT(stcb); 2503 asoc = &stcb->asoc; 2504 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2505 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2506 /* there was a gap before this data was processed */ 2507 was_a_gap = 1; 2508 } 2509 /* 2510 * setup where we got the last DATA packet from for any SACK that 2511 * may need to go out. Don't bump the net. This is done ONLY when a 2512 * chunk is assigned. 2513 */ 2514 asoc->last_data_chunk_from = net; 2515 2516 /*- 2517 * Now before we proceed we must figure out if this is a wasted 2518 * cluster... i.e. it is a small packet sent in and yet the driver 2519 * underneath allocated a full cluster for it. If so we must copy it 2520 * to a smaller mbuf and free up the cluster mbuf. This will help 2521 * with cluster starvation. 
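* (MLEN is the data capacity of an ordinary, non-cluster mbuf, so a
* singleton shorter than that is guaranteed to fit without a cluster.)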
Note for __Panda__ we don't do this 2522 * since it has clusters all the way down to 64 bytes. 2523 */ 2524 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2525 /* we only handle mbufs that are singletons.. not chains */ 2526 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2527 if (m) { 2528 /* ok lets see if we can copy the data up */ 2529 caddr_t *from, *to; 2530 2531 /* get the pointers and copy */ 2532 to = mtod(m, caddr_t *); 2533 from = mtod((*mm), caddr_t *); 2534 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2535 /* copy the length and free up the old */ 2536 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2537 sctp_m_freem(*mm); 2538 /* sucess, back copy */ 2539 *mm = m; 2540 } else { 2541 /* We are in trouble in the mbuf world .. yikes */ 2542 m = *mm; 2543 } 2544 } 2545 /* get pointer to the first chunk header */ 2546 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2547 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2548 if (ch == NULL) { 2549 return (1); 2550 } 2551 /* 2552 * process all DATA chunks... 2553 */ 2554 *high_tsn = asoc->cumulative_tsn; 2555 break_flag = 0; 2556 asoc->data_pkts_seen++; 2557 while (stop_proc == 0) { 2558 /* validate chunk length */ 2559 chk_length = ntohs(ch->ch.chunk_length); 2560 if (length - *offset < chk_length) { 2561 /* all done, mutulated chunk */ 2562 stop_proc = 1; 2563 break; 2564 } 2565 if (ch->ch.chunk_type == SCTP_DATA) { 2566 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2567 /* 2568 * Need to send an abort since we had a 2569 * invalid data chunk. 2570 */ 2571 struct mbuf *op_err; 2572 2573 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2574 0, M_DONTWAIT, 1, MT_DATA); 2575 2576 if (op_err) { 2577 struct sctp_paramhdr *ph; 2578 uint32_t *ippp; 2579 2580 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2581 (2 * sizeof(uint32_t)); 2582 ph = mtod(op_err, struct sctp_paramhdr *); 2583 ph->param_type = 2584 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2585 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2586 ippp = (uint32_t *) (ph + 1); 2587 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2588 ippp++; 2589 *ippp = asoc->cumulative_tsn; 2590 2591 } 2592 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2593 sctp_abort_association(inp, stcb, m, iphlen, sh, 2594 op_err, 0); 2595 return (2); 2596 } 2597 #ifdef SCTP_AUDITING_ENABLED 2598 sctp_audit_log(0xB1, 0); 2599 #endif 2600 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2601 last_chunk = 1; 2602 } else { 2603 last_chunk = 0; 2604 } 2605 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2606 chk_length, net, high_tsn, &abort_flag, &break_flag, 2607 last_chunk)) { 2608 num_chunks++; 2609 } 2610 if (abort_flag) 2611 return (2); 2612 2613 if (break_flag) { 2614 /* 2615 * Set because of out of rwnd space and no 2616 * drop rep space left. 
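* Once break_flag is set the remaining DATA chunks in this packet are
* skipped and a PACKET-DROPPED report is sent after the loop.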
2617 */ 2618 stop_proc = 1; 2619 break; 2620 } 2621 } else { 2622 /* not a data chunk in the data region */ 2623 switch (ch->ch.chunk_type) { 2624 case SCTP_INITIATION: 2625 case SCTP_INITIATION_ACK: 2626 case SCTP_SELECTIVE_ACK: 2627 case SCTP_HEARTBEAT_REQUEST: 2628 case SCTP_HEARTBEAT_ACK: 2629 case SCTP_ABORT_ASSOCIATION: 2630 case SCTP_SHUTDOWN: 2631 case SCTP_SHUTDOWN_ACK: 2632 case SCTP_OPERATION_ERROR: 2633 case SCTP_COOKIE_ECHO: 2634 case SCTP_COOKIE_ACK: 2635 case SCTP_ECN_ECHO: 2636 case SCTP_ECN_CWR: 2637 case SCTP_SHUTDOWN_COMPLETE: 2638 case SCTP_AUTHENTICATION: 2639 case SCTP_ASCONF_ACK: 2640 case SCTP_PACKET_DROPPED: 2641 case SCTP_STREAM_RESET: 2642 case SCTP_FORWARD_CUM_TSN: 2643 case SCTP_ASCONF: 2644 /* 2645 * Now, what do we do with KNOWN chunks that 2646 * are NOT in the right place? 2647 * 2648 * For now, I do nothing but ignore them. We 2649 * may later want to add sysctl stuff to 2650 * switch out and do either an ABORT() or 2651 * possibly process them. 2652 */ 2653 if (sctp_strict_data_order) { 2654 struct mbuf *op_err; 2655 2656 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2657 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0); 2658 return (2); 2659 } 2660 break; 2661 default: 2662 /* unknown chunk type, use bit rules */ 2663 if (ch->ch.chunk_type & 0x40) { 2664 /* Add a error report to the queue */ 2665 struct mbuf *merr; 2666 struct sctp_paramhdr *phd; 2667 2668 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 2669 if (merr) { 2670 phd = mtod(merr, struct sctp_paramhdr *); 2671 /* 2672 * We cheat and use param 2673 * type since we did not 2674 * bother to define a error 2675 * cause struct. They are 2676 * the same basic format 2677 * with different names. 2678 */ 2679 phd->param_type = 2680 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2681 phd->param_length = 2682 htons(chk_length + sizeof(*phd)); 2683 SCTP_BUF_LEN(merr) = sizeof(*phd); 2684 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, 2685 SCTP_SIZE32(chk_length), 2686 M_DONTWAIT); 2687 if (SCTP_BUF_NEXT(merr)) { 2688 sctp_queue_op_err(stcb, merr); 2689 } else { 2690 sctp_m_freem(merr); 2691 } 2692 } 2693 } 2694 if ((ch->ch.chunk_type & 0x80) == 0) { 2695 /* discard the rest of this packet */ 2696 stop_proc = 1; 2697 } /* else skip this bad chunk and 2698 * continue... */ 2699 break; 2700 }; /* switch of chunk type */ 2701 } 2702 *offset += SCTP_SIZE32(chk_length); 2703 if ((*offset >= length) || stop_proc) { 2704 /* no more data left in the mbuf chain */ 2705 stop_proc = 1; 2706 continue; 2707 } 2708 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2709 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2710 if (ch == NULL) { 2711 *offset = length; 2712 stop_proc = 1; 2713 break; 2714 2715 } 2716 } /* while */ 2717 if (break_flag) { 2718 /* 2719 * we need to report rwnd overrun drops. 2720 */ 2721 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2722 } 2723 if (num_chunks) { 2724 /* 2725 * Did we get data, if so update the time for auto-close and 2726 * give peer credit for being alive. 
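* (Resetting overall_error_count also clears the error threshold used
* to detect association failure.)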
2727 */ 2728 SCTP_STAT_INCR(sctps_recvpktwithdata); 2729 stcb->asoc.overall_error_count = 0; 2730 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2731 } 2732 /* now service all of the reassm queue if needed */ 2733 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2734 sctp_service_queues(stcb, asoc); 2735 2736 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2737 /* Assure that we ack right away */ 2738 stcb->asoc.send_sack = 1; 2739 } 2740 /* Start a sack timer or QUEUE a SACK for sending */ 2741 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2742 (stcb->asoc.mapping_array[0] != 0xff)) { 2743 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 2744 (stcb->asoc.delayed_ack == 0) || 2745 (stcb->asoc.send_sack == 1)) { 2746 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2747 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2748 } 2749 sctp_send_sack(stcb); 2750 } else { 2751 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2752 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2753 stcb->sctp_ep, stcb, NULL); 2754 } 2755 } 2756 } else { 2757 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2758 } 2759 if (abort_flag) 2760 return (2); 2761 2762 return (0); 2763 } 2764 2765 static void 2766 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 2767 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2768 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2769 int num_seg, int *ecn_seg_sums) 2770 { 2771 /************************************************/ 2772 /* process fragments and update sendqueue */ 2773 /************************************************/ 2774 struct sctp_sack *sack; 2775 struct sctp_gap_ack_block *frag, block; 2776 struct sctp_tmit_chunk *tp1; 2777 int i; 2778 unsigned int j; 2779 int num_frs = 0; 2780 2781 uint16_t frag_strt, frag_end, primary_flag_set; 2782 u_long last_frag_high; 2783 2784 /* 2785 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
2786 */ 2787 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2788 primary_flag_set = 1; 2789 } else { 2790 primary_flag_set = 0; 2791 } 2792 sack = &ch->sack; 2793 2794 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 2795 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 2796 *offset += sizeof(block); 2797 if (frag == NULL) { 2798 return; 2799 } 2800 tp1 = NULL; 2801 last_frag_high = 0; 2802 for (i = 0; i < num_seg; i++) { 2803 frag_strt = ntohs(frag->start); 2804 frag_end = ntohs(frag->end); 2805 /* some sanity checks on the fargment offsets */ 2806 if (frag_strt > frag_end) { 2807 /* this one is malformed, skip */ 2808 frag++; 2809 continue; 2810 } 2811 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2812 MAX_TSN)) 2813 *biggest_tsn_acked = frag_end + last_tsn; 2814 2815 /* mark acked dgs and find out the highestTSN being acked */ 2816 if (tp1 == NULL) { 2817 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2818 2819 /* save the locations of the last frags */ 2820 last_frag_high = frag_end + last_tsn; 2821 } else { 2822 /* 2823 * now lets see if we need to reset the queue due to 2824 * a out-of-order SACK fragment 2825 */ 2826 if (compare_with_wrap(frag_strt + last_tsn, 2827 last_frag_high, MAX_TSN)) { 2828 /* 2829 * if the new frag starts after the last TSN 2830 * frag covered, we are ok and this one is 2831 * beyond the last one 2832 */ 2833 ; 2834 } else { 2835 /* 2836 * ok, they have reset us, so we need to 2837 * reset the queue this will cause extra 2838 * hunting but hey, they chose the 2839 * performance hit when they failed to order 2840 * there gaps.. 2841 */ 2842 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2843 } 2844 last_frag_high = frag_end + last_tsn; 2845 } 2846 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2847 while (tp1) { 2848 if (tp1->rec.data.doing_fast_retransmit) 2849 num_frs++; 2850 2851 /* 2852 * CMT: CUCv2 algorithm. For each TSN being 2853 * processed from the sent queue, track the 2854 * next expected pseudo-cumack, or 2855 * rtx_pseudo_cumack, if required. Separate 2856 * cumack trackers for first transmissions, 2857 * and retransmissions. 2858 */ 2859 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2860 (tp1->snd_count == 1)) { 2861 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2862 tp1->whoTo->find_pseudo_cumack = 0; 2863 } 2864 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2865 (tp1->snd_count > 1)) { 2866 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2867 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2868 } 2869 if (tp1->rec.data.TSN_seq == j) { 2870 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2871 /* 2872 * must be held until 2873 * cum-ack passes 2874 */ 2875 /* 2876 * ECN Nonce: Add the nonce 2877 * value to the sender's 2878 * nonce sum 2879 */ 2880 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2881 /*- 2882 * If it is less than RESEND, it is 2883 * now no-longer in flight. 2884 * Higher values may already be set 2885 * via previous Gap Ack Blocks... 2886 * i.e. ACKED or RESEND. 2887 */ 2888 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2889 *biggest_newly_acked_tsn, MAX_TSN)) { 2890 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2891 } 2892 /* 2893 * CMT: SFR algo 2894 * (and HTNA) - set 2895 * saw_newack to 1 2896 * for dest being 2897 * newly acked. 2898 * update 2899 * this_sack_highest_ 2900 * newack if 2901 * appropriate. 
2902 */ 2903 if (tp1->rec.data.chunk_was_revoked == 0) 2904 tp1->whoTo->saw_newack = 1; 2905 2906 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2907 tp1->whoTo->this_sack_highest_newack, 2908 MAX_TSN)) { 2909 tp1->whoTo->this_sack_highest_newack = 2910 tp1->rec.data.TSN_seq; 2911 } 2912 /* 2913 * CMT DAC algo: 2914 * also update 2915 * this_sack_lowest_n 2916 * ewack 2917 */ 2918 if (*this_sack_lowest_newack == 0) { 2919 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2920 sctp_log_sack(*this_sack_lowest_newack, 2921 last_tsn, 2922 tp1->rec.data.TSN_seq, 2923 0, 2924 0, 2925 SCTP_LOG_TSN_ACKED); 2926 } 2927 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2928 } 2929 /* 2930 * CMT: CUCv2 2931 * algorithm. If 2932 * (rtx-)pseudo-cumac 2933 * k for corresp 2934 * dest is being 2935 * acked, then we 2936 * have a new 2937 * (rtx-)pseudo-cumac 2938 * k. Set 2939 * new_(rtx_)pseudo_c 2940 * umack to TRUE so 2941 * that the cwnd for 2942 * this dest can be 2943 * updated. Also 2944 * trigger search 2945 * for the next 2946 * expected 2947 * (rtx-)pseudo-cumac 2948 * k. Separate 2949 * pseudo_cumack 2950 * trackers for 2951 * first 2952 * transmissions and 2953 * retransmissions. 2954 */ 2955 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2956 if (tp1->rec.data.chunk_was_revoked == 0) { 2957 tp1->whoTo->new_pseudo_cumack = 1; 2958 } 2959 tp1->whoTo->find_pseudo_cumack = 1; 2960 } 2961 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 2962 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2963 } 2964 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2965 if (tp1->rec.data.chunk_was_revoked == 0) { 2966 tp1->whoTo->new_pseudo_cumack = 1; 2967 } 2968 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2969 } 2970 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2971 sctp_log_sack(*biggest_newly_acked_tsn, 2972 last_tsn, 2973 tp1->rec.data.TSN_seq, 2974 frag_strt, 2975 frag_end, 2976 SCTP_LOG_TSN_ACKED); 2977 } 2978 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2979 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2980 tp1->whoTo->flight_size, 2981 tp1->book_size, 2982 (uintptr_t) tp1->whoTo, 2983 tp1->rec.data.TSN_seq); 2984 } 2985 sctp_flight_size_decrease(tp1); 2986 sctp_total_flight_decrease(stcb, tp1); 2987 2988 tp1->whoTo->net_ack += tp1->send_size; 2989 if (tp1->snd_count < 2) { 2990 /* 2991 * True 2992 * non-retran 2993 * smited 2994 * chunk */ 2995 tp1->whoTo->net_ack2 += tp1->send_size; 2996 2997 /* 2998 * update RTO 2999 * too ? 
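* Only chunks on their first transmission (snd_count < 2) contribute
* an RTT sample here, per Karn's algorithm; retransmitted chunks give
* ambiguous timings.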
*/ 3000 if (tp1->do_rtt) { 3001 tp1->whoTo->RTO = 3002 sctp_calculate_rto(stcb, 3003 asoc, 3004 tp1->whoTo, 3005 &tp1->sent_rcv_time, 3006 sctp_align_safe_nocopy); 3007 tp1->do_rtt = 0; 3008 } 3009 } 3010 } 3011 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3012 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3013 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3014 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3015 asoc->this_sack_highest_gap, 3016 MAX_TSN)) { 3017 asoc->this_sack_highest_gap = 3018 tp1->rec.data.TSN_seq; 3019 } 3020 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3021 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3022 #ifdef SCTP_AUDITING_ENABLED 3023 sctp_audit_log(0xB2, 3024 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3025 #endif 3026 } 3027 } 3028 /* 3029 * All chunks NOT UNSENT 3030 * fall through here and are 3031 * marked 3032 */ 3033 tp1->sent = SCTP_DATAGRAM_MARKED; 3034 if (tp1->rec.data.chunk_was_revoked) { 3035 /* deflate the cwnd */ 3036 tp1->whoTo->cwnd -= tp1->book_size; 3037 tp1->rec.data.chunk_was_revoked = 0; 3038 } 3039 } 3040 break; 3041 } /* if (tp1->TSN_seq == j) */ 3042 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3043 MAX_TSN)) 3044 break; 3045 3046 tp1 = TAILQ_NEXT(tp1, sctp_next); 3047 } /* end while (tp1) */ 3048 } /* end for (j = fragStart */ 3049 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3050 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3051 *offset += sizeof(block); 3052 if (frag == NULL) { 3053 break; 3054 } 3055 } 3056 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3057 if (num_frs) 3058 sctp_log_fr(*biggest_tsn_acked, 3059 *biggest_newly_acked_tsn, 3060 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3061 } 3062 } 3063 3064 static void 3065 sctp_check_for_revoked(struct sctp_tcb *stcb, 3066 struct sctp_association *asoc, uint32_t cumack, 3067 u_long biggest_tsn_acked) 3068 { 3069 struct sctp_tmit_chunk *tp1; 3070 int tot_revoked = 0; 3071 3072 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3073 while (tp1) { 3074 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3075 MAX_TSN)) { 3076 /* 3077 * ok this guy is either ACK or MARKED. If it is 3078 * ACKED it has been previously acked but not this 3079 * time i.e. revoked. If it is MARKED it was ACK'ed 3080 * again. 3081 */ 3082 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3083 MAX_TSN)) 3084 break; 3085 3086 3087 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3088 /* it has been revoked */ 3089 tp1->sent = SCTP_DATAGRAM_SENT; 3090 tp1->rec.data.chunk_was_revoked = 1; 3091 /* 3092 * We must add this stuff back in to assure 3093 * timers and such get started. 3094 */ 3095 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3096 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3097 tp1->whoTo->flight_size, 3098 tp1->book_size, 3099 (uintptr_t) tp1->whoTo, 3100 tp1->rec.data.TSN_seq); 3101 } 3102 sctp_flight_size_increase(tp1); 3103 sctp_total_flight_increase(stcb, tp1); 3104 /* 3105 * We inflate the cwnd to compensate for our 3106 * artificial inflation of the flight_size. 
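* The matching deflate happens when a later SACK marks the revoked
* chunk again (see the chunk_was_revoked handling in the gap-ack
* processing above).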
3107 */ 3108 tp1->whoTo->cwnd += tp1->book_size; 3109 tot_revoked++; 3110 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 3111 sctp_log_sack(asoc->last_acked_seq, 3112 cumack, 3113 tp1->rec.data.TSN_seq, 3114 0, 3115 0, 3116 SCTP_LOG_TSN_REVOKED); 3117 } 3118 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3119 /* it has been re-acked in this SACK */ 3120 tp1->sent = SCTP_DATAGRAM_ACKED; 3121 } 3122 } 3123 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3124 break; 3125 tp1 = TAILQ_NEXT(tp1, sctp_next); 3126 } 3127 if (tot_revoked > 0) { 3128 /* 3129 * Setup the ecn nonce re-sync point. We do this since once 3130 * data is revoked we begin to retransmit things, which do 3131 * NOT have the ECN bits set. This means we are now out of 3132 * sync and must wait until we get back in sync with the 3133 * peer to check ECN bits. 3134 */ 3135 tp1 = TAILQ_FIRST(&asoc->send_queue); 3136 if (tp1 == NULL) { 3137 asoc->nonce_resync_tsn = asoc->sending_seq; 3138 } else { 3139 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3140 } 3141 asoc->nonce_wait_for_ecne = 0; 3142 asoc->nonce_sum_check = 0; 3143 } 3144 } 3145 3146 static void 3147 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3148 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3149 { 3150 struct sctp_tmit_chunk *tp1; 3151 int strike_flag = 0; 3152 struct timeval now; 3153 int tot_retrans = 0; 3154 uint32_t sending_seq; 3155 struct sctp_nets *net; 3156 int num_dests_sacked = 0; 3157 3158 /* 3159 * select the sending_seq, this is either the next thing ready to be 3160 * sent but not transmitted, OR, the next seq we assign. 3161 */ 3162 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3163 if (tp1 == NULL) { 3164 sending_seq = asoc->sending_seq; 3165 } else { 3166 sending_seq = tp1->rec.data.TSN_seq; 3167 } 3168 3169 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3170 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3171 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3172 if (net->saw_newack) 3173 num_dests_sacked++; 3174 } 3175 } 3176 if (stcb->asoc.peer_supports_prsctp) { 3177 (void)SCTP_GETTIME_TIMEVAL(&now); 3178 } 3179 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3180 while (tp1) { 3181 strike_flag = 0; 3182 if (tp1->no_fr_allowed) { 3183 /* this one had a timeout or something */ 3184 tp1 = TAILQ_NEXT(tp1, sctp_next); 3185 continue; 3186 } 3187 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3188 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3189 sctp_log_fr(biggest_tsn_newly_acked, 3190 tp1->rec.data.TSN_seq, 3191 tp1->sent, 3192 SCTP_FR_LOG_CHECK_STRIKE); 3193 } 3194 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3195 MAX_TSN) || 3196 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3197 /* done */ 3198 break; 3199 } 3200 if (stcb->asoc.peer_supports_prsctp) { 3201 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3202 /* Is it expired? */ 3203 if ( 3204 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3205 ) { 3206 /* Yes so drop it */ 3207 if (tp1->data != NULL) { 3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3209 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3210 &asoc->sent_queue); 3211 } 3212 tp1 = TAILQ_NEXT(tp1, sctp_next); 3213 continue; 3214 } 3215 } 3216 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3217 /* Has it been retransmitted tv_sec times? 
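* (For the PR-SCTP RTX policy, the tv_sec field of timetodrop is
* reused to carry the maximum number of retransmissions rather than a
* time.)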
*/ 3218 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3219 /* Yes, so drop it */ 3220 if (tp1->data != NULL) { 3221 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3222 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3223 &asoc->sent_queue); 3224 } 3225 tp1 = TAILQ_NEXT(tp1, sctp_next); 3226 continue; 3227 } 3228 } 3229 } 3230 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3231 asoc->this_sack_highest_gap, MAX_TSN)) { 3232 /* we are beyond the tsn in the sack */ 3233 break; 3234 } 3235 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3236 /* either a RESEND, ACKED, or MARKED */ 3237 /* skip */ 3238 tp1 = TAILQ_NEXT(tp1, sctp_next); 3239 continue; 3240 } 3241 /* 3242 * CMT : SFR algo (covers part of DAC and HTNA as well) 3243 */ 3244 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3245 /* 3246 * No new acks were receieved for data sent to this 3247 * dest. Therefore, according to the SFR algo for 3248 * CMT, no data sent to this dest can be marked for 3249 * FR using this SACK. 3250 */ 3251 tp1 = TAILQ_NEXT(tp1, sctp_next); 3252 continue; 3253 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq, 3254 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3255 /* 3256 * CMT: New acks were receieved for data sent to 3257 * this dest. But no new acks were seen for data 3258 * sent after tp1. Therefore, according to the SFR 3259 * algo for CMT, tp1 cannot be marked for FR using 3260 * this SACK. This step covers part of the DAC algo 3261 * and the HTNA algo as well. 3262 */ 3263 tp1 = TAILQ_NEXT(tp1, sctp_next); 3264 continue; 3265 } 3266 /* 3267 * Here we check to see if we were have already done a FR 3268 * and if so we see if the biggest TSN we saw in the sack is 3269 * smaller than the recovery point. If so we don't strike 3270 * the tsn... otherwise we CAN strike the TSN. 3271 */ 3272 /* 3273 * @@@ JRI: Check for CMT if (accum_moved && 3274 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3275 * 0)) { 3276 */ 3277 if (accum_moved && asoc->fast_retran_loss_recovery) { 3278 /* 3279 * Strike the TSN if in fast-recovery and cum-ack 3280 * moved. 3281 */ 3282 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3283 sctp_log_fr(biggest_tsn_newly_acked, 3284 tp1->rec.data.TSN_seq, 3285 tp1->sent, 3286 SCTP_FR_LOG_STRIKE_CHUNK); 3287 } 3288 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3289 tp1->sent++; 3290 } 3291 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3292 /* 3293 * CMT DAC algorithm: If SACK flag is set to 3294 * 0, then lowest_newack test will not pass 3295 * because it would have been set to the 3296 * cumack earlier. If not already to be 3297 * rtx'd, If not a mixed sack and if tp1 is 3298 * not between two sacked TSNs, then mark by 3299 * one more. NOTE that we are marking by one 3300 * additional time since the SACK DAC flag 3301 * indicates that two packets have been 3302 * received after this missing TSN. 3303 */ 3304 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3305 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3306 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3307 sctp_log_fr(16 + num_dests_sacked, 3308 tp1->rec.data.TSN_seq, 3309 tp1->sent, 3310 SCTP_FR_LOG_STRIKE_CHUNK); 3311 } 3312 tp1->sent++; 3313 } 3314 } 3315 } else if (tp1->rec.data.doing_fast_retransmit) { 3316 /* 3317 * For those that have done a FR we must take 3318 * special consideration if we strike. I.e the 3319 * biggest_newly_acked must be higher than the 3320 * sending_seq at the time we did the FR. 
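* Otherwise this SACK only covers data that was already outstanding
* when the previous fast retransmit fired and is not evidence of a
* new loss.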
3321 */ 3322 if ( 3323 #ifdef SCTP_FR_TO_ALTERNATE 3324 /* 3325 * If FR's go to new networks, then we must only do 3326 * this for singly homed asoc's. However if the FR's 3327 * go to the same network (Armando's work) then its 3328 * ok to FR multiple times. 3329 */ 3330 (asoc->numnets < 2) 3331 #else 3332 (1) 3333 #endif 3334 ) { 3335 3336 if ((compare_with_wrap(biggest_tsn_newly_acked, 3337 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3338 (biggest_tsn_newly_acked == 3339 tp1->rec.data.fast_retran_tsn)) { 3340 /* 3341 * Strike the TSN, since this ack is 3342 * beyond where things were when we 3343 * did a FR. 3344 */ 3345 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3346 sctp_log_fr(biggest_tsn_newly_acked, 3347 tp1->rec.data.TSN_seq, 3348 tp1->sent, 3349 SCTP_FR_LOG_STRIKE_CHUNK); 3350 } 3351 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3352 tp1->sent++; 3353 } 3354 strike_flag = 1; 3355 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3356 /* 3357 * CMT DAC algorithm: If 3358 * SACK flag is set to 0, 3359 * then lowest_newack test 3360 * will not pass because it 3361 * would have been set to 3362 * the cumack earlier. If 3363 * not already to be rtx'd, 3364 * If not a mixed sack and 3365 * if tp1 is not between two 3366 * sacked TSNs, then mark by 3367 * one more. NOTE that we 3368 * are marking by one 3369 * additional time since the 3370 * SACK DAC flag indicates 3371 * that two packets have 3372 * been received after this 3373 * missing TSN. 3374 */ 3375 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3376 (num_dests_sacked == 1) && 3377 compare_with_wrap(this_sack_lowest_newack, 3378 tp1->rec.data.TSN_seq, MAX_TSN)) { 3379 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3380 sctp_log_fr(32 + num_dests_sacked, 3381 tp1->rec.data.TSN_seq, 3382 tp1->sent, 3383 SCTP_FR_LOG_STRIKE_CHUNK); 3384 } 3385 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3386 tp1->sent++; 3387 3388 } 3389 } 3390 } 3391 } 3392 } 3393 /* 3394 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3395 * algo covers HTNA. 3396 */ 3397 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3398 biggest_tsn_newly_acked, MAX_TSN)) { 3399 /* 3400 * We don't strike these: This is the HTNA 3401 * algorithm i.e. we don't strike If our TSN is 3402 * larger than the Highest TSN Newly Acked. 3403 */ 3404 ; 3405 } else { 3406 /* Strike the TSN */ 3407 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3408 sctp_log_fr(biggest_tsn_newly_acked, 3409 tp1->rec.data.TSN_seq, 3410 tp1->sent, 3411 SCTP_FR_LOG_STRIKE_CHUNK); 3412 } 3413 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3414 tp1->sent++; 3415 } 3416 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3417 /* 3418 * CMT DAC algorithm: If SACK flag is set to 3419 * 0, then lowest_newack test will not pass 3420 * because it would have been set to the 3421 * cumack earlier. If not already to be 3422 * rtx'd, If not a mixed sack and if tp1 is 3423 * not between two sacked TSNs, then mark by 3424 * one more. NOTE that we are marking by one 3425 * additional time since the SACK DAC flag 3426 * indicates that two packets have been 3427 * received after this missing TSN. 
3428 */ 3429 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3430 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3431 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3432 sctp_log_fr(48 + num_dests_sacked, 3433 tp1->rec.data.TSN_seq, 3434 tp1->sent, 3435 SCTP_FR_LOG_STRIKE_CHUNK); 3436 } 3437 tp1->sent++; 3438 } 3439 } 3440 } 3441 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3442 /* Increment the count to resend */ 3443 struct sctp_nets *alt; 3444 3445 /* printf("OK, we are now ready to FR this guy\n"); */ 3446 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3447 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3448 0, SCTP_FR_MARKED); 3449 } 3450 if (strike_flag) { 3451 /* This is a subsequent FR */ 3452 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3453 } 3454 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3455 if (sctp_cmt_on_off) { 3456 /* 3457 * CMT: Using RTX_SSTHRESH policy for CMT. 3458 * If CMT is being used, then pick dest with 3459 * largest ssthresh for any retransmission. 3460 */ 3461 tp1->no_fr_allowed = 1; 3462 alt = tp1->whoTo; 3463 /* sa_ignore NO_NULL_CHK */ 3464 if (sctp_cmt_on_off && sctp_cmt_pf) { 3465 /* 3466 * JRS 5/18/07 - If CMT PF is on, 3467 * use the PF version of 3468 * find_alt_net() 3469 */ 3470 alt = sctp_find_alternate_net(stcb, alt, 2); 3471 } else { 3472 /* 3473 * JRS 5/18/07 - If only CMT is on, 3474 * use the CMT version of 3475 * find_alt_net() 3476 */ 3477 alt = sctp_find_alternate_net(stcb, alt, 1); 3478 } 3479 if (alt == NULL) { 3480 alt = tp1->whoTo; 3481 } 3482 /* 3483 * CUCv2: If a different dest is picked for 3484 * the retransmission, then new 3485 * (rtx-)pseudo_cumack needs to be tracked 3486 * for orig dest. Let CUCv2 track new (rtx-) 3487 * pseudo-cumack always. 3488 */ 3489 if (tp1->whoTo) { 3490 tp1->whoTo->find_pseudo_cumack = 1; 3491 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3492 } 3493 } else {/* CMT is OFF */ 3494 3495 #ifdef SCTP_FR_TO_ALTERNATE 3496 /* Can we find an alternate? */ 3497 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3498 #else 3499 /* 3500 * default behavior is to NOT retransmit 3501 * FR's to an alternate. Armando Caro's 3502 * paper details why. 3503 */ 3504 alt = tp1->whoTo; 3505 #endif 3506 } 3507 3508 tp1->rec.data.doing_fast_retransmit = 1; 3509 tot_retrans++; 3510 /* mark the sending seq for possible subsequent FR's */ 3511 /* 3512 * printf("Marking TSN for FR new value %x\n", 3513 * (uint32_t)tpi->rec.data.TSN_seq); 3514 */ 3515 if (TAILQ_EMPTY(&asoc->send_queue)) { 3516 /* 3517 * If the queue of send is empty then its 3518 * the next sequence number that will be 3519 * assigned so we subtract one from this to 3520 * get the one we last sent. 3521 */ 3522 tp1->rec.data.fast_retran_tsn = sending_seq; 3523 } else { 3524 /* 3525 * If there are chunks on the send queue 3526 * (unsent data that has made it from the 3527 * stream queues but not out the door, we 3528 * take the first one (which will have the 3529 * lowest TSN) and subtract one to get the 3530 * one we last sent. 3531 */ 3532 struct sctp_tmit_chunk *ttt; 3533 3534 ttt = TAILQ_FIRST(&asoc->send_queue); 3535 tp1->rec.data.fast_retran_tsn = 3536 ttt->rec.data.TSN_seq; 3537 } 3538 3539 if (tp1->do_rtt) { 3540 /* 3541 * this guy had a RTO calculation pending on 3542 * it, cancel it 3543 */ 3544 tp1->do_rtt = 0; 3545 } 3546 /* fix counts and things */ 3547 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3548 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3549 (tp1->whoTo ? 
(tp1->whoTo->flight_size) : 0), 3550 tp1->book_size, 3551 (uintptr_t) tp1->whoTo, 3552 tp1->rec.data.TSN_seq); 3553 } 3554 if (tp1->whoTo) { 3555 tp1->whoTo->net_ack++; 3556 sctp_flight_size_decrease(tp1); 3557 } 3558 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 3559 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3560 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3561 } 3562 /* add back to the rwnd */ 3563 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3564 3565 /* remove from the total flight */ 3566 sctp_total_flight_decrease(stcb, tp1); 3567 if (alt != tp1->whoTo) { 3568 /* yes, there is an alternate. */ 3569 sctp_free_remote_addr(tp1->whoTo); 3570 /* sa_ignore FREED_MEMORY */ 3571 tp1->whoTo = alt; 3572 atomic_add_int(&alt->ref_count, 1); 3573 } 3574 } 3575 tp1 = TAILQ_NEXT(tp1, sctp_next); 3576 } /* while (tp1) */ 3577 3578 if (tot_retrans > 0) { 3579 /* 3580 * Setup the ecn nonce re-sync point. We do this since once 3581 * we go to FR something we introduce a Karn's rule scenario 3582 * and won't know the totals for the ECN bits. 3583 */ 3584 asoc->nonce_resync_tsn = sending_seq; 3585 asoc->nonce_wait_for_ecne = 0; 3586 asoc->nonce_sum_check = 0; 3587 } 3588 } 3589 3590 struct sctp_tmit_chunk * 3591 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3592 struct sctp_association *asoc) 3593 { 3594 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3595 struct timeval now; 3596 int now_filled = 0; 3597 3598 if (asoc->peer_supports_prsctp == 0) { 3599 return (NULL); 3600 } 3601 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3602 while (tp1) { 3603 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3604 tp1->sent != SCTP_DATAGRAM_RESEND) { 3605 /* no chance to advance, out of here */ 3606 break; 3607 } 3608 if (!PR_SCTP_ENABLED(tp1->flags)) { 3609 /* 3610 * We can't fwd-tsn past any that are reliable aka 3611 * retransmitted until the asoc fails. 3612 */ 3613 break; 3614 } 3615 if (!now_filled) { 3616 (void)SCTP_GETTIME_TIMEVAL(&now); 3617 now_filled = 1; 3618 } 3619 tp2 = TAILQ_NEXT(tp1, sctp_next); 3620 /* 3621 * now we got a chunk which is marked for another 3622 * retransmission to a PR-stream but has run out its chances 3623 * already maybe OR has been marked to skip now. Can we skip 3624 * it if its a resend? 3625 */ 3626 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3627 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3628 /* 3629 * Now is this one marked for resend and its time is 3630 * now up? 3631 */ 3632 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3633 /* Yes so drop it */ 3634 if (tp1->data) { 3635 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3636 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3637 &asoc->sent_queue); 3638 } 3639 } else { 3640 /* 3641 * No, we are done when hit one for resend 3642 * whos time as not expired. 3643 */ 3644 break; 3645 } 3646 } 3647 /* 3648 * Ok now if this chunk is marked to drop it we can clean up 3649 * the chunk, advance our peer ack point and we can check 3650 * the next chunk. 3651 */ 3652 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3653 /* advance PeerAckPoint goes forward */ 3654 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3655 a_adv = tp1; 3656 /* 3657 * we don't want to de-queue it here. Just wait for 3658 * the next peer SACK to come with a new cumTSN and 3659 * then the chunk will be droped in the normal 3660 * fashion. 
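* Only the data buffer is released (and the ULP notified) here; the
* chunk itself stays on the sent queue until a later SACK's cum-ack
* covers it.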
3661 */ 3662 if (tp1->data) { 3663 sctp_free_bufspace(stcb, asoc, tp1, 1); 3664 /* 3665 * Maybe there should be another 3666 * notification type 3667 */ 3668 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3669 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3670 tp1); 3671 sctp_m_freem(tp1->data); 3672 tp1->data = NULL; 3673 if (stcb->sctp_socket) { 3674 sctp_sowwakeup(stcb->sctp_ep, 3675 stcb->sctp_socket); 3676 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3677 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3678 } 3679 } 3680 } 3681 } else { 3682 /* 3683 * If it is still in RESEND we can advance no 3684 * further 3685 */ 3686 break; 3687 } 3688 /* 3689 * If we hit here we just dumped tp1, move to next tsn on 3690 * sent queue. 3691 */ 3692 tp1 = tp2; 3693 } 3694 return (a_adv); 3695 } 3696 3697 static void 3698 sctp_fs_audit(struct sctp_association *asoc) 3699 { 3700 struct sctp_tmit_chunk *chk; 3701 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3702 3703 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3704 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3705 inflight++; 3706 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3707 resend++; 3708 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3709 inbetween++; 3710 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3711 above++; 3712 } else { 3713 acked++; 3714 } 3715 } 3716 3717 if ((inflight > 0) || (inbetween > 0)) { 3718 #ifdef INVARIANTS 3719 panic("Flight size-express incorrect? \n"); 3720 #else 3721 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n", 3722 inflight, inbetween); 3723 #endif 3724 } 3725 } 3726 3727 3728 static void 3729 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3730 struct sctp_association *asoc, 3731 struct sctp_nets *net, 3732 struct sctp_tmit_chunk *tp1) 3733 { 3734 struct sctp_tmit_chunk *chk; 3735 3736 /* First setup this one and get it moved back */ 3737 tp1->sent = SCTP_DATAGRAM_UNSENT; 3738 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3739 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3740 tp1->whoTo->flight_size, 3741 tp1->book_size, 3742 (uintptr_t) tp1->whoTo, 3743 tp1->rec.data.TSN_seq); 3744 } 3745 sctp_flight_size_decrease(tp1); 3746 sctp_total_flight_decrease(stcb, tp1); 3747 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3748 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next); 3749 asoc->sent_queue_cnt--; 3750 asoc->send_queue_cnt++; 3751 /* 3752 * Now all guys marked for RESEND on the sent_queue must be moved 3753 * back too. 
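 * They are re-inserted right after tp1 on the send_queue so TSN order
 * is preserved, and sent_queue_retran_cnt is dropped for each one
 * since they will go out again as fresh sends, not retransmissions.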
3754 */ 3755 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3756 if (chk->sent == SCTP_DATAGRAM_RESEND) { 3757 /* Another chunk to move */ 3758 chk->sent = SCTP_DATAGRAM_UNSENT; 3759 /* It should not be in flight */ 3760 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3761 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next); 3762 asoc->sent_queue_cnt--; 3763 asoc->send_queue_cnt++; 3764 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3765 } 3766 } 3767 } 3768 3769 void 3770 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3771 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 3772 { 3773 struct sctp_nets *net; 3774 struct sctp_association *asoc; 3775 struct sctp_tmit_chunk *tp1, *tp2; 3776 uint32_t old_rwnd; 3777 int win_probe_recovery = 0; 3778 int win_probe_recovered = 0; 3779 int j, done_once = 0; 3780 3781 3782 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3783 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3784 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3785 } 3786 SCTP_TCB_LOCK_ASSERT(stcb); 3787 #ifdef SCTP_ASOCLOG_OF_TSNS 3788 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3789 stcb->asoc.cumack_log_at++; 3790 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3791 stcb->asoc.cumack_log_at = 0; 3792 } 3793 #endif 3794 asoc = &stcb->asoc; 3795 old_rwnd = asoc->peers_rwnd; 3796 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) { 3797 /* old ack */ 3798 return; 3799 } else if (asoc->last_acked_seq == cumack) { 3800 /* Window update sack */ 3801 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3802 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 3803 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3804 /* SWS sender side engages */ 3805 asoc->peers_rwnd = 0; 3806 } 3807 if (asoc->peers_rwnd > old_rwnd) { 3808 goto again; 3809 } 3810 return; 3811 3812 } 3813 /* First setup for CC stuff */ 3814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3815 net->prev_cwnd = net->cwnd; 3816 net->net_ack = 0; 3817 net->net_ack2 = 0; 3818 3819 /* 3820 * CMT: Reset CUC and Fast recovery algo variables before 3821 * SACK processing 3822 */ 3823 net->new_pseudo_cumack = 0; 3824 net->will_exit_fast_recovery = 0; 3825 } 3826 if (sctp_strict_sacks) { 3827 uint32_t send_s; 3828 3829 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3830 tp1 = TAILQ_LAST(&asoc->sent_queue, 3831 sctpchunk_listhead); 3832 send_s = tp1->rec.data.TSN_seq + 1; 3833 } else { 3834 send_s = asoc->sending_seq; 3835 } 3836 if ((cumack == send_s) || 3837 compare_with_wrap(cumack, send_s, MAX_TSN)) { 3838 #ifndef INVARIANTS 3839 struct mbuf *oper; 3840 3841 #endif 3842 #ifdef INVARIANTS 3843 panic("Impossible sack 1"); 3844 #else 3845 *abort_now = 1; 3846 /* XXX */ 3847 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 3848 0, M_DONTWAIT, 1, MT_DATA); 3849 if (oper) { 3850 struct sctp_paramhdr *ph; 3851 uint32_t *ippp; 3852 3853 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 3854 sizeof(uint32_t); 3855 ph = mtod(oper, struct sctp_paramhdr *); 3856 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3857 ph->param_length = htons(SCTP_BUF_LEN(oper)); 3858 ippp = (uint32_t *) (ph + 1); 3859 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3860 } 3861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3862 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 3863 return; 3864 #endif 3865 } 3866 } 3867 asoc->this_sack_highest_gap = cumack; 3868 
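	/*
	 * Express path: every chunk on the sent_queue with a TSN at or
	 * below the new cum-ack is acked in the loop below.  Its flight
	 * size is removed, an RTT sample is taken only if it was never
	 * retransmitted, the per-destination CMT pseudo-cumack trackers
	 * are tripped, and the chunk is freed.
	 */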
stcb->asoc.overall_error_count = 0; 3869 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) { 3870 /* process the new consecutive TSN first */ 3871 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3872 while (tp1) { 3873 tp2 = TAILQ_NEXT(tp1, sctp_next); 3874 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 3875 MAX_TSN) || 3876 cumack == tp1->rec.data.TSN_seq) { 3877 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3878 printf("Warning, an unsent is now acked?\n"); 3879 } 3880 /* 3881 * ECN Nonce: Add the nonce to the sender's 3882 * nonce sum 3883 */ 3884 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 3885 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3886 /* 3887 * If it is less than ACKED, it is 3888 * now no-longer in flight. Higher 3889 * values may occur during marking 3890 */ 3891 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3892 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3893 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3894 tp1->whoTo->flight_size, 3895 tp1->book_size, 3896 (uintptr_t) tp1->whoTo, 3897 tp1->rec.data.TSN_seq); 3898 } 3899 sctp_flight_size_decrease(tp1); 3900 sctp_total_flight_decrease(stcb, tp1); 3901 } 3902 tp1->whoTo->net_ack += tp1->send_size; 3903 if (tp1->snd_count < 2) { 3904 /* 3905 * True non-retransmited 3906 * chunk 3907 */ 3908 tp1->whoTo->net_ack2 += 3909 tp1->send_size; 3910 3911 /* update RTO too? */ 3912 if (tp1->do_rtt) { 3913 tp1->whoTo->RTO = 3914 sctp_calculate_rto(stcb, 3915 asoc, tp1->whoTo, 3916 &tp1->sent_rcv_time, 3917 sctp_align_safe_nocopy); 3918 tp1->do_rtt = 0; 3919 } 3920 } 3921 /* 3922 * CMT: CUCv2 algorithm. From the 3923 * cumack'd TSNs, for each TSN being 3924 * acked for the first time, set the 3925 * following variables for the 3926 * corresp destination. 3927 * new_pseudo_cumack will trigger a 3928 * cwnd update. 3929 * find_(rtx_)pseudo_cumack will 3930 * trigger search for the next 3931 * expected (rtx-)pseudo-cumack. 
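 * Roughly, a pseudo-cumack is the per-destination counterpart of
 * the cumulative ack: with CMT striping data across paths, the
 * association-wide cum-ack alone cannot tell a destination when
 * its own oldest outstanding TSN has been acked, so each
 * destination tracks its own (and a separate rtx-) pseudo-cumack
 * to drive cwnd growth.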
3932 */ 3933 tp1->whoTo->new_pseudo_cumack = 1; 3934 tp1->whoTo->find_pseudo_cumack = 1; 3935 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3936 3937 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 3938 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3939 } 3940 } 3941 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3942 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3943 } 3944 if (tp1->rec.data.chunk_was_revoked) { 3945 /* deflate the cwnd */ 3946 tp1->whoTo->cwnd -= tp1->book_size; 3947 tp1->rec.data.chunk_was_revoked = 0; 3948 } 3949 tp1->sent = SCTP_DATAGRAM_ACKED; 3950 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3951 if (tp1->data) { 3952 sctp_free_bufspace(stcb, asoc, tp1, 1); 3953 sctp_m_freem(tp1->data); 3954 } 3955 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 3956 sctp_log_sack(asoc->last_acked_seq, 3957 cumack, 3958 tp1->rec.data.TSN_seq, 3959 0, 3960 0, 3961 SCTP_LOG_FREE_SENT); 3962 } 3963 tp1->data = NULL; 3964 asoc->sent_queue_cnt--; 3965 sctp_free_a_chunk(stcb, tp1); 3966 tp1 = tp2; 3967 } else { 3968 break; 3969 } 3970 } 3971 3972 } 3973 if (stcb->sctp_socket) { 3974 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 3975 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3976 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 3977 } 3978 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 3979 } else { 3980 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3981 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 3982 } 3983 } 3984 3985 /* JRS - Use the congestion control given in the CC module */ 3986 if (asoc->last_acked_seq != cumack) 3987 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 3988 3989 asoc->last_acked_seq = cumack; 3990 3991 if (TAILQ_EMPTY(&asoc->sent_queue)) { 3992 /* nothing left in-flight */ 3993 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3994 net->flight_size = 0; 3995 net->partial_bytes_acked = 0; 3996 } 3997 asoc->total_flight = 0; 3998 asoc->total_flight_count = 0; 3999 } 4000 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4001 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4002 asoc->advanced_peer_ack_point = cumack; 4003 } 4004 /* ECN Nonce updates */ 4005 if (asoc->ecn_nonce_allowed) { 4006 if (asoc->nonce_sum_check) { 4007 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4008 if (asoc->nonce_wait_for_ecne == 0) { 4009 struct sctp_tmit_chunk *lchk; 4010 4011 lchk = TAILQ_FIRST(&asoc->send_queue); 4012 asoc->nonce_wait_for_ecne = 1; 4013 if (lchk) { 4014 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4015 } else { 4016 asoc->nonce_wait_tsn = asoc->sending_seq; 4017 } 4018 } else { 4019 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4020 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4021 /* 4022 * Misbehaving peer. We need 4023 * to react to this guy 4024 */ 4025 asoc->ecn_allowed = 0; 4026 asoc->ecn_nonce_allowed = 0; 4027 } 4028 } 4029 } 4030 } else { 4031 /* See if Resynchronization Possible */ 4032 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4033 asoc->nonce_sum_check = 1; 4034 /* 4035 * now we must calculate what the base is. 4036 * We do this based on two things, we know 4037 * the total's for all the segments 4038 * gap-acked in the SACK (none), We also 4039 * know the SACK's nonce sum, its in 4040 * nonce_sum_flag. 
So we can build a truth 4041 * table to back-calculate the new value of 4042 * asoc->nonce_sum_expect_base: 4043 * 4044 * SACK-flag-Value Seg-Sums Base 0 0 0 4045 * 1 0 1 0 1 1 1 4046 * 1 0 4047 */ 4048 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4049 } 4050 } 4051 } 4052 /* RWND update */ 4053 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4054 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4055 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4056 /* SWS sender side engages */ 4057 asoc->peers_rwnd = 0; 4058 } 4059 if (asoc->peers_rwnd > old_rwnd) { 4060 win_probe_recovery = 1; 4061 } 4062 /* Now assure a timer where data is queued at */ 4063 again: 4064 j = 0; 4065 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4066 if (win_probe_recovery && (net->window_probe)) { 4067 net->window_probe = 0; 4068 win_probe_recovered = 1; 4069 /* 4070 * Find first chunk that was used with window probe 4071 * and clear the sent 4072 */ 4073 /* sa_ignore FREED_MEMORY */ 4074 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4075 if (tp1->window_probe) { 4076 /* move back to data send queue */ 4077 sctp_window_probe_recovery(stcb, asoc, net, tp1); 4078 break; 4079 } 4080 } 4081 } 4082 if (net->flight_size) { 4083 int to_ticks; 4084 4085 if (net->RTO == 0) { 4086 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4087 } else { 4088 to_ticks = MSEC_TO_TICKS(net->RTO); 4089 } 4090 j++; 4091 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4092 sctp_timeout_handler, &net->rxt_timer); 4093 } else { 4094 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4095 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4096 stcb, net, 4097 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4098 } 4099 if (sctp_early_fr) { 4100 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4101 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4102 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4103 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4104 } 4105 } 4106 } 4107 } 4108 if ((j == 0) && 4109 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4110 (asoc->sent_queue_retran_cnt == 0) && 4111 (win_probe_recovered == 0) && 4112 (done_once == 0)) { 4113 /* huh, this should not happen */ 4114 sctp_fs_audit(asoc); 4115 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4116 net->flight_size = 0; 4117 } 4118 asoc->total_flight = 0; 4119 asoc->total_flight_count = 0; 4120 asoc->sent_queue_retran_cnt = 0; 4121 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4122 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4123 sctp_flight_size_increase(tp1); 4124 sctp_total_flight_increase(stcb, tp1); 4125 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4126 asoc->sent_queue_retran_cnt++; 4127 } 4128 } 4129 done_once = 1; 4130 goto again; 4131 } 4132 /**********************************/ 4133 /* Now what about shutdown issues */ 4134 /**********************************/ 4135 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4136 /* nothing left on sendqueue.. consider done */ 4137 /* clean up */ 4138 if ((asoc->stream_queue_cnt == 1) && 4139 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4140 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4141 (asoc->locked_on_sending) 4142 ) { 4143 struct sctp_stream_queue_pending *sp; 4144 4145 /* 4146 * I may be in a state where we got all across.. but 4147 * cannot write more due to a shutdown... we abort 4148 * since the user did not indicate EOR in this case. 4149 * The sp will be cleaned during free of the asoc. 
4150 */ 4151 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4152 sctp_streamhead); 4153 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4154 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4155 asoc->locked_on_sending = NULL; 4156 asoc->stream_queue_cnt--; 4157 } 4158 } 4159 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4160 (asoc->stream_queue_cnt == 0)) { 4161 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4162 /* Need to abort here */ 4163 struct mbuf *oper; 4164 4165 abort_out_now: 4166 *abort_now = 1; 4167 /* XXX */ 4168 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4169 0, M_DONTWAIT, 1, MT_DATA); 4170 if (oper) { 4171 struct sctp_paramhdr *ph; 4172 uint32_t *ippp; 4173 4174 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4175 sizeof(uint32_t); 4176 ph = mtod(oper, struct sctp_paramhdr *); 4177 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4178 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4179 ippp = (uint32_t *) (ph + 1); 4180 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4181 } 4182 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4183 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4184 } else { 4185 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4186 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4187 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4188 } 4189 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4190 sctp_stop_timers_for_shutdown(stcb); 4191 sctp_send_shutdown(stcb, 4192 stcb->asoc.primary_destination); 4193 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4194 stcb->sctp_ep, stcb, asoc->primary_destination); 4195 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4196 stcb->sctp_ep, stcb, asoc->primary_destination); 4197 } 4198 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4199 (asoc->stream_queue_cnt == 0)) { 4200 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4201 goto abort_out_now; 4202 } 4203 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4204 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4205 sctp_send_shutdown_ack(stcb, 4206 stcb->asoc.primary_destination); 4207 4208 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4209 stcb->sctp_ep, stcb, asoc->primary_destination); 4210 } 4211 } 4212 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 4213 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4214 rwnd, 4215 stcb->asoc.peers_rwnd, 4216 stcb->asoc.total_flight, 4217 stcb->asoc.total_output_queue_size); 4218 } 4219 } 4220 4221 void 4222 sctp_handle_sack(struct mbuf *m, int offset, 4223 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4224 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd) 4225 { 4226 struct sctp_association *asoc; 4227 struct sctp_sack *sack; 4228 struct sctp_tmit_chunk *tp1, *tp2; 4229 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4230 this_sack_lowest_newack; 4231 uint32_t sav_cum_ack; 4232 uint16_t num_seg, num_dup; 4233 uint16_t wake_him = 0; 4234 unsigned int sack_length; 4235 uint32_t send_s = 0; 4236 long j; 4237 int accum_moved = 0; 4238 int will_exit_fast_recovery = 0; 4239 uint32_t a_rwnd, old_rwnd; 4240 int win_probe_recovery = 0; 4241 int win_probe_recovered = 0; 4242 struct sctp_nets *net = NULL; 4243 int nonce_sum_flag, ecn_seg_sums = 0; 4244 int done_once; 4245 uint8_t reneged_all = 0; 4246 uint8_t cmt_dac_flag; 4247 4248 /* 4249 * we take any chance we can to service our queues since we cannot 4250 * get awoken when the socket is read from :< 4251 */ 
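	/*
	 * For reference, a SACK chunk (RFC 4960, Section 3.3.4) carries
	 * the Cumulative TSN Ack, the Advertised Receiver Window Credit
	 * (handed to us here as rwnd), the number of Gap Ack Blocks and
	 * the number of Duplicate TSNs, followed by that many gap blocks
	 * (16-bit start/end offsets relative to the cum-ack) and then the
	 * duplicate TSNs.  The offset arithmetic on
	 * sizeof(struct sctp_sack_chunk) and
	 * sizeof(struct sctp_gap_ack_block) below walks exactly that
	 * layout.
	 */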
4252 /* 4253 * Now perform the actual SACK handling: 1) Verify that it is not an 4254 * old sack, if so discard. 2) If there is nothing left in the send 4255 * queue (cum-ack is equal to last acked) then you have a duplicate 4256 * too, update any rwnd change and verify no timers are running. 4257 * then return. 3) Process any new consequtive data i.e. cum-ack 4258 * moved process these first and note that it moved. 4) Process any 4259 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4260 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4261 * sync up flightsizes and things, stop all timers and also check 4262 * for shutdown_pending state. If so then go ahead and send off the 4263 * shutdown. If in shutdown recv, send off the shutdown-ack and 4264 * start that timer, Ret. 9) Strike any non-acked things and do FR 4265 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4266 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4267 * if in shutdown_recv state. 4268 */ 4269 SCTP_TCB_LOCK_ASSERT(stcb); 4270 sack = &ch->sack; 4271 /* CMT DAC algo */ 4272 this_sack_lowest_newack = 0; 4273 j = 0; 4274 sack_length = (unsigned int)sack_len; 4275 /* ECN Nonce */ 4276 SCTP_STAT_INCR(sctps_slowpath_sack); 4277 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4278 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4279 #ifdef SCTP_ASOCLOG_OF_TSNS 4280 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4281 stcb->asoc.cumack_log_at++; 4282 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4283 stcb->asoc.cumack_log_at = 0; 4284 } 4285 #endif 4286 num_seg = ntohs(sack->num_gap_ack_blks); 4287 a_rwnd = rwnd; 4288 4289 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4290 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4291 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4292 } 4293 /* CMT DAC algo */ 4294 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4295 num_dup = ntohs(sack->num_dup_tsns); 4296 4297 old_rwnd = stcb->asoc.peers_rwnd; 4298 stcb->asoc.overall_error_count = 0; 4299 asoc = &stcb->asoc; 4300 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4301 sctp_log_sack(asoc->last_acked_seq, 4302 cum_ack, 4303 0, 4304 num_seg, 4305 num_dup, 4306 SCTP_LOG_NEW_SACK); 4307 } 4308 if ((num_dup) && (sctp_logging_level & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) { 4309 int off_to_dup, iii; 4310 uint32_t *dupdata, dblock; 4311 4312 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4313 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4314 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4315 sizeof(uint32_t), (uint8_t *) & dblock); 4316 off_to_dup += sizeof(uint32_t); 4317 if (dupdata) { 4318 for (iii = 0; iii < num_dup; iii++) { 4319 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4320 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4321 sizeof(uint32_t), (uint8_t *) & dblock); 4322 if (dupdata == NULL) 4323 break; 4324 off_to_dup += sizeof(uint32_t); 4325 4326 4327 } 4328 } 4329 } else { 4330 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4331 off_to_dup, num_dup, sack_length, num_seg); 4332 } 4333 } 4334 if (sctp_strict_sacks) { 4335 /* reality check */ 4336 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4337 tp1 = TAILQ_LAST(&asoc->sent_queue, 4338 sctpchunk_listhead); 4339 send_s = tp1->rec.data.TSN_seq + 1; 4340 } else { 4341 send_s = asoc->sending_seq; 4342 } 4343 if (cum_ack == send_s || 
4344 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4345 #ifndef INVARIANTS 4346 struct mbuf *oper; 4347 4348 #endif 4349 #ifdef INVARIANTS 4350 hopeless_peer: 4351 panic("Impossible sack 1"); 4352 #else 4353 4354 4355 /* 4356 * no way, we have not even sent this TSN out yet. 4357 * Peer is hopelessly messed up with us. 4358 */ 4359 hopeless_peer: 4360 *abort_now = 1; 4361 /* XXX */ 4362 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4363 0, M_DONTWAIT, 1, MT_DATA); 4364 if (oper) { 4365 struct sctp_paramhdr *ph; 4366 uint32_t *ippp; 4367 4368 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4369 sizeof(uint32_t); 4370 ph = mtod(oper, struct sctp_paramhdr *); 4371 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4372 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4373 ippp = (uint32_t *) (ph + 1); 4374 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4375 } 4376 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4377 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4378 return; 4379 #endif 4380 } 4381 } 4382 /**********************/ 4383 /* 1) check the range */ 4384 /**********************/ 4385 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4386 /* acking something behind */ 4387 return; 4388 } 4389 sav_cum_ack = asoc->last_acked_seq; 4390 4391 /* update the Rwnd of the peer */ 4392 if (TAILQ_EMPTY(&asoc->sent_queue) && 4393 TAILQ_EMPTY(&asoc->send_queue) && 4394 (asoc->stream_queue_cnt == 0) 4395 ) { 4396 /* nothing left on send/sent and strmq */ 4397 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4398 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4399 asoc->peers_rwnd, 0, 0, a_rwnd); 4400 } 4401 asoc->peers_rwnd = a_rwnd; 4402 if (asoc->sent_queue_retran_cnt) { 4403 asoc->sent_queue_retran_cnt = 0; 4404 } 4405 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4406 /* SWS sender side engages */ 4407 asoc->peers_rwnd = 0; 4408 } 4409 /* stop any timers */ 4410 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4411 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4412 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4413 if (sctp_early_fr) { 4414 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4415 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4416 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4417 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4418 } 4419 } 4420 net->partial_bytes_acked = 0; 4421 net->flight_size = 0; 4422 } 4423 asoc->total_flight = 0; 4424 asoc->total_flight_count = 0; 4425 return; 4426 } 4427 /* 4428 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4429 * things. The total byte count acked is tracked in netAckSz AND 4430 * netAck2 is used to track the total bytes acked that are un- 4431 * amibguious and were never retransmitted. We track these on a per 4432 * destination address basis. 
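 * Only bytes from chunks that were transmitted exactly once land in
 * netAck2, which is also why the RTO update below is guarded by
 * snd_count < 2: a retransmitted chunk cannot give an unambiguous
 * RTT sample (Karn's rule).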
4433 */ 4434 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4435 net->prev_cwnd = net->cwnd; 4436 net->net_ack = 0; 4437 net->net_ack2 = 0; 4438 4439 /* 4440 * CMT: Reset CUC and Fast recovery algo variables before 4441 * SACK processing 4442 */ 4443 net->new_pseudo_cumack = 0; 4444 net->will_exit_fast_recovery = 0; 4445 } 4446 /* process the new consecutive TSN first */ 4447 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4448 while (tp1) { 4449 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4450 MAX_TSN) || 4451 last_tsn == tp1->rec.data.TSN_seq) { 4452 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4453 /* 4454 * ECN Nonce: Add the nonce to the sender's 4455 * nonce sum 4456 */ 4457 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4458 accum_moved = 1; 4459 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4460 /* 4461 * If it is less than ACKED, it is 4462 * now no-longer in flight. Higher 4463 * values may occur during marking 4464 */ 4465 if ((tp1->whoTo->dest_state & 4466 SCTP_ADDR_UNCONFIRMED) && 4467 (tp1->snd_count < 2)) { 4468 /* 4469 * If there was no retran 4470 * and the address is 4471 * un-confirmed and we sent 4472 * there and are now 4473 * sacked.. its confirmed, 4474 * mark it so. 4475 */ 4476 tp1->whoTo->dest_state &= 4477 ~SCTP_ADDR_UNCONFIRMED; 4478 } 4479 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4480 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4481 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4482 tp1->whoTo->flight_size, 4483 tp1->book_size, 4484 (uintptr_t) tp1->whoTo, 4485 tp1->rec.data.TSN_seq); 4486 } 4487 sctp_flight_size_decrease(tp1); 4488 sctp_total_flight_decrease(stcb, tp1); 4489 } 4490 tp1->whoTo->net_ack += tp1->send_size; 4491 4492 /* CMT SFR and DAC algos */ 4493 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4494 tp1->whoTo->saw_newack = 1; 4495 4496 if (tp1->snd_count < 2) { 4497 /* 4498 * True non-retransmited 4499 * chunk 4500 */ 4501 tp1->whoTo->net_ack2 += 4502 tp1->send_size; 4503 4504 /* update RTO too? */ 4505 if (tp1->do_rtt) { 4506 tp1->whoTo->RTO = 4507 sctp_calculate_rto(stcb, 4508 asoc, tp1->whoTo, 4509 &tp1->sent_rcv_time, 4510 sctp_align_safe_nocopy); 4511 tp1->do_rtt = 0; 4512 } 4513 } 4514 /* 4515 * CMT: CUCv2 algorithm. From the 4516 * cumack'd TSNs, for each TSN being 4517 * acked for the first time, set the 4518 * following variables for the 4519 * corresp destination. 4520 * new_pseudo_cumack will trigger a 4521 * cwnd update. 4522 * find_(rtx_)pseudo_cumack will 4523 * trigger search for the next 4524 * expected (rtx-)pseudo-cumack. 
4525 */ 4526 tp1->whoTo->new_pseudo_cumack = 1; 4527 tp1->whoTo->find_pseudo_cumack = 1; 4528 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4529 4530 4531 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4532 sctp_log_sack(asoc->last_acked_seq, 4533 cum_ack, 4534 tp1->rec.data.TSN_seq, 4535 0, 4536 0, 4537 SCTP_LOG_TSN_ACKED); 4538 } 4539 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 4540 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4541 } 4542 } 4543 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4544 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4545 #ifdef SCTP_AUDITING_ENABLED 4546 sctp_audit_log(0xB3, 4547 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4548 #endif 4549 } 4550 if (tp1->rec.data.chunk_was_revoked) { 4551 /* deflate the cwnd */ 4552 tp1->whoTo->cwnd -= tp1->book_size; 4553 tp1->rec.data.chunk_was_revoked = 0; 4554 } 4555 tp1->sent = SCTP_DATAGRAM_ACKED; 4556 } 4557 } else { 4558 break; 4559 } 4560 tp1 = TAILQ_NEXT(tp1, sctp_next); 4561 } 4562 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4563 /* always set this up to cum-ack */ 4564 asoc->this_sack_highest_gap = last_tsn; 4565 4566 /* Move offset up to point to gaps/dups */ 4567 offset += sizeof(struct sctp_sack_chunk); 4568 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4569 4570 /* skip corrupt segments */ 4571 goto skip_segments; 4572 } 4573 if (num_seg > 0) { 4574 4575 /* 4576 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4577 * to be greater than the cumack. Also reset saw_newack to 0 4578 * for all dests. 4579 */ 4580 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4581 net->saw_newack = 0; 4582 net->this_sack_highest_newack = last_tsn; 4583 } 4584 4585 /* 4586 * thisSackHighestGap will increase while handling NEW 4587 * segments this_sack_highest_newack will increase while 4588 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4589 * used for CMT DAC algo. saw_newack will also change. 4590 */ 4591 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn, 4592 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4593 num_seg, &ecn_seg_sums); 4594 4595 if (sctp_strict_sacks) { 4596 /* 4597 * validate the biggest_tsn_acked in the gap acks if 4598 * strict adherence is wanted. 4599 */ 4600 if ((biggest_tsn_acked == send_s) || 4601 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4602 /* 4603 * peer is either confused or we are under 4604 * attack. We must abort. 
4605 */ 4606 goto hopeless_peer; 4607 } 4608 } 4609 } 4610 skip_segments: 4611 /*******************************************/ 4612 /* cancel ALL T3-send timer if accum moved */ 4613 /*******************************************/ 4614 if (sctp_cmt_on_off) { 4615 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4616 if (net->new_pseudo_cumack) 4617 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4618 stcb, net, 4619 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4620 4621 } 4622 } else { 4623 if (accum_moved) { 4624 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4625 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4626 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4627 } 4628 } 4629 } 4630 /********************************************/ 4631 /* drop the acked chunks from the sendqueue */ 4632 /********************************************/ 4633 asoc->last_acked_seq = cum_ack; 4634 4635 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4636 if (tp1 == NULL) 4637 goto done_with_it; 4638 do { 4639 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4640 MAX_TSN)) { 4641 break; 4642 } 4643 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4644 /* no more sent on list */ 4645 printf("Warning, tp1->sent == %d and its now acked?\n", 4646 tp1->sent); 4647 } 4648 tp2 = TAILQ_NEXT(tp1, sctp_next); 4649 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4650 if (tp1->pr_sctp_on) { 4651 if (asoc->pr_sctp_cnt != 0) 4652 asoc->pr_sctp_cnt--; 4653 } 4654 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4655 (asoc->total_flight > 0)) { 4656 #ifdef INVARIANTS 4657 panic("Warning flight size is postive and should be 0"); 4658 #else 4659 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4660 asoc->total_flight); 4661 #endif 4662 asoc->total_flight = 0; 4663 } 4664 if (tp1->data) { 4665 sctp_free_bufspace(stcb, asoc, tp1, 1); 4666 sctp_m_freem(tp1->data); 4667 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4668 asoc->sent_queue_cnt_removeable--; 4669 } 4670 } 4671 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4672 sctp_log_sack(asoc->last_acked_seq, 4673 cum_ack, 4674 tp1->rec.data.TSN_seq, 4675 0, 4676 0, 4677 SCTP_LOG_FREE_SENT); 4678 } 4679 tp1->data = NULL; 4680 asoc->sent_queue_cnt--; 4681 sctp_free_a_chunk(stcb, tp1); 4682 wake_him++; 4683 tp1 = tp2; 4684 } while (tp1 != NULL); 4685 4686 done_with_it: 4687 if ((wake_him) && (stcb->sctp_socket)) { 4688 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4689 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4690 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4691 } 4692 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4693 } else { 4694 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4695 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4696 } 4697 } 4698 4699 if (asoc->fast_retran_loss_recovery && accum_moved) { 4700 if (compare_with_wrap(asoc->last_acked_seq, 4701 asoc->fast_recovery_tsn, MAX_TSN) || 4702 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4703 /* Setup so we will exit RFC2582 fast recovery */ 4704 will_exit_fast_recovery = 1; 4705 } 4706 } 4707 /* 4708 * Check for revoked fragments: 4709 * 4710 * if Previous sack - Had no frags then we can't have any revoked if 4711 * Previous sack - Had frag's then - If we now have frags aka 4712 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4713 * some of them. else - The peer revoked all ACKED fragments, since 4714 * we had some before and now we have NONE. 
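 * Putting a revoked chunk back means marking it SCTP_DATAGRAM_SENT
 * again, adding its bytes back into the flight, and inflating the
 * cwnd by book_size so that this artificial growth in flightsize
 * does not throttle the sender (the loop below does this for the
 * revoked-all case).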
4715 */ 4716 4717 if (num_seg) 4718 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4719 else if (asoc->saw_sack_with_frags) { 4720 int cnt_revoked = 0; 4721 4722 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4723 if (tp1 != NULL) { 4724 /* Peer revoked all dg's marked or acked */ 4725 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4726 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4727 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4728 tp1->sent = SCTP_DATAGRAM_SENT; 4729 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4730 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4731 tp1->whoTo->flight_size, 4732 tp1->book_size, 4733 (uintptr_t) tp1->whoTo, 4734 tp1->rec.data.TSN_seq); 4735 } 4736 sctp_flight_size_increase(tp1); 4737 sctp_total_flight_increase(stcb, tp1); 4738 tp1->rec.data.chunk_was_revoked = 1; 4739 /* 4740 * To ensure that this increase in 4741 * flightsize, which is artificial, 4742 * does not throttle the sender, we 4743 * also increase the cwnd 4744 * artificially. 4745 */ 4746 tp1->whoTo->cwnd += tp1->book_size; 4747 cnt_revoked++; 4748 } 4749 } 4750 if (cnt_revoked) { 4751 reneged_all = 1; 4752 } 4753 } 4754 asoc->saw_sack_with_frags = 0; 4755 } 4756 if (num_seg) 4757 asoc->saw_sack_with_frags = 1; 4758 else 4759 asoc->saw_sack_with_frags = 0; 4760 4761 /* JRS - Use the congestion control given in the CC module */ 4762 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4763 4764 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4765 /* nothing left in-flight */ 4766 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4767 /* stop all timers */ 4768 if (sctp_early_fr) { 4769 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4770 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4771 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4772 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4773 } 4774 } 4775 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4776 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4777 net->flight_size = 0; 4778 net->partial_bytes_acked = 0; 4779 } 4780 asoc->total_flight = 0; 4781 asoc->total_flight_count = 0; 4782 } 4783 /**********************************/ 4784 /* Now what about shutdown issues */ 4785 /**********************************/ 4786 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4787 /* nothing left on sendqueue.. consider done */ 4788 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4789 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4790 asoc->peers_rwnd, 0, 0, a_rwnd); 4791 } 4792 asoc->peers_rwnd = a_rwnd; 4793 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4794 /* SWS sender side engages */ 4795 asoc->peers_rwnd = 0; 4796 } 4797 /* clean up */ 4798 if ((asoc->stream_queue_cnt == 1) && 4799 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4800 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4801 (asoc->locked_on_sending) 4802 ) { 4803 struct sctp_stream_queue_pending *sp; 4804 4805 /* 4806 * I may be in a state where we got all across.. but 4807 * cannot write more due to a shutdown... we abort 4808 * since the user did not indicate EOR in this case. 
4809 */ 4810 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4811 sctp_streamhead); 4812 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4813 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4814 asoc->locked_on_sending = NULL; 4815 asoc->stream_queue_cnt--; 4816 } 4817 } 4818 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4819 (asoc->stream_queue_cnt == 0)) { 4820 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4821 /* Need to abort here */ 4822 struct mbuf *oper; 4823 4824 abort_out_now: 4825 *abort_now = 1; 4826 /* XXX */ 4827 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4828 0, M_DONTWAIT, 1, MT_DATA); 4829 if (oper) { 4830 struct sctp_paramhdr *ph; 4831 uint32_t *ippp; 4832 4833 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4834 sizeof(uint32_t); 4835 ph = mtod(oper, struct sctp_paramhdr *); 4836 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4837 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4838 ippp = (uint32_t *) (ph + 1); 4839 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4840 } 4841 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4842 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4843 return; 4844 } else { 4845 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4846 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4847 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4848 } 4849 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4850 sctp_stop_timers_for_shutdown(stcb); 4851 sctp_send_shutdown(stcb, 4852 stcb->asoc.primary_destination); 4853 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4854 stcb->sctp_ep, stcb, asoc->primary_destination); 4855 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4856 stcb->sctp_ep, stcb, asoc->primary_destination); 4857 } 4858 return; 4859 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4860 (asoc->stream_queue_cnt == 0)) { 4861 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4862 goto abort_out_now; 4863 } 4864 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4865 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4866 sctp_send_shutdown_ack(stcb, 4867 stcb->asoc.primary_destination); 4868 4869 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4870 stcb->sctp_ep, stcb, asoc->primary_destination); 4871 return; 4872 } 4873 } 4874 /* 4875 * Now here we are going to recycle net_ack for a different use... 4876 * HEADS UP. 4877 */ 4878 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4879 net->net_ack = 0; 4880 } 4881 4882 /* 4883 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4884 * to be done. Setting this_sack_lowest_newack to the cum_ack will 4885 * automatically ensure that. 4886 */ 4887 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 4888 this_sack_lowest_newack = cum_ack; 4889 } 4890 if (num_seg > 0) { 4891 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4892 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4893 } 4894 /*********************************************/ 4895 /* Here we perform PR-SCTP procedures */ 4896 /* (section 4.2) */ 4897 /*********************************************/ 4898 /* C1. update advancedPeerAckPoint */ 4899 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4900 asoc->advanced_peer_ack_point = cum_ack; 4901 } 4902 /* C2. 
try to further move advancedPeerAckPoint ahead */ 4903 4904 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4905 struct sctp_tmit_chunk *lchk; 4906 4907 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4908 /* C3. See if we need to send a Fwd-TSN */ 4909 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 4910 MAX_TSN)) { 4911 /* 4912 * ISSUE with ECN, see FWD-TSN processing for notes 4913 * on issues that will occur when the ECN NONCE 4914 * stuff is put into SCTP for cross checking. 4915 */ 4916 send_forward_tsn(stcb, asoc); 4917 4918 /* 4919 * ECN Nonce: Disable Nonce Sum check when FWD TSN 4920 * is sent and store resync tsn 4921 */ 4922 asoc->nonce_sum_check = 0; 4923 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 4924 if (lchk) { 4925 /* Assure a timer is up */ 4926 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4927 stcb->sctp_ep, stcb, lchk->whoTo); 4928 } 4929 } 4930 } 4931 /* JRS - Use the congestion control given in the CC module */ 4932 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4933 4934 /****************************************************************** 4935 * Here we do the stuff with ECN Nonce checking. 4936 * We basically check to see if the nonce sum flag was incorrect 4937 * or if resynchronization needs to be done. Also if we catch a 4938 * misbehaving receiver we give him the kick. 4939 ******************************************************************/ 4940 4941 if (asoc->ecn_nonce_allowed) { 4942 if (asoc->nonce_sum_check) { 4943 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 4944 if (asoc->nonce_wait_for_ecne == 0) { 4945 struct sctp_tmit_chunk *lchk; 4946 4947 lchk = TAILQ_FIRST(&asoc->send_queue); 4948 asoc->nonce_wait_for_ecne = 1; 4949 if (lchk) { 4950 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4951 } else { 4952 asoc->nonce_wait_tsn = asoc->sending_seq; 4953 } 4954 } else { 4955 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4956 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4957 /* 4958 * Misbehaving peer. We need 4959 * to react to this guy 4960 */ 4961 asoc->ecn_allowed = 0; 4962 asoc->ecn_nonce_allowed = 0; 4963 } 4964 } 4965 } 4966 } else { 4967 /* See if Resynchronization Possible */ 4968 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4969 asoc->nonce_sum_check = 1; 4970 /* 4971 * now we must calculate what the base is. 4972 * We do this based on two things, we know 4973 * the total's for all the segments 4974 * gap-acked in the SACK, its stored in 4975 * ecn_seg_sums. We also know the SACK's 4976 * nonce sum, its in nonce_sum_flag. So we 4977 * can build a truth table to back-calculate 4978 * the new value of 4979 * asoc->nonce_sum_expect_base: 4980 * 4981 * SACK-flag-Value Seg-Sums Base 0 0 0 4982 * 1 0 1 0 1 1 1 4983 * 1 0 4984 */ 4985 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4986 } 4987 } 4988 } 4989 /* Now are we exiting loss recovery ? 
*/ 4990 if (will_exit_fast_recovery) { 4991 /* Ok, we must exit fast recovery */ 4992 asoc->fast_retran_loss_recovery = 0; 4993 } 4994 if ((asoc->sat_t3_loss_recovery) && 4995 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 4996 MAX_TSN) || 4997 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 4998 /* end satellite t3 loss recovery */ 4999 asoc->sat_t3_loss_recovery = 0; 5000 } 5001 /* 5002 * CMT Fast recovery 5003 */ 5004 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5005 if (net->will_exit_fast_recovery) { 5006 /* Ok, we must exit fast recovery */ 5007 net->fast_retran_loss_recovery = 0; 5008 } 5009 } 5010 5011 /* Adjust and set the new rwnd value */ 5012 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 5013 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5014 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5015 } 5016 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5017 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 5018 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5019 /* SWS sender side engages */ 5020 asoc->peers_rwnd = 0; 5021 } 5022 if (asoc->peers_rwnd > old_rwnd) { 5023 win_probe_recovery = 1; 5024 } 5025 /* 5026 * Now we must setup so we have a timer up for anyone with 5027 * outstanding data. 5028 */ 5029 done_once = 0; 5030 again: 5031 j = 0; 5032 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5033 if (win_probe_recovery && (net->window_probe)) { 5034 net->window_probe = 0; 5035 win_probe_recovered = 1; 5036 /*- 5037 * Find first chunk that was used with 5038 * window probe and clear the event. Put 5039 * it back into the send queue as if has 5040 * not been sent. 5041 */ 5042 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5043 if (tp1->window_probe) { 5044 sctp_window_probe_recovery(stcb, asoc, net, tp1); 5045 break; 5046 } 5047 } 5048 } 5049 if (net->flight_size) { 5050 j++; 5051 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5052 stcb->sctp_ep, stcb, net); 5053 } else { 5054 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5055 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5056 stcb, net, 5057 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 5058 } 5059 if (sctp_early_fr) { 5060 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 5061 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 5062 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5063 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 5064 } 5065 } 5066 } 5067 } 5068 if ((j == 0) && 5069 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5070 (asoc->sent_queue_retran_cnt == 0) && 5071 (win_probe_recovered == 0) && 5072 (done_once == 0)) { 5073 /* huh, this should not happen */ 5074 sctp_fs_audit(asoc); 5075 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5076 net->flight_size = 0; 5077 } 5078 asoc->total_flight = 0; 5079 asoc->total_flight_count = 0; 5080 asoc->sent_queue_retran_cnt = 0; 5081 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5082 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5083 sctp_flight_size_increase(tp1); 5084 sctp_total_flight_increase(stcb, tp1); 5085 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5086 asoc->sent_queue_retran_cnt++; 5087 } 5088 } 5089 done_once = 1; 5090 goto again; 5091 } 5092 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 5093 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5094 a_rwnd, 5095 stcb->asoc.peers_rwnd, 5096 stcb->asoc.total_flight, 5097 stcb->asoc.total_output_queue_size); 5098 } 5099 } 5100 5101 void 5102 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk 
*cp, 5103 struct sctp_nets *netp, int *abort_flag) 5104 { 5105 /* Copy cum-ack */ 5106 uint32_t cum_ack, a_rwnd; 5107 5108 cum_ack = ntohl(cp->cumulative_tsn_ack); 5109 /* Arrange so a_rwnd does NOT change */ 5110 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5111 5112 /* Now call the express sack handling */ 5113 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5114 } 5115 5116 static void 5117 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5118 struct sctp_stream_in *strmin) 5119 { 5120 struct sctp_queued_to_read *ctl, *nctl; 5121 struct sctp_association *asoc; 5122 int tt; 5123 5124 asoc = &stcb->asoc; 5125 tt = strmin->last_sequence_delivered; 5126 /* 5127 * First deliver anything prior to and including the stream no that 5128 * came in 5129 */ 5130 ctl = TAILQ_FIRST(&strmin->inqueue); 5131 while (ctl) { 5132 nctl = TAILQ_NEXT(ctl, next); 5133 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5134 (tt == ctl->sinfo_ssn)) { 5135 /* this is deliverable now */ 5136 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5137 /* subtract pending on streams */ 5138 asoc->size_on_all_streams -= ctl->length; 5139 sctp_ucount_decr(asoc->cnt_on_all_streams); 5140 /* deliver it to at least the delivery-q */ 5141 if (stcb->sctp_socket) { 5142 sctp_add_to_readq(stcb->sctp_ep, stcb, 5143 ctl, 5144 &stcb->sctp_socket->so_rcv, 1); 5145 } 5146 } else { 5147 /* no more delivery now. */ 5148 break; 5149 } 5150 ctl = nctl; 5151 } 5152 /* 5153 * now we must deliver things in queue the normal way if any are 5154 * now ready. 5155 */ 5156 tt = strmin->last_sequence_delivered + 1; 5157 ctl = TAILQ_FIRST(&strmin->inqueue); 5158 while (ctl) { 5159 nctl = TAILQ_NEXT(ctl, next); 5160 if (tt == ctl->sinfo_ssn) { 5161 /* this is deliverable now */ 5162 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5163 /* subtract pending on streams */ 5164 asoc->size_on_all_streams -= ctl->length; 5165 sctp_ucount_decr(asoc->cnt_on_all_streams); 5166 /* deliver it to at least the delivery-q */ 5167 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5168 if (stcb->sctp_socket) { 5169 sctp_add_to_readq(stcb->sctp_ep, stcb, 5170 ctl, 5171 &stcb->sctp_socket->so_rcv, 1); 5172 } 5173 tt = strmin->last_sequence_delivered + 1; 5174 } else { 5175 break; 5176 } 5177 ctl = nctl; 5178 } 5179 } 5180 5181 void 5182 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5183 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset) 5184 { 5185 /* 5186 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5187 * forward TSN, when the SACK comes back that acknowledges the 5188 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5189 * get quite tricky since we may have sent more data interveneing 5190 * and must carefully account for what the SACK says on the nonce 5191 * and any gaps that are reported. This work will NOT be done here, 5192 * but I note it here since it is really related to PR-SCTP and 5193 * FWD-TSN's 5194 */ 5195 5196 /* The pr-sctp fwd tsn */ 5197 /* 5198 * here we will perform all the data receiver side steps for 5199 * processing FwdTSN, as required in by pr-sctp draft: 5200 * 5201 * Assume we get FwdTSN(x): 5202 * 5203 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5204 * others we have 3) examine and update re-ordering queue on 5205 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5206 * report where we are. 
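 * For example, with a local cumTSN of 100, FwdTSN(x = 105) marks
 * everything up through 105 as present in the mapping array; if 106
 * had already arrived, step 2 slides the cumTSN on to 106.  Steps 3
 * and 4 then flush anything at or below the new cumTSN from the
 * PR-stream re-ordering queues and the re-assembly queue, and step 5
 * reports the new cumulative ack back in a SACK.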
5207 */ 5208 struct sctp_association *asoc; 5209 uint32_t new_cum_tsn, gap, back_out_htsn; 5210 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5211 struct sctp_stream_in *strm; 5212 struct sctp_tmit_chunk *chk, *at; 5213 5214 cumack_set_flag = 0; 5215 asoc = &stcb->asoc; 5216 cnt_gone = 0; 5217 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5218 SCTPDBG(SCTP_DEBUG_INDATA1, 5219 "Bad size too small/big fwd-tsn\n"); 5220 return; 5221 } 5222 m_size = (stcb->asoc.mapping_array_size << 3); 5223 /*************************************************************/ 5224 /* 1. Here we update local cumTSN and shift the bitmap array */ 5225 /*************************************************************/ 5226 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5227 5228 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5229 asoc->cumulative_tsn == new_cum_tsn) { 5230 /* Already got there ... */ 5231 return; 5232 } 5233 back_out_htsn = asoc->highest_tsn_inside_map; 5234 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5235 MAX_TSN)) { 5236 asoc->highest_tsn_inside_map = new_cum_tsn; 5237 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5238 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5239 } 5240 } 5241 /* 5242 * now we know the new TSN is more advanced, let's find the actual 5243 * gap 5244 */ 5245 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5246 MAX_TSN)) || 5247 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5248 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5249 } else { 5250 /* try to prevent underflow here */ 5251 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5252 } 5253 5254 if (gap > m_size) { 5255 asoc->highest_tsn_inside_map = back_out_htsn; 5256 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5257 struct mbuf *oper; 5258 5259 /* 5260 * out of range (of single byte chunks in the rwnd I 5261 * give out). This must be an attacker. 5262 */ 5263 *abort_flag = 1; 5264 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 5265 0, M_DONTWAIT, 1, MT_DATA); 5266 if (oper) { 5267 struct sctp_paramhdr *ph; 5268 uint32_t *ippp; 5269 5270 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 5271 (sizeof(uint32_t) * 3); 5272 ph = mtod(oper, struct sctp_paramhdr *); 5273 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5274 ph->param_length = htons(SCTP_BUF_LEN(oper)); 5275 ippp = (uint32_t *) (ph + 1); 5276 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 5277 ippp++; 5278 *ippp = asoc->highest_tsn_inside_map; 5279 ippp++; 5280 *ippp = new_cum_tsn; 5281 } 5282 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5283 sctp_abort_an_association(stcb->sctp_ep, stcb, 5284 SCTP_PEER_FAULTY, oper); 5285 return; 5286 } 5287 if (asoc->highest_tsn_inside_map > 5288 asoc->mapping_array_base_tsn) { 5289 gap = asoc->highest_tsn_inside_map - 5290 asoc->mapping_array_base_tsn; 5291 } else { 5292 gap = asoc->highest_tsn_inside_map + 5293 (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5294 } 5295 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5296 cumack_set_flag = 1; 5297 } 5298 SCTP_TCB_LOCK_ASSERT(stcb); 5299 for (i = 0; i <= gap; i++) { 5300 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5301 } 5302 /* 5303 * Now after marking all, slide thing forward but no sack please. 
5304 */ 5305 sctp_sack_check(stcb, 0, 0, abort_flag); 5306 if (*abort_flag) 5307 return; 5308 5309 if (cumack_set_flag) { 5310 /* 5311 * fwd-tsn went outside my gap array - not a common 5312 * occurance. Do the same thing we do when a cookie-echo 5313 * arrives. 5314 */ 5315 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5316 asoc->mapping_array_base_tsn = new_cum_tsn; 5317 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5318 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5319 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5320 } 5321 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5322 } 5323 /*************************************************************/ 5324 /* 2. Clear up re-assembly queue */ 5325 /*************************************************************/ 5326 5327 /* 5328 * First service it if pd-api is up, just in case we can progress it 5329 * forward 5330 */ 5331 if (asoc->fragmented_delivery_inprogress) { 5332 sctp_service_reassembly(stcb, asoc); 5333 } 5334 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5335 /* For each one on here see if we need to toss it */ 5336 /* 5337 * For now large messages held on the reasmqueue that are 5338 * complete will be tossed too. We could in theory do more 5339 * work to spin through and stop after dumping one msg aka 5340 * seeing the start of a new msg at the head, and call the 5341 * delivery function... to see if it can be delivered... But 5342 * for now we just dump everything on the queue. 5343 */ 5344 chk = TAILQ_FIRST(&asoc->reasmqueue); 5345 while (chk) { 5346 at = TAILQ_NEXT(chk, sctp_next); 5347 if (compare_with_wrap(asoc->cumulative_tsn, 5348 chk->rec.data.TSN_seq, MAX_TSN) || 5349 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5350 /* It needs to be tossed */ 5351 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5352 if (compare_with_wrap(chk->rec.data.TSN_seq, 5353 asoc->tsn_last_delivered, MAX_TSN)) { 5354 asoc->tsn_last_delivered = 5355 chk->rec.data.TSN_seq; 5356 asoc->str_of_pdapi = 5357 chk->rec.data.stream_number; 5358 asoc->ssn_of_pdapi = 5359 chk->rec.data.stream_seq; 5360 asoc->fragment_flags = 5361 chk->rec.data.rcv_flags; 5362 } 5363 asoc->size_on_reasm_queue -= chk->send_size; 5364 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5365 cnt_gone++; 5366 5367 /* Clear up any stream problem */ 5368 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5369 SCTP_DATA_UNORDERED && 5370 (compare_with_wrap(chk->rec.data.stream_seq, 5371 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5372 MAX_SEQ))) { 5373 /* 5374 * We must dump forward this streams 5375 * sequence number if the chunk is 5376 * not unordered that is being 5377 * skipped. There is a chance that 5378 * if the peer does not include the 5379 * last fragment in its FWD-TSN we 5380 * WILL have a problem here since 5381 * you would have a partial chunk in 5382 * queue that may not be 5383 * deliverable. Also if a Partial 5384 * delivery API as started the user 5385 * may get a partial chunk. The next 5386 * read returning a new chunk... 5387 * really ugly but I see no way 5388 * around it! Maybe a notify?? 5389 */ 5390 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5391 chk->rec.data.stream_seq; 5392 } 5393 if (chk->data) { 5394 sctp_m_freem(chk->data); 5395 chk->data = NULL; 5396 } 5397 sctp_free_a_chunk(stcb, chk); 5398 } else { 5399 /* 5400 * Ok we have gone beyond the end of the 5401 * fwd-tsn's mark. Some checks... 
5402 */ 5403 if ((asoc->fragmented_delivery_inprogress) && 5404 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5405 uint32_t str_seq; 5406 5407 /* 5408 * Special case PD-API is up and 5409 * what we fwd-tsn' over includes 5410 * one that had the LAST_FRAG. We no 5411 * longer need to do the PD-API. 5412 */ 5413 asoc->fragmented_delivery_inprogress = 0; 5414 5415 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi; 5416 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5417 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq); 5418 5419 } 5420 break; 5421 } 5422 chk = at; 5423 } 5424 } 5425 if (asoc->fragmented_delivery_inprogress) { 5426 /* 5427 * Ok we removed cnt_gone chunks in the PD-API queue that 5428 * were being delivered. So now we must turn off the flag. 5429 */ 5430 uint32_t str_seq; 5431 5432 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi; 5433 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5434 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq); 5435 asoc->fragmented_delivery_inprogress = 0; 5436 } 5437 /*************************************************************/ 5438 /* 3. Update the PR-stream re-ordering queues */ 5439 /*************************************************************/ 5440 fwd_sz -= sizeof(*fwd); 5441 if (m && fwd_sz) { 5442 /* New method. */ 5443 unsigned int num_str; 5444 struct sctp_strseq *stseq, strseqbuf; 5445 5446 offset += sizeof(*fwd); 5447 5448 num_str = fwd_sz / sizeof(struct sctp_strseq); 5449 for (i = 0; i < num_str; i++) { 5450 uint16_t st; 5451 unsigned char *xx; 5452 5453 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5454 sizeof(struct sctp_strseq), 5455 (uint8_t *) & strseqbuf); 5456 offset += sizeof(struct sctp_strseq); 5457 if (stseq == NULL) 5458 break; 5459 /* Convert */ 5460 xx = (unsigned char *)&stseq[i]; 5461 st = ntohs(stseq[i].stream); 5462 stseq[i].stream = st; 5463 st = ntohs(stseq[i].sequence); 5464 stseq[i].sequence = st; 5465 /* now process */ 5466 if (stseq[i].stream >= asoc->streamincnt) { 5467 /* 5468 * It is arguable if we should continue. 5469 * Since the peer sent bogus stream info we 5470 * may be in deep trouble.. a return may be 5471 * a better choice? 5472 */ 5473 continue; 5474 } 5475 strm = &asoc->strmin[stseq[i].stream]; 5476 if (compare_with_wrap(stseq[i].sequence, 5477 strm->last_sequence_delivered, MAX_SEQ)) { 5478 /* Update the sequence number */ 5479 strm->last_sequence_delivered = 5480 stseq[i].sequence; 5481 } 5482 /* now kick the stream the new way */ 5483 sctp_kick_prsctp_reorder_queue(stcb, strm); 5484 } 5485 } 5486 if (TAILQ_FIRST(&asoc->reasmqueue)) { 5487 /* now lets kick out and check for more fragmented delivery */ 5488 sctp_deliver_reasm_check(stcb, &stcb->asoc); 5489 } 5490 } 5491