1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_asconf.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/udp.h> 53 #include <netinet/udp_var.h> 54 #include <sys/proc.h> 55 #ifdef INET6 56 #include <netinet/icmp6.h> 57 #endif 58 59 60 #ifndef KTR_SCTP 61 #define KTR_SCTP KTR_SUBSYS 62 #endif 63 64 extern const struct sctp_cc_functions sctp_cc_functions[]; 65 extern const struct sctp_ss_functions sctp_ss_functions[]; 66 67 void 68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 69 { 70 struct sctp_cwnd_log sctp_clog; 71 72 sctp_clog.x.sb.stcb = stcb; 73 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 74 if (stcb) 75 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 76 else 77 sctp_clog.x.sb.stcb_sbcc = 0; 78 sctp_clog.x.sb.incr = incr; 79 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 80 SCTP_LOG_EVENT_SB, 81 from, 82 sctp_clog.x.misc.log1, 83 sctp_clog.x.misc.log2, 84 sctp_clog.x.misc.log3, 85 sctp_clog.x.misc.log4); 86 } 87 88 void 89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 90 { 91 struct sctp_cwnd_log sctp_clog; 92 93 sctp_clog.x.close.inp = (void *)inp; 94 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 95 if (stcb) { 96 sctp_clog.x.close.stcb = (void *)stcb; 97 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 98 } else { 99 sctp_clog.x.close.stcb = 0; 100 sctp_clog.x.close.state = 0; 101 } 102 sctp_clog.x.close.loc = loc; 103 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 104 SCTP_LOG_EVENT_CLOSE, 105 0, 106 sctp_clog.x.misc.log1, 107 sctp_clog.x.misc.log2, 108 sctp_clog.x.misc.log3, 109 sctp_clog.x.misc.log4); 110 } 111 112 void 113 rto_logging(struct sctp_nets *net, int from) 114 { 115 struct sctp_cwnd_log sctp_clog; 116 117 memset(&sctp_clog, 0, sizeof(sctp_clog)); 118 sctp_clog.x.rto.net = (void *)net; 119 sctp_clog.x.rto.rtt = net->rtt / 1000; 120 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 121 SCTP_LOG_EVENT_RTT, 122 from, 123 sctp_clog.x.misc.log1, 124 sctp_clog.x.misc.log2, 125 sctp_clog.x.misc.log3, 126 sctp_clog.x.misc.log4); 127 } 128 129 void 130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 131 { 132 struct sctp_cwnd_log sctp_clog; 133 134 sctp_clog.x.strlog.stcb = stcb; 135 sctp_clog.x.strlog.n_tsn = tsn; 136 sctp_clog.x.strlog.n_sseq = sseq; 137 sctp_clog.x.strlog.e_tsn = 0; 138 sctp_clog.x.strlog.e_sseq = 0; 139 sctp_clog.x.strlog.strm = stream; 140 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 141 SCTP_LOG_EVENT_STRM, 142 from, 143 sctp_clog.x.misc.log1, 144 sctp_clog.x.misc.log2, 145 sctp_clog.x.misc.log3, 146 sctp_clog.x.misc.log4); 147 } 148 149 void 150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 151 { 152 struct sctp_cwnd_log sctp_clog; 153 154 sctp_clog.x.nagle.stcb = (void *)stcb; 155 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 156 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 157 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 158 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 159 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 160 SCTP_LOG_EVENT_NAGLE, 161 action, 162 sctp_clog.x.misc.log1, 163 sctp_clog.x.misc.log2, 164 sctp_clog.x.misc.log3, 165 sctp_clog.x.misc.log4); 166 } 167 168 void 169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 170 { 171 struct sctp_cwnd_log sctp_clog; 172 173 sctp_clog.x.sack.cumack = cumack; 174 sctp_clog.x.sack.oldcumack = old_cumack; 175 sctp_clog.x.sack.tsn = tsn; 176 sctp_clog.x.sack.numGaps = gaps; 177 sctp_clog.x.sack.numDups = dups; 178 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 179 SCTP_LOG_EVENT_SACK, 180 from, 181 sctp_clog.x.misc.log1, 182 sctp_clog.x.misc.log2, 183 sctp_clog.x.misc.log3, 184 sctp_clog.x.misc.log4); 185 } 186 187 void 188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 189 { 190 struct sctp_cwnd_log sctp_clog; 191 192 memset(&sctp_clog, 0, sizeof(sctp_clog)); 193 sctp_clog.x.map.base = map; 194 sctp_clog.x.map.cum = cum; 195 sctp_clog.x.map.high = high; 196 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 197 SCTP_LOG_EVENT_MAP, 198 from, 199 sctp_clog.x.misc.log1, 200 sctp_clog.x.misc.log2, 201 sctp_clog.x.misc.log3, 202 sctp_clog.x.misc.log4); 203 } 204 205 void 206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 207 { 208 struct sctp_cwnd_log sctp_clog; 209 210 memset(&sctp_clog, 0, sizeof(sctp_clog)); 211 sctp_clog.x.fr.largest_tsn = biggest_tsn; 212 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 213 sctp_clog.x.fr.tsn = tsn; 214 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 215 SCTP_LOG_EVENT_FR, 216 from, 217 sctp_clog.x.misc.log1, 218 sctp_clog.x.misc.log2, 219 sctp_clog.x.misc.log3, 220 sctp_clog.x.misc.log4); 221 } 222 223 #ifdef SCTP_MBUF_LOGGING 224 void 225 sctp_log_mb(struct mbuf *m, int from) 226 { 227 struct sctp_cwnd_log sctp_clog; 228 229 sctp_clog.x.mb.mp = m; 230 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 231 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 232 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 233 if (SCTP_BUF_IS_EXTENDED(m)) { 234 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 235 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 236 } else { 237 sctp_clog.x.mb.ext = 0; 238 sctp_clog.x.mb.refcnt = 0; 239 } 240 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 241 SCTP_LOG_EVENT_MBUF, 242 from, 243 sctp_clog.x.misc.log1, 244 sctp_clog.x.misc.log2, 245 sctp_clog.x.misc.log3, 246 sctp_clog.x.misc.log4); 247 } 248 249 void 250 sctp_log_mbc(struct mbuf *m, int from) 251 { 252 struct mbuf *mat; 253 254 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 255 sctp_log_mb(mat, from); 256 } 257 } 258 #endif 259 260 void 261 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 262 { 263 struct sctp_cwnd_log sctp_clog; 264 265 if (control == NULL) { 266 SCTP_PRINTF("Gak log of NULL?\n"); 267 return; 268 } 269 sctp_clog.x.strlog.stcb = control->stcb; 270 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 271 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 272 sctp_clog.x.strlog.strm = control->sinfo_stream; 273 if (poschk != NULL) { 274 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 275 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 276 } else { 277 sctp_clog.x.strlog.e_tsn = 0; 278 sctp_clog.x.strlog.e_sseq = 0; 279 } 280 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 281 SCTP_LOG_EVENT_STRM, 282 from, 283 sctp_clog.x.misc.log1, 284 sctp_clog.x.misc.log2, 285 sctp_clog.x.misc.log3, 286 sctp_clog.x.misc.log4); 287 } 288 289 void 290 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 291 { 292 struct sctp_cwnd_log sctp_clog; 293 294 sctp_clog.x.cwnd.net = net; 295 if (stcb->asoc.send_queue_cnt > 255) 296 sctp_clog.x.cwnd.cnt_in_send = 255; 297 else 298 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 299 if (stcb->asoc.stream_queue_cnt > 255) 300 sctp_clog.x.cwnd.cnt_in_str = 255; 301 else 302 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 303 304 if (net) { 305 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 306 sctp_clog.x.cwnd.inflight = net->flight_size; 307 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 308 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 309 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 310 } 311 if (SCTP_CWNDLOG_PRESEND == from) { 312 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 313 } 314 sctp_clog.x.cwnd.cwnd_augment = augment; 315 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 316 SCTP_LOG_EVENT_CWND, 317 from, 318 sctp_clog.x.misc.log1, 319 sctp_clog.x.misc.log2, 320 sctp_clog.x.misc.log3, 321 sctp_clog.x.misc.log4); 322 } 323 324 void 325 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 326 { 327 struct sctp_cwnd_log sctp_clog; 328 329 memset(&sctp_clog, 0, sizeof(sctp_clog)); 330 if (inp) { 331 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 332 333 } else { 334 sctp_clog.x.lock.sock = (void *)NULL; 335 } 336 sctp_clog.x.lock.inp = (void *)inp; 337 if (stcb) { 338 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 339 } else { 340 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 341 } 342 if (inp) { 343 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 344 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 345 } else { 346 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 347 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 348 } 349 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 350 if (inp && (inp->sctp_socket)) { 351 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 352 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 353 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 354 } else { 355 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 356 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 357 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 358 } 359 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 360 SCTP_LOG_LOCK_EVENT, 361 from, 362 sctp_clog.x.misc.log1, 363 sctp_clog.x.misc.log2, 364 sctp_clog.x.misc.log3, 365 sctp_clog.x.misc.log4); 366 } 367 368 void 369 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 370 { 371 struct sctp_cwnd_log sctp_clog; 372 373 memset(&sctp_clog, 0, sizeof(sctp_clog)); 374 sctp_clog.x.cwnd.net = net; 375 sctp_clog.x.cwnd.cwnd_new_value = error; 376 sctp_clog.x.cwnd.inflight = net->flight_size; 377 sctp_clog.x.cwnd.cwnd_augment = burst; 378 if (stcb->asoc.send_queue_cnt > 255) 379 sctp_clog.x.cwnd.cnt_in_send = 255; 380 else 381 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 382 if (stcb->asoc.stream_queue_cnt > 255) 383 sctp_clog.x.cwnd.cnt_in_str = 255; 384 else 385 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 386 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 387 SCTP_LOG_EVENT_MAXBURST, 388 from, 389 sctp_clog.x.misc.log1, 390 sctp_clog.x.misc.log2, 391 sctp_clog.x.misc.log3, 392 sctp_clog.x.misc.log4); 393 } 394 395 void 396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 397 { 398 struct sctp_cwnd_log sctp_clog; 399 400 sctp_clog.x.rwnd.rwnd = peers_rwnd; 401 sctp_clog.x.rwnd.send_size = snd_size; 402 sctp_clog.x.rwnd.overhead = overhead; 403 sctp_clog.x.rwnd.new_rwnd = 0; 404 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 405 SCTP_LOG_EVENT_RWND, 406 from, 407 sctp_clog.x.misc.log1, 408 sctp_clog.x.misc.log2, 409 sctp_clog.x.misc.log3, 410 sctp_clog.x.misc.log4); 411 } 412 413 void 414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 415 { 416 struct sctp_cwnd_log sctp_clog; 417 418 sctp_clog.x.rwnd.rwnd = peers_rwnd; 419 sctp_clog.x.rwnd.send_size = flight_size; 420 sctp_clog.x.rwnd.overhead = overhead; 421 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 422 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 423 SCTP_LOG_EVENT_RWND, 424 from, 425 sctp_clog.x.misc.log1, 426 sctp_clog.x.misc.log2, 427 sctp_clog.x.misc.log3, 428 sctp_clog.x.misc.log4); 429 } 430 431 #ifdef SCTP_MBCNT_LOGGING 432 static void 433 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 434 { 435 struct sctp_cwnd_log sctp_clog; 436 437 sctp_clog.x.mbcnt.total_queue_size = total_oq; 438 sctp_clog.x.mbcnt.size_change = book; 439 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 440 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 441 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 442 SCTP_LOG_EVENT_MBCNT, 443 from, 444 sctp_clog.x.misc.log1, 445 sctp_clog.x.misc.log2, 446 sctp_clog.x.misc.log3, 447 sctp_clog.x.misc.log4); 448 } 449 #endif 450 451 void 452 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 453 { 454 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 455 SCTP_LOG_MISC_EVENT, 456 from, 457 a, b, c, d); 458 } 459 460 void 461 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 462 { 463 struct sctp_cwnd_log sctp_clog; 464 465 sctp_clog.x.wake.stcb = (void *)stcb; 466 sctp_clog.x.wake.wake_cnt = wake_cnt; 467 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 468 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 469 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 470 471 if (stcb->asoc.stream_queue_cnt < 0xff) 472 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 473 else 474 sctp_clog.x.wake.stream_qcnt = 0xff; 475 476 if (stcb->asoc.chunks_on_out_queue < 0xff) 477 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 478 else 479 sctp_clog.x.wake.chunks_on_oque = 0xff; 480 481 sctp_clog.x.wake.sctpflags = 0; 482 /* set in the defered mode stuff */ 483 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 484 sctp_clog.x.wake.sctpflags |= 1; 485 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 486 sctp_clog.x.wake.sctpflags |= 2; 487 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 488 sctp_clog.x.wake.sctpflags |= 4; 489 /* what about the sb */ 490 if (stcb->sctp_socket) { 491 struct socket *so = stcb->sctp_socket; 492 493 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 494 } else { 495 sctp_clog.x.wake.sbflags = 0xff; 496 } 497 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 498 SCTP_LOG_EVENT_WAKE, 499 from, 500 sctp_clog.x.misc.log1, 501 sctp_clog.x.misc.log2, 502 sctp_clog.x.misc.log3, 503 sctp_clog.x.misc.log4); 504 } 505 506 void 507 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen) 508 { 509 struct sctp_cwnd_log sctp_clog; 510 511 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 512 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 513 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 514 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 515 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 516 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 517 sctp_clog.x.blk.sndlen = (uint32_t) sendlen; 518 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 519 SCTP_LOG_EVENT_BLOCK, 520 from, 521 sctp_clog.x.misc.log1, 522 sctp_clog.x.misc.log2, 523 sctp_clog.x.misc.log3, 524 sctp_clog.x.misc.log4); 525 } 526 527 int 528 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 529 { 530 /* May need to fix this if ktrdump does not work */ 531 return (0); 532 } 533 534 #ifdef SCTP_AUDITING_ENABLED 535 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 536 static int sctp_audit_indx = 0; 537 538 static 539 void 540 sctp_print_audit_report(void) 541 { 542 int i; 543 int cnt; 544 545 cnt = 0; 546 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 547 if ((sctp_audit_data[i][0] == 0xe0) && 548 (sctp_audit_data[i][1] == 0x01)) { 549 cnt = 0; 550 SCTP_PRINTF("\n"); 551 } else if (sctp_audit_data[i][0] == 0xf0) { 552 cnt = 0; 553 SCTP_PRINTF("\n"); 554 } else if ((sctp_audit_data[i][0] == 0xc0) && 555 (sctp_audit_data[i][1] == 0x01)) { 556 SCTP_PRINTF("\n"); 557 cnt = 0; 558 } 559 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 560 (uint32_t) sctp_audit_data[i][1]); 561 cnt++; 562 if ((cnt % 14) == 0) 563 SCTP_PRINTF("\n"); 564 } 565 for (i = 0; i < sctp_audit_indx; i++) { 566 if ((sctp_audit_data[i][0] == 0xe0) && 567 (sctp_audit_data[i][1] == 0x01)) { 568 cnt = 0; 569 SCTP_PRINTF("\n"); 570 } else if (sctp_audit_data[i][0] == 0xf0) { 571 cnt = 0; 572 SCTP_PRINTF("\n"); 573 } else if ((sctp_audit_data[i][0] == 0xc0) && 574 (sctp_audit_data[i][1] == 0x01)) { 575 SCTP_PRINTF("\n"); 576 cnt = 0; 577 } 578 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 579 (uint32_t) sctp_audit_data[i][1]); 580 cnt++; 581 if ((cnt % 14) == 0) 582 SCTP_PRINTF("\n"); 583 } 584 SCTP_PRINTF("\n"); 585 } 586 587 void 588 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 589 struct sctp_nets *net) 590 { 591 int resend_cnt, tot_out, rep, tot_book_cnt; 592 struct sctp_nets *lnet; 593 struct sctp_tmit_chunk *chk; 594 595 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 596 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 597 sctp_audit_indx++; 598 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 599 sctp_audit_indx = 0; 600 } 601 if (inp == NULL) { 602 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 603 sctp_audit_data[sctp_audit_indx][1] = 0x01; 604 sctp_audit_indx++; 605 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 606 sctp_audit_indx = 0; 607 } 608 return; 609 } 610 if (stcb == NULL) { 611 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 612 sctp_audit_data[sctp_audit_indx][1] = 0x02; 613 sctp_audit_indx++; 614 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 615 sctp_audit_indx = 0; 616 } 617 return; 618 } 619 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 620 sctp_audit_data[sctp_audit_indx][1] = 621 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 622 sctp_audit_indx++; 623 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 624 sctp_audit_indx = 0; 625 } 626 rep = 0; 627 tot_book_cnt = 0; 628 resend_cnt = tot_out = 0; 629 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 630 if (chk->sent == SCTP_DATAGRAM_RESEND) { 631 resend_cnt++; 632 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 633 tot_out += chk->book_size; 634 tot_book_cnt++; 635 } 636 } 637 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 638 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 639 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 645 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 646 rep = 1; 647 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 648 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 649 sctp_audit_data[sctp_audit_indx][1] = 650 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 651 sctp_audit_indx++; 652 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 653 sctp_audit_indx = 0; 654 } 655 } 656 if (tot_out != stcb->asoc.total_flight) { 657 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 658 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 659 sctp_audit_indx++; 660 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 661 sctp_audit_indx = 0; 662 } 663 rep = 1; 664 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 665 (int)stcb->asoc.total_flight); 666 stcb->asoc.total_flight = tot_out; 667 } 668 if (tot_book_cnt != stcb->asoc.total_flight_count) { 669 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 670 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 671 sctp_audit_indx++; 672 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 673 sctp_audit_indx = 0; 674 } 675 rep = 1; 676 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 677 678 stcb->asoc.total_flight_count = tot_book_cnt; 679 } 680 tot_out = 0; 681 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 682 tot_out += lnet->flight_size; 683 } 684 if (tot_out != stcb->asoc.total_flight) { 685 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 686 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 687 sctp_audit_indx++; 688 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 689 sctp_audit_indx = 0; 690 } 691 rep = 1; 692 SCTP_PRINTF("real flight:%d net total was %d\n", 693 stcb->asoc.total_flight, tot_out); 694 /* now corrective action */ 695 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 696 697 tot_out = 0; 698 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 699 if ((chk->whoTo == lnet) && 700 (chk->sent < SCTP_DATAGRAM_RESEND)) { 701 tot_out += chk->book_size; 702 } 703 } 704 if (lnet->flight_size != tot_out) { 705 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 706 (void *)lnet, lnet->flight_size, 707 tot_out); 708 lnet->flight_size = tot_out; 709 } 710 } 711 } 712 if (rep) { 713 sctp_print_audit_report(); 714 } 715 } 716 717 void 718 sctp_audit_log(uint8_t ev, uint8_t fd) 719 { 720 721 sctp_audit_data[sctp_audit_indx][0] = ev; 722 sctp_audit_data[sctp_audit_indx][1] = fd; 723 sctp_audit_indx++; 724 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 725 sctp_audit_indx = 0; 726 } 727 } 728 729 #endif 730 731 /* 732 * sctp_stop_timers_for_shutdown() should be called 733 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 734 * state to make sure that all timers are stopped. 735 */ 736 void 737 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 738 { 739 struct sctp_association *asoc; 740 struct sctp_nets *net; 741 742 asoc = &stcb->asoc; 743 744 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 745 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 746 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 747 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 748 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 749 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 750 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 751 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 752 } 753 } 754 755 /* 756 * a list of sizes based on typical mtu's, used only if next hop size not 757 * returned. 758 */ 759 static uint32_t sctp_mtu_sizes[] = { 760 68, 761 296, 762 508, 763 512, 764 544, 765 576, 766 1006, 767 1492, 768 1500, 769 1536, 770 2002, 771 2048, 772 4352, 773 4464, 774 8166, 775 17914, 776 32000, 777 65535 778 }; 779 780 /* 781 * Return the largest MTU smaller than val. If there is no 782 * entry, just return val. 783 */ 784 uint32_t 785 sctp_get_prev_mtu(uint32_t val) 786 { 787 uint32_t i; 788 789 if (val <= sctp_mtu_sizes[0]) { 790 return (val); 791 } 792 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 793 if (val <= sctp_mtu_sizes[i]) { 794 break; 795 } 796 } 797 return (sctp_mtu_sizes[i - 1]); 798 } 799 800 /* 801 * Return the smallest MTU larger than val. If there is no 802 * entry, just return val. 803 */ 804 uint32_t 805 sctp_get_next_mtu(uint32_t val) 806 { 807 /* select another MTU that is just bigger than this one */ 808 uint32_t i; 809 810 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 811 if (val < sctp_mtu_sizes[i]) { 812 return (sctp_mtu_sizes[i]); 813 } 814 } 815 return (val); 816 } 817 818 void 819 sctp_fill_random_store(struct sctp_pcb *m) 820 { 821 /* 822 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 823 * our counter. The result becomes our good random numbers and we 824 * then setup to give these out. Note that we do no locking to 825 * protect this. This is ok, since if competing folks call this we 826 * will get more gobbled gook in the random store which is what we 827 * want. There is a danger that two guys will use the same random 828 * numbers, but thats ok too since that is random as well :-> 829 */ 830 m->store_at = 0; 831 (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 832 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 833 sizeof(m->random_counter), (uint8_t *) m->random_store); 834 m->random_counter++; 835 } 836 837 uint32_t 838 sctp_select_initial_TSN(struct sctp_pcb *inp) 839 { 840 /* 841 * A true implementation should use random selection process to get 842 * the initial stream sequence number, using RFC1750 as a good 843 * guideline 844 */ 845 uint32_t x, *xp; 846 uint8_t *p; 847 int store_at, new_store; 848 849 if (inp->initial_sequence_debug != 0) { 850 uint32_t ret; 851 852 ret = inp->initial_sequence_debug; 853 inp->initial_sequence_debug++; 854 return (ret); 855 } 856 retry: 857 store_at = inp->store_at; 858 new_store = store_at + sizeof(uint32_t); 859 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 860 new_store = 0; 861 } 862 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 863 goto retry; 864 } 865 if (new_store == 0) { 866 /* Refill the random store */ 867 sctp_fill_random_store(inp); 868 } 869 p = &inp->random_store[store_at]; 870 xp = (uint32_t *) p; 871 x = *xp; 872 return (x); 873 } 874 875 uint32_t 876 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 877 { 878 uint32_t x; 879 struct timeval now; 880 881 if (check) { 882 (void)SCTP_GETTIME_TIMEVAL(&now); 883 } 884 for (;;) { 885 x = sctp_select_initial_TSN(&inp->sctp_ep); 886 if (x == 0) { 887 /* we never use 0 */ 888 continue; 889 } 890 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 891 break; 892 } 893 } 894 return (x); 895 } 896 897 int32_t 898 sctp_map_assoc_state(int kernel_state) 899 { 900 int32_t user_state; 901 902 if (kernel_state & SCTP_STATE_WAS_ABORTED) { 903 user_state = SCTP_CLOSED; 904 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) { 905 user_state = SCTP_SHUTDOWN_PENDING; 906 } else { 907 switch (kernel_state & SCTP_STATE_MASK) { 908 case SCTP_STATE_EMPTY: 909 user_state = SCTP_CLOSED; 910 break; 911 case SCTP_STATE_INUSE: 912 user_state = SCTP_CLOSED; 913 break; 914 case SCTP_STATE_COOKIE_WAIT: 915 user_state = SCTP_COOKIE_WAIT; 916 break; 917 case SCTP_STATE_COOKIE_ECHOED: 918 user_state = SCTP_COOKIE_ECHOED; 919 break; 920 case SCTP_STATE_OPEN: 921 user_state = SCTP_ESTABLISHED; 922 break; 923 case SCTP_STATE_SHUTDOWN_SENT: 924 user_state = SCTP_SHUTDOWN_SENT; 925 break; 926 case SCTP_STATE_SHUTDOWN_RECEIVED: 927 user_state = SCTP_SHUTDOWN_RECEIVED; 928 break; 929 case SCTP_STATE_SHUTDOWN_ACK_SENT: 930 user_state = SCTP_SHUTDOWN_ACK_SENT; 931 break; 932 default: 933 user_state = SCTP_CLOSED; 934 break; 935 } 936 } 937 return (user_state); 938 } 939 940 int 941 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 942 uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms) 943 { 944 struct sctp_association *asoc; 945 946 /* 947 * Anything set to zero is taken care of by the allocation routine's 948 * bzero 949 */ 950 951 /* 952 * Up front select what scoping to apply on addresses I tell my peer 953 * Not sure what to do with these right now, we will need to come up 954 * with a way to set them. We may need to pass them through from the 955 * caller in the sctp_aloc_assoc() function. 956 */ 957 int i; 958 #if defined(SCTP_DETAILED_STR_STATS) 959 int j; 960 #endif 961 962 asoc = &stcb->asoc; 963 /* init all variables to a known value. */ 964 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE); 965 asoc->max_burst = inp->sctp_ep.max_burst; 966 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; 967 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 968 asoc->cookie_life = inp->sctp_ep.def_cookie_life; 969 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; 970 asoc->ecn_supported = inp->ecn_supported; 971 asoc->prsctp_supported = inp->prsctp_supported; 972 asoc->idata_supported = inp->idata_supported; 973 asoc->auth_supported = inp->auth_supported; 974 asoc->asconf_supported = inp->asconf_supported; 975 asoc->reconfig_supported = inp->reconfig_supported; 976 asoc->nrsack_supported = inp->nrsack_supported; 977 asoc->pktdrop_supported = inp->pktdrop_supported; 978 asoc->idata_supported = inp->idata_supported; 979 asoc->sctp_cmt_pf = (uint8_t) 0; 980 asoc->sctp_frag_point = inp->sctp_frag_point; 981 asoc->sctp_features = inp->sctp_features; 982 asoc->default_dscp = inp->sctp_ep.default_dscp; 983 asoc->max_cwnd = inp->max_cwnd; 984 #ifdef INET6 985 if (inp->sctp_ep.default_flowlabel) { 986 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; 987 } else { 988 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 989 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep); 990 asoc->default_flowlabel &= 0x000fffff; 991 asoc->default_flowlabel |= 0x80000000; 992 } else { 993 asoc->default_flowlabel = 0; 994 } 995 } 996 #endif 997 asoc->sb_send_resv = 0; 998 if (override_tag) { 999 asoc->my_vtag = override_tag; 1000 } else { 1001 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1002 } 1003 /* Get the nonce tags */ 1004 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1005 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1006 asoc->vrf_id = vrf_id; 1007 1008 #ifdef SCTP_ASOCLOG_OF_TSNS 1009 asoc->tsn_in_at = 0; 1010 asoc->tsn_out_at = 0; 1011 asoc->tsn_in_wrapped = 0; 1012 asoc->tsn_out_wrapped = 0; 1013 asoc->cumack_log_at = 0; 1014 asoc->cumack_log_atsnt = 0; 1015 #endif 1016 #ifdef SCTP_FS_SPEC_LOG 1017 asoc->fs_index = 0; 1018 #endif 1019 asoc->refcnt = 0; 1020 asoc->assoc_up_sent = 0; 1021 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 1022 sctp_select_initial_TSN(&inp->sctp_ep); 1023 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1024 /* we are optimisitic here */ 1025 asoc->peer_supports_nat = 0; 1026 asoc->sent_queue_retran_cnt = 0; 1027 1028 /* for CMT */ 1029 asoc->last_net_cmt_send_started = NULL; 1030 1031 /* This will need to be adjusted */ 1032 asoc->last_acked_seq = asoc->init_seq_number - 1; 1033 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1034 asoc->asconf_seq_in = asoc->last_acked_seq; 1035 1036 /* here we are different, we hold the next one we expect */ 1037 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 1038 1039 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1040 asoc->initial_rto = inp->sctp_ep.initial_rto; 1041 1042 asoc->max_init_times = inp->sctp_ep.max_init_times; 1043 asoc->max_send_times = inp->sctp_ep.max_send_times; 1044 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1045 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1046 asoc->free_chunk_cnt = 0; 1047 1048 asoc->iam_blocking = 0; 1049 asoc->context = inp->sctp_context; 1050 asoc->local_strreset_support = inp->local_strreset_support; 1051 asoc->def_send = inp->def_send; 1052 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1053 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1054 asoc->pr_sctp_cnt = 0; 1055 asoc->total_output_queue_size = 0; 1056 1057 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1058 asoc->scope.ipv6_addr_legal = 1; 1059 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1060 asoc->scope.ipv4_addr_legal = 1; 1061 } else { 1062 asoc->scope.ipv4_addr_legal = 0; 1063 } 1064 } else { 1065 asoc->scope.ipv6_addr_legal = 0; 1066 asoc->scope.ipv4_addr_legal = 1; 1067 } 1068 1069 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1070 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1071 1072 asoc->smallest_mtu = inp->sctp_frag_point; 1073 asoc->minrto = inp->sctp_ep.sctp_minrto; 1074 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1075 1076 asoc->stream_locked_on = 0; 1077 asoc->ecn_echo_cnt_onq = 0; 1078 asoc->stream_locked = 0; 1079 1080 asoc->send_sack = 1; 1081 1082 LIST_INIT(&asoc->sctp_restricted_addrs); 1083 1084 TAILQ_INIT(&asoc->nets); 1085 TAILQ_INIT(&asoc->pending_reply_queue); 1086 TAILQ_INIT(&asoc->asconf_ack_sent); 1087 /* Setup to fill the hb random cache at first HB */ 1088 asoc->hb_random_idx = 4; 1089 1090 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1091 1092 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1093 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1094 1095 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1096 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1097 1098 /* 1099 * Now the stream parameters, here we allocate space for all streams 1100 * that we request by default. 1101 */ 1102 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1103 o_strms; 1104 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1105 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1106 SCTP_M_STRMO); 1107 if (asoc->strmout == NULL) { 1108 /* big trouble no memory */ 1109 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1110 return (ENOMEM); 1111 } 1112 for (i = 0; i < asoc->streamoutcnt; i++) { 1113 /* 1114 * inbound side must be set to 0xffff, also NOTE when we get 1115 * the INIT-ACK back (for INIT sender) we MUST reduce the 1116 * count (streamoutcnt) but first check if we sent to any of 1117 * the upper streams that were dropped (if some were). Those 1118 * that were dropped must be notified to the upper layer as 1119 * failed to send. 1120 */ 1121 asoc->strmout[i].next_mid_ordered = 0; 1122 asoc->strmout[i].next_mid_unordered = 0; 1123 TAILQ_INIT(&asoc->strmout[i].outqueue); 1124 asoc->strmout[i].chunks_on_queues = 0; 1125 #if defined(SCTP_DETAILED_STR_STATS) 1126 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1127 asoc->strmout[i].abandoned_sent[j] = 0; 1128 asoc->strmout[i].abandoned_unsent[j] = 0; 1129 } 1130 #else 1131 asoc->strmout[i].abandoned_sent[0] = 0; 1132 asoc->strmout[i].abandoned_unsent[0] = 0; 1133 #endif 1134 asoc->strmout[i].stream_no = i; 1135 asoc->strmout[i].last_msg_incomplete = 0; 1136 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1137 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1138 } 1139 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); 1140 1141 /* Now the mapping array */ 1142 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1143 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1144 SCTP_M_MAP); 1145 if (asoc->mapping_array == NULL) { 1146 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1147 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1148 return (ENOMEM); 1149 } 1150 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1151 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1152 SCTP_M_MAP); 1153 if (asoc->nr_mapping_array == NULL) { 1154 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1155 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1156 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1157 return (ENOMEM); 1158 } 1159 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1160 1161 /* Now the init of the other outqueues */ 1162 TAILQ_INIT(&asoc->free_chunks); 1163 TAILQ_INIT(&asoc->control_send_queue); 1164 TAILQ_INIT(&asoc->asconf_send_queue); 1165 TAILQ_INIT(&asoc->send_queue); 1166 TAILQ_INIT(&asoc->sent_queue); 1167 TAILQ_INIT(&asoc->resetHead); 1168 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1169 TAILQ_INIT(&asoc->asconf_queue); 1170 /* authentication fields */ 1171 asoc->authinfo.random = NULL; 1172 asoc->authinfo.active_keyid = 0; 1173 asoc->authinfo.assoc_key = NULL; 1174 asoc->authinfo.assoc_keyid = 0; 1175 asoc->authinfo.recv_key = NULL; 1176 asoc->authinfo.recv_keyid = 0; 1177 LIST_INIT(&asoc->shared_keys); 1178 asoc->marked_retrans = 0; 1179 asoc->port = inp->sctp_ep.port; 1180 asoc->timoinit = 0; 1181 asoc->timodata = 0; 1182 asoc->timosack = 0; 1183 asoc->timoshutdown = 0; 1184 asoc->timoheartbeat = 0; 1185 asoc->timocookie = 0; 1186 asoc->timoshutdownack = 0; 1187 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1188 asoc->discontinuity_time = asoc->start_time; 1189 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1190 asoc->abandoned_unsent[i] = 0; 1191 asoc->abandoned_sent[i] = 0; 1192 } 1193 /* 1194 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1195 * freed later when the association is freed. 1196 */ 1197 return (0); 1198 } 1199 1200 void 1201 sctp_print_mapping_array(struct sctp_association *asoc) 1202 { 1203 unsigned int i, limit; 1204 1205 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1206 asoc->mapping_array_size, 1207 asoc->mapping_array_base_tsn, 1208 asoc->cumulative_tsn, 1209 asoc->highest_tsn_inside_map, 1210 asoc->highest_tsn_inside_nr_map); 1211 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1212 if (asoc->mapping_array[limit - 1] != 0) { 1213 break; 1214 } 1215 } 1216 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1217 for (i = 0; i < limit; i++) { 1218 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1219 } 1220 if (limit % 16) 1221 SCTP_PRINTF("\n"); 1222 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1223 if (asoc->nr_mapping_array[limit - 1]) { 1224 break; 1225 } 1226 } 1227 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1228 for (i = 0; i < limit; i++) { 1229 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1230 } 1231 if (limit % 16) 1232 SCTP_PRINTF("\n"); 1233 } 1234 1235 int 1236 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1237 { 1238 /* mapping array needs to grow */ 1239 uint8_t *new_array1, *new_array2; 1240 uint32_t new_size; 1241 1242 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1243 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1244 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1245 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1246 /* can't get more, forget it */ 1247 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1248 if (new_array1) { 1249 SCTP_FREE(new_array1, SCTP_M_MAP); 1250 } 1251 if (new_array2) { 1252 SCTP_FREE(new_array2, SCTP_M_MAP); 1253 } 1254 return (-1); 1255 } 1256 memset(new_array1, 0, new_size); 1257 memset(new_array2, 0, new_size); 1258 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1259 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1260 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1261 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1262 asoc->mapping_array = new_array1; 1263 asoc->nr_mapping_array = new_array2; 1264 asoc->mapping_array_size = new_size; 1265 return (0); 1266 } 1267 1268 1269 static void 1270 sctp_iterator_work(struct sctp_iterator *it) 1271 { 1272 int iteration_count = 0; 1273 int inp_skip = 0; 1274 int first_in = 1; 1275 struct sctp_inpcb *tinp; 1276 1277 SCTP_INP_INFO_RLOCK(); 1278 SCTP_ITERATOR_LOCK(); 1279 sctp_it_ctl.cur_it = it; 1280 if (it->inp) { 1281 SCTP_INP_RLOCK(it->inp); 1282 SCTP_INP_DECR_REF(it->inp); 1283 } 1284 if (it->inp == NULL) { 1285 /* iterator is complete */ 1286 done_with_iterator: 1287 sctp_it_ctl.cur_it = NULL; 1288 SCTP_ITERATOR_UNLOCK(); 1289 SCTP_INP_INFO_RUNLOCK(); 1290 if (it->function_atend != NULL) { 1291 (*it->function_atend) (it->pointer, it->val); 1292 } 1293 SCTP_FREE(it, SCTP_M_ITER); 1294 return; 1295 } 1296 select_a_new_ep: 1297 if (first_in) { 1298 first_in = 0; 1299 } else { 1300 SCTP_INP_RLOCK(it->inp); 1301 } 1302 while (((it->pcb_flags) && 1303 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1304 ((it->pcb_features) && 1305 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1306 /* endpoint flags or features don't match, so keep looking */ 1307 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1308 SCTP_INP_RUNLOCK(it->inp); 1309 goto done_with_iterator; 1310 } 1311 tinp = it->inp; 1312 it->inp = LIST_NEXT(it->inp, sctp_list); 1313 SCTP_INP_RUNLOCK(tinp); 1314 if (it->inp == NULL) { 1315 goto done_with_iterator; 1316 } 1317 SCTP_INP_RLOCK(it->inp); 1318 } 1319 /* now go through each assoc which is in the desired state */ 1320 if (it->done_current_ep == 0) { 1321 if (it->function_inp != NULL) 1322 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1323 it->done_current_ep = 1; 1324 } 1325 if (it->stcb == NULL) { 1326 /* run the per instance function */ 1327 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1328 } 1329 if ((inp_skip) || it->stcb == NULL) { 1330 if (it->function_inp_end != NULL) { 1331 inp_skip = (*it->function_inp_end) (it->inp, 1332 it->pointer, 1333 it->val); 1334 } 1335 SCTP_INP_RUNLOCK(it->inp); 1336 goto no_stcb; 1337 } 1338 while (it->stcb) { 1339 SCTP_TCB_LOCK(it->stcb); 1340 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1341 /* not in the right state... keep looking */ 1342 SCTP_TCB_UNLOCK(it->stcb); 1343 goto next_assoc; 1344 } 1345 /* see if we have limited out the iterator loop */ 1346 iteration_count++; 1347 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1348 /* Pause to let others grab the lock */ 1349 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1350 SCTP_TCB_UNLOCK(it->stcb); 1351 SCTP_INP_INCR_REF(it->inp); 1352 SCTP_INP_RUNLOCK(it->inp); 1353 SCTP_ITERATOR_UNLOCK(); 1354 SCTP_INP_INFO_RUNLOCK(); 1355 SCTP_INP_INFO_RLOCK(); 1356 SCTP_ITERATOR_LOCK(); 1357 if (sctp_it_ctl.iterator_flags) { 1358 /* We won't be staying here */ 1359 SCTP_INP_DECR_REF(it->inp); 1360 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1361 if (sctp_it_ctl.iterator_flags & 1362 SCTP_ITERATOR_STOP_CUR_IT) { 1363 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1364 goto done_with_iterator; 1365 } 1366 if (sctp_it_ctl.iterator_flags & 1367 SCTP_ITERATOR_STOP_CUR_INP) { 1368 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1369 goto no_stcb; 1370 } 1371 /* If we reach here huh? */ 1372 SCTP_PRINTF("Unknown it ctl flag %x\n", 1373 sctp_it_ctl.iterator_flags); 1374 sctp_it_ctl.iterator_flags = 0; 1375 } 1376 SCTP_INP_RLOCK(it->inp); 1377 SCTP_INP_DECR_REF(it->inp); 1378 SCTP_TCB_LOCK(it->stcb); 1379 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1380 iteration_count = 0; 1381 } 1382 /* run function on this one */ 1383 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1384 1385 /* 1386 * we lie here, it really needs to have its own type but 1387 * first I must verify that this won't effect things :-0 1388 */ 1389 if (it->no_chunk_output == 0) 1390 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1391 1392 SCTP_TCB_UNLOCK(it->stcb); 1393 next_assoc: 1394 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1395 if (it->stcb == NULL) { 1396 /* Run last function */ 1397 if (it->function_inp_end != NULL) { 1398 inp_skip = (*it->function_inp_end) (it->inp, 1399 it->pointer, 1400 it->val); 1401 } 1402 } 1403 } 1404 SCTP_INP_RUNLOCK(it->inp); 1405 no_stcb: 1406 /* done with all assocs on this endpoint, move on to next endpoint */ 1407 it->done_current_ep = 0; 1408 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1409 it->inp = NULL; 1410 } else { 1411 it->inp = LIST_NEXT(it->inp, sctp_list); 1412 } 1413 if (it->inp == NULL) { 1414 goto done_with_iterator; 1415 } 1416 goto select_a_new_ep; 1417 } 1418 1419 void 1420 sctp_iterator_worker(void) 1421 { 1422 struct sctp_iterator *it, *nit; 1423 1424 /* This function is called with the WQ lock in place */ 1425 1426 sctp_it_ctl.iterator_running = 1; 1427 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1428 /* now lets work on this one */ 1429 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1430 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1431 CURVNET_SET(it->vn); 1432 sctp_iterator_work(it); 1433 CURVNET_RESTORE(); 1434 SCTP_IPI_ITERATOR_WQ_LOCK(); 1435 /* sa_ignore FREED_MEMORY */ 1436 } 1437 sctp_it_ctl.iterator_running = 0; 1438 return; 1439 } 1440 1441 1442 static void 1443 sctp_handle_addr_wq(void) 1444 { 1445 /* deal with the ADDR wq from the rtsock calls */ 1446 struct sctp_laddr *wi, *nwi; 1447 struct sctp_asconf_iterator *asc; 1448 1449 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1450 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1451 if (asc == NULL) { 1452 /* Try later, no memory */ 1453 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1454 (struct sctp_inpcb *)NULL, 1455 (struct sctp_tcb *)NULL, 1456 (struct sctp_nets *)NULL); 1457 return; 1458 } 1459 LIST_INIT(&asc->list_of_work); 1460 asc->cnt = 0; 1461 1462 SCTP_WQ_ADDR_LOCK(); 1463 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1464 LIST_REMOVE(wi, sctp_nxt_addr); 1465 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1466 asc->cnt++; 1467 } 1468 SCTP_WQ_ADDR_UNLOCK(); 1469 1470 if (asc->cnt == 0) { 1471 SCTP_FREE(asc, SCTP_M_ASC_IT); 1472 } else { 1473 int ret; 1474 1475 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1476 sctp_asconf_iterator_stcb, 1477 NULL, /* No ep end for boundall */ 1478 SCTP_PCB_FLAGS_BOUNDALL, 1479 SCTP_PCB_ANY_FEATURES, 1480 SCTP_ASOC_ANY_STATE, 1481 (void *)asc, 0, 1482 sctp_asconf_iterator_end, NULL, 0); 1483 if (ret) { 1484 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1485 /* Freeing if we are stopping or put back on the 1486 * addr_wq. */ 1487 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1488 sctp_asconf_iterator_end(asc, 0); 1489 } else { 1490 SCTP_WQ_ADDR_LOCK(); 1491 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1492 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1493 } 1494 SCTP_WQ_ADDR_UNLOCK(); 1495 SCTP_FREE(asc, SCTP_M_ASC_IT); 1496 } 1497 } 1498 } 1499 } 1500 1501 void 1502 sctp_timeout_handler(void *t) 1503 { 1504 struct sctp_inpcb *inp; 1505 struct sctp_tcb *stcb; 1506 struct sctp_nets *net; 1507 struct sctp_timer *tmr; 1508 struct mbuf *op_err; 1509 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1510 struct socket *so; 1511 #endif 1512 int did_output; 1513 int type; 1514 1515 tmr = (struct sctp_timer *)t; 1516 inp = (struct sctp_inpcb *)tmr->ep; 1517 stcb = (struct sctp_tcb *)tmr->tcb; 1518 net = (struct sctp_nets *)tmr->net; 1519 CURVNET_SET((struct vnet *)tmr->vnet); 1520 did_output = 1; 1521 1522 #ifdef SCTP_AUDITING_ENABLED 1523 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1524 sctp_auditing(3, inp, stcb, net); 1525 #endif 1526 1527 /* sanity checks... */ 1528 if (tmr->self != (void *)tmr) { 1529 /* 1530 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n", 1531 * (void *)tmr); 1532 */ 1533 CURVNET_RESTORE(); 1534 return; 1535 } 1536 tmr->stopped_from = 0xa001; 1537 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1538 /* 1539 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n", 1540 * tmr->type); 1541 */ 1542 CURVNET_RESTORE(); 1543 return; 1544 } 1545 tmr->stopped_from = 0xa002; 1546 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1547 CURVNET_RESTORE(); 1548 return; 1549 } 1550 /* if this is an iterator timeout, get the struct and clear inp */ 1551 tmr->stopped_from = 0xa003; 1552 if (inp) { 1553 SCTP_INP_INCR_REF(inp); 1554 if ((inp->sctp_socket == NULL) && 1555 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1556 (tmr->type != SCTP_TIMER_TYPE_INIT) && 1557 (tmr->type != SCTP_TIMER_TYPE_SEND) && 1558 (tmr->type != SCTP_TIMER_TYPE_RECV) && 1559 (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) && 1560 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1561 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1562 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1563 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1564 ) { 1565 SCTP_INP_DECR_REF(inp); 1566 CURVNET_RESTORE(); 1567 return; 1568 } 1569 } 1570 tmr->stopped_from = 0xa004; 1571 if (stcb) { 1572 atomic_add_int(&stcb->asoc.refcnt, 1); 1573 if (stcb->asoc.state == 0) { 1574 atomic_add_int(&stcb->asoc.refcnt, -1); 1575 if (inp) { 1576 SCTP_INP_DECR_REF(inp); 1577 } 1578 CURVNET_RESTORE(); 1579 return; 1580 } 1581 } 1582 type = tmr->type; 1583 tmr->stopped_from = 0xa005; 1584 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type); 1585 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1586 if (inp) { 1587 SCTP_INP_DECR_REF(inp); 1588 } 1589 if (stcb) { 1590 atomic_add_int(&stcb->asoc.refcnt, -1); 1591 } 1592 CURVNET_RESTORE(); 1593 return; 1594 } 1595 tmr->stopped_from = 0xa006; 1596 1597 if (stcb) { 1598 SCTP_TCB_LOCK(stcb); 1599 atomic_add_int(&stcb->asoc.refcnt, -1); 1600 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1601 ((stcb->asoc.state == 0) || 1602 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1603 SCTP_TCB_UNLOCK(stcb); 1604 if (inp) { 1605 SCTP_INP_DECR_REF(inp); 1606 } 1607 CURVNET_RESTORE(); 1608 return; 1609 } 1610 } 1611 /* record in stopped what t-o occurred */ 1612 tmr->stopped_from = type; 1613 1614 /* mark as being serviced now */ 1615 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1616 /* 1617 * Callout has been rescheduled. 1618 */ 1619 goto get_out; 1620 } 1621 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1622 /* 1623 * Not active, so no action. 1624 */ 1625 goto get_out; 1626 } 1627 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1628 1629 /* call the handler for the appropriate timer type */ 1630 switch (type) { 1631 case SCTP_TIMER_TYPE_ZERO_COPY: 1632 if (inp == NULL) { 1633 break; 1634 } 1635 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 1636 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 1637 } 1638 break; 1639 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 1640 if (inp == NULL) { 1641 break; 1642 } 1643 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 1644 SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket); 1645 } 1646 break; 1647 case SCTP_TIMER_TYPE_ADDR_WQ: 1648 sctp_handle_addr_wq(); 1649 break; 1650 case SCTP_TIMER_TYPE_SEND: 1651 if ((stcb == NULL) || (inp == NULL)) { 1652 break; 1653 } 1654 SCTP_STAT_INCR(sctps_timodata); 1655 stcb->asoc.timodata++; 1656 stcb->asoc.num_send_timers_up--; 1657 if (stcb->asoc.num_send_timers_up < 0) { 1658 stcb->asoc.num_send_timers_up = 0; 1659 } 1660 SCTP_TCB_LOCK_ASSERT(stcb); 1661 if (sctp_t3rxt_timer(inp, stcb, net)) { 1662 /* no need to unlock on tcb its gone */ 1663 1664 goto out_decr; 1665 } 1666 SCTP_TCB_LOCK_ASSERT(stcb); 1667 #ifdef SCTP_AUDITING_ENABLED 1668 sctp_auditing(4, inp, stcb, net); 1669 #endif 1670 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1671 if ((stcb->asoc.num_send_timers_up == 0) && 1672 (stcb->asoc.sent_queue_cnt > 0)) { 1673 struct sctp_tmit_chunk *chk; 1674 1675 /* 1676 * safeguard. If there on some on the sent queue 1677 * somewhere but no timers running something is 1678 * wrong... so we start a timer on the first chunk 1679 * on the send queue on whatever net it is sent to. 1680 */ 1681 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1682 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1683 chk->whoTo); 1684 } 1685 break; 1686 case SCTP_TIMER_TYPE_INIT: 1687 if ((stcb == NULL) || (inp == NULL)) { 1688 break; 1689 } 1690 SCTP_STAT_INCR(sctps_timoinit); 1691 stcb->asoc.timoinit++; 1692 if (sctp_t1init_timer(inp, stcb, net)) { 1693 /* no need to unlock on tcb its gone */ 1694 goto out_decr; 1695 } 1696 /* We do output but not here */ 1697 did_output = 0; 1698 break; 1699 case SCTP_TIMER_TYPE_RECV: 1700 if ((stcb == NULL) || (inp == NULL)) { 1701 break; 1702 } 1703 SCTP_STAT_INCR(sctps_timosack); 1704 stcb->asoc.timosack++; 1705 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1706 #ifdef SCTP_AUDITING_ENABLED 1707 sctp_auditing(4, inp, stcb, net); 1708 #endif 1709 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1710 break; 1711 case SCTP_TIMER_TYPE_SHUTDOWN: 1712 if ((stcb == NULL) || (inp == NULL)) { 1713 break; 1714 } 1715 if (sctp_shutdown_timer(inp, stcb, net)) { 1716 /* no need to unlock on tcb its gone */ 1717 goto out_decr; 1718 } 1719 SCTP_STAT_INCR(sctps_timoshutdown); 1720 stcb->asoc.timoshutdown++; 1721 #ifdef SCTP_AUDITING_ENABLED 1722 sctp_auditing(4, inp, stcb, net); 1723 #endif 1724 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1725 break; 1726 case SCTP_TIMER_TYPE_HEARTBEAT: 1727 if ((stcb == NULL) || (inp == NULL) || (net == NULL)) { 1728 break; 1729 } 1730 SCTP_STAT_INCR(sctps_timoheartbeat); 1731 stcb->asoc.timoheartbeat++; 1732 if (sctp_heartbeat_timer(inp, stcb, net)) { 1733 /* no need to unlock on tcb its gone */ 1734 goto out_decr; 1735 } 1736 #ifdef SCTP_AUDITING_ENABLED 1737 sctp_auditing(4, inp, stcb, net); 1738 #endif 1739 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1740 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1741 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1742 } 1743 break; 1744 case SCTP_TIMER_TYPE_COOKIE: 1745 if ((stcb == NULL) || (inp == NULL)) { 1746 break; 1747 } 1748 if (sctp_cookie_timer(inp, stcb, net)) { 1749 /* no need to unlock on tcb its gone */ 1750 goto out_decr; 1751 } 1752 SCTP_STAT_INCR(sctps_timocookie); 1753 stcb->asoc.timocookie++; 1754 #ifdef SCTP_AUDITING_ENABLED 1755 sctp_auditing(4, inp, stcb, net); 1756 #endif 1757 /* 1758 * We consider T3 and Cookie timer pretty much the same with 1759 * respect to where from in chunk_output. 1760 */ 1761 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1762 break; 1763 case SCTP_TIMER_TYPE_NEWCOOKIE: 1764 { 1765 struct timeval tv; 1766 int i, secret; 1767 1768 if (inp == NULL) { 1769 break; 1770 } 1771 SCTP_STAT_INCR(sctps_timosecret); 1772 (void)SCTP_GETTIME_TIMEVAL(&tv); 1773 SCTP_INP_WLOCK(inp); 1774 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1775 inp->sctp_ep.last_secret_number = 1776 inp->sctp_ep.current_secret_number; 1777 inp->sctp_ep.current_secret_number++; 1778 if (inp->sctp_ep.current_secret_number >= 1779 SCTP_HOW_MANY_SECRETS) { 1780 inp->sctp_ep.current_secret_number = 0; 1781 } 1782 secret = (int)inp->sctp_ep.current_secret_number; 1783 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1784 inp->sctp_ep.secret_key[secret][i] = 1785 sctp_select_initial_TSN(&inp->sctp_ep); 1786 } 1787 SCTP_INP_WUNLOCK(inp); 1788 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1789 } 1790 did_output = 0; 1791 break; 1792 case SCTP_TIMER_TYPE_PATHMTURAISE: 1793 if ((stcb == NULL) || (inp == NULL)) { 1794 break; 1795 } 1796 SCTP_STAT_INCR(sctps_timopathmtu); 1797 sctp_pathmtu_timer(inp, stcb, net); 1798 did_output = 0; 1799 break; 1800 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1801 if ((stcb == NULL) || (inp == NULL)) { 1802 break; 1803 } 1804 if (sctp_shutdownack_timer(inp, stcb, net)) { 1805 /* no need to unlock on tcb its gone */ 1806 goto out_decr; 1807 } 1808 SCTP_STAT_INCR(sctps_timoshutdownack); 1809 stcb->asoc.timoshutdownack++; 1810 #ifdef SCTP_AUDITING_ENABLED 1811 sctp_auditing(4, inp, stcb, net); 1812 #endif 1813 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1814 break; 1815 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1816 if ((stcb == NULL) || (inp == NULL)) { 1817 break; 1818 } 1819 SCTP_STAT_INCR(sctps_timoshutdownguard); 1820 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1821 "Shutdown guard timer expired"); 1822 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1823 /* no need to unlock on tcb its gone */ 1824 goto out_decr; 1825 1826 case SCTP_TIMER_TYPE_STRRESET: 1827 if ((stcb == NULL) || (inp == NULL)) { 1828 break; 1829 } 1830 if (sctp_strreset_timer(inp, stcb, net)) { 1831 /* no need to unlock on tcb its gone */ 1832 goto out_decr; 1833 } 1834 SCTP_STAT_INCR(sctps_timostrmrst); 1835 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 1836 break; 1837 case SCTP_TIMER_TYPE_ASCONF: 1838 if ((stcb == NULL) || (inp == NULL)) { 1839 break; 1840 } 1841 if (sctp_asconf_timer(inp, stcb, net)) { 1842 /* no need to unlock on tcb its gone */ 1843 goto out_decr; 1844 } 1845 SCTP_STAT_INCR(sctps_timoasconf); 1846 #ifdef SCTP_AUDITING_ENABLED 1847 sctp_auditing(4, inp, stcb, net); 1848 #endif 1849 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1850 break; 1851 case SCTP_TIMER_TYPE_PRIM_DELETED: 1852 if ((stcb == NULL) || (inp == NULL)) { 1853 break; 1854 } 1855 sctp_delete_prim_timer(inp, stcb, net); 1856 SCTP_STAT_INCR(sctps_timodelprim); 1857 break; 1858 1859 case SCTP_TIMER_TYPE_AUTOCLOSE: 1860 if ((stcb == NULL) || (inp == NULL)) { 1861 break; 1862 } 1863 SCTP_STAT_INCR(sctps_timoautoclose); 1864 sctp_autoclose_timer(inp, stcb, net); 1865 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1866 did_output = 0; 1867 break; 1868 case SCTP_TIMER_TYPE_ASOCKILL: 1869 if ((stcb == NULL) || (inp == NULL)) { 1870 break; 1871 } 1872 SCTP_STAT_INCR(sctps_timoassockill); 1873 /* Can we free it yet? */ 1874 SCTP_INP_DECR_REF(inp); 1875 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 1876 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 1877 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1878 so = SCTP_INP_SO(inp); 1879 atomic_add_int(&stcb->asoc.refcnt, 1); 1880 SCTP_TCB_UNLOCK(stcb); 1881 SCTP_SOCKET_LOCK(so, 1); 1882 SCTP_TCB_LOCK(stcb); 1883 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1884 #endif 1885 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 1886 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 1887 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1888 SCTP_SOCKET_UNLOCK(so, 1); 1889 #endif 1890 /* 1891 * free asoc, always unlocks (or destroy's) so prevent 1892 * duplicate unlock or unlock of a free mtx :-0 1893 */ 1894 stcb = NULL; 1895 goto out_no_decr; 1896 case SCTP_TIMER_TYPE_INPKILL: 1897 SCTP_STAT_INCR(sctps_timoinpkill); 1898 if (inp == NULL) { 1899 break; 1900 } 1901 /* 1902 * special case, take away our increment since WE are the 1903 * killer 1904 */ 1905 SCTP_INP_DECR_REF(inp); 1906 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 1907 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 1908 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 1909 SCTP_CALLED_FROM_INPKILL_TIMER); 1910 inp = NULL; 1911 goto out_no_decr; 1912 default: 1913 SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n", 1914 type); 1915 break; 1916 } 1917 #ifdef SCTP_AUDITING_ENABLED 1918 sctp_audit_log(0xF1, (uint8_t) type); 1919 if (inp) 1920 sctp_auditing(5, inp, stcb, net); 1921 #endif 1922 if ((did_output) && stcb) { 1923 /* 1924 * Now we need to clean up the control chunk chain if an 1925 * ECNE is on it. It must be marked as UNSENT again so next 1926 * call will continue to send it until such time that we get 1927 * a CWR, to remove it. It is, however, less likely that we 1928 * will find a ecn echo on the chain though. 1929 */ 1930 sctp_fix_ecn_echo(&stcb->asoc); 1931 } 1932 get_out: 1933 if (stcb) { 1934 SCTP_TCB_UNLOCK(stcb); 1935 } 1936 out_decr: 1937 if (inp) { 1938 SCTP_INP_DECR_REF(inp); 1939 } 1940 out_no_decr: 1941 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type); 1942 CURVNET_RESTORE(); 1943 } 1944 1945 void 1946 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1947 struct sctp_nets *net) 1948 { 1949 uint32_t to_ticks; 1950 struct sctp_timer *tmr; 1951 1952 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) 1953 return; 1954 1955 tmr = NULL; 1956 if (stcb) { 1957 SCTP_TCB_LOCK_ASSERT(stcb); 1958 } 1959 switch (t_type) { 1960 case SCTP_TIMER_TYPE_ZERO_COPY: 1961 tmr = &inp->sctp_ep.zero_copy_timer; 1962 to_ticks = SCTP_ZERO_COPY_TICK_DELAY; 1963 break; 1964 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 1965 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 1966 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY; 1967 break; 1968 case SCTP_TIMER_TYPE_ADDR_WQ: 1969 /* Only 1 tick away :-) */ 1970 tmr = &SCTP_BASE_INFO(addr_wq_timer); 1971 to_ticks = SCTP_ADDRESS_TICK_DELAY; 1972 break; 1973 case SCTP_TIMER_TYPE_SEND: 1974 /* Here we use the RTO timer */ 1975 { 1976 int rto_val; 1977 1978 if ((stcb == NULL) || (net == NULL)) { 1979 return; 1980 } 1981 tmr = &net->rxt_timer; 1982 if (net->RTO == 0) { 1983 rto_val = stcb->asoc.initial_rto; 1984 } else { 1985 rto_val = net->RTO; 1986 } 1987 to_ticks = MSEC_TO_TICKS(rto_val); 1988 } 1989 break; 1990 case SCTP_TIMER_TYPE_INIT: 1991 /* 1992 * Here we use the INIT timer default usually about 1 1993 * minute. 1994 */ 1995 if ((stcb == NULL) || (net == NULL)) { 1996 return; 1997 } 1998 tmr = &net->rxt_timer; 1999 if (net->RTO == 0) { 2000 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2001 } else { 2002 to_ticks = MSEC_TO_TICKS(net->RTO); 2003 } 2004 break; 2005 case SCTP_TIMER_TYPE_RECV: 2006 /* 2007 * Here we use the Delayed-Ack timer value from the inp 2008 * ususually about 200ms. 2009 */ 2010 if (stcb == NULL) { 2011 return; 2012 } 2013 tmr = &stcb->asoc.dack_timer; 2014 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 2015 break; 2016 case SCTP_TIMER_TYPE_SHUTDOWN: 2017 /* Here we use the RTO of the destination. */ 2018 if ((stcb == NULL) || (net == NULL)) { 2019 return; 2020 } 2021 if (net->RTO == 0) { 2022 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2023 } else { 2024 to_ticks = MSEC_TO_TICKS(net->RTO); 2025 } 2026 tmr = &net->rxt_timer; 2027 break; 2028 case SCTP_TIMER_TYPE_HEARTBEAT: 2029 /* 2030 * the net is used here so that we can add in the RTO. Even 2031 * though we use a different timer. We also add the HB timer 2032 * PLUS a random jitter. 2033 */ 2034 if ((stcb == NULL) || (net == NULL)) { 2035 return; 2036 } else { 2037 uint32_t rndval; 2038 uint32_t jitter; 2039 2040 if ((net->dest_state & SCTP_ADDR_NOHB) && 2041 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2042 return; 2043 } 2044 if (net->RTO == 0) { 2045 to_ticks = stcb->asoc.initial_rto; 2046 } else { 2047 to_ticks = net->RTO; 2048 } 2049 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2050 jitter = rndval % to_ticks; 2051 if (jitter >= (to_ticks >> 1)) { 2052 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2053 } else { 2054 to_ticks = to_ticks - jitter; 2055 } 2056 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2057 !(net->dest_state & SCTP_ADDR_PF)) { 2058 to_ticks += net->heart_beat_delay; 2059 } 2060 /* 2061 * Now we must convert the to_ticks that are now in 2062 * ms to ticks. 2063 */ 2064 to_ticks = MSEC_TO_TICKS(to_ticks); 2065 tmr = &net->hb_timer; 2066 } 2067 break; 2068 case SCTP_TIMER_TYPE_COOKIE: 2069 /* 2070 * Here we can use the RTO timer from the network since one 2071 * RTT was compelete. If a retran happened then we will be 2072 * using the RTO initial value. 2073 */ 2074 if ((stcb == NULL) || (net == NULL)) { 2075 return; 2076 } 2077 if (net->RTO == 0) { 2078 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2079 } else { 2080 to_ticks = MSEC_TO_TICKS(net->RTO); 2081 } 2082 tmr = &net->rxt_timer; 2083 break; 2084 case SCTP_TIMER_TYPE_NEWCOOKIE: 2085 /* 2086 * nothing needed but the endpoint here ususually about 60 2087 * minutes. 2088 */ 2089 tmr = &inp->sctp_ep.signature_change; 2090 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2091 break; 2092 case SCTP_TIMER_TYPE_ASOCKILL: 2093 if (stcb == NULL) { 2094 return; 2095 } 2096 tmr = &stcb->asoc.strreset_timer; 2097 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 2098 break; 2099 case SCTP_TIMER_TYPE_INPKILL: 2100 /* 2101 * The inp is setup to die. We re-use the signature_chage 2102 * timer since that has stopped and we are in the GONE 2103 * state. 2104 */ 2105 tmr = &inp->sctp_ep.signature_change; 2106 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 2107 break; 2108 case SCTP_TIMER_TYPE_PATHMTURAISE: 2109 /* 2110 * Here we use the value found in the EP for PMTU ususually 2111 * about 10 minutes. 2112 */ 2113 if ((stcb == NULL) || (net == NULL)) { 2114 return; 2115 } 2116 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2117 return; 2118 } 2119 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2120 tmr = &net->pmtu_timer; 2121 break; 2122 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2123 /* Here we use the RTO of the destination */ 2124 if ((stcb == NULL) || (net == NULL)) { 2125 return; 2126 } 2127 if (net->RTO == 0) { 2128 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2129 } else { 2130 to_ticks = MSEC_TO_TICKS(net->RTO); 2131 } 2132 tmr = &net->rxt_timer; 2133 break; 2134 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2135 /* 2136 * Here we use the endpoints shutdown guard timer usually 2137 * about 3 minutes. 2138 */ 2139 if (stcb == NULL) { 2140 return; 2141 } 2142 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2143 to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto); 2144 } else { 2145 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2146 } 2147 tmr = &stcb->asoc.shut_guard_timer; 2148 break; 2149 case SCTP_TIMER_TYPE_STRRESET: 2150 /* 2151 * Here the timer comes from the stcb but its value is from 2152 * the net's RTO. 2153 */ 2154 if ((stcb == NULL) || (net == NULL)) { 2155 return; 2156 } 2157 if (net->RTO == 0) { 2158 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2159 } else { 2160 to_ticks = MSEC_TO_TICKS(net->RTO); 2161 } 2162 tmr = &stcb->asoc.strreset_timer; 2163 break; 2164 case SCTP_TIMER_TYPE_ASCONF: 2165 /* 2166 * Here the timer comes from the stcb but its value is from 2167 * the net's RTO. 2168 */ 2169 if ((stcb == NULL) || (net == NULL)) { 2170 return; 2171 } 2172 if (net->RTO == 0) { 2173 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2174 } else { 2175 to_ticks = MSEC_TO_TICKS(net->RTO); 2176 } 2177 tmr = &stcb->asoc.asconf_timer; 2178 break; 2179 case SCTP_TIMER_TYPE_PRIM_DELETED: 2180 if ((stcb == NULL) || (net != NULL)) { 2181 return; 2182 } 2183 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2184 tmr = &stcb->asoc.delete_prim_timer; 2185 break; 2186 case SCTP_TIMER_TYPE_AUTOCLOSE: 2187 if (stcb == NULL) { 2188 return; 2189 } 2190 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2191 /* 2192 * Really an error since stcb is NOT set to 2193 * autoclose 2194 */ 2195 return; 2196 } 2197 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2198 tmr = &stcb->asoc.autoclose_timer; 2199 break; 2200 default: 2201 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2202 __func__, t_type); 2203 return; 2204 break; 2205 } 2206 if ((to_ticks <= 0) || (tmr == NULL)) { 2207 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n", 2208 __func__, t_type, to_ticks, (void *)tmr); 2209 return; 2210 } 2211 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2212 /* 2213 * we do NOT allow you to have it already running. if it is 2214 * we leave the current one up unchanged 2215 */ 2216 return; 2217 } 2218 /* At this point we can proceed */ 2219 if (t_type == SCTP_TIMER_TYPE_SEND) { 2220 stcb->asoc.num_send_timers_up++; 2221 } 2222 tmr->stopped_from = 0; 2223 tmr->type = t_type; 2224 tmr->ep = (void *)inp; 2225 tmr->tcb = (void *)stcb; 2226 tmr->net = (void *)net; 2227 tmr->self = (void *)tmr; 2228 tmr->vnet = (void *)curvnet; 2229 tmr->ticks = sctp_get_tick_count(); 2230 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2231 return; 2232 } 2233 2234 void 2235 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2236 struct sctp_nets *net, uint32_t from) 2237 { 2238 struct sctp_timer *tmr; 2239 2240 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2241 (inp == NULL)) 2242 return; 2243 2244 tmr = NULL; 2245 if (stcb) { 2246 SCTP_TCB_LOCK_ASSERT(stcb); 2247 } 2248 switch (t_type) { 2249 case SCTP_TIMER_TYPE_ZERO_COPY: 2250 tmr = &inp->sctp_ep.zero_copy_timer; 2251 break; 2252 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 2253 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 2254 break; 2255 case SCTP_TIMER_TYPE_ADDR_WQ: 2256 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2257 break; 2258 case SCTP_TIMER_TYPE_SEND: 2259 if ((stcb == NULL) || (net == NULL)) { 2260 return; 2261 } 2262 tmr = &net->rxt_timer; 2263 break; 2264 case SCTP_TIMER_TYPE_INIT: 2265 if ((stcb == NULL) || (net == NULL)) { 2266 return; 2267 } 2268 tmr = &net->rxt_timer; 2269 break; 2270 case SCTP_TIMER_TYPE_RECV: 2271 if (stcb == NULL) { 2272 return; 2273 } 2274 tmr = &stcb->asoc.dack_timer; 2275 break; 2276 case SCTP_TIMER_TYPE_SHUTDOWN: 2277 if ((stcb == NULL) || (net == NULL)) { 2278 return; 2279 } 2280 tmr = &net->rxt_timer; 2281 break; 2282 case SCTP_TIMER_TYPE_HEARTBEAT: 2283 if ((stcb == NULL) || (net == NULL)) { 2284 return; 2285 } 2286 tmr = &net->hb_timer; 2287 break; 2288 case SCTP_TIMER_TYPE_COOKIE: 2289 if ((stcb == NULL) || (net == NULL)) { 2290 return; 2291 } 2292 tmr = &net->rxt_timer; 2293 break; 2294 case SCTP_TIMER_TYPE_NEWCOOKIE: 2295 /* nothing needed but the endpoint here */ 2296 tmr = &inp->sctp_ep.signature_change; 2297 /* 2298 * We re-use the newcookie timer for the INP kill timer. We 2299 * must assure that we do not kill it by accident. 2300 */ 2301 break; 2302 case SCTP_TIMER_TYPE_ASOCKILL: 2303 /* 2304 * Stop the asoc kill timer. 2305 */ 2306 if (stcb == NULL) { 2307 return; 2308 } 2309 tmr = &stcb->asoc.strreset_timer; 2310 break; 2311 2312 case SCTP_TIMER_TYPE_INPKILL: 2313 /* 2314 * The inp is setup to die. We re-use the signature_chage 2315 * timer since that has stopped and we are in the GONE 2316 * state. 2317 */ 2318 tmr = &inp->sctp_ep.signature_change; 2319 break; 2320 case SCTP_TIMER_TYPE_PATHMTURAISE: 2321 if ((stcb == NULL) || (net == NULL)) { 2322 return; 2323 } 2324 tmr = &net->pmtu_timer; 2325 break; 2326 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2327 if ((stcb == NULL) || (net == NULL)) { 2328 return; 2329 } 2330 tmr = &net->rxt_timer; 2331 break; 2332 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2333 if (stcb == NULL) { 2334 return; 2335 } 2336 tmr = &stcb->asoc.shut_guard_timer; 2337 break; 2338 case SCTP_TIMER_TYPE_STRRESET: 2339 if (stcb == NULL) { 2340 return; 2341 } 2342 tmr = &stcb->asoc.strreset_timer; 2343 break; 2344 case SCTP_TIMER_TYPE_ASCONF: 2345 if (stcb == NULL) { 2346 return; 2347 } 2348 tmr = &stcb->asoc.asconf_timer; 2349 break; 2350 case SCTP_TIMER_TYPE_PRIM_DELETED: 2351 if (stcb == NULL) { 2352 return; 2353 } 2354 tmr = &stcb->asoc.delete_prim_timer; 2355 break; 2356 case SCTP_TIMER_TYPE_AUTOCLOSE: 2357 if (stcb == NULL) { 2358 return; 2359 } 2360 tmr = &stcb->asoc.autoclose_timer; 2361 break; 2362 default: 2363 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2364 __func__, t_type); 2365 break; 2366 } 2367 if (tmr == NULL) { 2368 return; 2369 } 2370 if ((tmr->type != t_type) && tmr->type) { 2371 /* 2372 * Ok we have a timer that is under joint use. Cookie timer 2373 * per chance with the SEND timer. We therefore are NOT 2374 * running the timer that the caller wants stopped. So just 2375 * return. 2376 */ 2377 return; 2378 } 2379 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2380 stcb->asoc.num_send_timers_up--; 2381 if (stcb->asoc.num_send_timers_up < 0) { 2382 stcb->asoc.num_send_timers_up = 0; 2383 } 2384 } 2385 tmr->self = NULL; 2386 tmr->stopped_from = from; 2387 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2388 return; 2389 } 2390 2391 uint32_t 2392 sctp_calculate_len(struct mbuf *m) 2393 { 2394 uint32_t tlen = 0; 2395 struct mbuf *at; 2396 2397 at = m; 2398 while (at) { 2399 tlen += SCTP_BUF_LEN(at); 2400 at = SCTP_BUF_NEXT(at); 2401 } 2402 return (tlen); 2403 } 2404 2405 void 2406 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2407 struct sctp_association *asoc, uint32_t mtu) 2408 { 2409 /* 2410 * Reset the P-MTU size on this association, this involves changing 2411 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2412 * allow the DF flag to be cleared. 2413 */ 2414 struct sctp_tmit_chunk *chk; 2415 unsigned int eff_mtu, ovh; 2416 2417 asoc->smallest_mtu = mtu; 2418 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2419 ovh = SCTP_MIN_OVERHEAD; 2420 } else { 2421 ovh = SCTP_MIN_V4_OVERHEAD; 2422 } 2423 eff_mtu = mtu - ovh; 2424 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2425 if (chk->send_size > eff_mtu) { 2426 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2427 } 2428 } 2429 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2430 if (chk->send_size > eff_mtu) { 2431 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2432 } 2433 } 2434 } 2435 2436 2437 /* 2438 * given an association and starting time of the current RTT period return 2439 * RTO in number of msecs net should point to the current network 2440 */ 2441 2442 uint32_t 2443 sctp_calculate_rto(struct sctp_tcb *stcb, 2444 struct sctp_association *asoc, 2445 struct sctp_nets *net, 2446 struct timeval *told, 2447 int safe, int rtt_from_sack) 2448 { 2449 /*- 2450 * given an association and the starting time of the current RTT 2451 * period (in value1/value2) return RTO in number of msecs. 2452 */ 2453 int32_t rtt; /* RTT in ms */ 2454 uint32_t new_rto; 2455 int first_measure = 0; 2456 struct timeval now, then, *old; 2457 2458 /* Copy it out for sparc64 */ 2459 if (safe == sctp_align_unsafe_makecopy) { 2460 old = &then; 2461 memcpy(&then, told, sizeof(struct timeval)); 2462 } else if (safe == sctp_align_safe_nocopy) { 2463 old = told; 2464 } else { 2465 /* error */ 2466 SCTP_PRINTF("Huh, bad rto calc call\n"); 2467 return (0); 2468 } 2469 /************************/ 2470 /* 1. calculate new RTT */ 2471 /************************/ 2472 /* get the current time */ 2473 if (stcb->asoc.use_precise_time) { 2474 (void)SCTP_GETPTIME_TIMEVAL(&now); 2475 } else { 2476 (void)SCTP_GETTIME_TIMEVAL(&now); 2477 } 2478 timevalsub(&now, old); 2479 /* store the current RTT in us */ 2480 net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec + 2481 (uint64_t) now.tv_usec; 2482 2483 /* compute rtt in ms */ 2484 rtt = (int32_t) (net->rtt / 1000); 2485 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2486 /* Tell the CC module that a new update has just occurred 2487 * from a sack */ 2488 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2489 } 2490 /* 2491 * Do we need to determine the lan? We do this only on sacks i.e. 2492 * RTT being determined from data not non-data (HB/INIT->INITACK). 2493 */ 2494 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2495 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2496 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2497 net->lan_type = SCTP_LAN_INTERNET; 2498 } else { 2499 net->lan_type = SCTP_LAN_LOCAL; 2500 } 2501 } 2502 /***************************/ 2503 /* 2. update RTTVAR & SRTT */ 2504 /***************************/ 2505 /*- 2506 * Compute the scaled average lastsa and the 2507 * scaled variance lastsv as described in van Jacobson 2508 * Paper "Congestion Avoidance and Control", Annex A. 2509 * 2510 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2511 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar 2512 */ 2513 if (net->RTO_measured) { 2514 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2515 net->lastsa += rtt; 2516 if (rtt < 0) { 2517 rtt = -rtt; 2518 } 2519 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2520 net->lastsv += rtt; 2521 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2522 rto_logging(net, SCTP_LOG_RTTVAR); 2523 } 2524 } else { 2525 /* First RTO measurment */ 2526 net->RTO_measured = 1; 2527 first_measure = 1; 2528 net->lastsa = rtt << SCTP_RTT_SHIFT; 2529 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2531 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2532 } 2533 } 2534 if (net->lastsv == 0) { 2535 net->lastsv = SCTP_CLOCK_GRANULARITY; 2536 } 2537 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2538 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2539 (stcb->asoc.sat_network_lockout == 0)) { 2540 stcb->asoc.sat_network = 1; 2541 } else if ((!first_measure) && stcb->asoc.sat_network) { 2542 stcb->asoc.sat_network = 0; 2543 stcb->asoc.sat_network_lockout = 1; 2544 } 2545 /* bound it, per C6/C7 in Section 5.3.1 */ 2546 if (new_rto < stcb->asoc.minrto) { 2547 new_rto = stcb->asoc.minrto; 2548 } 2549 if (new_rto > stcb->asoc.maxrto) { 2550 new_rto = stcb->asoc.maxrto; 2551 } 2552 /* we are now returning the RTO */ 2553 return (new_rto); 2554 } 2555 2556 /* 2557 * return a pointer to a contiguous piece of data from the given mbuf chain 2558 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2559 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2560 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 2561 */ 2562 caddr_t 2563 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2564 { 2565 uint32_t count; 2566 uint8_t *ptr; 2567 2568 ptr = in_ptr; 2569 if ((off < 0) || (len <= 0)) 2570 return (NULL); 2571 2572 /* find the desired start location */ 2573 while ((m != NULL) && (off > 0)) { 2574 if (off < SCTP_BUF_LEN(m)) 2575 break; 2576 off -= SCTP_BUF_LEN(m); 2577 m = SCTP_BUF_NEXT(m); 2578 } 2579 if (m == NULL) 2580 return (NULL); 2581 2582 /* is the current mbuf large enough (eg. contiguous)? */ 2583 if ((SCTP_BUF_LEN(m) - off) >= len) { 2584 return (mtod(m, caddr_t)+off); 2585 } else { 2586 /* else, it spans more than one mbuf, so save a temp copy... */ 2587 while ((m != NULL) && (len > 0)) { 2588 count = min(SCTP_BUF_LEN(m) - off, len); 2589 bcopy(mtod(m, caddr_t)+off, ptr, count); 2590 len -= count; 2591 ptr += count; 2592 off = 0; 2593 m = SCTP_BUF_NEXT(m); 2594 } 2595 if ((m == NULL) && (len > 0)) 2596 return (NULL); 2597 else 2598 return ((caddr_t)in_ptr); 2599 } 2600 } 2601 2602 2603 2604 struct sctp_paramhdr * 2605 sctp_get_next_param(struct mbuf *m, 2606 int offset, 2607 struct sctp_paramhdr *pull, 2608 int pull_limit) 2609 { 2610 /* This just provides a typed signature to Peter's Pull routine */ 2611 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2612 (uint8_t *) pull)); 2613 } 2614 2615 2616 struct mbuf * 2617 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2618 { 2619 struct mbuf *m_last; 2620 caddr_t dp; 2621 2622 if (padlen > 3) { 2623 return (NULL); 2624 } 2625 if (padlen <= M_TRAILINGSPACE(m)) { 2626 /* 2627 * The easy way. We hope the majority of the time we hit 2628 * here :) 2629 */ 2630 m_last = m; 2631 } else { 2632 /* Hard way we must grow the mbuf chain */ 2633 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 2634 if (m_last == NULL) { 2635 return (NULL); 2636 } 2637 SCTP_BUF_LEN(m_last) = 0; 2638 SCTP_BUF_NEXT(m_last) = NULL; 2639 SCTP_BUF_NEXT(m) = m_last; 2640 } 2641 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 2642 SCTP_BUF_LEN(m_last) += padlen; 2643 memset(dp, 0, padlen); 2644 return (m_last); 2645 } 2646 2647 struct mbuf * 2648 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2649 { 2650 /* find the last mbuf in chain and pad it */ 2651 struct mbuf *m_at; 2652 2653 if (last_mbuf != NULL) { 2654 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2655 } else { 2656 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2657 if (SCTP_BUF_NEXT(m_at) == NULL) { 2658 return (sctp_add_pad_tombuf(m_at, padval)); 2659 } 2660 } 2661 } 2662 return (NULL); 2663 } 2664 2665 static void 2666 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 2667 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked 2668 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2669 SCTP_UNUSED 2670 #endif 2671 ) 2672 { 2673 struct mbuf *m_notify; 2674 struct sctp_assoc_change *sac; 2675 struct sctp_queued_to_read *control; 2676 unsigned int notif_len; 2677 uint16_t abort_len; 2678 unsigned int i; 2679 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2680 struct socket *so; 2681 #endif 2682 2683 if (stcb == NULL) { 2684 return; 2685 } 2686 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2687 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 2688 if (abort != NULL) { 2689 abort_len = ntohs(abort->ch.chunk_length); 2690 } else { 2691 abort_len = 0; 2692 } 2693 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2694 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 2695 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2696 notif_len += abort_len; 2697 } 2698 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2699 if (m_notify == NULL) { 2700 /* Retry with smaller value. */ 2701 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 2702 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2703 if (m_notify == NULL) { 2704 goto set_error; 2705 } 2706 } 2707 SCTP_BUF_NEXT(m_notify) = NULL; 2708 sac = mtod(m_notify, struct sctp_assoc_change *); 2709 memset(sac, 0, notif_len); 2710 sac->sac_type = SCTP_ASSOC_CHANGE; 2711 sac->sac_flags = 0; 2712 sac->sac_length = sizeof(struct sctp_assoc_change); 2713 sac->sac_state = state; 2714 sac->sac_error = error; 2715 /* XXX verify these stream counts */ 2716 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2717 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2718 sac->sac_assoc_id = sctp_get_associd(stcb); 2719 if (notif_len > sizeof(struct sctp_assoc_change)) { 2720 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2721 i = 0; 2722 if (stcb->asoc.prsctp_supported == 1) { 2723 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 2724 } 2725 if (stcb->asoc.auth_supported == 1) { 2726 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 2727 } 2728 if (stcb->asoc.asconf_supported == 1) { 2729 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 2730 } 2731 if (stcb->asoc.idata_supported == 1) { 2732 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 2733 } 2734 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 2735 if (stcb->asoc.reconfig_supported == 1) { 2736 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 2737 } 2738 sac->sac_length += i; 2739 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2740 memcpy(sac->sac_info, abort, abort_len); 2741 sac->sac_length += abort_len; 2742 } 2743 } 2744 SCTP_BUF_LEN(m_notify) = sac->sac_length; 2745 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2746 0, 0, stcb->asoc.context, 0, 0, 0, 2747 m_notify); 2748 if (control != NULL) { 2749 control->length = SCTP_BUF_LEN(m_notify); 2750 /* not that we need this */ 2751 control->tail_mbuf = m_notify; 2752 control->spec_flags = M_NOTIFICATION; 2753 sctp_add_to_readq(stcb->sctp_ep, stcb, 2754 control, 2755 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 2756 so_locked); 2757 } else { 2758 sctp_m_freem(m_notify); 2759 } 2760 } 2761 /* 2762 * For 1-to-1 style sockets, we send up and error when an ABORT 2763 * comes in. 2764 */ 2765 set_error: 2766 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2767 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2768 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2769 SOCK_LOCK(stcb->sctp_socket); 2770 if (from_peer) { 2771 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 2772 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 2773 stcb->sctp_socket->so_error = ECONNREFUSED; 2774 } else { 2775 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 2776 stcb->sctp_socket->so_error = ECONNRESET; 2777 } 2778 } else { 2779 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) || 2780 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 2781 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 2782 stcb->sctp_socket->so_error = ETIMEDOUT; 2783 } else { 2784 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 2785 stcb->sctp_socket->so_error = ECONNABORTED; 2786 } 2787 } 2788 } 2789 /* Wake ANY sleepers */ 2790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2791 so = SCTP_INP_SO(stcb->sctp_ep); 2792 if (!so_locked) { 2793 atomic_add_int(&stcb->asoc.refcnt, 1); 2794 SCTP_TCB_UNLOCK(stcb); 2795 SCTP_SOCKET_LOCK(so, 1); 2796 SCTP_TCB_LOCK(stcb); 2797 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2798 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2799 SCTP_SOCKET_UNLOCK(so, 1); 2800 return; 2801 } 2802 } 2803 #endif 2804 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2805 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2806 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2807 socantrcvmore_locked(stcb->sctp_socket); 2808 } 2809 sorwakeup(stcb->sctp_socket); 2810 sowwakeup(stcb->sctp_socket); 2811 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2812 if (!so_locked) { 2813 SCTP_SOCKET_UNLOCK(so, 1); 2814 } 2815 #endif 2816 } 2817 2818 static void 2819 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2820 struct sockaddr *sa, uint32_t error, int so_locked 2821 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2822 SCTP_UNUSED 2823 #endif 2824 ) 2825 { 2826 struct mbuf *m_notify; 2827 struct sctp_paddr_change *spc; 2828 struct sctp_queued_to_read *control; 2829 2830 if ((stcb == NULL) || 2831 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 2832 /* event not enabled */ 2833 return; 2834 } 2835 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 2836 if (m_notify == NULL) 2837 return; 2838 SCTP_BUF_LEN(m_notify) = 0; 2839 spc = mtod(m_notify, struct sctp_paddr_change *); 2840 memset(spc, 0, sizeof(struct sctp_paddr_change)); 2841 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2842 spc->spc_flags = 0; 2843 spc->spc_length = sizeof(struct sctp_paddr_change); 2844 switch (sa->sa_family) { 2845 #ifdef INET 2846 case AF_INET: 2847 #ifdef INET6 2848 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 2849 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 2850 (struct sockaddr_in6 *)&spc->spc_aaddr); 2851 } else { 2852 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2853 } 2854 #else 2855 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2856 #endif 2857 break; 2858 #endif 2859 #ifdef INET6 2860 case AF_INET6: 2861 { 2862 struct sockaddr_in6 *sin6; 2863 2864 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2865 2866 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2867 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2868 if (sin6->sin6_scope_id == 0) { 2869 /* recover scope_id for user */ 2870 (void)sa6_recoverscope(sin6); 2871 } else { 2872 /* clear embedded scope_id for user */ 2873 in6_clearscope(&sin6->sin6_addr); 2874 } 2875 } 2876 break; 2877 } 2878 #endif 2879 default: 2880 /* TSNH */ 2881 break; 2882 } 2883 spc->spc_state = state; 2884 spc->spc_error = error; 2885 spc->spc_assoc_id = sctp_get_associd(stcb); 2886 2887 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2888 SCTP_BUF_NEXT(m_notify) = NULL; 2889 2890 /* append to socket */ 2891 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2892 0, 0, stcb->asoc.context, 0, 0, 0, 2893 m_notify); 2894 if (control == NULL) { 2895 /* no memory */ 2896 sctp_m_freem(m_notify); 2897 return; 2898 } 2899 control->length = SCTP_BUF_LEN(m_notify); 2900 control->spec_flags = M_NOTIFICATION; 2901 /* not that we need this */ 2902 control->tail_mbuf = m_notify; 2903 sctp_add_to_readq(stcb->sctp_ep, stcb, 2904 control, 2905 &stcb->sctp_socket->so_rcv, 1, 2906 SCTP_READ_LOCK_NOT_HELD, 2907 so_locked); 2908 } 2909 2910 2911 static void 2912 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 2913 struct sctp_tmit_chunk *chk, int so_locked 2914 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2915 SCTP_UNUSED 2916 #endif 2917 ) 2918 { 2919 struct mbuf *m_notify; 2920 struct sctp_send_failed *ssf; 2921 struct sctp_send_failed_event *ssfe; 2922 struct sctp_queued_to_read *control; 2923 struct sctp_chunkhdr *chkhdr; 2924 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 2925 2926 if ((stcb == NULL) || 2927 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2928 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2929 /* event not enabled */ 2930 return; 2931 } 2932 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2933 notifhdr_len = sizeof(struct sctp_send_failed_event); 2934 } else { 2935 notifhdr_len = sizeof(struct sctp_send_failed); 2936 } 2937 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 2938 if (m_notify == NULL) 2939 /* no space left */ 2940 return; 2941 SCTP_BUF_LEN(m_notify) = notifhdr_len; 2942 if (stcb->asoc.idata_supported) { 2943 chkhdr_len = sizeof(struct sctp_idata_chunk); 2944 } else { 2945 chkhdr_len = sizeof(struct sctp_data_chunk); 2946 } 2947 /* Use some defaults in case we can't access the chunk header */ 2948 if (chk->send_size >= chkhdr_len) { 2949 payload_len = chk->send_size - chkhdr_len; 2950 } else { 2951 payload_len = 0; 2952 } 2953 padding_len = 0; 2954 if (chk->data != NULL) { 2955 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 2956 if (chkhdr != NULL) { 2957 chk_len = ntohs(chkhdr->chunk_length); 2958 if ((chk_len >= chkhdr_len) && 2959 (chk->send_size >= chk_len) && 2960 (chk->send_size - chk_len < 4)) { 2961 padding_len = chk->send_size - chk_len; 2962 payload_len = chk->send_size - chkhdr_len - padding_len; 2963 } 2964 } 2965 } 2966 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2967 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2968 memset(ssfe, 0, notifhdr_len); 2969 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 2970 if (sent) { 2971 ssfe->ssfe_flags = SCTP_DATA_SENT; 2972 } else { 2973 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2974 } 2975 ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len); 2976 ssfe->ssfe_error = error; 2977 /* not exactly what the user sent in, but should be close :) */ 2978 ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number; 2979 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 2980 ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype; 2981 ssfe->ssfe_info.snd_context = chk->rec.data.context; 2982 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2983 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2984 } else { 2985 ssf = mtod(m_notify, struct sctp_send_failed *); 2986 memset(ssf, 0, notifhdr_len); 2987 ssf->ssf_type = SCTP_SEND_FAILED; 2988 if (sent) { 2989 ssf->ssf_flags = SCTP_DATA_SENT; 2990 } else { 2991 ssf->ssf_flags = SCTP_DATA_UNSENT; 2992 } 2993 ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len); 2994 ssf->ssf_error = error; 2995 /* not exactly what the user sent in, but should be close :) */ 2996 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2997 ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.stream_seq; 2998 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2999 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 3000 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3001 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3002 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3003 } 3004 if (chk->data != NULL) { 3005 /* Trim off the sctp chunk header (it should be there) */ 3006 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3007 m_adj(chk->data, chkhdr_len); 3008 m_adj(chk->data, -padding_len); 3009 sctp_mbuf_crush(chk->data); 3010 chk->send_size -= (chkhdr_len + padding_len); 3011 } 3012 } 3013 SCTP_BUF_NEXT(m_notify) = chk->data; 3014 /* Steal off the mbuf */ 3015 chk->data = NULL; 3016 /* 3017 * For this case, we check the actual socket buffer, since the assoc 3018 * is going away we don't want to overfill the socket buffer for a 3019 * non-reader 3020 */ 3021 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3022 sctp_m_freem(m_notify); 3023 return; 3024 } 3025 /* append to socket */ 3026 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3027 0, 0, stcb->asoc.context, 0, 0, 0, 3028 m_notify); 3029 if (control == NULL) { 3030 /* no memory */ 3031 sctp_m_freem(m_notify); 3032 return; 3033 } 3034 control->spec_flags = M_NOTIFICATION; 3035 sctp_add_to_readq(stcb->sctp_ep, stcb, 3036 control, 3037 &stcb->sctp_socket->so_rcv, 1, 3038 SCTP_READ_LOCK_NOT_HELD, 3039 so_locked); 3040 } 3041 3042 3043 static void 3044 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3045 struct sctp_stream_queue_pending *sp, int so_locked 3046 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3047 SCTP_UNUSED 3048 #endif 3049 ) 3050 { 3051 struct mbuf *m_notify; 3052 struct sctp_send_failed *ssf; 3053 struct sctp_send_failed_event *ssfe; 3054 struct sctp_queued_to_read *control; 3055 int notifhdr_len; 3056 3057 if ((stcb == NULL) || 3058 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3059 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3060 /* event not enabled */ 3061 return; 3062 } 3063 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3064 notifhdr_len = sizeof(struct sctp_send_failed_event); 3065 } else { 3066 notifhdr_len = sizeof(struct sctp_send_failed); 3067 } 3068 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3069 if (m_notify == NULL) { 3070 /* no space left */ 3071 return; 3072 } 3073 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3074 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3075 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3076 memset(ssfe, 0, notifhdr_len); 3077 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3078 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3079 ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length); 3080 ssfe->ssfe_error = error; 3081 /* not exactly what the user sent in, but should be close :) */ 3082 ssfe->ssfe_info.snd_sid = sp->stream; 3083 if (sp->some_taken) { 3084 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3085 } else { 3086 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3087 } 3088 ssfe->ssfe_info.snd_ppid = sp->ppid; 3089 ssfe->ssfe_info.snd_context = sp->context; 3090 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3091 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3092 } else { 3093 ssf = mtod(m_notify, struct sctp_send_failed *); 3094 memset(ssf, 0, notifhdr_len); 3095 ssf->ssf_type = SCTP_SEND_FAILED; 3096 ssf->ssf_flags = SCTP_DATA_UNSENT; 3097 ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length); 3098 ssf->ssf_error = error; 3099 /* not exactly what the user sent in, but should be close :) */ 3100 ssf->ssf_info.sinfo_stream = sp->stream; 3101 ssf->ssf_info.sinfo_ssn = 0; 3102 if (sp->some_taken) { 3103 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3104 } else { 3105 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3106 } 3107 ssf->ssf_info.sinfo_ppid = sp->ppid; 3108 ssf->ssf_info.sinfo_context = sp->context; 3109 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3110 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3111 } 3112 SCTP_BUF_NEXT(m_notify) = sp->data; 3113 3114 /* Steal off the mbuf */ 3115 sp->data = NULL; 3116 /* 3117 * For this case, we check the actual socket buffer, since the assoc 3118 * is going away we don't want to overfill the socket buffer for a 3119 * non-reader 3120 */ 3121 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3122 sctp_m_freem(m_notify); 3123 return; 3124 } 3125 /* append to socket */ 3126 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3127 0, 0, stcb->asoc.context, 0, 0, 0, 3128 m_notify); 3129 if (control == NULL) { 3130 /* no memory */ 3131 sctp_m_freem(m_notify); 3132 return; 3133 } 3134 control->spec_flags = M_NOTIFICATION; 3135 sctp_add_to_readq(stcb->sctp_ep, stcb, 3136 control, 3137 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3138 } 3139 3140 3141 3142 static void 3143 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3144 { 3145 struct mbuf *m_notify; 3146 struct sctp_adaptation_event *sai; 3147 struct sctp_queued_to_read *control; 3148 3149 if ((stcb == NULL) || 3150 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3151 /* event not enabled */ 3152 return; 3153 } 3154 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3155 if (m_notify == NULL) 3156 /* no space left */ 3157 return; 3158 SCTP_BUF_LEN(m_notify) = 0; 3159 sai = mtod(m_notify, struct sctp_adaptation_event *); 3160 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3161 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3162 sai->sai_flags = 0; 3163 sai->sai_length = sizeof(struct sctp_adaptation_event); 3164 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3165 sai->sai_assoc_id = sctp_get_associd(stcb); 3166 3167 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3168 SCTP_BUF_NEXT(m_notify) = NULL; 3169 3170 /* append to socket */ 3171 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3172 0, 0, stcb->asoc.context, 0, 0, 0, 3173 m_notify); 3174 if (control == NULL) { 3175 /* no memory */ 3176 sctp_m_freem(m_notify); 3177 return; 3178 } 3179 control->length = SCTP_BUF_LEN(m_notify); 3180 control->spec_flags = M_NOTIFICATION; 3181 /* not that we need this */ 3182 control->tail_mbuf = m_notify; 3183 sctp_add_to_readq(stcb->sctp_ep, stcb, 3184 control, 3185 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3186 } 3187 3188 /* This always must be called with the read-queue LOCKED in the INP */ 3189 static void 3190 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3191 uint32_t val, int so_locked 3192 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3193 SCTP_UNUSED 3194 #endif 3195 ) 3196 { 3197 struct mbuf *m_notify; 3198 struct sctp_pdapi_event *pdapi; 3199 struct sctp_queued_to_read *control; 3200 struct sockbuf *sb; 3201 3202 if ((stcb == NULL) || 3203 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3204 /* event not enabled */ 3205 return; 3206 } 3207 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3208 return; 3209 } 3210 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3211 if (m_notify == NULL) 3212 /* no space left */ 3213 return; 3214 SCTP_BUF_LEN(m_notify) = 0; 3215 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3216 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3217 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3218 pdapi->pdapi_flags = 0; 3219 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3220 pdapi->pdapi_indication = error; 3221 pdapi->pdapi_stream = (val >> 16); 3222 pdapi->pdapi_seq = (val & 0x0000ffff); 3223 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3224 3225 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3226 SCTP_BUF_NEXT(m_notify) = NULL; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control == NULL) { 3231 /* no memory */ 3232 sctp_m_freem(m_notify); 3233 return; 3234 } 3235 control->spec_flags = M_NOTIFICATION; 3236 control->length = SCTP_BUF_LEN(m_notify); 3237 /* not that we need this */ 3238 control->tail_mbuf = m_notify; 3239 control->held_length = 0; 3240 control->length = 0; 3241 sb = &stcb->sctp_socket->so_rcv; 3242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3243 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3244 } 3245 sctp_sballoc(stcb, sb, m_notify); 3246 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3247 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3248 } 3249 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3250 control->end_added = 1; 3251 if (stcb->asoc.control_pdapi) 3252 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3253 else { 3254 /* we really should not see this case */ 3255 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3256 } 3257 if (stcb->sctp_ep && stcb->sctp_socket) { 3258 /* This should always be the case */ 3259 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3260 struct socket *so; 3261 3262 so = SCTP_INP_SO(stcb->sctp_ep); 3263 if (!so_locked) { 3264 atomic_add_int(&stcb->asoc.refcnt, 1); 3265 SCTP_TCB_UNLOCK(stcb); 3266 SCTP_SOCKET_LOCK(so, 1); 3267 SCTP_TCB_LOCK(stcb); 3268 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3269 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3270 SCTP_SOCKET_UNLOCK(so, 1); 3271 return; 3272 } 3273 } 3274 #endif 3275 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3276 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3277 if (!so_locked) { 3278 SCTP_SOCKET_UNLOCK(so, 1); 3279 } 3280 #endif 3281 } 3282 } 3283 3284 static void 3285 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3286 { 3287 struct mbuf *m_notify; 3288 struct sctp_shutdown_event *sse; 3289 struct sctp_queued_to_read *control; 3290 3291 /* 3292 * For TCP model AND UDP connected sockets we will send an error up 3293 * when an SHUTDOWN completes 3294 */ 3295 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3296 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3297 /* mark socket closed for read/write and wakeup! */ 3298 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3299 struct socket *so; 3300 3301 so = SCTP_INP_SO(stcb->sctp_ep); 3302 atomic_add_int(&stcb->asoc.refcnt, 1); 3303 SCTP_TCB_UNLOCK(stcb); 3304 SCTP_SOCKET_LOCK(so, 1); 3305 SCTP_TCB_LOCK(stcb); 3306 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3307 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3308 SCTP_SOCKET_UNLOCK(so, 1); 3309 return; 3310 } 3311 #endif 3312 socantsendmore(stcb->sctp_socket); 3313 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3314 SCTP_SOCKET_UNLOCK(so, 1); 3315 #endif 3316 } 3317 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3318 /* event not enabled */ 3319 return; 3320 } 3321 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3322 if (m_notify == NULL) 3323 /* no space left */ 3324 return; 3325 sse = mtod(m_notify, struct sctp_shutdown_event *); 3326 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3327 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3328 sse->sse_flags = 0; 3329 sse->sse_length = sizeof(struct sctp_shutdown_event); 3330 sse->sse_assoc_id = sctp_get_associd(stcb); 3331 3332 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3333 SCTP_BUF_NEXT(m_notify) = NULL; 3334 3335 /* append to socket */ 3336 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3337 0, 0, stcb->asoc.context, 0, 0, 0, 3338 m_notify); 3339 if (control == NULL) { 3340 /* no memory */ 3341 sctp_m_freem(m_notify); 3342 return; 3343 } 3344 control->spec_flags = M_NOTIFICATION; 3345 control->length = SCTP_BUF_LEN(m_notify); 3346 /* not that we need this */ 3347 control->tail_mbuf = m_notify; 3348 sctp_add_to_readq(stcb->sctp_ep, stcb, 3349 control, 3350 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3351 } 3352 3353 static void 3354 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3355 int so_locked 3356 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3357 SCTP_UNUSED 3358 #endif 3359 ) 3360 { 3361 struct mbuf *m_notify; 3362 struct sctp_sender_dry_event *event; 3363 struct sctp_queued_to_read *control; 3364 3365 if ((stcb == NULL) || 3366 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3367 /* event not enabled */ 3368 return; 3369 } 3370 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3371 if (m_notify == NULL) { 3372 /* no space left */ 3373 return; 3374 } 3375 SCTP_BUF_LEN(m_notify) = 0; 3376 event = mtod(m_notify, struct sctp_sender_dry_event *); 3377 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3378 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3379 event->sender_dry_flags = 0; 3380 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3381 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3382 3383 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3384 SCTP_BUF_NEXT(m_notify) = NULL; 3385 3386 /* append to socket */ 3387 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3388 0, 0, stcb->asoc.context, 0, 0, 0, 3389 m_notify); 3390 if (control == NULL) { 3391 /* no memory */ 3392 sctp_m_freem(m_notify); 3393 return; 3394 } 3395 control->length = SCTP_BUF_LEN(m_notify); 3396 control->spec_flags = M_NOTIFICATION; 3397 /* not that we need this */ 3398 control->tail_mbuf = m_notify; 3399 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3400 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3401 } 3402 3403 3404 void 3405 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3406 { 3407 struct mbuf *m_notify; 3408 struct sctp_queued_to_read *control; 3409 struct sctp_stream_change_event *stradd; 3410 3411 if ((stcb == NULL) || 3412 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3413 /* event not enabled */ 3414 return; 3415 } 3416 if ((stcb->asoc.peer_req_out) && flag) { 3417 /* Peer made the request, don't tell the local user */ 3418 stcb->asoc.peer_req_out = 0; 3419 return; 3420 } 3421 stcb->asoc.peer_req_out = 0; 3422 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3423 if (m_notify == NULL) 3424 /* no space left */ 3425 return; 3426 SCTP_BUF_LEN(m_notify) = 0; 3427 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3428 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3429 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3430 stradd->strchange_flags = flag; 3431 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3432 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3433 stradd->strchange_instrms = numberin; 3434 stradd->strchange_outstrms = numberout; 3435 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3436 SCTP_BUF_NEXT(m_notify) = NULL; 3437 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3438 /* no space */ 3439 sctp_m_freem(m_notify); 3440 return; 3441 } 3442 /* append to socket */ 3443 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3444 0, 0, stcb->asoc.context, 0, 0, 0, 3445 m_notify); 3446 if (control == NULL) { 3447 /* no memory */ 3448 sctp_m_freem(m_notify); 3449 return; 3450 } 3451 control->spec_flags = M_NOTIFICATION; 3452 control->length = SCTP_BUF_LEN(m_notify); 3453 /* not that we need this */ 3454 control->tail_mbuf = m_notify; 3455 sctp_add_to_readq(stcb->sctp_ep, stcb, 3456 control, 3457 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3458 } 3459 3460 void 3461 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3462 { 3463 struct mbuf *m_notify; 3464 struct sctp_queued_to_read *control; 3465 struct sctp_assoc_reset_event *strasoc; 3466 3467 if ((stcb == NULL) || 3468 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3469 /* event not enabled */ 3470 return; 3471 } 3472 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3473 if (m_notify == NULL) 3474 /* no space left */ 3475 return; 3476 SCTP_BUF_LEN(m_notify) = 0; 3477 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3478 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3479 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3480 strasoc->assocreset_flags = flag; 3481 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3482 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3483 strasoc->assocreset_local_tsn = sending_tsn; 3484 strasoc->assocreset_remote_tsn = recv_tsn; 3485 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3486 SCTP_BUF_NEXT(m_notify) = NULL; 3487 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3488 /* no space */ 3489 sctp_m_freem(m_notify); 3490 return; 3491 } 3492 /* append to socket */ 3493 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3494 0, 0, stcb->asoc.context, 0, 0, 0, 3495 m_notify); 3496 if (control == NULL) { 3497 /* no memory */ 3498 sctp_m_freem(m_notify); 3499 return; 3500 } 3501 control->spec_flags = M_NOTIFICATION; 3502 control->length = SCTP_BUF_LEN(m_notify); 3503 /* not that we need this */ 3504 control->tail_mbuf = m_notify; 3505 sctp_add_to_readq(stcb->sctp_ep, stcb, 3506 control, 3507 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3508 } 3509 3510 3511 3512 static void 3513 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3514 int number_entries, uint16_t * list, int flag) 3515 { 3516 struct mbuf *m_notify; 3517 struct sctp_queued_to_read *control; 3518 struct sctp_stream_reset_event *strreset; 3519 int len; 3520 3521 if ((stcb == NULL) || 3522 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3523 /* event not enabled */ 3524 return; 3525 } 3526 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3527 if (m_notify == NULL) 3528 /* no space left */ 3529 return; 3530 SCTP_BUF_LEN(m_notify) = 0; 3531 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3532 if (len > M_TRAILINGSPACE(m_notify)) { 3533 /* never enough room */ 3534 sctp_m_freem(m_notify); 3535 return; 3536 } 3537 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3538 memset(strreset, 0, len); 3539 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3540 strreset->strreset_flags = flag; 3541 strreset->strreset_length = len; 3542 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3543 if (number_entries) { 3544 int i; 3545 3546 for (i = 0; i < number_entries; i++) { 3547 strreset->strreset_stream_list[i] = ntohs(list[i]); 3548 } 3549 } 3550 SCTP_BUF_LEN(m_notify) = len; 3551 SCTP_BUF_NEXT(m_notify) = NULL; 3552 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3553 /* no space */ 3554 sctp_m_freem(m_notify); 3555 return; 3556 } 3557 /* append to socket */ 3558 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3559 0, 0, stcb->asoc.context, 0, 0, 0, 3560 m_notify); 3561 if (control == NULL) { 3562 /* no memory */ 3563 sctp_m_freem(m_notify); 3564 return; 3565 } 3566 control->spec_flags = M_NOTIFICATION; 3567 control->length = SCTP_BUF_LEN(m_notify); 3568 /* not that we need this */ 3569 control->tail_mbuf = m_notify; 3570 sctp_add_to_readq(stcb->sctp_ep, stcb, 3571 control, 3572 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3573 } 3574 3575 3576 static void 3577 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3578 { 3579 struct mbuf *m_notify; 3580 struct sctp_remote_error *sre; 3581 struct sctp_queued_to_read *control; 3582 unsigned int notif_len; 3583 uint16_t chunk_len; 3584 3585 if ((stcb == NULL) || 3586 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3587 return; 3588 } 3589 if (chunk != NULL) { 3590 chunk_len = ntohs(chunk->ch.chunk_length); 3591 } else { 3592 chunk_len = 0; 3593 } 3594 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 3595 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3596 if (m_notify == NULL) { 3597 /* Retry with smaller value. */ 3598 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 3599 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3600 if (m_notify == NULL) { 3601 return; 3602 } 3603 } 3604 SCTP_BUF_NEXT(m_notify) = NULL; 3605 sre = mtod(m_notify, struct sctp_remote_error *); 3606 memset(sre, 0, notif_len); 3607 sre->sre_type = SCTP_REMOTE_ERROR; 3608 sre->sre_flags = 0; 3609 sre->sre_length = sizeof(struct sctp_remote_error); 3610 sre->sre_error = error; 3611 sre->sre_assoc_id = sctp_get_associd(stcb); 3612 if (notif_len > sizeof(struct sctp_remote_error)) { 3613 memcpy(sre->sre_data, chunk, chunk_len); 3614 sre->sre_length += chunk_len; 3615 } 3616 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3617 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3618 0, 0, stcb->asoc.context, 0, 0, 0, 3619 m_notify); 3620 if (control != NULL) { 3621 control->length = SCTP_BUF_LEN(m_notify); 3622 /* not that we need this */ 3623 control->tail_mbuf = m_notify; 3624 control->spec_flags = M_NOTIFICATION; 3625 sctp_add_to_readq(stcb->sctp_ep, stcb, 3626 control, 3627 &stcb->sctp_socket->so_rcv, 1, 3628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3629 } else { 3630 sctp_m_freem(m_notify); 3631 } 3632 } 3633 3634 3635 void 3636 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3637 uint32_t error, void *data, int so_locked 3638 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3639 SCTP_UNUSED 3640 #endif 3641 ) 3642 { 3643 if ((stcb == NULL) || 3644 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3645 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3646 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3647 /* If the socket is gone we are out of here */ 3648 return; 3649 } 3650 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3651 return; 3652 } 3653 if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) || 3654 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) { 3655 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3656 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3657 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3658 /* Don't report these in front states */ 3659 return; 3660 } 3661 } 3662 switch (notification) { 3663 case SCTP_NOTIFY_ASSOC_UP: 3664 if (stcb->asoc.assoc_up_sent == 0) { 3665 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 3666 stcb->asoc.assoc_up_sent = 1; 3667 } 3668 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3669 sctp_notify_adaptation_layer(stcb); 3670 } 3671 if (stcb->asoc.auth_supported == 0) { 3672 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3673 NULL, so_locked); 3674 } 3675 break; 3676 case SCTP_NOTIFY_ASSOC_DOWN: 3677 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 3678 break; 3679 case SCTP_NOTIFY_INTERFACE_DOWN: 3680 { 3681 struct sctp_nets *net; 3682 3683 net = (struct sctp_nets *)data; 3684 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3685 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3686 break; 3687 } 3688 case SCTP_NOTIFY_INTERFACE_UP: 3689 { 3690 struct sctp_nets *net; 3691 3692 net = (struct sctp_nets *)data; 3693 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3694 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3695 break; 3696 } 3697 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3698 { 3699 struct sctp_nets *net; 3700 3701 net = (struct sctp_nets *)data; 3702 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3703 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3704 break; 3705 } 3706 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3707 sctp_notify_send_failed2(stcb, error, 3708 (struct sctp_stream_queue_pending *)data, so_locked); 3709 break; 3710 case SCTP_NOTIFY_SENT_DG_FAIL: 3711 sctp_notify_send_failed(stcb, 1, error, 3712 (struct sctp_tmit_chunk *)data, so_locked); 3713 break; 3714 case SCTP_NOTIFY_UNSENT_DG_FAIL: 3715 sctp_notify_send_failed(stcb, 0, error, 3716 (struct sctp_tmit_chunk *)data, so_locked); 3717 break; 3718 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3719 { 3720 uint32_t val; 3721 3722 val = *((uint32_t *) data); 3723 3724 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 3725 break; 3726 } 3727 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 3728 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3729 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) { 3730 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 3731 } else { 3732 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 3733 } 3734 break; 3735 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 3736 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3737 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) { 3738 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 3739 } else { 3740 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 3741 } 3742 break; 3743 case SCTP_NOTIFY_ASSOC_RESTART: 3744 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 3745 if (stcb->asoc.auth_supported == 0) { 3746 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3747 NULL, so_locked); 3748 } 3749 break; 3750 case SCTP_NOTIFY_STR_RESET_SEND: 3751 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN); 3752 break; 3753 case SCTP_NOTIFY_STR_RESET_RECV: 3754 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING); 3755 break; 3756 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3757 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3758 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 3759 break; 3760 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 3761 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3762 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 3763 break; 3764 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3765 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3766 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 3767 break; 3768 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 3769 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3770 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 3771 break; 3772 case SCTP_NOTIFY_ASCONF_ADD_IP: 3773 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3774 error, so_locked); 3775 break; 3776 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3777 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3778 error, so_locked); 3779 break; 3780 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3781 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3782 error, so_locked); 3783 break; 3784 case SCTP_NOTIFY_PEER_SHUTDOWN: 3785 sctp_notify_shutdown_event(stcb); 3786 break; 3787 case SCTP_NOTIFY_AUTH_NEW_KEY: 3788 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 3789 (uint16_t) (uintptr_t) data, 3790 so_locked); 3791 break; 3792 case SCTP_NOTIFY_AUTH_FREE_KEY: 3793 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3794 (uint16_t) (uintptr_t) data, 3795 so_locked); 3796 break; 3797 case SCTP_NOTIFY_NO_PEER_AUTH: 3798 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3799 (uint16_t) (uintptr_t) data, 3800 so_locked); 3801 break; 3802 case SCTP_NOTIFY_SENDER_DRY: 3803 sctp_notify_sender_dry_event(stcb, so_locked); 3804 break; 3805 case SCTP_NOTIFY_REMOTE_ERROR: 3806 sctp_notify_remote_error(stcb, error, data); 3807 break; 3808 default: 3809 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3810 __func__, notification, notification); 3811 break; 3812 } /* end switch */ 3813 } 3814 3815 void 3816 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked 3817 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3818 SCTP_UNUSED 3819 #endif 3820 ) 3821 { 3822 struct sctp_association *asoc; 3823 struct sctp_stream_out *outs; 3824 struct sctp_tmit_chunk *chk, *nchk; 3825 struct sctp_stream_queue_pending *sp, *nsp; 3826 int i; 3827 3828 if (stcb == NULL) { 3829 return; 3830 } 3831 asoc = &stcb->asoc; 3832 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 3833 /* already being freed */ 3834 return; 3835 } 3836 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3837 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3838 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 3839 return; 3840 } 3841 /* now through all the gunk freeing chunks */ 3842 if (holds_lock == 0) { 3843 SCTP_TCB_SEND_LOCK(stcb); 3844 } 3845 /* sent queue SHOULD be empty */ 3846 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 3847 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3848 asoc->sent_queue_cnt--; 3849 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 3850 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3851 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3852 #ifdef INVARIANTS 3853 } else { 3854 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3855 #endif 3856 } 3857 } 3858 if (chk->data != NULL) { 3859 sctp_free_bufspace(stcb, asoc, chk, 1); 3860 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 3861 error, chk, so_locked); 3862 if (chk->data) { 3863 sctp_m_freem(chk->data); 3864 chk->data = NULL; 3865 } 3866 } 3867 sctp_free_a_chunk(stcb, chk, so_locked); 3868 /* sa_ignore FREED_MEMORY */ 3869 } 3870 /* pending send queue SHOULD be empty */ 3871 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 3872 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3873 asoc->send_queue_cnt--; 3874 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3875 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3876 #ifdef INVARIANTS 3877 } else { 3878 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3879 #endif 3880 } 3881 if (chk->data != NULL) { 3882 sctp_free_bufspace(stcb, asoc, chk, 1); 3883 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 3884 error, chk, so_locked); 3885 if (chk->data) { 3886 sctp_m_freem(chk->data); 3887 chk->data = NULL; 3888 } 3889 } 3890 sctp_free_a_chunk(stcb, chk, so_locked); 3891 /* sa_ignore FREED_MEMORY */ 3892 } 3893 for (i = 0; i < asoc->streamoutcnt; i++) { 3894 /* For each stream */ 3895 outs = &asoc->strmout[i]; 3896 /* clean up any sends there */ 3897 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 3898 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 3899 TAILQ_REMOVE(&outs->outqueue, sp, next); 3900 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock); 3901 sctp_free_spbufspace(stcb, asoc, sp); 3902 if (sp->data) { 3903 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3904 error, (void *)sp, so_locked); 3905 if (sp->data) { 3906 sctp_m_freem(sp->data); 3907 sp->data = NULL; 3908 sp->tail_mbuf = NULL; 3909 sp->length = 0; 3910 } 3911 } 3912 if (sp->net) { 3913 sctp_free_remote_addr(sp->net); 3914 sp->net = NULL; 3915 } 3916 /* Free the chunk */ 3917 sctp_free_a_strmoq(stcb, sp, so_locked); 3918 /* sa_ignore FREED_MEMORY */ 3919 } 3920 } 3921 3922 if (holds_lock == 0) { 3923 SCTP_TCB_SEND_UNLOCK(stcb); 3924 } 3925 } 3926 3927 void 3928 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 3929 struct sctp_abort_chunk *abort, int so_locked 3930 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3931 SCTP_UNUSED 3932 #endif 3933 ) 3934 { 3935 if (stcb == NULL) { 3936 return; 3937 } 3938 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3939 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3940 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3941 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3942 } 3943 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3944 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3945 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3946 return; 3947 } 3948 /* Tell them we lost the asoc */ 3949 sctp_report_all_outbound(stcb, error, 1, so_locked); 3950 if (from_peer) { 3951 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 3952 } else { 3953 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 3954 } 3955 } 3956 3957 void 3958 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3959 struct mbuf *m, int iphlen, 3960 struct sockaddr *src, struct sockaddr *dst, 3961 struct sctphdr *sh, struct mbuf *op_err, 3962 uint8_t mflowtype, uint32_t mflowid, 3963 uint32_t vrf_id, uint16_t port) 3964 { 3965 uint32_t vtag; 3966 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3967 struct socket *so; 3968 #endif 3969 3970 vtag = 0; 3971 if (stcb != NULL) { 3972 vtag = stcb->asoc.peer_vtag; 3973 vrf_id = stcb->asoc.vrf_id; 3974 } 3975 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 3976 mflowtype, mflowid, inp->fibnum, 3977 vrf_id, port); 3978 if (stcb != NULL) { 3979 /* We have a TCB to abort, send notification too */ 3980 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 3981 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3982 /* Ok, now lets free it */ 3983 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3984 so = SCTP_INP_SO(inp); 3985 atomic_add_int(&stcb->asoc.refcnt, 1); 3986 SCTP_TCB_UNLOCK(stcb); 3987 SCTP_SOCKET_LOCK(so, 1); 3988 SCTP_TCB_LOCK(stcb); 3989 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3990 #endif 3991 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3992 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3993 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3994 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3995 } 3996 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 3997 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3998 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3999 SCTP_SOCKET_UNLOCK(so, 1); 4000 #endif 4001 } 4002 } 4003 #ifdef SCTP_ASOCLOG_OF_TSNS 4004 void 4005 sctp_print_out_track_log(struct sctp_tcb *stcb) 4006 { 4007 #ifdef NOSIY_PRINTS 4008 int i; 4009 4010 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4011 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4012 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4013 SCTP_PRINTF("None rcvd\n"); 4014 goto none_in; 4015 } 4016 if (stcb->asoc.tsn_in_wrapped) { 4017 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4018 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4019 stcb->asoc.in_tsnlog[i].tsn, 4020 stcb->asoc.in_tsnlog[i].strm, 4021 stcb->asoc.in_tsnlog[i].seq, 4022 stcb->asoc.in_tsnlog[i].flgs, 4023 stcb->asoc.in_tsnlog[i].sz); 4024 } 4025 } 4026 if (stcb->asoc.tsn_in_at) { 4027 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4028 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4029 stcb->asoc.in_tsnlog[i].tsn, 4030 stcb->asoc.in_tsnlog[i].strm, 4031 stcb->asoc.in_tsnlog[i].seq, 4032 stcb->asoc.in_tsnlog[i].flgs, 4033 stcb->asoc.in_tsnlog[i].sz); 4034 } 4035 } 4036 none_in: 4037 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4038 if ((stcb->asoc.tsn_out_at == 0) && 4039 (stcb->asoc.tsn_out_wrapped == 0)) { 4040 SCTP_PRINTF("None sent\n"); 4041 } 4042 if (stcb->asoc.tsn_out_wrapped) { 4043 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4044 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4045 stcb->asoc.out_tsnlog[i].tsn, 4046 stcb->asoc.out_tsnlog[i].strm, 4047 stcb->asoc.out_tsnlog[i].seq, 4048 stcb->asoc.out_tsnlog[i].flgs, 4049 stcb->asoc.out_tsnlog[i].sz); 4050 } 4051 } 4052 if (stcb->asoc.tsn_out_at) { 4053 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4054 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4055 stcb->asoc.out_tsnlog[i].tsn, 4056 stcb->asoc.out_tsnlog[i].strm, 4057 stcb->asoc.out_tsnlog[i].seq, 4058 stcb->asoc.out_tsnlog[i].flgs, 4059 stcb->asoc.out_tsnlog[i].sz); 4060 } 4061 } 4062 #endif 4063 } 4064 #endif 4065 4066 void 4067 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4068 struct mbuf *op_err, 4069 int so_locked 4070 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4071 SCTP_UNUSED 4072 #endif 4073 ) 4074 { 4075 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4076 struct socket *so; 4077 #endif 4078 4079 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4080 so = SCTP_INP_SO(inp); 4081 #endif 4082 if (stcb == NULL) { 4083 /* Got to have a TCB */ 4084 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4085 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4086 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4087 SCTP_CALLED_DIRECTLY_NOCMPSET); 4088 } 4089 } 4090 return; 4091 } else { 4092 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 4093 } 4094 /* notify the peer */ 4095 sctp_send_abort_tcb(stcb, op_err, so_locked); 4096 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4097 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 4098 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4099 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4100 } 4101 /* notify the ulp */ 4102 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4103 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4104 } 4105 /* now free the asoc */ 4106 #ifdef SCTP_ASOCLOG_OF_TSNS 4107 sctp_print_out_track_log(stcb); 4108 #endif 4109 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4110 if (!so_locked) { 4111 atomic_add_int(&stcb->asoc.refcnt, 1); 4112 SCTP_TCB_UNLOCK(stcb); 4113 SCTP_SOCKET_LOCK(so, 1); 4114 SCTP_TCB_LOCK(stcb); 4115 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4116 } 4117 #endif 4118 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4119 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4120 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4121 if (!so_locked) { 4122 SCTP_SOCKET_UNLOCK(so, 1); 4123 } 4124 #endif 4125 } 4126 4127 void 4128 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4129 struct sockaddr *src, struct sockaddr *dst, 4130 struct sctphdr *sh, struct sctp_inpcb *inp, 4131 struct mbuf *cause, 4132 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4133 uint32_t vrf_id, uint16_t port) 4134 { 4135 struct sctp_chunkhdr *ch, chunk_buf; 4136 unsigned int chk_length; 4137 int contains_init_chunk; 4138 4139 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4140 /* Generate a TO address for future reference */ 4141 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4142 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4143 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4144 SCTP_CALLED_DIRECTLY_NOCMPSET); 4145 } 4146 } 4147 contains_init_chunk = 0; 4148 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4149 sizeof(*ch), (uint8_t *) & chunk_buf); 4150 while (ch != NULL) { 4151 chk_length = ntohs(ch->chunk_length); 4152 if (chk_length < sizeof(*ch)) { 4153 /* break to abort land */ 4154 break; 4155 } 4156 switch (ch->chunk_type) { 4157 case SCTP_INIT: 4158 contains_init_chunk = 1; 4159 break; 4160 case SCTP_PACKET_DROPPED: 4161 /* we don't respond to pkt-dropped */ 4162 return; 4163 case SCTP_ABORT_ASSOCIATION: 4164 /* we don't respond with an ABORT to an ABORT */ 4165 return; 4166 case SCTP_SHUTDOWN_COMPLETE: 4167 /* 4168 * we ignore it since we are not waiting for it and 4169 * peer is gone 4170 */ 4171 return; 4172 case SCTP_SHUTDOWN_ACK: 4173 sctp_send_shutdown_complete2(src, dst, sh, 4174 mflowtype, mflowid, fibnum, 4175 vrf_id, port); 4176 return; 4177 default: 4178 break; 4179 } 4180 offset += SCTP_SIZE32(chk_length); 4181 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4182 sizeof(*ch), (uint8_t *) & chunk_buf); 4183 } 4184 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4185 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4186 (contains_init_chunk == 0))) { 4187 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4188 mflowtype, mflowid, fibnum, 4189 vrf_id, port); 4190 } 4191 } 4192 4193 /* 4194 * check the inbound datagram to make sure there is not an abort inside it, 4195 * if there is return 1, else return 0. 4196 */ 4197 int 4198 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 4199 { 4200 struct sctp_chunkhdr *ch; 4201 struct sctp_init_chunk *init_chk, chunk_buf; 4202 int offset; 4203 unsigned int chk_length; 4204 4205 offset = iphlen + sizeof(struct sctphdr); 4206 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4207 (uint8_t *) & chunk_buf); 4208 while (ch != NULL) { 4209 chk_length = ntohs(ch->chunk_length); 4210 if (chk_length < sizeof(*ch)) { 4211 /* packet is probably corrupt */ 4212 break; 4213 } 4214 /* we seem to be ok, is it an abort? */ 4215 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4216 /* yep, tell them */ 4217 return (1); 4218 } 4219 if (ch->chunk_type == SCTP_INITIATION) { 4220 /* need to update the Vtag */ 4221 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4222 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 4223 if (init_chk != NULL) { 4224 *vtagfill = ntohl(init_chk->init.initiate_tag); 4225 } 4226 } 4227 /* Nope, move to the next chunk */ 4228 offset += SCTP_SIZE32(chk_length); 4229 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4230 sizeof(*ch), (uint8_t *) & chunk_buf); 4231 } 4232 return (0); 4233 } 4234 4235 /* 4236 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4237 * set (i.e. it's 0) so, create this function to compare link local scopes 4238 */ 4239 #ifdef INET6 4240 uint32_t 4241 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4242 { 4243 struct sockaddr_in6 a, b; 4244 4245 /* save copies */ 4246 a = *addr1; 4247 b = *addr2; 4248 4249 if (a.sin6_scope_id == 0) 4250 if (sa6_recoverscope(&a)) { 4251 /* can't get scope, so can't match */ 4252 return (0); 4253 } 4254 if (b.sin6_scope_id == 0) 4255 if (sa6_recoverscope(&b)) { 4256 /* can't get scope, so can't match */ 4257 return (0); 4258 } 4259 if (a.sin6_scope_id != b.sin6_scope_id) 4260 return (0); 4261 4262 return (1); 4263 } 4264 4265 /* 4266 * returns a sockaddr_in6 with embedded scope recovered and removed 4267 */ 4268 struct sockaddr_in6 * 4269 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4270 { 4271 /* check and strip embedded scope junk */ 4272 if (addr->sin6_family == AF_INET6) { 4273 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4274 if (addr->sin6_scope_id == 0) { 4275 *store = *addr; 4276 if (!sa6_recoverscope(store)) { 4277 /* use the recovered scope */ 4278 addr = store; 4279 } 4280 } else { 4281 /* else, return the original "to" addr */ 4282 in6_clearscope(&addr->sin6_addr); 4283 } 4284 } 4285 } 4286 return (addr); 4287 } 4288 #endif 4289 4290 /* 4291 * are the two addresses the same? currently a "scopeless" check returns: 1 4292 * if same, 0 if not 4293 */ 4294 int 4295 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4296 { 4297 4298 /* must be valid */ 4299 if (sa1 == NULL || sa2 == NULL) 4300 return (0); 4301 4302 /* must be the same family */ 4303 if (sa1->sa_family != sa2->sa_family) 4304 return (0); 4305 4306 switch (sa1->sa_family) { 4307 #ifdef INET6 4308 case AF_INET6: 4309 { 4310 /* IPv6 addresses */ 4311 struct sockaddr_in6 *sin6_1, *sin6_2; 4312 4313 sin6_1 = (struct sockaddr_in6 *)sa1; 4314 sin6_2 = (struct sockaddr_in6 *)sa2; 4315 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4316 sin6_2)); 4317 } 4318 #endif 4319 #ifdef INET 4320 case AF_INET: 4321 { 4322 /* IPv4 addresses */ 4323 struct sockaddr_in *sin_1, *sin_2; 4324 4325 sin_1 = (struct sockaddr_in *)sa1; 4326 sin_2 = (struct sockaddr_in *)sa2; 4327 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4328 } 4329 #endif 4330 default: 4331 /* we don't do these... */ 4332 return (0); 4333 } 4334 } 4335 4336 void 4337 sctp_print_address(struct sockaddr *sa) 4338 { 4339 #ifdef INET6 4340 char ip6buf[INET6_ADDRSTRLEN]; 4341 #endif 4342 4343 switch (sa->sa_family) { 4344 #ifdef INET6 4345 case AF_INET6: 4346 { 4347 struct sockaddr_in6 *sin6; 4348 4349 sin6 = (struct sockaddr_in6 *)sa; 4350 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4351 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4352 ntohs(sin6->sin6_port), 4353 sin6->sin6_scope_id); 4354 break; 4355 } 4356 #endif 4357 #ifdef INET 4358 case AF_INET: 4359 { 4360 struct sockaddr_in *sin; 4361 unsigned char *p; 4362 4363 sin = (struct sockaddr_in *)sa; 4364 p = (unsigned char *)&sin->sin_addr; 4365 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4366 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4367 break; 4368 } 4369 #endif 4370 default: 4371 SCTP_PRINTF("?\n"); 4372 break; 4373 } 4374 } 4375 4376 void 4377 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4378 struct sctp_inpcb *new_inp, 4379 struct sctp_tcb *stcb, 4380 int waitflags) 4381 { 4382 /* 4383 * go through our old INP and pull off any control structures that 4384 * belong to stcb and move then to the new inp. 4385 */ 4386 struct socket *old_so, *new_so; 4387 struct sctp_queued_to_read *control, *nctl; 4388 struct sctp_readhead tmp_queue; 4389 struct mbuf *m; 4390 int error = 0; 4391 4392 old_so = old_inp->sctp_socket; 4393 new_so = new_inp->sctp_socket; 4394 TAILQ_INIT(&tmp_queue); 4395 error = sblock(&old_so->so_rcv, waitflags); 4396 if (error) { 4397 /* 4398 * Gak, can't get sblock, we have a problem. data will be 4399 * left stranded.. and we don't dare look at it since the 4400 * other thread may be reading something. Oh well, its a 4401 * screwed up app that does a peeloff OR a accept while 4402 * reading from the main socket... actually its only the 4403 * peeloff() case, since I think read will fail on a 4404 * listening socket.. 4405 */ 4406 return; 4407 } 4408 /* lock the socket buffers */ 4409 SCTP_INP_READ_LOCK(old_inp); 4410 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4411 /* Pull off all for out target stcb */ 4412 if (control->stcb == stcb) { 4413 /* remove it we want it */ 4414 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4415 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4416 m = control->data; 4417 while (m) { 4418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4419 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4420 } 4421 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4423 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4424 } 4425 m = SCTP_BUF_NEXT(m); 4426 } 4427 } 4428 } 4429 SCTP_INP_READ_UNLOCK(old_inp); 4430 /* Remove the sb-lock on the old socket */ 4431 4432 sbunlock(&old_so->so_rcv); 4433 /* Now we move them over to the new socket buffer */ 4434 SCTP_INP_READ_LOCK(new_inp); 4435 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4436 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4437 m = control->data; 4438 while (m) { 4439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4440 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4441 } 4442 sctp_sballoc(stcb, &new_so->so_rcv, m); 4443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4444 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4445 } 4446 m = SCTP_BUF_NEXT(m); 4447 } 4448 } 4449 SCTP_INP_READ_UNLOCK(new_inp); 4450 } 4451 4452 void 4453 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4454 struct sctp_tcb *stcb, 4455 int so_locked 4456 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4457 SCTP_UNUSED 4458 #endif 4459 ) 4460 { 4461 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4462 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4463 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4464 } else { 4465 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4466 struct socket *so; 4467 4468 so = SCTP_INP_SO(inp); 4469 if (!so_locked) { 4470 if (stcb) { 4471 atomic_add_int(&stcb->asoc.refcnt, 1); 4472 SCTP_TCB_UNLOCK(stcb); 4473 } 4474 SCTP_SOCKET_LOCK(so, 1); 4475 if (stcb) { 4476 SCTP_TCB_LOCK(stcb); 4477 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4478 } 4479 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4480 SCTP_SOCKET_UNLOCK(so, 1); 4481 return; 4482 } 4483 } 4484 #endif 4485 sctp_sorwakeup(inp, inp->sctp_socket); 4486 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4487 if (!so_locked) { 4488 SCTP_SOCKET_UNLOCK(so, 1); 4489 } 4490 #endif 4491 } 4492 } 4493 } 4494 4495 void 4496 sctp_add_to_readq(struct sctp_inpcb *inp, 4497 struct sctp_tcb *stcb, 4498 struct sctp_queued_to_read *control, 4499 struct sockbuf *sb, 4500 int end, 4501 int inp_read_lock_held, 4502 int so_locked 4503 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4504 SCTP_UNUSED 4505 #endif 4506 ) 4507 { 4508 /* 4509 * Here we must place the control on the end of the socket read 4510 * queue AND increment sb_cc so that select will work properly on 4511 * read. 4512 */ 4513 struct mbuf *m, *prev = NULL; 4514 4515 if (inp == NULL) { 4516 /* Gak, TSNH!! */ 4517 #ifdef INVARIANTS 4518 panic("Gak, inp NULL on add_to_readq"); 4519 #endif 4520 return; 4521 } 4522 if (inp_read_lock_held == 0) 4523 SCTP_INP_READ_LOCK(inp); 4524 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4525 sctp_free_remote_addr(control->whoFrom); 4526 if (control->data) { 4527 sctp_m_freem(control->data); 4528 control->data = NULL; 4529 } 4530 sctp_free_a_readq(stcb, control); 4531 if (inp_read_lock_held == 0) 4532 SCTP_INP_READ_UNLOCK(inp); 4533 return; 4534 } 4535 if (!(control->spec_flags & M_NOTIFICATION)) { 4536 atomic_add_int(&inp->total_recvs, 1); 4537 if (!control->do_not_ref_stcb) { 4538 atomic_add_int(&stcb->total_recvs, 1); 4539 } 4540 } 4541 m = control->data; 4542 control->held_length = 0; 4543 control->length = 0; 4544 while (m) { 4545 if (SCTP_BUF_LEN(m) == 0) { 4546 /* Skip mbufs with NO length */ 4547 if (prev == NULL) { 4548 /* First one */ 4549 control->data = sctp_m_free(m); 4550 m = control->data; 4551 } else { 4552 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4553 m = SCTP_BUF_NEXT(prev); 4554 } 4555 if (m == NULL) { 4556 control->tail_mbuf = prev; 4557 } 4558 continue; 4559 } 4560 prev = m; 4561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4562 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4563 } 4564 sctp_sballoc(stcb, sb, m); 4565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4566 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4567 } 4568 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4569 m = SCTP_BUF_NEXT(m); 4570 } 4571 if (prev != NULL) { 4572 control->tail_mbuf = prev; 4573 } else { 4574 /* Everything got collapsed out?? */ 4575 sctp_free_remote_addr(control->whoFrom); 4576 sctp_free_a_readq(stcb, control); 4577 if (inp_read_lock_held == 0) 4578 SCTP_INP_READ_UNLOCK(inp); 4579 return; 4580 } 4581 if (end) { 4582 control->end_added = 1; 4583 } 4584 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4585 control->on_read_q = 1; 4586 if (inp_read_lock_held == 0) 4587 SCTP_INP_READ_UNLOCK(inp); 4588 if (inp && inp->sctp_socket) { 4589 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4590 } 4591 } 4592 4593 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4594 *************ALTERNATE ROUTING CODE 4595 */ 4596 4597 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4598 *************ALTERNATE ROUTING CODE 4599 */ 4600 4601 struct mbuf * 4602 sctp_generate_cause(uint16_t code, char *info) 4603 { 4604 struct mbuf *m; 4605 struct sctp_gen_error_cause *cause; 4606 size_t info_len; 4607 uint16_t len; 4608 4609 if ((code == 0) || (info == NULL)) { 4610 return (NULL); 4611 } 4612 info_len = strlen(info); 4613 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4614 return (NULL); 4615 } 4616 len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len); 4617 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4618 if (m != NULL) { 4619 SCTP_BUF_LEN(m) = len; 4620 cause = mtod(m, struct sctp_gen_error_cause *); 4621 cause->code = htons(code); 4622 cause->length = htons(len); 4623 memcpy(cause->info, info, info_len); 4624 } 4625 return (m); 4626 } 4627 4628 struct mbuf * 4629 sctp_generate_no_user_data_cause(uint32_t tsn) 4630 { 4631 struct mbuf *m; 4632 struct sctp_error_no_user_data *no_user_data_cause; 4633 uint16_t len; 4634 4635 len = (uint16_t) sizeof(struct sctp_error_no_user_data); 4636 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4637 if (m != NULL) { 4638 SCTP_BUF_LEN(m) = len; 4639 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4640 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4641 no_user_data_cause->cause.length = htons(len); 4642 no_user_data_cause->tsn = tsn; /* tsn is passed in as NBO */ 4643 } 4644 return (m); 4645 } 4646 4647 #ifdef SCTP_MBCNT_LOGGING 4648 void 4649 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4650 struct sctp_tmit_chunk *tp1, int chk_cnt) 4651 { 4652 if (tp1->data == NULL) { 4653 return; 4654 } 4655 asoc->chunks_on_out_queue -= chk_cnt; 4656 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4657 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4658 asoc->total_output_queue_size, 4659 tp1->book_size, 4660 0, 4661 tp1->mbcnt); 4662 } 4663 if (asoc->total_output_queue_size >= tp1->book_size) { 4664 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4665 } else { 4666 asoc->total_output_queue_size = 0; 4667 } 4668 4669 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4670 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4671 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4672 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4673 } else { 4674 stcb->sctp_socket->so_snd.sb_cc = 0; 4675 4676 } 4677 } 4678 } 4679 4680 #endif 4681 4682 int 4683 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4684 uint8_t sent, int so_locked 4685 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4686 SCTP_UNUSED 4687 #endif 4688 ) 4689 { 4690 struct sctp_stream_out *strq; 4691 struct sctp_tmit_chunk *chk = NULL, *tp2; 4692 struct sctp_stream_queue_pending *sp; 4693 uint16_t stream = 0, seq = 0; 4694 uint8_t foundeom = 0; 4695 int ret_sz = 0; 4696 int notdone; 4697 int do_wakeup_routine = 0; 4698 4699 stream = tp1->rec.data.stream_number; 4700 seq = tp1->rec.data.stream_seq; 4701 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 4702 stcb->asoc.abandoned_sent[0]++; 4703 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 4704 stcb->asoc.strmout[stream].abandoned_sent[0]++; 4705 #if defined(SCTP_DETAILED_STR_STATS) 4706 stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 4707 #endif 4708 } else { 4709 stcb->asoc.abandoned_unsent[0]++; 4710 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 4711 stcb->asoc.strmout[stream].abandoned_unsent[0]++; 4712 #if defined(SCTP_DETAILED_STR_STATS) 4713 stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 4714 #endif 4715 } 4716 do { 4717 ret_sz += tp1->book_size; 4718 if (tp1->data != NULL) { 4719 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4720 sctp_flight_size_decrease(tp1); 4721 sctp_total_flight_decrease(stcb, tp1); 4722 } 4723 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4724 stcb->asoc.peers_rwnd += tp1->send_size; 4725 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 4726 if (sent) { 4727 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4728 } else { 4729 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4730 } 4731 if (tp1->data) { 4732 sctp_m_freem(tp1->data); 4733 tp1->data = NULL; 4734 } 4735 do_wakeup_routine = 1; 4736 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4737 stcb->asoc.sent_queue_cnt_removeable--; 4738 } 4739 } 4740 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4741 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4742 SCTP_DATA_NOT_FRAG) { 4743 /* not frag'ed we ae done */ 4744 notdone = 0; 4745 foundeom = 1; 4746 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4747 /* end of frag, we are done */ 4748 notdone = 0; 4749 foundeom = 1; 4750 } else { 4751 /* 4752 * Its a begin or middle piece, we must mark all of 4753 * it 4754 */ 4755 notdone = 1; 4756 tp1 = TAILQ_NEXT(tp1, sctp_next); 4757 } 4758 } while (tp1 && notdone); 4759 if (foundeom == 0) { 4760 /* 4761 * The multi-part message was scattered across the send and 4762 * sent queue. 4763 */ 4764 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 4765 if ((tp1->rec.data.stream_number != stream) || 4766 (tp1->rec.data.stream_seq != seq)) { 4767 break; 4768 } 4769 /* 4770 * save to chk in case we have some on stream out 4771 * queue. If so and we have an un-transmitted one we 4772 * don't have to fudge the TSN. 4773 */ 4774 chk = tp1; 4775 ret_sz += tp1->book_size; 4776 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4777 if (sent) { 4778 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4779 } else { 4780 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4781 } 4782 if (tp1->data) { 4783 sctp_m_freem(tp1->data); 4784 tp1->data = NULL; 4785 } 4786 /* No flight involved here book the size to 0 */ 4787 tp1->book_size = 0; 4788 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4789 foundeom = 1; 4790 } 4791 do_wakeup_routine = 1; 4792 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4793 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4794 /* on to the sent queue so we can wait for it to be 4795 * passed by. */ 4796 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4797 sctp_next); 4798 stcb->asoc.send_queue_cnt--; 4799 stcb->asoc.sent_queue_cnt++; 4800 } 4801 } 4802 if (foundeom == 0) { 4803 /* 4804 * Still no eom found. That means there is stuff left on the 4805 * stream out queue.. yuck. 4806 */ 4807 SCTP_TCB_SEND_LOCK(stcb); 4808 strq = &stcb->asoc.strmout[stream]; 4809 sp = TAILQ_FIRST(&strq->outqueue); 4810 if (sp != NULL) { 4811 sp->discard_rest = 1; 4812 /* 4813 * We may need to put a chunk on the queue that 4814 * holds the TSN that would have been sent with the 4815 * LAST bit. 4816 */ 4817 if (chk == NULL) { 4818 /* Yep, we have to */ 4819 sctp_alloc_a_chunk(stcb, chk); 4820 if (chk == NULL) { 4821 /* 4822 * we are hosed. All we can do is 4823 * nothing.. which will cause an 4824 * abort if the peer is paying 4825 * attention. 4826 */ 4827 goto oh_well; 4828 } 4829 memset(chk, 0, sizeof(*chk)); 4830 chk->rec.data.rcv_flags = 0; 4831 chk->sent = SCTP_FORWARD_TSN_SKIP; 4832 chk->asoc = &stcb->asoc; 4833 if (stcb->asoc.idata_supported == 0) { 4834 if (sp->sinfo_flags & SCTP_UNORDERED) { 4835 chk->rec.data.stream_seq = 0; 4836 } else { 4837 chk->rec.data.stream_seq = strq->next_mid_ordered; 4838 } 4839 } else { 4840 if (sp->sinfo_flags & SCTP_UNORDERED) { 4841 chk->rec.data.stream_seq = strq->next_mid_unordered; 4842 } else { 4843 chk->rec.data.stream_seq = strq->next_mid_ordered; 4844 } 4845 } 4846 chk->rec.data.stream_number = sp->stream; 4847 chk->rec.data.payloadtype = sp->ppid; 4848 chk->rec.data.context = sp->context; 4849 chk->flags = sp->act_flags; 4850 chk->whoTo = NULL; 4851 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4852 strq->chunks_on_queues++; 4853 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4854 stcb->asoc.sent_queue_cnt++; 4855 stcb->asoc.pr_sctp_cnt++; 4856 } 4857 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4858 if (sp->sinfo_flags & SCTP_UNORDERED) { 4859 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 4860 } 4861 if (stcb->asoc.idata_supported == 0) { 4862 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 4863 strq->next_mid_ordered++; 4864 } 4865 } else { 4866 if (sp->sinfo_flags & SCTP_UNORDERED) { 4867 strq->next_mid_unordered++; 4868 } else { 4869 strq->next_mid_ordered++; 4870 } 4871 } 4872 oh_well: 4873 if (sp->data) { 4874 /* 4875 * Pull any data to free up the SB and allow 4876 * sender to "add more" while we will throw 4877 * away :-) 4878 */ 4879 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 4880 ret_sz += sp->length; 4881 do_wakeup_routine = 1; 4882 sp->some_taken = 1; 4883 sctp_m_freem(sp->data); 4884 sp->data = NULL; 4885 sp->tail_mbuf = NULL; 4886 sp->length = 0; 4887 } 4888 } 4889 SCTP_TCB_SEND_UNLOCK(stcb); 4890 } 4891 if (do_wakeup_routine) { 4892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4893 struct socket *so; 4894 4895 so = SCTP_INP_SO(stcb->sctp_ep); 4896 if (!so_locked) { 4897 atomic_add_int(&stcb->asoc.refcnt, 1); 4898 SCTP_TCB_UNLOCK(stcb); 4899 SCTP_SOCKET_LOCK(so, 1); 4900 SCTP_TCB_LOCK(stcb); 4901 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4902 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4903 /* assoc was freed while we were unlocked */ 4904 SCTP_SOCKET_UNLOCK(so, 1); 4905 return (ret_sz); 4906 } 4907 } 4908 #endif 4909 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4910 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4911 if (!so_locked) { 4912 SCTP_SOCKET_UNLOCK(so, 1); 4913 } 4914 #endif 4915 } 4916 return (ret_sz); 4917 } 4918 4919 /* 4920 * checks to see if the given address, sa, is one that is currently known by 4921 * the kernel note: can't distinguish the same address on multiple interfaces 4922 * and doesn't handle multiple addresses with different zone/scope id's note: 4923 * ifa_ifwithaddr() compares the entire sockaddr struct 4924 */ 4925 struct sctp_ifa * 4926 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4927 int holds_lock) 4928 { 4929 struct sctp_laddr *laddr; 4930 4931 if (holds_lock == 0) { 4932 SCTP_INP_RLOCK(inp); 4933 } 4934 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4935 if (laddr->ifa == NULL) 4936 continue; 4937 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4938 continue; 4939 #ifdef INET 4940 if (addr->sa_family == AF_INET) { 4941 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4942 laddr->ifa->address.sin.sin_addr.s_addr) { 4943 /* found him. */ 4944 if (holds_lock == 0) { 4945 SCTP_INP_RUNLOCK(inp); 4946 } 4947 return (laddr->ifa); 4948 break; 4949 } 4950 } 4951 #endif 4952 #ifdef INET6 4953 if (addr->sa_family == AF_INET6) { 4954 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4955 &laddr->ifa->address.sin6)) { 4956 /* found him. */ 4957 if (holds_lock == 0) { 4958 SCTP_INP_RUNLOCK(inp); 4959 } 4960 return (laddr->ifa); 4961 break; 4962 } 4963 } 4964 #endif 4965 } 4966 if (holds_lock == 0) { 4967 SCTP_INP_RUNLOCK(inp); 4968 } 4969 return (NULL); 4970 } 4971 4972 uint32_t 4973 sctp_get_ifa_hash_val(struct sockaddr *addr) 4974 { 4975 switch (addr->sa_family) { 4976 #ifdef INET 4977 case AF_INET: 4978 { 4979 struct sockaddr_in *sin; 4980 4981 sin = (struct sockaddr_in *)addr; 4982 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4983 } 4984 #endif 4985 #ifdef INET6 4986 case AF_INET6: 4987 { 4988 struct sockaddr_in6 *sin6; 4989 uint32_t hash_of_addr; 4990 4991 sin6 = (struct sockaddr_in6 *)addr; 4992 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4993 sin6->sin6_addr.s6_addr32[1] + 4994 sin6->sin6_addr.s6_addr32[2] + 4995 sin6->sin6_addr.s6_addr32[3]); 4996 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4997 return (hash_of_addr); 4998 } 4999 #endif 5000 default: 5001 break; 5002 } 5003 return (0); 5004 } 5005 5006 struct sctp_ifa * 5007 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5008 { 5009 struct sctp_ifa *sctp_ifap; 5010 struct sctp_vrf *vrf; 5011 struct sctp_ifalist *hash_head; 5012 uint32_t hash_of_addr; 5013 5014 if (holds_lock == 0) 5015 SCTP_IPI_ADDR_RLOCK(); 5016 5017 vrf = sctp_find_vrf(vrf_id); 5018 if (vrf == NULL) { 5019 if (holds_lock == 0) 5020 SCTP_IPI_ADDR_RUNLOCK(); 5021 return (NULL); 5022 } 5023 hash_of_addr = sctp_get_ifa_hash_val(addr); 5024 5025 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5026 if (hash_head == NULL) { 5027 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5028 hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark, 5029 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark)); 5030 sctp_print_address(addr); 5031 SCTP_PRINTF("No such bucket for address\n"); 5032 if (holds_lock == 0) 5033 SCTP_IPI_ADDR_RUNLOCK(); 5034 5035 return (NULL); 5036 } 5037 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5038 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5039 continue; 5040 #ifdef INET 5041 if (addr->sa_family == AF_INET) { 5042 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5043 sctp_ifap->address.sin.sin_addr.s_addr) { 5044 /* found him. */ 5045 if (holds_lock == 0) 5046 SCTP_IPI_ADDR_RUNLOCK(); 5047 return (sctp_ifap); 5048 break; 5049 } 5050 } 5051 #endif 5052 #ifdef INET6 5053 if (addr->sa_family == AF_INET6) { 5054 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5055 &sctp_ifap->address.sin6)) { 5056 /* found him. */ 5057 if (holds_lock == 0) 5058 SCTP_IPI_ADDR_RUNLOCK(); 5059 return (sctp_ifap); 5060 break; 5061 } 5062 } 5063 #endif 5064 } 5065 if (holds_lock == 0) 5066 SCTP_IPI_ADDR_RUNLOCK(); 5067 return (NULL); 5068 } 5069 5070 static void 5071 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock, 5072 uint32_t rwnd_req) 5073 { 5074 /* User pulled some data, do we need a rwnd update? */ 5075 int r_unlocked = 0; 5076 uint32_t dif, rwnd; 5077 struct socket *so = NULL; 5078 5079 if (stcb == NULL) 5080 return; 5081 5082 atomic_add_int(&stcb->asoc.refcnt, 1); 5083 5084 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 5085 SCTP_STATE_SHUTDOWN_RECEIVED | 5086 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 5087 /* Pre-check If we are freeing no update */ 5088 goto no_lock; 5089 } 5090 SCTP_INP_INCR_REF(stcb->sctp_ep); 5091 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5092 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5093 goto out; 5094 } 5095 so = stcb->sctp_socket; 5096 if (so == NULL) { 5097 goto out; 5098 } 5099 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5100 /* Have you have freed enough to look */ 5101 *freed_so_far = 0; 5102 /* Yep, its worth a look and the lock overhead */ 5103 5104 /* Figure out what the rwnd would be */ 5105 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5106 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5107 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5108 } else { 5109 dif = 0; 5110 } 5111 if (dif >= rwnd_req) { 5112 if (hold_rlock) { 5113 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5114 r_unlocked = 1; 5115 } 5116 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5117 /* 5118 * One last check before we allow the guy possibly 5119 * to get in. There is a race, where the guy has not 5120 * reached the gate. In that case 5121 */ 5122 goto out; 5123 } 5124 SCTP_TCB_LOCK(stcb); 5125 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5126 /* No reports here */ 5127 SCTP_TCB_UNLOCK(stcb); 5128 goto out; 5129 } 5130 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5131 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5132 5133 sctp_chunk_output(stcb->sctp_ep, stcb, 5134 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5135 /* make sure no timer is running */ 5136 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5137 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5138 SCTP_TCB_UNLOCK(stcb); 5139 } else { 5140 /* Update how much we have pending */ 5141 stcb->freed_by_sorcv_sincelast = dif; 5142 } 5143 out: 5144 if (so && r_unlocked && hold_rlock) { 5145 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5146 } 5147 SCTP_INP_DECR_REF(stcb->sctp_ep); 5148 no_lock: 5149 atomic_add_int(&stcb->asoc.refcnt, -1); 5150 return; 5151 } 5152 5153 int 5154 sctp_sorecvmsg(struct socket *so, 5155 struct uio *uio, 5156 struct mbuf **mp, 5157 struct sockaddr *from, 5158 int fromlen, 5159 int *msg_flags, 5160 struct sctp_sndrcvinfo *sinfo, 5161 int filling_sinfo) 5162 { 5163 /* 5164 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5165 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5166 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 5167 * On the way out we may send out any combination of: 5168 * MSG_NOTIFICATION MSG_EOR 5169 * 5170 */ 5171 struct sctp_inpcb *inp = NULL; 5172 int my_len = 0; 5173 int cp_len = 0, error = 0; 5174 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5175 struct mbuf *m = NULL; 5176 struct sctp_tcb *stcb = NULL; 5177 int wakeup_read_socket = 0; 5178 int freecnt_applied = 0; 5179 int out_flags = 0, in_flags = 0; 5180 int block_allowed = 1; 5181 uint32_t freed_so_far = 0; 5182 uint32_t copied_so_far = 0; 5183 int in_eeor_mode = 0; 5184 int no_rcv_needed = 0; 5185 uint32_t rwnd_req = 0; 5186 int hold_sblock = 0; 5187 int hold_rlock = 0; 5188 ssize_t slen = 0; 5189 uint32_t held_length = 0; 5190 int sockbuf_lock = 0; 5191 5192 if (uio == NULL) { 5193 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5194 return (EINVAL); 5195 } 5196 if (msg_flags) { 5197 in_flags = *msg_flags; 5198 if (in_flags & MSG_PEEK) 5199 SCTP_STAT_INCR(sctps_read_peeks); 5200 } else { 5201 in_flags = 0; 5202 } 5203 slen = uio->uio_resid; 5204 5205 /* Pull in and set up our int flags */ 5206 if (in_flags & MSG_OOB) { 5207 /* Out of band's NOT supported */ 5208 return (EOPNOTSUPP); 5209 } 5210 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5211 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5212 return (EINVAL); 5213 } 5214 if ((in_flags & (MSG_DONTWAIT 5215 | MSG_NBIO 5216 )) || 5217 SCTP_SO_IS_NBIO(so)) { 5218 block_allowed = 0; 5219 } 5220 /* setup the endpoint */ 5221 inp = (struct sctp_inpcb *)so->so_pcb; 5222 if (inp == NULL) { 5223 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5224 return (EFAULT); 5225 } 5226 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5227 /* Must be at least a MTU's worth */ 5228 if (rwnd_req < SCTP_MIN_RWND) 5229 rwnd_req = SCTP_MIN_RWND; 5230 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5231 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5232 sctp_misc_ints(SCTP_SORECV_ENTER, 5233 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid); 5234 } 5235 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5236 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5237 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid); 5238 } 5239 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5240 if (error) { 5241 goto release_unlocked; 5242 } 5243 sockbuf_lock = 1; 5244 restart: 5245 5246 5247 restart_nosblocks: 5248 if (hold_sblock == 0) { 5249 SOCKBUF_LOCK(&so->so_rcv); 5250 hold_sblock = 1; 5251 } 5252 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5253 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5254 goto out; 5255 } 5256 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5257 if (so->so_error) { 5258 error = so->so_error; 5259 if ((in_flags & MSG_PEEK) == 0) 5260 so->so_error = 0; 5261 goto out; 5262 } else { 5263 if (so->so_rcv.sb_cc == 0) { 5264 /* indicate EOF */ 5265 error = 0; 5266 goto out; 5267 } 5268 } 5269 } 5270 if (so->so_rcv.sb_cc <= held_length) { 5271 if (so->so_error) { 5272 error = so->so_error; 5273 if ((in_flags & MSG_PEEK) == 0) { 5274 so->so_error = 0; 5275 } 5276 goto out; 5277 } 5278 if ((so->so_rcv.sb_cc == 0) && 5279 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5280 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5281 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5282 /* 5283 * For active open side clear flags for 5284 * re-use passive open is blocked by 5285 * connect. 5286 */ 5287 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5288 /* You were aborted, passive side 5289 * always hits here */ 5290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5291 error = ECONNRESET; 5292 } 5293 so->so_state &= ~(SS_ISCONNECTING | 5294 SS_ISDISCONNECTING | 5295 SS_ISCONFIRMING | 5296 SS_ISCONNECTED); 5297 if (error == 0) { 5298 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5299 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5300 error = ENOTCONN; 5301 } 5302 } 5303 goto out; 5304 } 5305 } 5306 if (block_allowed) { 5307 error = sbwait(&so->so_rcv); 5308 if (error) { 5309 goto out; 5310 } 5311 held_length = 0; 5312 goto restart_nosblocks; 5313 } else { 5314 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5315 error = EWOULDBLOCK; 5316 goto out; 5317 } 5318 } 5319 if (hold_sblock == 1) { 5320 SOCKBUF_UNLOCK(&so->so_rcv); 5321 hold_sblock = 0; 5322 } 5323 /* we possibly have data we can read */ 5324 /* sa_ignore FREED_MEMORY */ 5325 control = TAILQ_FIRST(&inp->read_queue); 5326 if (control == NULL) { 5327 /* 5328 * This could be happening since the appender did the 5329 * increment but as not yet did the tailq insert onto the 5330 * read_queue 5331 */ 5332 if (hold_rlock == 0) { 5333 SCTP_INP_READ_LOCK(inp); 5334 } 5335 control = TAILQ_FIRST(&inp->read_queue); 5336 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5337 #ifdef INVARIANTS 5338 panic("Huh, its non zero and nothing on control?"); 5339 #endif 5340 so->so_rcv.sb_cc = 0; 5341 } 5342 SCTP_INP_READ_UNLOCK(inp); 5343 hold_rlock = 0; 5344 goto restart; 5345 } 5346 if ((control->length == 0) && 5347 (control->do_not_ref_stcb)) { 5348 /* 5349 * Clean up code for freeing assoc that left behind a 5350 * pdapi.. maybe a peer in EEOR that just closed after 5351 * sending and never indicated a EOR. 5352 */ 5353 if (hold_rlock == 0) { 5354 hold_rlock = 1; 5355 SCTP_INP_READ_LOCK(inp); 5356 } 5357 control->held_length = 0; 5358 if (control->data) { 5359 /* Hmm there is data here .. fix */ 5360 struct mbuf *m_tmp; 5361 int cnt = 0; 5362 5363 m_tmp = control->data; 5364 while (m_tmp) { 5365 cnt += SCTP_BUF_LEN(m_tmp); 5366 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5367 control->tail_mbuf = m_tmp; 5368 control->end_added = 1; 5369 } 5370 m_tmp = SCTP_BUF_NEXT(m_tmp); 5371 } 5372 control->length = cnt; 5373 } else { 5374 /* remove it */ 5375 TAILQ_REMOVE(&inp->read_queue, control, next); 5376 /* Add back any hiddend data */ 5377 sctp_free_remote_addr(control->whoFrom); 5378 sctp_free_a_readq(stcb, control); 5379 } 5380 if (hold_rlock) { 5381 hold_rlock = 0; 5382 SCTP_INP_READ_UNLOCK(inp); 5383 } 5384 goto restart; 5385 } 5386 if ((control->length == 0) && 5387 (control->end_added == 1)) { 5388 /* Do we also need to check for (control->pdapi_aborted == 5389 * 1)? */ 5390 if (hold_rlock == 0) { 5391 hold_rlock = 1; 5392 SCTP_INP_READ_LOCK(inp); 5393 } 5394 TAILQ_REMOVE(&inp->read_queue, control, next); 5395 if (control->data) { 5396 #ifdef INVARIANTS 5397 panic("control->data not null but control->length == 0"); 5398 #else 5399 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5400 sctp_m_freem(control->data); 5401 control->data = NULL; 5402 #endif 5403 } 5404 if (control->aux_data) { 5405 sctp_m_free(control->aux_data); 5406 control->aux_data = NULL; 5407 } 5408 #ifdef INVARIANTS 5409 if (control->on_strm_q) { 5410 panic("About to free ctl:%p so:%p and its in %d", 5411 control, so, control->on_strm_q); 5412 } 5413 #endif 5414 sctp_free_remote_addr(control->whoFrom); 5415 sctp_free_a_readq(stcb, control); 5416 if (hold_rlock) { 5417 hold_rlock = 0; 5418 SCTP_INP_READ_UNLOCK(inp); 5419 } 5420 goto restart; 5421 } 5422 if (control->length == 0) { 5423 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5424 (filling_sinfo)) { 5425 /* find a more suitable one then this */ 5426 ctl = TAILQ_NEXT(control, next); 5427 while (ctl) { 5428 if ((ctl->stcb != control->stcb) && (ctl->length) && 5429 (ctl->some_taken || 5430 (ctl->spec_flags & M_NOTIFICATION) || 5431 ((ctl->do_not_ref_stcb == 0) && 5432 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5433 ) { 5434 /*- 5435 * If we have a different TCB next, and there is data 5436 * present. If we have already taken some (pdapi), OR we can 5437 * ref the tcb and no delivery as started on this stream, we 5438 * take it. Note we allow a notification on a different 5439 * assoc to be delivered.. 5440 */ 5441 control = ctl; 5442 goto found_one; 5443 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5444 (ctl->length) && 5445 ((ctl->some_taken) || 5446 ((ctl->do_not_ref_stcb == 0) && 5447 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5448 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5449 /*- 5450 * If we have the same tcb, and there is data present, and we 5451 * have the strm interleave feature present. Then if we have 5452 * taken some (pdapi) or we can refer to tht tcb AND we have 5453 * not started a delivery for this stream, we can take it. 5454 * Note we do NOT allow a notificaiton on the same assoc to 5455 * be delivered. 5456 */ 5457 control = ctl; 5458 goto found_one; 5459 } 5460 ctl = TAILQ_NEXT(ctl, next); 5461 } 5462 } 5463 /* 5464 * if we reach here, not suitable replacement is available 5465 * <or> fragment interleave is NOT on. So stuff the sb_cc 5466 * into the our held count, and its time to sleep again. 5467 */ 5468 held_length = so->so_rcv.sb_cc; 5469 control->held_length = so->so_rcv.sb_cc; 5470 goto restart; 5471 } 5472 /* Clear the held length since there is something to read */ 5473 control->held_length = 0; 5474 found_one: 5475 /* 5476 * If we reach here, control has a some data for us to read off. 5477 * Note that stcb COULD be NULL. 5478 */ 5479 if (hold_rlock == 0) { 5480 hold_rlock = 1; 5481 SCTP_INP_READ_LOCK(inp); 5482 } 5483 control->some_taken++; 5484 stcb = control->stcb; 5485 if (stcb) { 5486 if ((control->do_not_ref_stcb == 0) && 5487 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5488 if (freecnt_applied == 0) 5489 stcb = NULL; 5490 } else if (control->do_not_ref_stcb == 0) { 5491 /* you can't free it on me please */ 5492 /* 5493 * The lock on the socket buffer protects us so the 5494 * free code will stop. But since we used the 5495 * socketbuf lock and the sender uses the tcb_lock 5496 * to increment, we need to use the atomic add to 5497 * the refcnt 5498 */ 5499 if (freecnt_applied) { 5500 #ifdef INVARIANTS 5501 panic("refcnt already incremented"); 5502 #else 5503 SCTP_PRINTF("refcnt already incremented?\n"); 5504 #endif 5505 } else { 5506 atomic_add_int(&stcb->asoc.refcnt, 1); 5507 freecnt_applied = 1; 5508 } 5509 /* 5510 * Setup to remember how much we have not yet told 5511 * the peer our rwnd has opened up. Note we grab the 5512 * value from the tcb from last time. Note too that 5513 * sack sending clears this when a sack is sent, 5514 * which is fine. Once we hit the rwnd_req, we then 5515 * will go to the sctp_user_rcvd() that will not 5516 * lock until it KNOWs it MUST send a WUP-SACK. 5517 */ 5518 freed_so_far = stcb->freed_by_sorcv_sincelast; 5519 stcb->freed_by_sorcv_sincelast = 0; 5520 } 5521 } 5522 if (stcb && 5523 ((control->spec_flags & M_NOTIFICATION) == 0) && 5524 control->do_not_ref_stcb == 0) { 5525 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5526 } 5527 /* First lets get off the sinfo and sockaddr info */ 5528 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5529 sinfo->sinfo_stream = control->sinfo_stream; 5530 sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn; 5531 sinfo->sinfo_flags = control->sinfo_flags; 5532 sinfo->sinfo_ppid = control->sinfo_ppid; 5533 sinfo->sinfo_context = control->sinfo_context; 5534 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5535 sinfo->sinfo_tsn = control->sinfo_tsn; 5536 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5537 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5538 nxt = TAILQ_NEXT(control, next); 5539 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5540 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5541 struct sctp_extrcvinfo *s_extra; 5542 5543 s_extra = (struct sctp_extrcvinfo *)sinfo; 5544 if ((nxt) && 5545 (nxt->length)) { 5546 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5547 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5548 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5549 } 5550 if (nxt->spec_flags & M_NOTIFICATION) { 5551 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5552 } 5553 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5554 s_extra->serinfo_next_length = nxt->length; 5555 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5556 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5557 if (nxt->tail_mbuf != NULL) { 5558 if (nxt->end_added) { 5559 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5560 } 5561 } 5562 } else { 5563 /* 5564 * we explicitly 0 this, since the memcpy 5565 * got some other things beyond the older 5566 * sinfo_ that is on the control's structure 5567 * :-D 5568 */ 5569 nxt = NULL; 5570 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5571 s_extra->serinfo_next_aid = 0; 5572 s_extra->serinfo_next_length = 0; 5573 s_extra->serinfo_next_ppid = 0; 5574 s_extra->serinfo_next_stream = 0; 5575 } 5576 } 5577 /* 5578 * update off the real current cum-ack, if we have an stcb. 5579 */ 5580 if ((control->do_not_ref_stcb == 0) && stcb) 5581 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5582 /* 5583 * mask off the high bits, we keep the actual chunk bits in 5584 * there. 5585 */ 5586 sinfo->sinfo_flags &= 0x00ff; 5587 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5588 sinfo->sinfo_flags |= SCTP_UNORDERED; 5589 } 5590 } 5591 #ifdef SCTP_ASOCLOG_OF_TSNS 5592 { 5593 int index, newindex; 5594 struct sctp_pcbtsn_rlog *entry; 5595 5596 do { 5597 index = inp->readlog_index; 5598 newindex = index + 1; 5599 if (newindex >= SCTP_READ_LOG_SIZE) { 5600 newindex = 0; 5601 } 5602 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5603 entry = &inp->readlog[index]; 5604 entry->vtag = control->sinfo_assoc_id; 5605 entry->strm = control->sinfo_stream; 5606 entry->seq = control->sinfo_ssn; 5607 entry->sz = control->length; 5608 entry->flgs = control->sinfo_flags; 5609 } 5610 #endif 5611 if ((fromlen > 0) && (from != NULL)) { 5612 union sctp_sockstore store; 5613 size_t len; 5614 5615 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5616 #ifdef INET6 5617 case AF_INET6: 5618 len = sizeof(struct sockaddr_in6); 5619 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5620 store.sin6.sin6_port = control->port_from; 5621 break; 5622 #endif 5623 #ifdef INET 5624 case AF_INET: 5625 #ifdef INET6 5626 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5627 len = sizeof(struct sockaddr_in6); 5628 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5629 &store.sin6); 5630 store.sin6.sin6_port = control->port_from; 5631 } else { 5632 len = sizeof(struct sockaddr_in); 5633 store.sin = control->whoFrom->ro._l_addr.sin; 5634 store.sin.sin_port = control->port_from; 5635 } 5636 #else 5637 len = sizeof(struct sockaddr_in); 5638 store.sin = control->whoFrom->ro._l_addr.sin; 5639 store.sin.sin_port = control->port_from; 5640 #endif 5641 break; 5642 #endif 5643 default: 5644 len = 0; 5645 break; 5646 } 5647 memcpy(from, &store, min((size_t)fromlen, len)); 5648 #ifdef INET6 5649 { 5650 struct sockaddr_in6 lsa6, *from6; 5651 5652 from6 = (struct sockaddr_in6 *)from; 5653 sctp_recover_scope_mac(from6, (&lsa6)); 5654 } 5655 #endif 5656 } 5657 if (hold_rlock) { 5658 SCTP_INP_READ_UNLOCK(inp); 5659 hold_rlock = 0; 5660 } 5661 if (hold_sblock) { 5662 SOCKBUF_UNLOCK(&so->so_rcv); 5663 hold_sblock = 0; 5664 } 5665 /* now copy out what data we can */ 5666 if (mp == NULL) { 5667 /* copy out each mbuf in the chain up to length */ 5668 get_more_data: 5669 m = control->data; 5670 while (m) { 5671 /* Move out all we can */ 5672 cp_len = (int)uio->uio_resid; 5673 my_len = (int)SCTP_BUF_LEN(m); 5674 if (cp_len > my_len) { 5675 /* not enough in this buf */ 5676 cp_len = my_len; 5677 } 5678 if (hold_rlock) { 5679 SCTP_INP_READ_UNLOCK(inp); 5680 hold_rlock = 0; 5681 } 5682 if (cp_len > 0) 5683 error = uiomove(mtod(m, char *), cp_len, uio); 5684 /* re-read */ 5685 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5686 goto release; 5687 } 5688 if ((control->do_not_ref_stcb == 0) && stcb && 5689 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5690 no_rcv_needed = 1; 5691 } 5692 if (error) { 5693 /* error we are out of here */ 5694 goto release; 5695 } 5696 SCTP_INP_READ_LOCK(inp); 5697 hold_rlock = 1; 5698 if (cp_len == SCTP_BUF_LEN(m)) { 5699 if ((SCTP_BUF_NEXT(m) == NULL) && 5700 (control->end_added)) { 5701 out_flags |= MSG_EOR; 5702 if ((control->do_not_ref_stcb == 0) && 5703 (control->stcb != NULL) && 5704 ((control->spec_flags & M_NOTIFICATION) == 0)) 5705 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5706 } 5707 if (control->spec_flags & M_NOTIFICATION) { 5708 out_flags |= MSG_NOTIFICATION; 5709 } 5710 /* we ate up the mbuf */ 5711 if (in_flags & MSG_PEEK) { 5712 /* just looking */ 5713 m = SCTP_BUF_NEXT(m); 5714 copied_so_far += cp_len; 5715 } else { 5716 /* dispose of the mbuf */ 5717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5718 sctp_sblog(&so->so_rcv, 5719 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5720 } 5721 sctp_sbfree(control, stcb, &so->so_rcv, m); 5722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5723 sctp_sblog(&so->so_rcv, 5724 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5725 } 5726 copied_so_far += cp_len; 5727 freed_so_far += cp_len; 5728 freed_so_far += MSIZE; 5729 atomic_subtract_int(&control->length, cp_len); 5730 control->data = sctp_m_free(m); 5731 m = control->data; 5732 /* been through it all, must hold sb 5733 * lock ok to null tail */ 5734 if (control->data == NULL) { 5735 #ifdef INVARIANTS 5736 if ((control->end_added == 0) || 5737 (TAILQ_NEXT(control, next) == NULL)) { 5738 /* 5739 * If the end is not 5740 * added, OR the 5741 * next is NOT null 5742 * we MUST have the 5743 * lock. 5744 */ 5745 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5746 panic("Hmm we don't own the lock?"); 5747 } 5748 } 5749 #endif 5750 control->tail_mbuf = NULL; 5751 #ifdef INVARIANTS 5752 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5753 panic("end_added, nothing left and no MSG_EOR"); 5754 } 5755 #endif 5756 } 5757 } 5758 } else { 5759 /* Do we need to trim the mbuf? */ 5760 if (control->spec_flags & M_NOTIFICATION) { 5761 out_flags |= MSG_NOTIFICATION; 5762 } 5763 if ((in_flags & MSG_PEEK) == 0) { 5764 SCTP_BUF_RESV_UF(m, cp_len); 5765 SCTP_BUF_LEN(m) -= cp_len; 5766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5767 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5768 } 5769 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5770 if ((control->do_not_ref_stcb == 0) && 5771 stcb) { 5772 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5773 } 5774 copied_so_far += cp_len; 5775 freed_so_far += cp_len; 5776 freed_so_far += MSIZE; 5777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5778 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5779 SCTP_LOG_SBRESULT, 0); 5780 } 5781 atomic_subtract_int(&control->length, cp_len); 5782 } else { 5783 copied_so_far += cp_len; 5784 } 5785 } 5786 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5787 break; 5788 } 5789 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5790 (control->do_not_ref_stcb == 0) && 5791 (freed_so_far >= rwnd_req)) { 5792 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5793 } 5794 } /* end while(m) */ 5795 /* 5796 * At this point we have looked at it all and we either have 5797 * a MSG_EOR/or read all the user wants... <OR> 5798 * control->length == 0. 5799 */ 5800 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5801 /* we are done with this control */ 5802 if (control->length == 0) { 5803 if (control->data) { 5804 #ifdef INVARIANTS 5805 panic("control->data not null at read eor?"); 5806 #else 5807 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5808 sctp_m_freem(control->data); 5809 control->data = NULL; 5810 #endif 5811 } 5812 done_with_control: 5813 if (hold_rlock == 0) { 5814 SCTP_INP_READ_LOCK(inp); 5815 hold_rlock = 1; 5816 } 5817 TAILQ_REMOVE(&inp->read_queue, control, next); 5818 /* Add back any hiddend data */ 5819 if (control->held_length) { 5820 held_length = 0; 5821 control->held_length = 0; 5822 wakeup_read_socket = 1; 5823 } 5824 if (control->aux_data) { 5825 sctp_m_free(control->aux_data); 5826 control->aux_data = NULL; 5827 } 5828 no_rcv_needed = control->do_not_ref_stcb; 5829 sctp_free_remote_addr(control->whoFrom); 5830 control->data = NULL; 5831 #ifdef INVARIANTS 5832 if (control->on_strm_q) { 5833 panic("About to free ctl:%p so:%p and its in %d", 5834 control, so, control->on_strm_q); 5835 } 5836 #endif 5837 sctp_free_a_readq(stcb, control); 5838 control = NULL; 5839 if ((freed_so_far >= rwnd_req) && 5840 (no_rcv_needed == 0)) 5841 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5842 5843 } else { 5844 /* 5845 * The user did not read all of this 5846 * message, turn off the returned MSG_EOR 5847 * since we are leaving more behind on the 5848 * control to read. 5849 */ 5850 #ifdef INVARIANTS 5851 if (control->end_added && 5852 (control->data == NULL) && 5853 (control->tail_mbuf == NULL)) { 5854 panic("Gak, control->length is corrupt?"); 5855 } 5856 #endif 5857 no_rcv_needed = control->do_not_ref_stcb; 5858 out_flags &= ~MSG_EOR; 5859 } 5860 } 5861 if (out_flags & MSG_EOR) { 5862 goto release; 5863 } 5864 if ((uio->uio_resid == 0) || 5865 ((in_eeor_mode) && 5866 (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) { 5867 goto release; 5868 } 5869 /* 5870 * If I hit here the receiver wants more and this message is 5871 * NOT done (pd-api). So two questions. Can we block? if not 5872 * we are done. Did the user NOT set MSG_WAITALL? 5873 */ 5874 if (block_allowed == 0) { 5875 goto release; 5876 } 5877 /* 5878 * We need to wait for more data a few things: - We don't 5879 * sbunlock() so we don't get someone else reading. - We 5880 * must be sure to account for the case where what is added 5881 * is NOT to our control when we wakeup. 5882 */ 5883 5884 /* 5885 * Do we need to tell the transport a rwnd update might be 5886 * needed before we go to sleep? 5887 */ 5888 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5889 ((freed_so_far >= rwnd_req) && 5890 (control->do_not_ref_stcb == 0) && 5891 (no_rcv_needed == 0))) { 5892 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5893 } 5894 wait_some_more: 5895 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5896 goto release; 5897 } 5898 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5899 goto release; 5900 5901 if (hold_rlock == 1) { 5902 SCTP_INP_READ_UNLOCK(inp); 5903 hold_rlock = 0; 5904 } 5905 if (hold_sblock == 0) { 5906 SOCKBUF_LOCK(&so->so_rcv); 5907 hold_sblock = 1; 5908 } 5909 if ((copied_so_far) && (control->length == 0) && 5910 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5911 goto release; 5912 } 5913 if (so->so_rcv.sb_cc <= control->held_length) { 5914 error = sbwait(&so->so_rcv); 5915 if (error) { 5916 goto release; 5917 } 5918 control->held_length = 0; 5919 } 5920 if (hold_sblock) { 5921 SOCKBUF_UNLOCK(&so->so_rcv); 5922 hold_sblock = 0; 5923 } 5924 if (control->length == 0) { 5925 /* still nothing here */ 5926 if (control->end_added == 1) { 5927 /* he aborted, or is done i.e.did a shutdown */ 5928 out_flags |= MSG_EOR; 5929 if (control->pdapi_aborted) { 5930 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5931 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5932 5933 out_flags |= MSG_TRUNC; 5934 } else { 5935 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5936 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5937 } 5938 goto done_with_control; 5939 } 5940 if (so->so_rcv.sb_cc > held_length) { 5941 control->held_length = so->so_rcv.sb_cc; 5942 held_length = 0; 5943 } 5944 goto wait_some_more; 5945 } else if (control->data == NULL) { 5946 /* 5947 * we must re-sync since data is probably being 5948 * added 5949 */ 5950 SCTP_INP_READ_LOCK(inp); 5951 if ((control->length > 0) && (control->data == NULL)) { 5952 /* big trouble.. we have the lock and its 5953 * corrupt? */ 5954 #ifdef INVARIANTS 5955 panic("Impossible data==NULL length !=0"); 5956 #endif 5957 out_flags |= MSG_EOR; 5958 out_flags |= MSG_TRUNC; 5959 control->length = 0; 5960 SCTP_INP_READ_UNLOCK(inp); 5961 goto done_with_control; 5962 } 5963 SCTP_INP_READ_UNLOCK(inp); 5964 /* We will fall around to get more data */ 5965 } 5966 goto get_more_data; 5967 } else { 5968 /*- 5969 * Give caller back the mbuf chain, 5970 * store in uio_resid the length 5971 */ 5972 wakeup_read_socket = 0; 5973 if ((control->end_added == 0) || 5974 (TAILQ_NEXT(control, next) == NULL)) { 5975 /* Need to get rlock */ 5976 if (hold_rlock == 0) { 5977 SCTP_INP_READ_LOCK(inp); 5978 hold_rlock = 1; 5979 } 5980 } 5981 if (control->end_added) { 5982 out_flags |= MSG_EOR; 5983 if ((control->do_not_ref_stcb == 0) && 5984 (control->stcb != NULL) && 5985 ((control->spec_flags & M_NOTIFICATION) == 0)) 5986 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5987 } 5988 if (control->spec_flags & M_NOTIFICATION) { 5989 out_flags |= MSG_NOTIFICATION; 5990 } 5991 uio->uio_resid = control->length; 5992 *mp = control->data; 5993 m = control->data; 5994 while (m) { 5995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5996 sctp_sblog(&so->so_rcv, 5997 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5998 } 5999 sctp_sbfree(control, stcb, &so->so_rcv, m); 6000 freed_so_far += SCTP_BUF_LEN(m); 6001 freed_so_far += MSIZE; 6002 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6003 sctp_sblog(&so->so_rcv, 6004 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6005 } 6006 m = SCTP_BUF_NEXT(m); 6007 } 6008 control->data = control->tail_mbuf = NULL; 6009 control->length = 0; 6010 if (out_flags & MSG_EOR) { 6011 /* Done with this control */ 6012 goto done_with_control; 6013 } 6014 } 6015 release: 6016 if (hold_rlock == 1) { 6017 SCTP_INP_READ_UNLOCK(inp); 6018 hold_rlock = 0; 6019 } 6020 if (hold_sblock == 1) { 6021 SOCKBUF_UNLOCK(&so->so_rcv); 6022 hold_sblock = 0; 6023 } 6024 sbunlock(&so->so_rcv); 6025 sockbuf_lock = 0; 6026 6027 release_unlocked: 6028 if (hold_sblock) { 6029 SOCKBUF_UNLOCK(&so->so_rcv); 6030 hold_sblock = 0; 6031 } 6032 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6033 if ((freed_so_far >= rwnd_req) && 6034 (control && (control->do_not_ref_stcb == 0)) && 6035 (no_rcv_needed == 0)) 6036 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6037 } 6038 out: 6039 if (msg_flags) { 6040 *msg_flags = out_flags; 6041 } 6042 if (((out_flags & MSG_EOR) == 0) && 6043 ((in_flags & MSG_PEEK) == 0) && 6044 (sinfo) && 6045 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6046 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6047 struct sctp_extrcvinfo *s_extra; 6048 6049 s_extra = (struct sctp_extrcvinfo *)sinfo; 6050 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6051 } 6052 if (hold_rlock == 1) { 6053 SCTP_INP_READ_UNLOCK(inp); 6054 } 6055 if (hold_sblock) { 6056 SOCKBUF_UNLOCK(&so->so_rcv); 6057 } 6058 if (sockbuf_lock) { 6059 sbunlock(&so->so_rcv); 6060 } 6061 if (freecnt_applied) { 6062 /* 6063 * The lock on the socket buffer protects us so the free 6064 * code will stop. But since we used the socketbuf lock and 6065 * the sender uses the tcb_lock to increment, we need to use 6066 * the atomic add to the refcnt. 6067 */ 6068 if (stcb == NULL) { 6069 #ifdef INVARIANTS 6070 panic("stcb for refcnt has gone NULL?"); 6071 goto stage_left; 6072 #else 6073 goto stage_left; 6074 #endif 6075 } 6076 /* Save the value back for next time */ 6077 stcb->freed_by_sorcv_sincelast = freed_so_far; 6078 atomic_add_int(&stcb->asoc.refcnt, -1); 6079 } 6080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6081 if (stcb) { 6082 sctp_misc_ints(SCTP_SORECV_DONE, 6083 freed_so_far, 6084 (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen), 6085 stcb->asoc.my_rwnd, 6086 so->so_rcv.sb_cc); 6087 } else { 6088 sctp_misc_ints(SCTP_SORECV_DONE, 6089 freed_so_far, 6090 (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen), 6091 0, 6092 so->so_rcv.sb_cc); 6093 } 6094 } 6095 stage_left: 6096 if (wakeup_read_socket) { 6097 sctp_sorwakeup(inp, so); 6098 } 6099 return (error); 6100 } 6101 6102 6103 #ifdef SCTP_MBUF_LOGGING 6104 struct mbuf * 6105 sctp_m_free(struct mbuf *m) 6106 { 6107 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6108 sctp_log_mb(m, SCTP_MBUF_IFREE); 6109 } 6110 return (m_free(m)); 6111 } 6112 6113 void 6114 sctp_m_freem(struct mbuf *mb) 6115 { 6116 while (mb != NULL) 6117 mb = sctp_m_free(mb); 6118 } 6119 6120 #endif 6121 6122 int 6123 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6124 { 6125 /* 6126 * Given a local address. For all associations that holds the 6127 * address, request a peer-set-primary. 6128 */ 6129 struct sctp_ifa *ifa; 6130 struct sctp_laddr *wi; 6131 6132 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6133 if (ifa == NULL) { 6134 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6135 return (EADDRNOTAVAIL); 6136 } 6137 /* 6138 * Now that we have the ifa we must awaken the iterator with this 6139 * message. 6140 */ 6141 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6142 if (wi == NULL) { 6143 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6144 return (ENOMEM); 6145 } 6146 /* Now incr the count and int wi structure */ 6147 SCTP_INCR_LADDR_COUNT(); 6148 bzero(wi, sizeof(*wi)); 6149 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6150 wi->ifa = ifa; 6151 wi->action = SCTP_SET_PRIM_ADDR; 6152 atomic_add_int(&ifa->refcount, 1); 6153 6154 /* Now add it to the work queue */ 6155 SCTP_WQ_ADDR_LOCK(); 6156 /* 6157 * Should this really be a tailq? As it is we will process the 6158 * newest first :-0 6159 */ 6160 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6161 SCTP_WQ_ADDR_UNLOCK(); 6162 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6163 (struct sctp_inpcb *)NULL, 6164 (struct sctp_tcb *)NULL, 6165 (struct sctp_nets *)NULL); 6166 return (0); 6167 } 6168 6169 6170 int 6171 sctp_soreceive(struct socket *so, 6172 struct sockaddr **psa, 6173 struct uio *uio, 6174 struct mbuf **mp0, 6175 struct mbuf **controlp, 6176 int *flagsp) 6177 { 6178 int error, fromlen; 6179 uint8_t sockbuf[256]; 6180 struct sockaddr *from; 6181 struct sctp_extrcvinfo sinfo; 6182 int filling_sinfo = 1; 6183 struct sctp_inpcb *inp; 6184 6185 inp = (struct sctp_inpcb *)so->so_pcb; 6186 /* pickup the assoc we are reading from */ 6187 if (inp == NULL) { 6188 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6189 return (EINVAL); 6190 } 6191 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6192 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6193 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6194 (controlp == NULL)) { 6195 /* user does not want the sndrcv ctl */ 6196 filling_sinfo = 0; 6197 } 6198 if (psa) { 6199 from = (struct sockaddr *)sockbuf; 6200 fromlen = sizeof(sockbuf); 6201 from->sa_len = 0; 6202 } else { 6203 from = NULL; 6204 fromlen = 0; 6205 } 6206 6207 if (filling_sinfo) { 6208 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6209 } 6210 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 6211 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6212 if (controlp != NULL) { 6213 /* copy back the sinfo in a CMSG format */ 6214 if (filling_sinfo) 6215 *controlp = sctp_build_ctl_nchunk(inp, 6216 (struct sctp_sndrcvinfo *)&sinfo); 6217 else 6218 *controlp = NULL; 6219 } 6220 if (psa) { 6221 /* copy back the address info */ 6222 if (from && from->sa_len) { 6223 *psa = sodupsockaddr(from, M_NOWAIT); 6224 } else { 6225 *psa = NULL; 6226 } 6227 } 6228 return (error); 6229 } 6230 6231 6232 6233 6234 6235 int 6236 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6237 int totaddr, int *error) 6238 { 6239 int added = 0; 6240 int i; 6241 struct sctp_inpcb *inp; 6242 struct sockaddr *sa; 6243 size_t incr = 0; 6244 #ifdef INET 6245 struct sockaddr_in *sin; 6246 #endif 6247 #ifdef INET6 6248 struct sockaddr_in6 *sin6; 6249 #endif 6250 6251 sa = addr; 6252 inp = stcb->sctp_ep; 6253 *error = 0; 6254 for (i = 0; i < totaddr; i++) { 6255 switch (sa->sa_family) { 6256 #ifdef INET 6257 case AF_INET: 6258 incr = sizeof(struct sockaddr_in); 6259 sin = (struct sockaddr_in *)sa; 6260 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6261 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6262 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6263 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6264 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6265 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6266 *error = EINVAL; 6267 goto out_now; 6268 } 6269 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6270 SCTP_DONOT_SETSCOPE, 6271 SCTP_ADDR_IS_CONFIRMED)) { 6272 /* assoc gone no un-lock */ 6273 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6274 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6275 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6276 *error = ENOBUFS; 6277 goto out_now; 6278 } 6279 added++; 6280 break; 6281 #endif 6282 #ifdef INET6 6283 case AF_INET6: 6284 incr = sizeof(struct sockaddr_in6); 6285 sin6 = (struct sockaddr_in6 *)sa; 6286 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6287 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6288 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6289 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6290 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6291 *error = EINVAL; 6292 goto out_now; 6293 } 6294 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6295 SCTP_DONOT_SETSCOPE, 6296 SCTP_ADDR_IS_CONFIRMED)) { 6297 /* assoc gone no un-lock */ 6298 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6299 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6300 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6301 *error = ENOBUFS; 6302 goto out_now; 6303 } 6304 added++; 6305 break; 6306 #endif 6307 default: 6308 break; 6309 } 6310 sa = (struct sockaddr *)((caddr_t)sa + incr); 6311 } 6312 out_now: 6313 return (added); 6314 } 6315 6316 struct sctp_tcb * 6317 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6318 unsigned int *totaddr, 6319 unsigned int *num_v4, unsigned int *num_v6, int *error, 6320 unsigned int limit, int *bad_addr) 6321 { 6322 struct sockaddr *sa; 6323 struct sctp_tcb *stcb = NULL; 6324 unsigned int incr, at, i; 6325 6326 at = 0; 6327 sa = addr; 6328 *error = *num_v6 = *num_v4 = 0; 6329 /* account and validate addresses */ 6330 for (i = 0; i < *totaddr; i++) { 6331 switch (sa->sa_family) { 6332 #ifdef INET 6333 case AF_INET: 6334 incr = (unsigned int)sizeof(struct sockaddr_in); 6335 if (sa->sa_len != incr) { 6336 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6337 *error = EINVAL; 6338 *bad_addr = 1; 6339 return (NULL); 6340 } 6341 (*num_v4) += 1; 6342 break; 6343 #endif 6344 #ifdef INET6 6345 case AF_INET6: 6346 { 6347 struct sockaddr_in6 *sin6; 6348 6349 sin6 = (struct sockaddr_in6 *)sa; 6350 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6351 /* Must be non-mapped for connectx */ 6352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6353 *error = EINVAL; 6354 *bad_addr = 1; 6355 return (NULL); 6356 } 6357 incr = (unsigned int)sizeof(struct sockaddr_in6); 6358 if (sa->sa_len != incr) { 6359 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6360 *error = EINVAL; 6361 *bad_addr = 1; 6362 return (NULL); 6363 } 6364 (*num_v6) += 1; 6365 break; 6366 } 6367 #endif 6368 default: 6369 *totaddr = i; 6370 incr = 0; 6371 /* we are done */ 6372 break; 6373 } 6374 if (i == *totaddr) { 6375 break; 6376 } 6377 SCTP_INP_INCR_REF(inp); 6378 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6379 if (stcb != NULL) { 6380 /* Already have or am bring up an association */ 6381 return (stcb); 6382 } else { 6383 SCTP_INP_DECR_REF(inp); 6384 } 6385 if ((at + incr) > limit) { 6386 *totaddr = i; 6387 break; 6388 } 6389 sa = (struct sockaddr *)((caddr_t)sa + incr); 6390 } 6391 return ((struct sctp_tcb *)NULL); 6392 } 6393 6394 /* 6395 * sctp_bindx(ADD) for one address. 6396 * assumes all arguments are valid/checked by caller. 6397 */ 6398 void 6399 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6400 struct sockaddr *sa, sctp_assoc_t assoc_id, 6401 uint32_t vrf_id, int *error, void *p) 6402 { 6403 struct sockaddr *addr_touse; 6404 #if defined(INET) && defined(INET6) 6405 struct sockaddr_in sin; 6406 #endif 6407 6408 /* see if we're bound all already! */ 6409 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6410 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6411 *error = EINVAL; 6412 return; 6413 } 6414 addr_touse = sa; 6415 #ifdef INET6 6416 if (sa->sa_family == AF_INET6) { 6417 #ifdef INET 6418 struct sockaddr_in6 *sin6; 6419 6420 #endif 6421 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6422 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6423 *error = EINVAL; 6424 return; 6425 } 6426 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6427 /* can only bind v6 on PF_INET6 sockets */ 6428 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6429 *error = EINVAL; 6430 return; 6431 } 6432 #ifdef INET 6433 sin6 = (struct sockaddr_in6 *)addr_touse; 6434 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6435 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6436 SCTP_IPV6_V6ONLY(inp)) { 6437 /* can't bind v4-mapped on PF_INET sockets */ 6438 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6439 *error = EINVAL; 6440 return; 6441 } 6442 in6_sin6_2_sin(&sin, sin6); 6443 addr_touse = (struct sockaddr *)&sin; 6444 } 6445 #endif 6446 } 6447 #endif 6448 #ifdef INET 6449 if (sa->sa_family == AF_INET) { 6450 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6451 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6452 *error = EINVAL; 6453 return; 6454 } 6455 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6456 SCTP_IPV6_V6ONLY(inp)) { 6457 /* can't bind v4 on PF_INET sockets */ 6458 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6459 *error = EINVAL; 6460 return; 6461 } 6462 } 6463 #endif 6464 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6465 if (p == NULL) { 6466 /* Can't get proc for Net/Open BSD */ 6467 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6468 *error = EINVAL; 6469 return; 6470 } 6471 *error = sctp_inpcb_bind(so, addr_touse, NULL, p); 6472 return; 6473 } 6474 /* 6475 * No locks required here since bind and mgmt_ep_sa all do their own 6476 * locking. If we do something for the FIX: below we may need to 6477 * lock in that case. 6478 */ 6479 if (assoc_id == 0) { 6480 /* add the address */ 6481 struct sctp_inpcb *lep; 6482 struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse; 6483 6484 /* validate the incoming port */ 6485 if ((lsin->sin_port != 0) && 6486 (lsin->sin_port != inp->sctp_lport)) { 6487 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6488 *error = EINVAL; 6489 return; 6490 } else { 6491 /* user specified 0 port, set it to existing port */ 6492 lsin->sin_port = inp->sctp_lport; 6493 } 6494 6495 lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id); 6496 if (lep != NULL) { 6497 /* 6498 * We must decrement the refcount since we have the 6499 * ep already and are binding. No remove going on 6500 * here. 6501 */ 6502 SCTP_INP_DECR_REF(lep); 6503 } 6504 if (lep == inp) { 6505 /* already bound to it.. ok */ 6506 return; 6507 } else if (lep == NULL) { 6508 ((struct sockaddr_in *)addr_touse)->sin_port = 0; 6509 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 6510 SCTP_ADD_IP_ADDRESS, 6511 vrf_id, NULL); 6512 } else { 6513 *error = EADDRINUSE; 6514 } 6515 if (*error) 6516 return; 6517 } else { 6518 /* 6519 * FIX: decide whether we allow assoc based bindx 6520 */ 6521 } 6522 } 6523 6524 /* 6525 * sctp_bindx(DELETE) for one address. 6526 * assumes all arguments are valid/checked by caller. 6527 */ 6528 void 6529 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6530 struct sockaddr *sa, sctp_assoc_t assoc_id, 6531 uint32_t vrf_id, int *error) 6532 { 6533 struct sockaddr *addr_touse; 6534 #if defined(INET) && defined(INET6) 6535 struct sockaddr_in sin; 6536 #endif 6537 6538 /* see if we're bound all already! */ 6539 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6540 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6541 *error = EINVAL; 6542 return; 6543 } 6544 addr_touse = sa; 6545 #ifdef INET6 6546 if (sa->sa_family == AF_INET6) { 6547 #ifdef INET 6548 struct sockaddr_in6 *sin6; 6549 #endif 6550 6551 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6552 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6553 *error = EINVAL; 6554 return; 6555 } 6556 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6557 /* can only bind v6 on PF_INET6 sockets */ 6558 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6559 *error = EINVAL; 6560 return; 6561 } 6562 #ifdef INET 6563 sin6 = (struct sockaddr_in6 *)addr_touse; 6564 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6565 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6566 SCTP_IPV6_V6ONLY(inp)) { 6567 /* can't bind mapped-v4 on PF_INET sockets */ 6568 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6569 *error = EINVAL; 6570 return; 6571 } 6572 in6_sin6_2_sin(&sin, sin6); 6573 addr_touse = (struct sockaddr *)&sin; 6574 } 6575 #endif 6576 } 6577 #endif 6578 #ifdef INET 6579 if (sa->sa_family == AF_INET) { 6580 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6581 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6582 *error = EINVAL; 6583 return; 6584 } 6585 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6586 SCTP_IPV6_V6ONLY(inp)) { 6587 /* can't bind v4 on PF_INET sockets */ 6588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6589 *error = EINVAL; 6590 return; 6591 } 6592 } 6593 #endif 6594 /* 6595 * No lock required mgmt_ep_sa does its own locking. If the FIX: 6596 * below is ever changed we may need to lock before calling 6597 * association level binding. 6598 */ 6599 if (assoc_id == 0) { 6600 /* delete the address */ 6601 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 6602 SCTP_DEL_IP_ADDRESS, 6603 vrf_id, NULL); 6604 } else { 6605 /* 6606 * FIX: decide whether we allow assoc based bindx 6607 */ 6608 } 6609 } 6610 6611 /* 6612 * returns the valid local address count for an assoc, taking into account 6613 * all scoping rules 6614 */ 6615 int 6616 sctp_local_addr_count(struct sctp_tcb *stcb) 6617 { 6618 int loopback_scope; 6619 #if defined(INET) 6620 int ipv4_local_scope, ipv4_addr_legal; 6621 #endif 6622 #if defined (INET6) 6623 int local_scope, site_scope, ipv6_addr_legal; 6624 #endif 6625 struct sctp_vrf *vrf; 6626 struct sctp_ifn *sctp_ifn; 6627 struct sctp_ifa *sctp_ifa; 6628 int count = 0; 6629 6630 /* Turn on all the appropriate scopes */ 6631 loopback_scope = stcb->asoc.scope.loopback_scope; 6632 #if defined(INET) 6633 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6634 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6635 #endif 6636 #if defined(INET6) 6637 local_scope = stcb->asoc.scope.local_scope; 6638 site_scope = stcb->asoc.scope.site_scope; 6639 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6640 #endif 6641 SCTP_IPI_ADDR_RLOCK(); 6642 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6643 if (vrf == NULL) { 6644 /* no vrf, no addresses */ 6645 SCTP_IPI_ADDR_RUNLOCK(); 6646 return (0); 6647 } 6648 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6649 /* 6650 * bound all case: go through all ifns on the vrf 6651 */ 6652 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6653 if ((loopback_scope == 0) && 6654 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6655 continue; 6656 } 6657 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6658 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6659 continue; 6660 switch (sctp_ifa->address.sa.sa_family) { 6661 #ifdef INET 6662 case AF_INET: 6663 if (ipv4_addr_legal) { 6664 struct sockaddr_in *sin; 6665 6666 sin = &sctp_ifa->address.sin; 6667 if (sin->sin_addr.s_addr == 0) { 6668 /* skip unspecified 6669 * addrs */ 6670 continue; 6671 } 6672 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6673 &sin->sin_addr) != 0) { 6674 continue; 6675 } 6676 if ((ipv4_local_scope == 0) && 6677 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6678 continue; 6679 } 6680 /* count this one */ 6681 count++; 6682 } else { 6683 continue; 6684 } 6685 break; 6686 #endif 6687 #ifdef INET6 6688 case AF_INET6: 6689 if (ipv6_addr_legal) { 6690 struct sockaddr_in6 *sin6; 6691 6692 sin6 = &sctp_ifa->address.sin6; 6693 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6694 continue; 6695 } 6696 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6697 &sin6->sin6_addr) != 0) { 6698 continue; 6699 } 6700 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6701 if (local_scope == 0) 6702 continue; 6703 if (sin6->sin6_scope_id == 0) { 6704 if (sa6_recoverscope(sin6) != 0) 6705 /* 6706 * 6707 * bad 6708 * link 6709 * 6710 * local 6711 * 6712 * address 6713 */ 6714 continue; 6715 } 6716 } 6717 if ((site_scope == 0) && 6718 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 6719 continue; 6720 } 6721 /* count this one */ 6722 count++; 6723 } 6724 break; 6725 #endif 6726 default: 6727 /* TSNH */ 6728 break; 6729 } 6730 } 6731 } 6732 } else { 6733 /* 6734 * subset bound case 6735 */ 6736 struct sctp_laddr *laddr; 6737 6738 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 6739 sctp_nxt_addr) { 6740 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 6741 continue; 6742 } 6743 /* count this one */ 6744 count++; 6745 } 6746 } 6747 SCTP_IPI_ADDR_RUNLOCK(); 6748 return (count); 6749 } 6750 6751 #if defined(SCTP_LOCAL_TRACE_BUF) 6752 6753 void 6754 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 6755 { 6756 uint32_t saveindex, newindex; 6757 6758 do { 6759 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 6760 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 6761 newindex = 1; 6762 } else { 6763 newindex = saveindex + 1; 6764 } 6765 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 6766 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 6767 saveindex = 0; 6768 } 6769 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 6770 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 6771 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 6772 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 6773 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 6774 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 6775 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 6776 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 6777 } 6778 6779 #endif 6780 static void 6781 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 6782 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 6783 { 6784 struct ip *iph; 6785 #ifdef INET6 6786 struct ip6_hdr *ip6; 6787 #endif 6788 struct mbuf *sp, *last; 6789 struct udphdr *uhdr; 6790 uint16_t port; 6791 6792 if ((m->m_flags & M_PKTHDR) == 0) { 6793 /* Can't handle one that is not a pkt hdr */ 6794 goto out; 6795 } 6796 /* Pull the src port */ 6797 iph = mtod(m, struct ip *); 6798 uhdr = (struct udphdr *)((caddr_t)iph + off); 6799 port = uhdr->uh_sport; 6800 /* 6801 * Split out the mbuf chain. Leave the IP header in m, place the 6802 * rest in the sp. 6803 */ 6804 sp = m_split(m, off, M_NOWAIT); 6805 if (sp == NULL) { 6806 /* Gak, drop packet, we can't do a split */ 6807 goto out; 6808 } 6809 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 6810 /* Gak, packet can't have an SCTP header in it - too small */ 6811 m_freem(sp); 6812 goto out; 6813 } 6814 /* Now pull up the UDP header and SCTP header together */ 6815 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 6816 if (sp == NULL) { 6817 /* Gak pullup failed */ 6818 goto out; 6819 } 6820 /* Trim out the UDP header */ 6821 m_adj(sp, sizeof(struct udphdr)); 6822 6823 /* Now reconstruct the mbuf chain */ 6824 for (last = m; last->m_next; last = last->m_next); 6825 last->m_next = sp; 6826 m->m_pkthdr.len += sp->m_pkthdr.len; 6827 /* 6828 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 6829 * checksum and it was valid. Since CSUM_DATA_VALID == 6830 * CSUM_SCTP_VALID this would imply that the HW also verified the 6831 * SCTP checksum. Therefore, clear the bit. 6832 */ 6833 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 6834 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 6835 m->m_pkthdr.len, 6836 if_name(m->m_pkthdr.rcvif), 6837 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 6838 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 6839 iph = mtod(m, struct ip *); 6840 switch (iph->ip_v) { 6841 #ifdef INET 6842 case IPVERSION: 6843 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 6844 sctp_input_with_port(m, off, port); 6845 break; 6846 #endif 6847 #ifdef INET6 6848 case IPV6_VERSION >> 4: 6849 ip6 = mtod(m, struct ip6_hdr *); 6850 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 6851 sctp6_input_with_port(&m, &off, port); 6852 break; 6853 #endif 6854 default: 6855 goto out; 6856 break; 6857 } 6858 return; 6859 out: 6860 m_freem(m); 6861 } 6862 6863 #ifdef INET 6864 static void 6865 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 6866 { 6867 struct ip *outer_ip, *inner_ip; 6868 struct sctphdr *sh; 6869 struct icmp *icmp; 6870 struct udphdr *udp; 6871 struct sctp_inpcb *inp; 6872 struct sctp_tcb *stcb; 6873 struct sctp_nets *net; 6874 struct sctp_init_chunk *ch; 6875 struct sockaddr_in src, dst; 6876 uint8_t type, code; 6877 6878 inner_ip = (struct ip *)vip; 6879 icmp = (struct icmp *)((caddr_t)inner_ip - 6880 (sizeof(struct icmp) - sizeof(struct ip))); 6881 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 6882 if (ntohs(outer_ip->ip_len) < 6883 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 6884 return; 6885 } 6886 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 6887 sh = (struct sctphdr *)(udp + 1); 6888 memset(&src, 0, sizeof(struct sockaddr_in)); 6889 src.sin_family = AF_INET; 6890 src.sin_len = sizeof(struct sockaddr_in); 6891 src.sin_port = sh->src_port; 6892 src.sin_addr = inner_ip->ip_src; 6893 memset(&dst, 0, sizeof(struct sockaddr_in)); 6894 dst.sin_family = AF_INET; 6895 dst.sin_len = sizeof(struct sockaddr_in); 6896 dst.sin_port = sh->dest_port; 6897 dst.sin_addr = inner_ip->ip_dst; 6898 /* 6899 * 'dst' holds the dest of the packet that failed to be sent. 'src' 6900 * holds our local endpoint address. Thus we reverse the dst and the 6901 * src in the lookup. 6902 */ 6903 inp = NULL; 6904 net = NULL; 6905 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 6906 (struct sockaddr *)&src, 6907 &inp, &net, 1, 6908 SCTP_DEFAULT_VRFID); 6909 if ((stcb != NULL) && 6910 (net != NULL) && 6911 (inp != NULL)) { 6912 /* Check the UDP port numbers */ 6913 if ((udp->uh_dport != net->port) || 6914 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 6915 SCTP_TCB_UNLOCK(stcb); 6916 return; 6917 } 6918 /* Check the verification tag */ 6919 if (ntohl(sh->v_tag) != 0) { 6920 /* 6921 * This must be the verification tag used for 6922 * sending out packets. We don't consider packets 6923 * reflecting the verification tag. 6924 */ 6925 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 6926 SCTP_TCB_UNLOCK(stcb); 6927 return; 6928 } 6929 } else { 6930 if (ntohs(outer_ip->ip_len) >= 6931 sizeof(struct ip) + 6932 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 6933 /* 6934 * In this case we can check if we got an 6935 * INIT chunk and if the initiate tag 6936 * matches. 6937 */ 6938 ch = (struct sctp_init_chunk *)(sh + 1); 6939 if ((ch->ch.chunk_type != SCTP_INITIATION) || 6940 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 6941 SCTP_TCB_UNLOCK(stcb); 6942 return; 6943 } 6944 } else { 6945 SCTP_TCB_UNLOCK(stcb); 6946 return; 6947 } 6948 } 6949 type = icmp->icmp_type; 6950 code = icmp->icmp_code; 6951 if ((type == ICMP_UNREACH) && 6952 (code == ICMP_UNREACH_PORT)) { 6953 code = ICMP_UNREACH_PROTOCOL; 6954 } 6955 sctp_notify(inp, stcb, net, type, code, 6956 ntohs(inner_ip->ip_len), 6957 ntohs(icmp->icmp_nextmtu)); 6958 } else { 6959 if ((stcb == NULL) && (inp != NULL)) { 6960 /* reduce ref-count */ 6961 SCTP_INP_WLOCK(inp); 6962 SCTP_INP_DECR_REF(inp); 6963 SCTP_INP_WUNLOCK(inp); 6964 } 6965 if (stcb) { 6966 SCTP_TCB_UNLOCK(stcb); 6967 } 6968 } 6969 return; 6970 } 6971 #endif 6972 6973 #ifdef INET6 6974 static void 6975 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 6976 { 6977 struct ip6ctlparam *ip6cp; 6978 struct sctp_inpcb *inp; 6979 struct sctp_tcb *stcb; 6980 struct sctp_nets *net; 6981 struct sctphdr sh; 6982 struct udphdr udp; 6983 struct sockaddr_in6 src, dst; 6984 uint8_t type, code; 6985 6986 ip6cp = (struct ip6ctlparam *)d; 6987 /* 6988 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 6989 */ 6990 if (ip6cp->ip6c_m == NULL) { 6991 return; 6992 } 6993 /* 6994 * Check if we can safely examine the ports and the verification tag 6995 * of the SCTP common header. 6996 */ 6997 if (ip6cp->ip6c_m->m_pkthdr.len < 6998 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 6999 return; 7000 } 7001 /* Copy out the UDP header. */ 7002 memset(&udp, 0, sizeof(struct udphdr)); 7003 m_copydata(ip6cp->ip6c_m, 7004 ip6cp->ip6c_off, 7005 sizeof(struct udphdr), 7006 (caddr_t)&udp); 7007 /* Copy out the port numbers and the verification tag. */ 7008 memset(&sh, 0, sizeof(struct sctphdr)); 7009 m_copydata(ip6cp->ip6c_m, 7010 ip6cp->ip6c_off + sizeof(struct udphdr), 7011 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7012 (caddr_t)&sh); 7013 memset(&src, 0, sizeof(struct sockaddr_in6)); 7014 src.sin6_family = AF_INET6; 7015 src.sin6_len = sizeof(struct sockaddr_in6); 7016 src.sin6_port = sh.src_port; 7017 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7018 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7019 return; 7020 } 7021 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7022 dst.sin6_family = AF_INET6; 7023 dst.sin6_len = sizeof(struct sockaddr_in6); 7024 dst.sin6_port = sh.dest_port; 7025 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7026 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7027 return; 7028 } 7029 inp = NULL; 7030 net = NULL; 7031 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7032 (struct sockaddr *)&src, 7033 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7034 if ((stcb != NULL) && 7035 (net != NULL) && 7036 (inp != NULL)) { 7037 /* Check the UDP port numbers */ 7038 if ((udp.uh_dport != net->port) || 7039 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7040 SCTP_TCB_UNLOCK(stcb); 7041 return; 7042 } 7043 /* Check the verification tag */ 7044 if (ntohl(sh.v_tag) != 0) { 7045 /* 7046 * This must be the verification tag used for 7047 * sending out packets. We don't consider packets 7048 * reflecting the verification tag. 7049 */ 7050 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7051 SCTP_TCB_UNLOCK(stcb); 7052 return; 7053 } 7054 } else { 7055 if (ip6cp->ip6c_m->m_pkthdr.len >= 7056 ip6cp->ip6c_off + sizeof(struct udphdr) + 7057 sizeof(struct sctphdr) + 7058 sizeof(struct sctp_chunkhdr) + 7059 offsetof(struct sctp_init, a_rwnd)) { 7060 /* 7061 * In this case we can check if we got an 7062 * INIT chunk and if the initiate tag 7063 * matches. 7064 */ 7065 uint32_t initiate_tag; 7066 uint8_t chunk_type; 7067 7068 m_copydata(ip6cp->ip6c_m, 7069 ip6cp->ip6c_off + 7070 sizeof(struct udphdr) + 7071 sizeof(struct sctphdr), 7072 sizeof(uint8_t), 7073 (caddr_t)&chunk_type); 7074 m_copydata(ip6cp->ip6c_m, 7075 ip6cp->ip6c_off + 7076 sizeof(struct udphdr) + 7077 sizeof(struct sctphdr) + 7078 sizeof(struct sctp_chunkhdr), 7079 sizeof(uint32_t), 7080 (caddr_t)&initiate_tag); 7081 if ((chunk_type != SCTP_INITIATION) || 7082 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7083 SCTP_TCB_UNLOCK(stcb); 7084 return; 7085 } 7086 } else { 7087 SCTP_TCB_UNLOCK(stcb); 7088 return; 7089 } 7090 } 7091 type = ip6cp->ip6c_icmp6->icmp6_type; 7092 code = ip6cp->ip6c_icmp6->icmp6_code; 7093 if ((type == ICMP6_DST_UNREACH) && 7094 (code == ICMP6_DST_UNREACH_NOPORT)) { 7095 type = ICMP6_PARAM_PROB; 7096 code = ICMP6_PARAMPROB_NEXTHEADER; 7097 } 7098 sctp6_notify(inp, stcb, net, type, code, 7099 (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7100 } else { 7101 if ((stcb == NULL) && (inp != NULL)) { 7102 /* reduce inp's ref-count */ 7103 SCTP_INP_WLOCK(inp); 7104 SCTP_INP_DECR_REF(inp); 7105 SCTP_INP_WUNLOCK(inp); 7106 } 7107 if (stcb) { 7108 SCTP_TCB_UNLOCK(stcb); 7109 } 7110 } 7111 } 7112 #endif 7113 7114 void 7115 sctp_over_udp_stop(void) 7116 { 7117 /* 7118 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7119 * for writting! 7120 */ 7121 #ifdef INET 7122 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7123 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7124 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7125 } 7126 #endif 7127 #ifdef INET6 7128 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7129 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7130 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7131 } 7132 #endif 7133 } 7134 7135 int 7136 sctp_over_udp_start(void) 7137 { 7138 uint16_t port; 7139 int ret; 7140 #ifdef INET 7141 struct sockaddr_in sin; 7142 #endif 7143 #ifdef INET6 7144 struct sockaddr_in6 sin6; 7145 #endif 7146 /* 7147 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7148 * for writting! 7149 */ 7150 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7151 if (ntohs(port) == 0) { 7152 /* Must have a port set */ 7153 return (EINVAL); 7154 } 7155 #ifdef INET 7156 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7157 /* Already running -- must stop first */ 7158 return (EALREADY); 7159 } 7160 #endif 7161 #ifdef INET6 7162 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7163 /* Already running -- must stop first */ 7164 return (EALREADY); 7165 } 7166 #endif 7167 #ifdef INET 7168 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7169 SOCK_DGRAM, IPPROTO_UDP, 7170 curthread->td_ucred, curthread))) { 7171 sctp_over_udp_stop(); 7172 return (ret); 7173 } 7174 /* Call the special UDP hook. */ 7175 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7176 sctp_recv_udp_tunneled_packet, 7177 sctp_recv_icmp_tunneled_packet, 7178 NULL))) { 7179 sctp_over_udp_stop(); 7180 return (ret); 7181 } 7182 /* Ok, we have a socket, bind it to the port. */ 7183 memset(&sin, 0, sizeof(struct sockaddr_in)); 7184 sin.sin_len = sizeof(struct sockaddr_in); 7185 sin.sin_family = AF_INET; 7186 sin.sin_port = htons(port); 7187 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket), 7188 (struct sockaddr *)&sin, curthread))) { 7189 sctp_over_udp_stop(); 7190 return (ret); 7191 } 7192 #endif 7193 #ifdef INET6 7194 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket), 7195 SOCK_DGRAM, IPPROTO_UDP, 7196 curthread->td_ucred, curthread))) { 7197 sctp_over_udp_stop(); 7198 return (ret); 7199 } 7200 /* Call the special UDP hook. */ 7201 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), 7202 sctp_recv_udp_tunneled_packet, 7203 sctp_recv_icmp6_tunneled_packet, 7204 NULL))) { 7205 sctp_over_udp_stop(); 7206 return (ret); 7207 } 7208 /* Ok, we have a socket, bind it to the port. */ 7209 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 7210 sin6.sin6_len = sizeof(struct sockaddr_in6); 7211 sin6.sin6_family = AF_INET6; 7212 sin6.sin6_port = htons(port); 7213 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket), 7214 (struct sockaddr *)&sin6, curthread))) { 7215 sctp_over_udp_stop(); 7216 return (ret); 7217 } 7218 #endif 7219 return (0); 7220 } 7221