/*
 * Copyright (c) 1984, 1985, 1986, 1987 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)spp_usrreq.c	7.3 (Berkeley) 11/05/87
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "mbuf.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/route.h"
#include "../netinet/tcp_fsm.h"
#include "../netinet/tcp_timer.h"

#include "ns.h"
#include "ns_pcb.h"
#include "idp.h"
#include "idp_var.h"
#include "ns_error.h"
#include "sp.h"
#include "spidp.h"
#include "spp_var.h"
#include "spp_debug.h"

/*
 * SP protocol implementation.
 */
spp_init()
{

	spp_iss = 1;	/* WRONG !! should fish it out of TODR */
}
struct spidp spp_savesi;
int traceallspps = 0;
extern int sppconsdebug;
int spp_hardnosed;
int spp_use_delack = 0;

/*ARGSUSED*/
spp_input(m, nsp, ifp)
	register struct mbuf *m;
	register struct nspcb *nsp;
	struct ifnet *ifp;
{
	register struct sppcb *cb;
	register struct spidp *si = mtod(m, struct spidp *);
	register struct socket *so;
	short ostate;
	int dropsocket = 0;

	sppstat.spps_rcvtotal++;
	if (nsp == 0) {
		panic("No nspcb in spp_input\n");
		return;
	}

	cb = nstosppcb(nsp);
	if (cb == 0) goto bad;

	if (m->m_len < sizeof(*si)) {
		if ((m = m_pullup(m, sizeof(*si))) == 0) {
			sppstat.spps_rcvshort++;
			return;
		}
		si = mtod(m, struct spidp *);
	}
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);

	so = nsp->nsp_socket;
	if (so->so_options & SO_DEBUG || traceallspps) {
		ostate = cb->s_state;
		spp_savesi = *si;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		struct sppcb *ocb = cb;
		struct socket *oso = so;
		so = sonewconn(so);
		if (so == 0) {
			goto drop;
		}
		/*
		 * This is ugly, but ....
		 *
		 * Mark socket as temporary until we're
		 * committed to keeping it.  The code at
		 * ``drop'' and ``dropwithreset'' checks the
		 * flag dropsocket to see if the temporary
		 * socket created here should be discarded.
		 * We mark the socket as discardable until
		 * we're committed to it below in TCPS_LISTEN.
		 */
		dropsocket++;
		nsp = (struct nspcb *)so->so_pcb;
		nsp->nsp_laddr = si->si_dna;
		cb = nstosppcb(nsp);
		cb->s_mtu = ocb->s_mtu;		/* preserve sockopts */
		cb->s_flags = ocb->s_flags;	/* preserve sockopts */
		if (so->so_snd.sb_hiwat != oso->so_snd.sb_hiwat)	/*XXX*/
			sbreserve(&so->so_snd, oso->so_snd.sb_hiwat);
		if (so->so_rcv.sb_hiwat != oso->so_rcv.sb_hiwat)	/*XXX*/
			sbreserve(&so->so_rcv, oso->so_rcv.sb_hiwat);
		cb->s_state = TCPS_LISTEN;
	}

	/*
	 * Packet received on connection;
	 * reset idle time and keep-alive timer.
	 */
	cb->s_idle = 0;
	cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;

	switch (cb->s_state) {

	case TCPS_LISTEN: {
		struct mbuf *am;
		register struct sockaddr_ns *sns;
		struct ns_addr laddr;

		/*
		 * If somebody here was carrying on a conversation
		 * and went away, and his pen pal thinks he can
		 * still talk, we get the misdirected packet.
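		 * When spp_hardnosed is set, such strays are answered
		 * with an error (dropwithreset below); otherwise they are
		 * simply treated as new connection requests.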
		 */
		if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
			spp_istat.gonawy++;
			goto dropwithreset;
		}
		am = m_get(M_DONTWAIT, MT_SONAME);
		if (am == NULL)
			goto drop;
		am->m_len = sizeof (struct sockaddr_ns);
		sns = mtod(am, struct sockaddr_ns *);
		sns->sns_family = AF_NS;
		sns->sns_addr = si->si_sna;
		laddr = nsp->nsp_laddr;
		if (ns_nullhost(laddr))
			nsp->nsp_laddr = si->si_dna;
		if (ns_pcbconnect(nsp, am)) {
			nsp->nsp_laddr = laddr;
			(void) m_free(am);
			spp_istat.noconn++;
			goto drop;
		}
		(void) m_free(am);
		spp_template(cb);
		dropsocket = 0;		/* committed to socket */
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
#define THREEWAYSHAKE
#ifdef THREEWAYSHAKE
		cb->s_state = TCPS_SYN_RECEIVED;
		cb->s_force = 1 + TCPT_KEEP;
		sppstat.spps_accepts++;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		}
		break;
	/*
	 * This state means that we have heard a response
	 * to our acceptance of their connection.
	 * It is probably logically unnecessary in this
	 * implementation.
	 */
	case TCPS_SYN_RECEIVED: {
		if (si->si_did != cb->s_sid) {
			spp_istat.wrncon++;
			goto drop;
		}
#endif
		nsp->nsp_fport = si->si_sport;
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		sppstat.spps_accepts++;
		}
		break;

	/*
	 * This state means that we have gotten a response
	 * to our attempt to establish a connection.
	 * We fill in the data from the other side,
	 * telling us which port to respond to, instead of the well-
	 * known one we might have sent to in the first place.
	 * We also require that this is a response to our
	 * connection id.
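	 * (A packet whose si_did does not match our s_sid is counted
	 * in spp_istat.notme and dropped.)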
	 */
	case TCPS_SYN_SENT:
		if (si->si_did != cb->s_sid) {
			spp_istat.notme++;
			goto drop;
		}
		sppstat.spps_connects++;
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
		cb->s_dport = nsp->nsp_fport = si->si_sport;
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_flags |= SF_ACKNOW;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		/* Use roundtrip time of connection request for initial rtt */
		if (cb->s_rtt) {
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
			TCPT_RANGESET(cb->s_rxtcur,
			    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
			    TCPTV_MIN, TCPTV_REXMTMAX);
			cb->s_rtt = 0;
		}
	}
	if (so->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);

	m->m_len -= sizeof (struct idp);
	m->m_off += sizeof (struct idp);

	if (spp_reass(cb, si)) {
		m_freem(m);
	}
	if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
		(void) spp_output(cb, (struct mbuf *)0);
	cb->s_flags &= ~(SF_WIN|SF_RXT);
	return;

dropwithreset:
	if (dropsocket)
		(void) soabort(so);
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);
	ns_error(dtom(si), NS_ERR_NOSOCK, 0);
	if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	return;

drop:
bad:
	if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
	    traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	m_freem(m);
}

int spprexmtthresh = 3;

/*
 * This is structurally similar to the tcp reassembly routine
 * but its function is somewhat different:  It merely queues
 * packets up, and suppresses duplicates.
 */
spp_reass(cb, si)
	register struct sppcb *cb;
	register struct spidp *si;
{
	register struct spidp_q *q;
	register struct mbuf *m;
	register struct socket *so = cb->s_nspcb->nsp_socket;
	char packetp = cb->s_flags & SF_HI;
	int incr;
	char wakeup = 0;

	if (si == SI(0))
		goto present;
	/*
	 * Update our news from them.
	 */
	if (si->si_cc & SP_SA)
		cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
	if (SSEQ_GT(si->si_alo, cb->s_ralo))
		cb->s_flags |= SF_WIN;
	if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
		if ((si->si_cc & SP_SP) && cb->s_rack != cb->s_smax) {
			sppstat.spps_rcvdupack++;
			/*
			 * If this is a completely duplicate ack
			 * and other conditions hold, we assume
			 * a packet has been dropped and retransmit
			 * it exactly as in tcp_input().
			 */
			if (si->si_ack != cb->s_rack ||
			    si->si_alo != cb->s_ralo)
				cb->s_dupacks = 0;
			else if (++cb->s_dupacks == spprexmtthresh) {
				u_short onxt = cb->s_snxt;
				int cwnd = cb->s_cwnd;

				cb->s_snxt = si->si_ack;
				cb->s_cwnd = CUNIT;
				cb->s_force = 1 + TCPT_REXMT;
				(void) spp_output(cb, 0);
				cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
				cb->s_rtt = 0;
				if (cwnd >= 4 * CUNIT)
					cb->s_cwnd = cwnd / 2;
				if (SSEQ_GT(onxt, cb->s_snxt))
					cb->s_snxt = onxt;
				return (1);
			}
		} else
			cb->s_dupacks = 0;
		goto update_window;
	}
	cb->s_dupacks = 0;
	/*
	 * If our correspondent acknowledges data we haven't sent,
	 * TCP would drop the packet after acking.
	 * We'll be a little
	 * more permissive
	 */
	if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
		sppstat.spps_rcvacktoomuch++;
		si->si_ack = cb->s_smax + 1;
	}
	sppstat.spps_rcvackpack++;
	/*
	 * If transmit timer is running and timed sequence
	 * number was acked, update smoothed round trip time.
	 * See discussion of algorithm in tcp_input.c
	 */
	if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
		sppstat.spps_rttupdated++;
		if (cb->s_srtt != 0) {
			register short delta;
			delta = cb->s_rtt - (cb->s_srtt >> 3);
			if ((cb->s_srtt += delta) <= 0)
				cb->s_srtt = 1;
			if (delta < 0)
				delta = -delta;
			delta -= (cb->s_rttvar >> 2);
			if ((cb->s_rttvar += delta) <= 0)
				cb->s_rttvar = 1;
		} else {
			/*
			 * No rtt measurement yet
			 */
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
		}
		cb->s_rtt = 0;
		cb->s_rxtshift = 0;
		TCPT_RANGESET(cb->s_rxtcur,
		    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
		    TCPTV_MIN, TCPTV_REXMTMAX);
	}
	/*
	 * If all outstanding data is acked, stop retransmit
	 * timer and remember to restart (more output or persist).
	 * If there is more data to be acked, restart retransmit
	 * timer, using current (possibly backed-off) value;
	 */
	if (si->si_ack == cb->s_smax + 1) {
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_flags |= SF_RXT;
	} else if (cb->s_timer[TCPT_PERSIST] == 0)
		cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
	/*
	 * When new data is acked, open the congestion window.
	 * If the window gives us less than ssthresh packets
	 * in flight, open exponentially (maxseg at a time).
	 * Otherwise open linearly (maxseg^2 / cwnd at a time).
	 */
	incr = CUNIT;
	if (cb->s_cwnd > cb->s_ssthresh)
		incr = MAX(incr * incr / cb->s_cwnd, 1);
	cb->s_cwnd = MIN(cb->s_cwnd + incr, cb->s_cwmx);
	/*
	 * Trim Acked data from output queue.
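	 * Each record in the send queue begins with its spidp header,
	 * so records whose sequence number is below the incoming ack
	 * are dropped.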
	 */
	for (m = so->so_snd.sb_mb; m; m = m->m_act) {
		if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
			sbdroprecord(&so->so_snd);
		else
			break;
	}
	if ((so->so_snd.sb_flags & SB_WAIT) || so->so_snd.sb_sel)
		sowwakeup(so);
	cb->s_rack = si->si_ack;
update_window:
	if (SSEQ_LT(cb->s_snxt, cb->s_rack))
		cb->s_snxt = cb->s_rack;
	if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
	    (SSEQ_LT(cb->s_swl2, si->si_ack) ||
	     cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
		/* keep track of pure window updates */
		if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
		    && SSEQ_LT(cb->s_ralo, si->si_alo)) {
			sppstat.spps_rcvwinupd++;
			sppstat.spps_rcvdupack--;
		}
		cb->s_ralo = si->si_alo;
		cb->s_swl1 = si->si_seq;
		cb->s_swl2 = si->si_ack;
		cb->s_swnd = (1 + si->si_alo - si->si_ack);
		if (cb->s_swnd > cb->s_smxw)
			cb->s_smxw = cb->s_swnd;
		cb->s_flags |= SF_WIN;
	}
	/*
	 * If this packet number is higher than that which
	 * we have allocated, refuse it, unless urgent.
	 */
	if (SSEQ_GT(si->si_seq, cb->s_alo)) {
		if (si->si_cc & SP_SP) {
			sppstat.spps_rcvwinprobe++;
			return (1);
		} else
			sppstat.spps_rcvpackafterwin++;
		if (si->si_cc & SP_OB) {
			if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
				ns_error(dtom(si), NS_ERR_FULLUP, 0);
				return (0);
			} /* else queue this packet; */
		} else {
			/*register struct socket *so = cb->s_nspcb->nsp_socket;
			if (so->so_state && SS_NOFDREF) {
				ns_error(dtom(si), NS_ERR_NOSOCK, 0);
				(void)spp_close(cb);
			} else
				would crash system*/
			spp_istat.notyet++;
			ns_error(dtom(si), NS_ERR_FULLUP, 0);
			return (0);
		}
	}
	/*
	 * If this is a system packet, we don't need to
	 * queue it up, and won't update acknowledge #
	 */
	if (si->si_cc & SP_SP) {
		return (1);
	}
	/*
	 * We have already seen this packet, so drop.
	 */
	if (SSEQ_LT(si->si_seq, cb->s_ack)) {
		spp_istat.bdreas++;
		sppstat.spps_rcvduppack++;
		if (si->si_seq == cb->s_ack - 1)
			spp_istat.lstdup++;
		return (1);
	}
	/*
	 * Loop through all packets queued up to insert in
	 * appropriate sequence.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (si->si_seq == SI(q)->si_seq) {
			sppstat.spps_rcvduppack++;
			return (1);
		}
		if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
			sppstat.spps_rcvoopack++;
			break;
		}
	}
	insque(si, q->si_prev);
	/*
	 * If this packet is urgent, inform process
	 */
	if (si->si_cc & SP_OB) {
		cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
		sohasoutofband(so);
		cb->s_oobflags |= SF_IOOB;
	}
present:
#define SPINC sizeof(struct sphdr)
	/*
	 * Loop through all packets queued up to update acknowledge
	 * number, and present all acknowledged data to user;
	 * If in packet interface mode, show packet headers.
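	 * In packet mode each record is appended whole (SP header
	 * included); otherwise the header is saved in s_rhdr, stripped,
	 * and the data appended as a byte stream.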
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (SI(q)->si_seq == cb->s_ack) {
			cb->s_ack++;
			m = dtom(q);
			if (SI(q)->si_cc & SP_OB) {
				cb->s_oobflags &= ~SF_IOOB;
				if (so->so_rcv.sb_cc)
					so->so_oobmark = so->so_rcv.sb_cc;
				else
					so->so_state |= SS_RCVATMARK;
			}
			q = q->si_prev;
			remque(q->si_next);
			wakeup = 1;
			sppstat.spps_rcvpack++;
			if (packetp) {
				sbappendrecord(&so->so_rcv, m);
			} else {
				cb->s_rhdr = *mtod(m, struct sphdr *);
				m->m_off += SPINC;
				m->m_len -= SPINC;
				sbappend(&so->so_rcv, m);
			}
		} else
			break;
	}
	if (wakeup) sorwakeup(so);
	return (0);
}

spp_ctlinput(cmd, arg)
	int cmd;
	caddr_t arg;
{
	struct ns_addr *na;
	extern u_char nsctlerrmap[];
	extern spp_abort(), spp_quench();
	extern struct nspcb *idp_drop();
	struct ns_errp *errp;
	struct nspcb *nsp;
	struct sockaddr_ns *sns;
	int type;

	if (cmd < 0 || cmd > PRC_NCMDS)
		return;
	type = NS_ERR_UNREACH_HOST;

	switch (cmd) {

	case PRC_ROUTEDEAD:
		return;

	case PRC_IFDOWN:
	case PRC_HOSTDEAD:
	case PRC_HOSTUNREACH:
		sns = (struct sockaddr_ns *)arg;
		if (sns->sns_family != AF_NS)
			return;
		na = &sns->sns_addr;
		break;

	default:
		errp = (struct ns_errp *)arg;
		na = &errp->ns_err_idp.idp_dna;
		type = errp->ns_err_num;
		type = ntohs((u_short)type);
	}
	switch (type) {

	case NS_ERR_UNREACH_HOST:
		ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
		break;

	case NS_ERR_TOO_BIG:
	case NS_ERR_NOSOCK:
		nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
			NS_WILDCARD);
		if (nsp) {
			if (nsp->nsp_pcb)
				(void) spp_drop((struct sppcb *)nsp->nsp_pcb,
					(int)nsctlerrmap[cmd]);
			else
				(void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
		}
		break;

	case NS_ERR_FULLUP:
		ns_pcbnotify(na, 0, spp_quench, (long) 0);
	}
}
/*
 * When a source quench is received, close congestion window
 * to one packet.  We will gradually open it again as we proceed.
 */
spp_quench(nsp)
	struct nspcb *nsp;
{
	struct sppcb *cb = nstosppcb(nsp);

	if (cb)
		cb->s_cwnd = CUNIT;
}

#ifdef notdef
int
spp_fixmtu(nsp)
	register struct nspcb *nsp;
{
	register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
	register struct mbuf *m;
	register struct spidp *si;
	struct ns_errp *ep;
	struct sockbuf *sb;
	int badseq, len;
	struct mbuf *firstbad, *m0;

	if (cb) {
		/*
		 * The notification that we have sent
		 * too much is bad news -- we will
		 * have to go through the packets queued up so far,
		 * splitting ones which are too big and
		 * reassigning sequence numbers and checksums.
		 * We should then retransmit all packets from
		 * one above the offending packet to the last one
		 * we had sent (or our allocation),
		 * then the offending one, so that any queued
		 * data at our destination will be discarded.
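		 * (Note: this routine was left unfinished -- see the
		 * FINISH THIS marker below -- and is compiled out
		 * under #ifdef notdef.)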
		 */
		ep = (struct ns_errp *)nsp->nsp_notify_param;
		sb = &nsp->nsp_socket->so_snd;
		cb->s_mtu = ep->ns_err_param;
		badseq = SI(&ep->ns_err_idp)->si_seq;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (si->si_seq == badseq)
				break;
		}
		if (m == 0) return;
		firstbad = m;
		/*for (;;) {*/
			/* calculate length */
			for (m0 = m, len = 0; m; m = m->m_next)
				len += m->m_len;
			if (len > cb->s_mtu) {
			}
		/* FINISH THIS
		} */
	}
}
#endif

spp_output(cb, m0)
	register struct sppcb *cb;
	struct mbuf *m0;
{
	struct socket *so = cb->s_nspcb->nsp_socket;
	register struct mbuf *m;
	register struct spidp *si = (struct spidp *) 0;
	register struct sockbuf *sb = &so->so_snd;
	int len = 0, win, rcv_win;
	short span, off;
	u_short alo, oalo;
	int error = 0, idle, sendalot;
	u_short lookfor = 0;
	struct mbuf *mprev;
	extern int idpcksum;

	if (m0) {
		int mtu = cb->s_mtu;
		int datalen;
		/*
		 * Make sure that packet isn't too big.
		 */
		for (m = m0; m; m = m->m_next) {
			mprev = m;
			len += m->m_len;
		}
		datalen = (cb->s_flags & SF_HO) ?
				len - sizeof (struct sphdr) : len;
		if (datalen > mtu) {
			if (cb->s_flags & SF_PI) {
				m_freem(m0);
				return (EMSGSIZE);
			} else {
				int off = 0;
				int oldEM = cb->s_cc & SP_EM;

				cb->s_cc &= ~SP_EM;
				while (len > mtu) {
					m = m_copy(m0, off, mtu);
					if (m == NULL) {
						error = ENOBUFS;
						goto bad_copy;
					}
					error = spp_output(cb, m);
					if (error) {
					bad_copy:
						cb->s_cc |= oldEM;
						m_freem(m0);
						return (error);
					}
					m_adj(m0, mtu);
					len -= mtu;
				}
				cb->s_cc |= oldEM;
			}
		}
		/*
		 * Force length even, by adding a "garbage byte" if
		 * necessary.
		 */
		if (len & 1) {
			m = mprev;
			if (m->m_len + m->m_off < MMAXOFF)
				m->m_len++;
			else {
				struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);

				if (m1 == 0) {
					m_freem(m0);
					return (ENOBUFS);
				}
				m1->m_len = 1;
				m1->m_off = MMAXOFF - 1;
				m->m_next = m1;
			}
		}
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			m_freem(m0);
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Long align so prepended ip headers will work on Gould.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (struct spidp);
		m->m_next = m0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
			register struct sphdr *sh;
			if (m0->m_len < sizeof (*sh)) {
				if ((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
					(void) m_free(m);
					m_freem(m0);
					return (EINVAL);
				}
				m->m_next = m0;
			}
			sh = mtod(m0, struct sphdr *);
			si->si_dt = sh->sp_dt;
			si->si_cc |= sh->sp_cc & SP_EM;
			m0->m_len -= sizeof (*sh);
			m0->m_off += sizeof (*sh);
			len -= sizeof (*sh);
		}
		len += sizeof(*si);
		if (cb->s_oobflags & SF_SOOB) {
			/*
			 * Per jqj@cornell:
			 * make sure OB packets convey exactly 1 byte.
			 * If the packet is 1 byte or larger, we
			 * have already guaranteed there to be at least
			 * one garbage byte for the checksum, and
			 * extra bytes shouldn't hurt!
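			 * (len is clamped below to the spidp header
			 * plus a single data byte.)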
			 */
			if (len > sizeof(*si)) {
				si->si_cc |= SP_OB;
				len = (1 + sizeof(*si));
			}
		}
		si->si_len = htons((u_short)len);
		/*
		 * queue stuff up for output
		 */
		sbappendrecord(sb, m);
		cb->s_seq++;
	}
	idle = (cb->s_smax == (cb->s_rack - 1));
again:
	sendalot = 0;
	off = cb->s_snxt - cb->s_rack;
	win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT));

	/*
	 * If in persist timeout with window of 0, send a probe.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, send what we can and go into
	 * transmit state.
	 */
	if (cb->s_force == 1 + TCPT_PERSIST) {
		if (win != 0) {
			cb->s_timer[TCPT_PERSIST] = 0;
			cb->s_rxtshift = 0;
		}
	}
	span = cb->s_seq - cb->s_rack;
	len = MIN(span, win) - off;

	if (len < 0) {
		/*
		 * Window shrank after we went into it.
		 * If window shrank to 0, cancel pending
		 * retransmission and pull s_snxt back
		 * to (closed) window.  We will enter persist
		 * state below.  If the window didn't close completely,
		 * just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			cb->s_timer[TCPT_REXMT] = 0;
			cb->s_snxt = cb->s_rack;
		}
	}
	if (len > 1)
		sendalot = 1;
	rcv_win = sbspace(&so->so_rcv);

	/*
	 * Send if we owe peer an ACK.
	 */
	if (cb->s_oobflags & SF_SOOB) {
		/*
		 * must transmit this out of band packet
		 */
		cb->s_oobflags &= ~SF_SOOB;
		sendalot = 1;
		sppstat.spps_sndurg++;
		goto found;
	}
	if (cb->s_flags & SF_ACKNOW)
		goto send;
	if (cb->s_state < TCPS_ESTABLISHED)
		goto send;
	/*
	 * Silly window can't happen in spp.
	 * Code from tcp deleted.
	 */
	if (len)
		goto send;
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input.)  If the difference is at least two
	 * packets or at least 35% of the maximum possible window,
	 * then want to send a window update to peer.
	 */
	if (rcv_win > 0) {
		u_short delta = 1 + cb->s_alo - cb->s_ack;
		int adv = rcv_win - (delta * cb->s_mtu);

		if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
		    (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
			sppstat.spps_sndwinup++;
			cb->s_flags |= SF_ACKNOW;
			goto send;
		}

	}
	/*
	 * Many comments from tcp_output.c are appropriate here
	 * including . . .
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise send a probe.
	 */
	if (so->so_snd.sb_cc && cb->s_timer[TCPT_REXMT] == 0 &&
	    cb->s_timer[TCPT_PERSIST] == 0) {
		cb->s_rxtshift = 0;
		spp_setpersist(cb);
	}
	/*
	 * No reason to send a packet, just return.
	 */
	cb->s_outx = 1;
	return (0);

send:
	/*
	 * Find requested packet.
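	 * Scan the send queue for the first record whose sequence
	 * number is at or beyond s_snxt.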
	 */
	si = 0;
	if (len > 0) {
		cb->s_want = cb->s_snxt;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
				break;
		}
	found:
		if (si) {
			if (si->si_seq == cb->s_snxt)
				cb->s_snxt++;
			else
				sppstat.spps_sndvoid++, si = 0;
		}
	}
	/*
	 * update window
	 */
	if (rcv_win < 0)
		rcv_win = 0;
	oalo = alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
	if (SSEQ_LT(alo, cb->s_alo))
		alo = cb->s_alo;

	if (si) {
		/*
		 * must make a copy of this packet for
		 * idp_output to monkey with
		 */
		m = m_copy(dtom(si), 0, (int)M_COPYALL);
		if (m == NULL) {
			return (ENOBUFS);
		}
		m0 = m;
		si = mtod(m, struct spidp *);
		if (SSEQ_LT(si->si_seq, cb->s_smax))
			sppstat.spps_sndrexmitpack++;
		else
			sppstat.spps_sndpack++;
	} else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
		/*
		 * Must send an acknowledgement or a probe
		 */
		if (cb->s_force)
			sppstat.spps_sndprobe++;
		if (cb->s_flags & SF_ACKNOW)
			sppstat.spps_sndacks++;
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Align beginning of packet to long to prepend
		 * ifp's on loopback, or NSIP encapsulation for fussy cpu's.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (*si);
		m->m_next = 0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		si->si_seq = cb->s_smax + 1;
		si->si_len = htons(sizeof (*si));
		si->si_cc |= SP_SP;
	} else {
		cb->s_outx = 3;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
		return (0);
	}
	/*
	 * Stuff checksum and output datagram.
	 */
	if ((si->si_cc & SP_SP) == 0) {
		if (cb->s_force != (1 + TCPT_PERSIST) ||
		    cb->s_timer[TCPT_PERSIST] == 0) {
			/*
			 * If this is a new packet and we are not currently
			 * timing anything, time this one.
			 */
			if (SSEQ_LT(cb->s_smax, si->si_seq)) {
				cb->s_smax = si->si_seq;
				if (cb->s_rtt == 0) {
					sppstat.spps_segstimed++;
					cb->s_rtseq = si->si_seq;
					cb->s_rtt = 1;
				}
			}
			/*
			 * Set rexmt timer if not currently set,
			 * Initial value for retransmit timer is smoothed
			 * round-trip time + 2 * round-trip time variance.
			 * Initialize shift counter which is used for backoff
			 * of retransmit time.
			 */
			if (cb->s_timer[TCPT_REXMT] == 0 &&
			    cb->s_snxt != cb->s_rack) {
				cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
				if (cb->s_timer[TCPT_PERSIST]) {
					cb->s_timer[TCPT_PERSIST] = 0;
					cb->s_rxtshift = 0;
				}
			}
		} else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
			cb->s_smax = si->si_seq;
		}
	} else if (cb->s_state < TCPS_ESTABLISHED) {
		if (cb->s_rtt == 0)
			cb->s_rtt = 1;	/* Time initial handshake */
		if (cb->s_timer[TCPT_REXMT] == 0)
			cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
	}
	{
		/*
		 * Do not request acks when we ack their data packets or
		 * when we do a gratuitous window update.
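		 * (SP_SA is requested only on data packets or on
		 * forced transmissions.)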
		 */
		if (((si->si_cc & SP_SP) == 0) || cb->s_force)
			si->si_cc |= SP_SA;
		si->si_seq = htons(si->si_seq);
		si->si_alo = htons(alo);
		si->si_ack = htons(cb->s_ack);

		if (idpcksum) {
			si->si_sum = 0;
			len = ntohs(si->si_len);
			if (len & 1)
				len++;
			si->si_sum = ns_cksum(dtom(si), len);
		} else
			si->si_sum = 0xffff;

		cb->s_outx = 4;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);

		if (so->so_options & SO_DONTROUTE)
			error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
		else
			error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
	}
	if (error) {
		return (error);
	}
	sppstat.spps_sndtotal++;
	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	cb->s_force = 0;
	cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
	if (SSEQ_GT(alo, cb->s_alo))
		cb->s_alo = alo;
	if (sendalot)
		goto again;
	cb->s_outx = 5;
	return (0);
}

int spp_do_persist_panics = 0;

spp_setpersist(cb)
	register struct sppcb *cb;
{
	register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
	extern int spp_backoff[];

	if (cb->s_timer[TCPT_REXMT] && spp_do_persist_panics)
		panic("spp_output REXMT");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(cb->s_timer[TCPT_PERSIST],
	    t * spp_backoff[cb->s_rxtshift],
	    TCPTV_PERSMIN, TCPTV_PERSMAX);
	if (cb->s_rxtshift < TCP_MAXRXTSHIFT)
		cb->s_rxtshift++;
}
/*ARGSUSED*/
spp_ctloutput(req, so, level, name, value)
	int req;
	struct socket *so;
	int name;
	struct mbuf **value;
{
	register struct mbuf *m;
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int mask, error = 0;

	if (level != NSPROTO_SPP) {
		/* This will have to be changed when we do more general
		   stacking of protocols */
		return (idp_ctloutput(req, so, level, name, value));
	}
	if (nsp == NULL) {
		error = EINVAL;
		goto release;
	} else
		cb = nstosppcb(nsp);

	switch (req) {

	case PRCO_GETOPT:
		if (value == NULL)
			return (EINVAL);
		m = m_get(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		switch (name) {

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto get_flags;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		get_flags:
			m->m_len = sizeof(short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_flags & mask;
			break;

		case SO_MTU:
			m->m_len = sizeof(u_short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_mtu;
			break;

		case SO_LAST_HEADER:
			m->m_len = sizeof(struct sphdr);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_rhdr;
			break;

		case SO_DEFAULT_HEADERS:
			m->m_len = sizeof(struct spidp);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_shdr;
			break;

		default:
			error = EINVAL;
		}
		*value = m;
		break;

	case PRCO_SETOPT:
		if (value == 0 || *value == 0) {
			error = EINVAL;
			break;
		}
		switch (name) {
			int *ok;

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto set_head;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		set_head:
			if (cb->s_flags & SF_PI) {
				ok = mtod(*value, int *);
				if (*ok)
					cb->s_flags |= mask;
				else
					cb->s_flags &= ~mask;
			} else error = EINVAL;
			break;

		case SO_MTU:
			cb->s_mtu = *(mtod(*value, u_short *));
			break;

		case SO_DEFAULT_HEADERS:
			{
				register struct sphdr *sp
					= mtod(*value, struct sphdr *);
				cb->s_dt = sp->sp_dt;
				cb->s_cc = sp->sp_cc & SP_EM;
			}
			break;

		default:
			error = EINVAL;
		}
		m_freem(*value);
		break;
	}
release:
	return (error);
}

/*ARGSUSED*/
spp_usrreq(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int s = splnet();
	int error = 0, ostate;
	struct mbuf *mm;
	register struct sockbuf *sb;

	if (req == PRU_CONTROL)
		return (ns_control(so, (int)m, (caddr_t)nam,
			(struct ifnet *)rights));
	if (rights && rights->m_len) {
		error = EINVAL;
		goto release;
	}
	if (nsp == NULL) {
		if (req != PRU_ATTACH) {
			error = EINVAL;
			goto release;
		}
	} else
		cb = nstosppcb(nsp);

	ostate = cb ? cb->s_state : 0;

	switch (req) {

	case PRU_ATTACH:
		if (nsp != NULL) {
			error = EISCONN;
			break;
		}
		error = ns_pcballoc(so, &nspcb);
		if (error)
			break;
		error = soreserve(so, 3072, 3072);
		if (error)
			break;
		nsp = sotonspcb(so);

		mm = m_getclr(M_DONTWAIT, MT_PCB);
		sb = &so->so_snd;

		if (mm == NULL) {
			error = ENOBUFS;
			break;
		}
		cb = mtod(mm, struct sppcb *);
		mm = m_getclr(M_DONTWAIT, MT_HEADER);
		if (mm == NULL) {
			m_free(dtom(cb));
			error = ENOBUFS;
			break;
		}
		cb->s_idp = mtod(mm, struct idp *);
		cb->s_state = TCPS_LISTEN;
		cb->s_smax = -1;
		cb->s_swl1 = -1;
		cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
		cb->s_nspcb = nsp;
		cb->s_mtu = 576 - sizeof (struct spidp);
		cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
		cb->s_ssthresh = cb->s_cwnd;
		cb->s_cwmx = sb->sb_mbmax * CUNIT /
				(2 * sizeof (struct spidp));
		/* Above is recomputed when connecting to account
		   for changed buffering or mtu's */
		cb->s_rtt = TCPTV_SRTTBASE;
		cb->s_rttvar = TCPTV_SRTTDFLT << 2;
		TCPT_RANGESET(cb->s_rxtcur,
		    ((TCPTV_SRTTBASE >> 2) + (TCPTV_SRTTDFLT << 2)) >> 1,
		    TCPTV_MIN, TCPTV_REXMTMAX);
		nsp->nsp_pcb = (caddr_t) cb;
		break;

	case PRU_DETACH:
		if (nsp == NULL) {
			error = ENOTCONN;
			break;
		}
		if (cb->s_state > TCPS_LISTEN)
			cb = spp_disconnect(cb);
		else
			cb = spp_close(cb);
		break;

	case PRU_BIND:
		error = ns_pcbbind(nsp, nam);
		break;

	case PRU_LISTEN:
		if (nsp->nsp_lport == 0)
			error = ns_pcbbind(nsp, (struct mbuf *)0);
		if (error == 0)
			cb->s_state = TCPS_LISTEN;
		break;

	/*
	 * Initiate connection to peer.
	 * Enter SYN_SENT state, and mark socket as connecting.
	 * Start keep-alive timer, set up prototype header,
	 * and send initial system packet requesting connection.
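	 * If no local port has been bound yet, one is chosen here first.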
	 */
	case PRU_CONNECT:
		if (nsp->nsp_lport == 0) {
			error = ns_pcbbind(nsp, (struct mbuf *)0);
			if (error)
				break;
		}
		error = ns_pcbconnect(nsp, nam);
		if (error)
			break;
		soisconnecting(so);
		sppstat.spps_connattempt++;
		cb->s_state = TCPS_SYN_SENT;
		cb->s_did = 0;
		spp_template(cb);
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		cb->s_force = 1 + TCPTV_KEEP;
		/*
		 * Other party is required to respond to
		 * the port I send from, but he is not
		 * required to answer from where I am sending to,
		 * so allow wildcarding.
		 * The original port I am sending to is still saved in
		 * cb->s_dport.
		 */
		nsp->nsp_fport = 0;
		error = spp_output(cb, (struct mbuf *) 0);
		break;

	case PRU_CONNECT2:
		error = EOPNOTSUPP;
		break;

	/*
	 * We may decide later to implement connection closing
	 * handshaking at the spp level optionally.
	 * Here is the hook to do it:
	 */
	case PRU_DISCONNECT:
		cb = spp_disconnect(cb);
		break;

	/*
	 * Accept a connection.  Essentially all the work is
	 * done at higher levels; just return the address
	 * of the peer, storing through addr.
	 */
	case PRU_ACCEPT: {
		struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);

		nam->m_len = sizeof (struct sockaddr_ns);
		sns->sns_family = AF_NS;
		sns->sns_addr = nsp->nsp_faddr;
		break;
		}

	case PRU_SHUTDOWN:
		socantsendmore(so);
		cb = spp_usrclosed(cb);
		if (cb)
			error = spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * After a receive, possibly send acknowledgment
	 * updating allocation.
	 */
	case PRU_RCVD:
		cb->s_flags |= SF_RVD;
		(void) spp_output(cb, (struct mbuf *) 0);
		cb->s_flags &= ~SF_RVD;
		break;

	case PRU_ABORT:
		(void) spp_drop(cb, ECONNABORTED);
		break;

	case PRU_SENSE:
	case PRU_CONTROL:
		m = NULL;
		error = EOPNOTSUPP;
		break;

	case PRU_RCVOOB:
		if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
		    (so->so_state & SS_RCVATMARK)) {
			m->m_len = 1;
			*mtod(m, caddr_t) = cb->s_iobc;
			break;
		}
		error = EINVAL;
		break;

	case PRU_SENDOOB:
		if (sbspace(&so->so_snd) < -512) {
			error = ENOBUFS;
			break;
		}
		cb->s_oobflags |= SF_SOOB;
		/* fall into */
	case PRU_SEND:
		error = spp_output(cb, m);
		m = NULL;
		break;

	case PRU_SOCKADDR:
		ns_setsockaddr(nsp, nam);
		break;

	case PRU_PEERADDR:
		ns_setpeeraddr(nsp, nam);
		break;

	case PRU_SLOWTIMO:
		cb = spp_timers(cb, (int)nam);
		req |= ((int)nam) << 8;
		break;

	case PRU_FASTTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		error = EOPNOTSUPP;
		break;

	default:
		panic("sp_usrreq");
	}
	if (cb && (so->so_options & SO_DEBUG || traceallspps))
		spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
release:
	if (m != NULL)
		m_freem(m);
	splx(s);
	return (error);
}

spp_usrreq_sp(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	int error = spp_usrreq(so, req, m, nam, rights);

	if (req == PRU_ATTACH && error == 0) {
		struct nspcb *nsp = sotonspcb(so);
		((struct sppcb *)nsp->nsp_pcb)->s_flags |=
			(SF_HI | SF_HO | SF_PI);
	}
	return (error);
}

/*
 * Create template to be used to send spp packets on a connection.
 * Called after host entry created, fills
 * in a skeletal spp header (choosing connection id),
 * minimizing the amount of work necessary when the connection is used.
 */
spp_template(cb)
	register struct sppcb *cb;
{
	register struct nspcb *nsp = cb->s_nspcb;
	register struct idp *idp = cb->s_idp;
	register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);

	idp->idp_pt = NSPROTO_SPP;
	idp->idp_sna = nsp->nsp_laddr;
	idp->idp_dna = nsp->nsp_faddr;
	cb->s_sid = htons(spp_iss);
	spp_iss += SPP_ISSINCR/2;
	cb->s_alo = 1;
	cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
	cb->s_ssthresh = cb->s_cwnd;	/* Try to expand fast to full complement
					   of large packets */
	cb->s_cwmx = (sb->sb_mbmax * CUNIT) / (2 * sizeof(struct spidp));
	cb->s_cwmx = MAX(cb->s_cwmx, cb->s_cwnd);
		/* But allow for lots of little packets as well */
}

/*
 * Close an SPP control block:
 *	discard spp control block itself
 *	discard ns protocol control block
 *	wake up any sleepers
 */
struct sppcb *
spp_close(cb)
	register struct sppcb *cb;
{
	register struct spidp_q *s;
	struct nspcb *nsp = cb->s_nspcb;
	struct socket *so = nsp->nsp_socket;
	register struct mbuf *m;

	s = cb->s_q.si_next;
	while (s != &(cb->s_q)) {
		s = s->si_next;
		m = dtom(s->si_prev);
		remque(s->si_prev);
		m_freem(m);
	}
	(void) m_free(dtom(cb->s_idp));
	(void) m_free(dtom(cb));
	nsp->nsp_pcb = 0;
	soisdisconnected(so);
	ns_pcbdetach(nsp);
	sppstat.spps_closed++;
	return ((struct sppcb *)0);
}
/*
 * Someday we may do level 3 handshaking
 * to close a connection or send a xerox style error.
 * For now, just close.
 */
struct sppcb *
spp_usrclosed(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
struct sppcb *
spp_disconnect(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
/*
 * Drop connection, reporting
 * the specified error.
 */
struct sppcb *
spp_drop(cb, errno)
	register struct sppcb *cb;
	int errno;
{
	struct socket *so = cb->s_nspcb->nsp_socket;

	/*
	 * someday, in the xerox world
	 * we will generate error protocol packets
	 * announcing that the socket has gone away.
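	 * For now the control block is simply closed and so_error is
	 * set so the user sees the failure.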
	 */
	if (TCPS_HAVERCVDSYN(cb->s_state)) {
		sppstat.spps_drops++;
		cb->s_state = TCPS_CLOSED;
		/*(void) tcp_output(cb);*/
	} else
		sppstat.spps_conndrops++;
	so->so_error = errno;
	return (spp_close(cb));
}

spp_abort(nsp)
	struct nspcb *nsp;
{

	(void) spp_close((struct sppcb *)nsp->nsp_pcb);
}

long spp_backoff[TCP_MAXRXTSHIFT+1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
/*
 * Fast timeout routine for processing delayed acks
 */
spp_fasttimo()
{
	register struct nspcb *nsp;
	register struct sppcb *cb;
	int s = splnet();

	nsp = nspcb.nsp_next;
	if (nsp)
	for (; nsp != &nspcb; nsp = nsp->nsp_next)
		if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
		    (cb->s_flags & SF_DELACK)) {
			cb->s_flags &= ~SF_DELACK;
			cb->s_flags |= SF_ACKNOW;
			sppstat.spps_delack++;
			(void) spp_output(cb, (struct mbuf *) 0);
		}
	splx(s);
}

/*
 * spp protocol timeout routine called every 500 ms.
 * Updates the timers in all active pcb's and
 * causes finite state machine actions if timers expire.
 */
spp_slowtimo()
{
	register struct nspcb *ip, *ipnxt;
	register struct sppcb *cb;
	int s = splnet();
	register int i;

	/*
	 * Search through tcb's and update active timers.
	 */
	ip = nspcb.nsp_next;
	if (ip == 0) {
		splx(s);
		return;
	}
	while (ip != &nspcb) {
		cb = nstosppcb(ip);
		ipnxt = ip->nsp_next;
		if (cb == 0)
			goto tpgone;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
				(void) spp_usrreq(cb->s_nspcb->nsp_socket,
					PRU_SLOWTIMO, (struct mbuf *)0,
					(struct mbuf *)i, (struct mbuf *)0);
				if (ipnxt->nsp_prev != ip)
					goto tpgone;
			}
		}
		cb->s_idle++;
		if (cb->s_rtt)
			cb->s_rtt++;
tpgone:
		ip = ipnxt;
	}
	spp_iss += SPP_ISSINCR/PR_SLOWHZ;	/* increment iss */
	splx(s);
}
/*
 * SPP timer processing.
 */
struct sppcb *
spp_timers(cb, timer)
	register struct sppcb *cb;
	int timer;
{
	long rexmt;
	int win;

	cb->s_force = 1 + timer;
	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  TCP deletes connection
	 * control block.
	 */
	case TCPT_2MSL:
		printf("spp: TCPT_2MSL went off for no reason\n");
		cb->s_timer[timer] = 0;
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one packet.
	 */
	case TCPT_REXMT:
		if (++cb->s_rxtshift > TCP_MAXRXTSHIFT) {
			cb->s_rxtshift = TCP_MAXRXTSHIFT;
			sppstat.spps_timeoutdrop++;
			cb = spp_drop(cb, ETIMEDOUT);
			break;
		}
		sppstat.spps_rexmttimeo++;
		rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
		rexmt *= spp_backoff[cb->s_rxtshift];
		TCPT_RANGESET(cb->s_rxtcur, rexmt, TCPTV_MIN, TCPTV_REXMTMAX);
		cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
		/*
		 * If we have backed off fairly far, our srtt
		 * estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
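		 * (The retransmit value is ((srtt >> 2) + rttvar) >> 1,
		 * so folding srtt >> 2 into rttvar leaves the current
		 * timeout unchanged.)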
		 */
		if (cb->s_rxtshift > TCP_MAXRXTSHIFT / 4) {
			cb->s_rttvar += (cb->s_srtt >> 2);
			cb->s_srtt = 0;
		}
		cb->s_snxt = cb->s_rack;
		/*
		 * If timing a packet, stop the timer.
		 */
		cb->s_rtt = 0;
		/*
		 * See very long discussion in tcp_timer.c about congestion
		 * window and ssthresh
		 */
		win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
		if (win < 2)
			win = 2;
		cb->s_cwnd = CUNIT;
		cb->s_ssthresh = win;
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a probe to be sent.
	 */
	case TCPT_PERSIST:
		sppstat.spps_persisttimeo++;
		spp_setpersist(cb);
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		sppstat.spps_keeptimeo++;
		if (cb->s_state < TCPS_ESTABLISHED)
			goto dropit;
		if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
			if (cb->s_idle >= TCPTV_MAXIDLE)
				goto dropit;
			sppstat.spps_keepprobe++;
			(void) spp_output(cb, (struct mbuf *) 0);
		} else
			cb->s_idle = 0;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		break;
	dropit:
		sppstat.spps_keepdrops++;
		cb = spp_drop(cb, ETIMEDOUT);
		break;
	}
	return (cb);
}
int SppcbSize = sizeof (struct sppcb);
int NspcbSize = sizeof (struct nspcb);