/*
 * Copyright (c) 1984, 1985, 1986, 1987 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)spp_usrreq.c	7.5 (Berkeley) 03/03/88
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "mbuf.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/route.h"
#include "../netinet/tcp_fsm.h"
#include "../netinet/tcp_timer.h"

#include "ns.h"
#include "ns_pcb.h"
#include "idp.h"
#include "idp_var.h"
#include "ns_error.h"
#include "sp.h"
#include "spidp.h"
#include "spp_var.h"
#include "spp_debug.h"

/*
 * SP protocol implementation.
 */
spp_init()
{

	spp_iss = 1; /* WRONG !! should fish it out of TODR */
}
struct spidp spp_savesi;
int traceallspps = 0;
extern int sppconsdebug;
int spp_hardnosed;
int spp_use_delack = 0;

/*ARGSUSED*/
spp_input(m, nsp, ifp)
	register struct mbuf *m;
	register struct nspcb *nsp;
	struct ifnet *ifp;
{
	register struct sppcb *cb;
	register struct spidp *si = mtod(m, struct spidp *);
	register struct socket *so;
	short ostate;
	int dropsocket = 0;

	sppstat.spps_rcvtotal++;
	if (nsp == 0) {
		panic("No nspcb in spp_input\n");
		return;
	}

	cb = nstosppcb(nsp);
	if (cb == 0) goto bad;

	if (m->m_len < sizeof(*si)) {
		if ((m = m_pullup(m, sizeof(*si))) == 0) {
			sppstat.spps_rcvshort++;
			return;
		}
		si = mtod(m, struct spidp *);
	}
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);

	so = nsp->nsp_socket;
	if (so->so_options & SO_DEBUG || traceallspps) {
		ostate = cb->s_state;
		spp_savesi = *si;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		struct sppcb *ocb = cb;
		struct socket *oso = so;

		so = sonewconn(so);
		if (so == 0) {
			goto drop;
		}
		/*
		 * This is ugly, but ....
		 *
		 * Mark socket as temporary until we're
		 * committed to keeping it. The code at
		 * ``drop'' and ``dropwithreset'' checks the
		 * flag dropsocket to see if the temporary
		 * socket created here should be discarded.
		 * We mark the socket as discardable until
		 * we're committed to it below in TCPS_LISTEN.
		 */
		dropsocket++;
		nsp = (struct nspcb *)so->so_pcb;
		nsp->nsp_laddr = si->si_dna;
		cb = nstosppcb(nsp);
		cb->s_mtu = ocb->s_mtu;		/* preserve sockopts */
		cb->s_flags = ocb->s_flags;	/* preserve sockopts */
		if (so->so_snd.sb_hiwat != oso->so_snd.sb_hiwat)	/*XXX*/
			sbreserve(&so->so_snd, oso->so_snd.sb_hiwat);
		if (so->so_rcv.sb_hiwat != oso->so_rcv.sb_hiwat)	/*XXX*/
			sbreserve(&so->so_rcv, oso->so_rcv.sb_hiwat);
		cb->s_state = TCPS_LISTEN;
	}

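	/*
	 * From here on the connection is driven through the TCP-style
	 * state machine borrowed from tcp_fsm.h (TCPS_LISTEN,
	 * TCPS_SYN_SENT, ...), using the timer definitions shared with
	 * TCP in tcp_timer.h.
	 */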
	/*
	 * Packet received on connection;
	 * reset idle time and keep-alive timer.
	 */
	cb->s_idle = 0;
	cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;

	switch (cb->s_state) {

	case TCPS_LISTEN: {
		struct mbuf *am;
		register struct sockaddr_ns *sns;
		struct ns_addr laddr;

		/*
		 * If somebody here was carrying on a conversation
		 * and went away, and his pen pal thinks he can
		 * still talk, we get the misdirected packet.
		 */
		if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
			spp_istat.gonawy++;
			goto dropwithreset;
		}
		am = m_get(M_DONTWAIT, MT_SONAME);
		if (am == NULL)
			goto drop;
		am->m_len = sizeof (struct sockaddr_ns);
		sns = mtod(am, struct sockaddr_ns *);
		sns->sns_family = AF_NS;
		sns->sns_addr = si->si_sna;
		laddr = nsp->nsp_laddr;
		if (ns_nullhost(laddr))
			nsp->nsp_laddr = si->si_dna;
		if (ns_pcbconnect(nsp, am)) {
			nsp->nsp_laddr = laddr;
			(void) m_free(am);
			spp_istat.noconn++;
			goto drop;
		}
		(void) m_free(am);
		spp_template(cb);
		dropsocket = 0;		/* committed to socket */
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
#define THREEWAYSHAKE
#ifdef THREEWAYSHAKE
		cb->s_state = TCPS_SYN_RECEIVED;
		cb->s_force = 1 + TCPT_KEEP;
		sppstat.spps_accepts++;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		}
		break;
	/*
	 * This state means that we have heard a response
	 * to our acceptance of their connection.
	 * It is probably logically unnecessary in this
	 * implementation.
	 */
	case TCPS_SYN_RECEIVED: {
		if (si->si_did != cb->s_sid) {
			spp_istat.wrncon++;
			goto drop;
		}
#endif
		nsp->nsp_fport = si->si_sport;
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		sppstat.spps_accepts++;
		}
		break;
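	/*
	 * Note: with THREEWAYSHAKE defined (as above), a passive open
	 * parks the connection in SYN_RECEIVED until the peer acknowledges
	 * our acceptance; without it, the #ifdef'ed braces let the LISTEN
	 * case fall straight through to ESTABLISHED.
	 */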
	/*
	 * This state means that we have gotten a response
	 * to our attempt to establish a connection.
	 * We fill in the data from the other side,
	 * telling us which port to respond to, instead of the well-
	 * known one we might have sent to in the first place.
	 * We also require that this is a response to our
	 * connection id.
	 */
	case TCPS_SYN_SENT:
		if (si->si_did != cb->s_sid) {
			spp_istat.notme++;
			goto drop;
		}
		sppstat.spps_connects++;
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
		cb->s_dport = nsp->nsp_fport = si->si_sport;
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_flags |= SF_ACKNOW;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		/* Use roundtrip time of connection request for initial rtt */
		if (cb->s_rtt) {
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
			TCPT_RANGESET(cb->s_rxtcur,
			    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
			    TCPTV_MIN, TCPTV_REXMTMAX);
			cb->s_rtt = 0;
		}
	}
	if (so->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);

	m->m_len -= sizeof (struct idp);
	m->m_off += sizeof (struct idp);

	if (spp_reass(cb, si)) {
		(void) m_freem(m);
	}
	if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
		(void) spp_output(cb, (struct mbuf *)0);
	cb->s_flags &= ~(SF_WIN|SF_RXT);
	return;

dropwithreset:
	if (dropsocket)
		(void) soabort(so);
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);
	ns_error(dtom(si), NS_ERR_NOSOCK, 0);
	if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	return;

drop:
bad:
	if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
	    traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	m_freem(m);
}

int spprexmtthresh = 3;

/*
 * This is structurally similar to the tcp reassembly routine
 * but its function is somewhat different: It merely queues
 * packets up, and suppresses duplicates.
 */
spp_reass(cb, si)
	register struct sppcb *cb;
	register struct spidp *si;
{
	register struct spidp_q *q;
	register struct mbuf *m;
	register struct socket *so = cb->s_nspcb->nsp_socket;
	char packetp = cb->s_flags & SF_HI;
	int incr;
	char wakeup = 0;

	if (si == SI(0))
		goto present;
	/*
	 * Update our news from them.
	 */
	if (si->si_cc & SP_SA)
		cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
	if (SSEQ_GT(si->si_alo, cb->s_ralo))
		cb->s_flags |= SF_WIN;
	if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
		if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
			sppstat.spps_rcvdupack++;
			/*
			 * If this is a completely duplicate ack
			 * and other conditions hold, we assume
			 * a packet has been dropped and retransmit
			 * it exactly as in tcp_input().
			 */
			if (si->si_ack != cb->s_rack ||
			    si->si_alo != cb->s_ralo)
				cb->s_dupacks = 0;
			else if (++cb->s_dupacks == spprexmtthresh) {
				u_short onxt = cb->s_snxt;
				int cwnd = cb->s_cwnd;

				cb->s_snxt = si->si_ack;
				cb->s_cwnd = CUNIT;
				cb->s_force = 1 + TCPT_REXMT;
				(void) spp_output(cb, (struct mbuf *)0);
				cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
				cb->s_rtt = 0;
				if (cwnd >= 4 * CUNIT)
					cb->s_cwnd = cwnd / 2;
				if (SSEQ_GT(onxt, cb->s_snxt))
					cb->s_snxt = onxt;
				return (1);
			}
		} else
			cb->s_dupacks = 0;
		goto update_window;
	}
	cb->s_dupacks = 0;
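	/*
	 * The duplicate-ack code above is SPP's version of TCP fast
	 * retransmit: after spprexmtthresh (3) identical acks the packet
	 * the receiver is still expecting is resent with a one-packet
	 * window, and the congestion window is then restored to half its
	 * old value if it was at least four packets.
	 */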
	/*
	 * If our correspondent acknowledges data we haven't sent,
	 * TCP would drop the packet after acking. We'll be a little
	 * more permissive.
	 */
	if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
		sppstat.spps_rcvacktoomuch++;
		si->si_ack = cb->s_smax + 1;
	}
	sppstat.spps_rcvackpack++;
	/*
	 * If transmit timer is running and timed sequence
	 * number was acked, update smoothed round trip time.
	 * See discussion of algorithm in tcp_input.c
	 */
	if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
		sppstat.spps_rttupdated++;
		if (cb->s_srtt != 0) {
			register short delta;

			delta = cb->s_rtt - (cb->s_srtt >> 3);
			if ((cb->s_srtt += delta) <= 0)
				cb->s_srtt = 1;
			if (delta < 0)
				delta = -delta;
			delta -= (cb->s_rttvar >> 2);
			if ((cb->s_rttvar += delta) <= 0)
				cb->s_rttvar = 1;
		} else {
			/*
			 * No rtt measurement yet
			 */
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
		}
		cb->s_rtt = 0;
		cb->s_rxtshift = 0;
		TCPT_RANGESET(cb->s_rxtcur,
		    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
		    TCPTV_MIN, TCPTV_REXMTMAX);
	}
	/*
	 * If all outstanding data is acked, stop retransmit
	 * timer and remember to restart (more output or persist).
	 * If there is more data to be acked, restart retransmit
	 * timer, using current (possibly backed-off) value.
	 */
	if (si->si_ack == cb->s_smax + 1) {
		cb->s_timer[TCPT_REXMT] = 0;
		cb->s_flags |= SF_RXT;
	} else if (cb->s_timer[TCPT_PERSIST] == 0)
		cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
	/*
	 * When new data is acked, open the congestion window.
	 * If the window gives us less than ssthresh packets
	 * in flight, open exponentially (maxseg at a time).
	 * Otherwise open linearly (maxseg^2 / cwnd at a time).
	 */
	incr = CUNIT;
	if (cb->s_cwnd > cb->s_ssthresh)
		incr = MAX(incr * incr / cb->s_cwnd, 1);
	cb->s_cwnd = MIN(cb->s_cwnd + incr, cb->s_cwmx);
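	/*
	 * Note that s_cwnd and incr are kept in units of CUNIT (one packet
	 * each), so the exponential branch adds a packet per ack (slow
	 * start) and the linear branch roughly a packet per window of
	 * acks (congestion avoidance), as in TCP.
	 */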
	/*
	 * Trim acked data from output queue.
	 */
	while ((m = so->so_snd.sb_mb) != NULL) {
		if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
			sbdroprecord(&so->so_snd);
		else
			break;
	}
	if ((so->so_snd.sb_flags & SB_WAIT) || so->so_snd.sb_sel)
		sowwakeup(so);
	cb->s_rack = si->si_ack;
update_window:
	if (SSEQ_LT(cb->s_snxt, cb->s_rack))
		cb->s_snxt = cb->s_rack;
	if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
	    (SSEQ_LT(cb->s_swl2, si->si_ack) ||
	    cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
		/* keep track of pure window updates */
		if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
		    && SSEQ_LT(cb->s_ralo, si->si_alo)) {
			sppstat.spps_rcvwinupd++;
			sppstat.spps_rcvdupack--;
		}
		cb->s_ralo = si->si_alo;
		cb->s_swl1 = si->si_seq;
		cb->s_swl2 = si->si_ack;
		cb->s_swnd = (1 + si->si_alo - si->si_ack);
		if (cb->s_swnd > cb->s_smxw)
			cb->s_smxw = cb->s_swnd;
		cb->s_flags |= SF_WIN;
	}
	/*
	 * If this packet number is higher than that which
	 * we have allocated, refuse it, unless urgent.
	 */
	if (SSEQ_GT(si->si_seq, cb->s_alo)) {
		if (si->si_cc & SP_SP) {
			sppstat.spps_rcvwinprobe++;
			return (1);
		} else
			sppstat.spps_rcvpackafterwin++;
		if (si->si_cc & SP_OB) {
			if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
				ns_error(dtom(si), NS_ERR_FULLUP, 0);
				return (0);
			} /* else queue this packet; */
		} else {
			/*register struct socket *so = cb->s_nspcb->nsp_socket;
			if (so->so_state && SS_NOFDREF) {
				ns_error(dtom(si), NS_ERR_NOSOCK, 0);
				(void)spp_close(cb);
			} else
				       would crash system*/
			spp_istat.notyet++;
			ns_error(dtom(si), NS_ERR_FULLUP, 0);
			return (0);
		}
	}
	/*
	 * If this is a system packet, we don't need to
	 * queue it up, and won't update acknowledge #
	 */
	if (si->si_cc & SP_SP) {
		return (1);
	}
	/*
	 * We have already seen this packet, so drop.
	 */
	if (SSEQ_LT(si->si_seq, cb->s_ack)) {
		spp_istat.bdreas++;
		sppstat.spps_rcvduppack++;
		if (si->si_seq == cb->s_ack - 1)
			spp_istat.lstdup++;
		return (1);
	}
	/*
	 * Loop through all packets queued up to insert in
	 * appropriate sequence.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (si->si_seq == SI(q)->si_seq) {
			sppstat.spps_rcvduppack++;
			return (1);
		}
		if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
			sppstat.spps_rcvoopack++;
			break;
		}
	}
	insque(si, q->si_prev);
	/*
	 * If this packet is urgent, inform process
	 */
	if (si->si_cc & SP_OB) {
		cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
		sohasoutofband(so);
		cb->s_oobflags |= SF_IOOB;
	}
present:
#define SPINC sizeof(struct sphdr)
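	/*
	 * In stream mode (SF_HI clear) the SP header is stripped below
	 * before the data reaches the user, but a copy is kept in s_rhdr,
	 * where the SO_LAST_HEADER socket option can retrieve it.
	 */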
	/*
	 * Loop through all packets queued up to update acknowledge
	 * number, and present all acknowledged data to user;
	 * if in packet interface mode, show packet headers.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (SI(q)->si_seq == cb->s_ack) {
			cb->s_ack++;
			m = dtom(q);
			if (SI(q)->si_cc & SP_OB) {
				cb->s_oobflags &= ~SF_IOOB;
				if (so->so_rcv.sb_cc)
					so->so_oobmark = so->so_rcv.sb_cc;
				else
					so->so_state |= SS_RCVATMARK;
			}
			q = q->si_prev;
			remque(q->si_next);
			wakeup = 1;
			sppstat.spps_rcvpack++;
			if (packetp) {
				sbappendrecord(&so->so_rcv, m);
			} else {
				cb->s_rhdr = *mtod(m, struct sphdr *);
				m->m_off += SPINC;
				m->m_len -= SPINC;
				sbappend(&so->so_rcv, m);
			}
		} else
			break;
	}
	if (wakeup) sorwakeup(so);
	return (0);
}

spp_ctlinput(cmd, arg)
	int cmd;
	caddr_t arg;
{
	struct ns_addr *na;
	extern u_char nsctlerrmap[];
	extern spp_abort(), spp_quench();
	extern struct nspcb *idp_drop();
	struct ns_errp *errp;
	struct nspcb *nsp;
	struct sockaddr_ns *sns;
	int type;

	if (cmd < 0 || cmd > PRC_NCMDS)
		return;
	type = NS_ERR_UNREACH_HOST;

	switch (cmd) {

	case PRC_ROUTEDEAD:
		return;

	case PRC_IFDOWN:
	case PRC_HOSTDEAD:
	case PRC_HOSTUNREACH:
		sns = (struct sockaddr_ns *)arg;
		if (sns->sns_family != AF_NS)
			return;
		na = &sns->sns_addr;
		break;

	default:
		errp = (struct ns_errp *)arg;
		na = &errp->ns_err_idp.idp_dna;
		type = errp->ns_err_num;
		type = ntohs((u_short)type);
	}
	switch (type) {

	case NS_ERR_UNREACH_HOST:
		ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
		break;

	case NS_ERR_TOO_BIG:
	case NS_ERR_NOSOCK:
		nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
			NS_WILDCARD);
		if (nsp) {
			if (nsp->nsp_pcb)
				(void) spp_drop((struct sppcb *)nsp->nsp_pcb,
					(int)nsctlerrmap[cmd]);
			else
				(void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
		}
		break;

	case NS_ERR_FULLUP:
		ns_pcbnotify(na, 0, spp_quench, (long) 0);
	}
}
/*
 * When a source quench is received, close congestion window
 * to one packet. We will gradually open it again as we proceed.
 */
spp_quench(nsp)
	struct nspcb *nsp;
{
	struct sppcb *cb = nstosppcb(nsp);

	if (cb)
		cb->s_cwnd = CUNIT;
}

#ifdef notdef
int
spp_fixmtu(nsp)
	register struct nspcb *nsp;
{
	register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
	register struct mbuf *m;
	register struct spidp *si;
	struct ns_errp *ep;
	struct sockbuf *sb;
	int badseq, len;
	struct mbuf *firstbad, *m0;

	if (cb) {
		/*
		 * The notification that we have sent
		 * too much is bad news -- we will
		 * have to go through the data queued up so far,
		 * splitting packets which are too big and
		 * reassigning sequence numbers and checksums.
		 * We should then retransmit all packets from
		 * one above the offending packet to the last one
		 * we had sent (or our allocation),
		 * then the offending one, so that any queued
		 * data at our destination will be discarded.
		 */
		ep = (struct ns_errp *)nsp->nsp_notify_param;
		sb = &nsp->nsp_socket->so_snd;
		cb->s_mtu = ep->ns_err_param;
		badseq = SI(&ep->ns_err_idp)->si_seq;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (si->si_seq == badseq)
				break;
		}
		if (m == 0) return;
		firstbad = m;
		/*for (;;) {*/
			/* calculate length */
			for (m0 = m, len = 0; m ; m = m->m_next)
				len += m->m_len;
			if (len > cb->s_mtu) {
			}
		/* FINISH THIS
		} */
	}
}
#endif

spp_output(cb, m0)
	register struct sppcb *cb;
	struct mbuf *m0;
{
	struct socket *so = cb->s_nspcb->nsp_socket;
	register struct mbuf *m;
	register struct spidp *si = (struct spidp *) 0;
	register struct sockbuf *sb = &so->so_snd;
	int len = 0, win, rcv_win;
	short span, off;
	u_short alo;
	int error = 0, idle, sendalot;
	struct mbuf *mprev;
	extern int idpcksum;

	if (m0) {
		int mtu = cb->s_mtu;
		int datalen;

		/*
		 * Make sure that packet isn't too big.
		 */
		for (m = m0; m ; m = m->m_next) {
			mprev = m;
			len += m->m_len;
		}
		datalen = (cb->s_flags & SF_HO) ?
				len - sizeof (struct sphdr) : len;
		if (datalen > mtu) {
			if (cb->s_flags & SF_PI) {
				m_freem(m0);
				return (EMSGSIZE);
			} else {
				int oldEM = cb->s_cc & SP_EM;

				cb->s_cc &= ~SP_EM;
				while (len > mtu) {
					m = m_copy(m0, 0, mtu);
					if (m == NULL) {
						error = ENOBUFS;
						goto bad_copy;
					}
					error = spp_output(cb, m);
					if (error) {
					bad_copy:
						cb->s_cc |= oldEM;
						m_freem(m0);
						return (error);
					}
					m_adj(m0, mtu);
					len -= mtu;
				}
				cb->s_cc |= oldEM;
			}
		}
		/*
		 * Force length even, by adding a "garbage byte" if
		 * necessary.
		 */
		if (len & 1) {
			m = mprev;
			if (m->m_len + m->m_off < MMAXOFF)
				m->m_len++;
			else {
				struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);

				if (m1 == 0) {
					m_freem(m0);
					return (ENOBUFS);
				}
				m1->m_len = 1;
				m1->m_off = MMAXOFF - 1;
				m->m_next = m1;
			}
		}
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			m_freem(m0);
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Long align so prepended ip headers will work on Gould.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (struct spidp);
		m->m_next = m0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
			register struct sphdr *sh;

			if (m0->m_len < sizeof (*sh)) {
				if ((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
					(void) m_free(m);
					m_freem(m0);
					return (EINVAL);
				}
				m->m_next = m0;
			}
			sh = mtod(m0, struct sphdr *);
			si->si_dt = sh->sp_dt;
			si->si_cc |= sh->sp_cc & SP_EM;
			m0->m_len -= sizeof (*sh);
			m0->m_off += sizeof (*sh);
			len -= sizeof (*sh);
		}
		len += sizeof(*si);
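		/*
		 * At this point len counts the spidp header plus the user
		 * data; the pad byte added above for odd lengths is not
		 * included here, and the checksum code later rounds the
		 * length up again before summing.
		 */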
		if (cb->s_oobflags & SF_SOOB) {
			/*
			 * Per jqj@cornell:
			 * make sure OB packets convey exactly 1 byte.
			 * If the packet is 1 byte or larger, we
			 * have already guaranteed there to be at least
			 * one garbage byte for the checksum, and
			 * extra bytes shouldn't hurt!
			 */
			if (len > sizeof(*si)) {
				si->si_cc |= SP_OB;
				len = (1 + sizeof(*si));
			}
		}
		si->si_len = htons((u_short)len);
		/*
		 * queue stuff up for output
		 */
		sbappendrecord(sb, m);
		cb->s_seq++;
	}
	idle = (cb->s_smax == (cb->s_rack - 1));
again:
	sendalot = 0;
	off = cb->s_snxt - cb->s_rack;
	win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT));

	/*
	 * If in persist timeout with window of 0, send a probe.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, send what we can and go into
	 * transmit state.
	 */
	if (cb->s_force == 1 + TCPT_PERSIST) {
		if (win != 0) {
			cb->s_timer[TCPT_PERSIST] = 0;
			cb->s_rxtshift = 0;
		}
	}
	span = cb->s_seq - cb->s_rack;
	len = MIN(span, win) - off;

	if (len < 0) {
		/*
		 * Window shrank after we went into it.
		 * If window shrank to 0, cancel pending
		 * retransmission and pull s_snxt back
		 * to (closed) window. We will enter persist
		 * state below. If the window didn't close completely,
		 * just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			cb->s_timer[TCPT_REXMT] = 0;
			cb->s_snxt = cb->s_rack;
		}
	}
	if (len > 1)
		sendalot = 1;
	rcv_win = sbspace(&so->so_rcv);

	/*
	 * Send if we owe peer an ACK.
	 */
	if (cb->s_oobflags & SF_SOOB) {
		/*
		 * must transmit this out of band packet
		 */
		cb->s_oobflags &= ~SF_SOOB;
		sendalot = 1;
		sppstat.spps_sndurg++;
		goto found;
	}
	if (cb->s_flags & SF_ACKNOW)
		goto send;
	if (cb->s_state < TCPS_ESTABLISHED)
		goto send;
	/*
	 * Silly window can't happen in spp.
	 * Code from tcp deleted.
	 */
	if (len)
		goto send;
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input.)  If the difference is at least two
	 * packets or at least 35% of the maximum possible window,
	 * then we want to send a window update to peer.
	 */
	if (rcv_win > 0) {
		u_short delta = 1 + cb->s_alo - cb->s_ack;
		int adv = rcv_win - (delta * cb->s_mtu);

		if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
		    (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
			sppstat.spps_sndwinup++;
			cb->s_flags |= SF_ACKNOW;
			goto send;
		}

	}
	/*
	 * Many comments from tcp_output.c are appropriate here
	 * including . . .
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise send a probe.
	 */
	if (so->so_snd.sb_cc && cb->s_timer[TCPT_REXMT] == 0 &&
	    cb->s_timer[TCPT_PERSIST] == 0) {
		cb->s_rxtshift = 0;
		spp_setpersist(cb);
	}
	/*
	 * No reason to send a packet, just return.
	 */
	cb->s_outx = 1;
	return (0);

send:
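	/*
	 * The send path below locates the packet at s_snxt on the send
	 * queue (or builds a bare system packet when only an ack or probe
	 * is owed), fills in the sequence, ack and allocation fields,
	 * checksums the result, and hands it to ns_output.
	 */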
	/*
	 * Find requested packet.
	 */
	si = 0;
	if (len > 0) {
		cb->s_want = cb->s_snxt;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
				break;
		}
	found:
		if (si) {
			if (si->si_seq == cb->s_snxt)
				cb->s_snxt++;
			else
				sppstat.spps_sndvoid++, si = 0;
		}
	}
	/*
	 * update window
	 */
	if (rcv_win < 0)
		rcv_win = 0;
	alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
	if (SSEQ_LT(alo, cb->s_alo))
		alo = cb->s_alo;

	if (si) {
		/*
		 * must make a copy of this packet for
		 * idp_output to monkey with
		 */
		m = m_copy(dtom(si), 0, (int)M_COPYALL);
		if (m == NULL) {
			return (ENOBUFS);
		}
		m0 = m;
		si = mtod(m, struct spidp *);
		if (SSEQ_LT(si->si_seq, cb->s_smax))
			sppstat.spps_sndrexmitpack++;
		else
			sppstat.spps_sndpack++;
	} else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
		/*
		 * Must send an acknowledgement or a probe
		 */
		if (cb->s_force)
			sppstat.spps_sndprobe++;
		if (cb->s_flags & SF_ACKNOW)
			sppstat.spps_sndacks++;
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Align beginning of packet to long to prepend
		 * ifp's on loopback, or NSIP encapsulation for fussy cpu's.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (*si);
		m->m_next = 0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		si->si_seq = cb->s_smax + 1;
		si->si_len = htons(sizeof (*si));
		si->si_cc |= SP_SP;
	} else {
		cb->s_outx = 3;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
		return (0);
	}
	/*
	 * Stuff checksum and output datagram.
	 */
	if ((si->si_cc & SP_SP) == 0) {
		if (cb->s_force != (1 + TCPT_PERSIST) ||
		    cb->s_timer[TCPT_PERSIST] == 0) {
			/*
			 * If this is a new packet and we are not currently
			 * timing anything, time this one.
			 */
			if (SSEQ_LT(cb->s_smax, si->si_seq)) {
				cb->s_smax = si->si_seq;
				if (cb->s_rtt == 0) {
					sppstat.spps_segstimed++;
					cb->s_rtseq = si->si_seq;
					cb->s_rtt = 1;
				}
			}
			/*
			 * Set rexmt timer if not currently set,
			 * Initial value for retransmit timer is smoothed
			 * round-trip time + 2 * round-trip time variance.
			 * Initialize shift counter which is used for backoff
			 * of retransmit time.
			 */
			if (cb->s_timer[TCPT_REXMT] == 0 &&
			    cb->s_snxt != cb->s_rack) {
				cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
				if (cb->s_timer[TCPT_PERSIST]) {
					cb->s_timer[TCPT_PERSIST] = 0;
					cb->s_rxtshift = 0;
				}
			}
		} else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
			cb->s_smax = si->si_seq;
		}
	} else if (cb->s_state < TCPS_ESTABLISHED) {
		if (cb->s_rtt == 0)
			cb->s_rtt = 1; /* Time initial handshake */
		if (cb->s_timer[TCPT_REXMT] == 0)
			cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
	}
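	/*
	 * Final assembly: request an ack where appropriate, convert the
	 * sequence fields to network order, stuff the checksum (0xffff
	 * means no checksum was computed), and send the datagram.
	 */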
	{
		/*
		 * Do not request acks when we ack their data packets or
		 * when we do a gratuitous window update.
		 */
		if (((si->si_cc & SP_SP) == 0) || cb->s_force)
			si->si_cc |= SP_SA;
		si->si_seq = htons(si->si_seq);
		si->si_alo = htons(alo);
		si->si_ack = htons(cb->s_ack);

		if (idpcksum) {
			si->si_sum = 0;
			len = ntohs(si->si_len);
			if (len & 1)
				len++;
			si->si_sum = ns_cksum(dtom(si), len);
		} else
			si->si_sum = 0xffff;

		cb->s_outx = 4;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);

		if (so->so_options & SO_DONTROUTE)
			error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
		else
			error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
	}
	if (error) {
		return (error);
	}
	sppstat.spps_sndtotal++;
	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	cb->s_force = 0;
	cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
	if (SSEQ_GT(alo, cb->s_alo))
		cb->s_alo = alo;
	if (sendalot)
		goto again;
	cb->s_outx = 5;
	return (0);
}

int spp_do_persist_panics = 0;

spp_setpersist(cb)
	register struct sppcb *cb;
{
	register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
	extern int spp_backoff[];

	if (cb->s_timer[TCPT_REXMT] && spp_do_persist_panics)
		panic("spp_output REXMT");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(cb->s_timer[TCPT_PERSIST],
	    t * spp_backoff[cb->s_rxtshift],
	    TCPTV_PERSMIN, TCPTV_PERSMAX);
	if (cb->s_rxtshift < TCP_MAXRXTSHIFT)
		cb->s_rxtshift++;
}
/*ARGSUSED*/
spp_ctloutput(req, so, level, name, value)
	int req;
	struct socket *so;
	int name;
	struct mbuf **value;
{
	register struct mbuf *m;
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int mask, error = 0;

	if (level != NSPROTO_SPP) {
		/* This will have to be changed when we do more general
		   stacking of protocols */
		return (idp_ctloutput(req, so, level, name, value));
	}
	if (nsp == NULL) {
		error = EINVAL;
		goto release;
	} else
		cb = nstosppcb(nsp);

	switch (req) {

	case PRCO_GETOPT:
		if (value == NULL)
			return (EINVAL);
		m = m_get(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		switch (name) {

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto get_flags;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		get_flags:
			m->m_len = sizeof(short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_flags & mask;
			break;

		case SO_MTU:
			m->m_len = sizeof(u_short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_mtu;
			break;

		case SO_LAST_HEADER:
			m->m_len = sizeof(struct sphdr);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_rhdr;
			break;

		case SO_DEFAULT_HEADERS:
			m->m_len = sizeof(struct spidp);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_shdr;
			break;

		default:
			error = EINVAL;
		}
		*value = m;
		break;
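	/*
	 * Option values travel in mbufs: PRCO_GETOPT above hands back an
	 * mbuf which the caller frees, while PRCO_SETOPT below consumes
	 * *value, which is freed once the option has been examined.
	 */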
	case PRCO_SETOPT:
		if (value == 0 || *value == 0) {
			error = EINVAL;
			break;
		}
		switch (name) {
			int *ok;

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto set_head;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		set_head:
			if (cb->s_flags & SF_PI) {
				ok = mtod(*value, int *);
				if (*ok)
					cb->s_flags |= mask;
				else
					cb->s_flags &= ~mask;
			} else error = EINVAL;
			break;

		case SO_MTU:
			cb->s_mtu = *(mtod(*value, u_short *));
			break;

		case SO_DEFAULT_HEADERS:
			{
				register struct sphdr *sp
					= mtod(*value, struct sphdr *);

				cb->s_dt = sp->sp_dt;
				cb->s_cc = sp->sp_cc & SP_EM;
			}
			break;

		default:
			error = EINVAL;
		}
		m_freem(*value);
		break;
	}
release:
	return (error);
}

/*ARGSUSED*/
spp_usrreq(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int s = splnet();
	int error = 0, ostate;
	struct mbuf *mm;
	register struct sockbuf *sb;

	if (req == PRU_CONTROL)
		return (ns_control(so, (int)m, (caddr_t)nam,
			(struct ifnet *)rights));
	if (rights && rights->m_len) {
		error = EINVAL;
		goto release;
	}
	if (nsp == NULL) {
		if (req != PRU_ATTACH) {
			error = EINVAL;
			goto release;
		}
	} else
		cb = nstosppcb(nsp);

	ostate = cb ? cb->s_state : 0;

	switch (req) {

	case PRU_ATTACH:
		if (nsp != NULL) {
			error = EISCONN;
			break;
		}
		error = ns_pcballoc(so, &nspcb);
		if (error)
			break;
		error = soreserve(so, 3072, 3072);
		if (error)
			break;
		nsp = sotonspcb(so);

		mm = m_getclr(M_DONTWAIT, MT_PCB);
		sb = &so->so_snd;

		if (mm == NULL) {
			error = ENOBUFS;
			break;
		}
		cb = mtod(mm, struct sppcb *);
		mm = m_getclr(M_DONTWAIT, MT_HEADER);
		if (mm == NULL) {
			m_free(dtom(cb));
			error = ENOBUFS;
			break;
		}
		cb->s_idp = mtod(mm, struct idp *);
		cb->s_state = TCPS_LISTEN;
		cb->s_smax = -1;
		cb->s_swl1 = -1;
		cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
		cb->s_nspcb = nsp;
		cb->s_mtu = 576 - sizeof (struct spidp);
		cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
		cb->s_ssthresh = cb->s_cwnd;
		cb->s_cwmx = sb->sb_mbmax * CUNIT /
				(2 * sizeof (struct spidp));
		/* Above is recomputed when connecting to account
		   for changed buffering or mtu's */
		cb->s_rtt = TCPTV_SRTTBASE;
		cb->s_rttvar = TCPTV_SRTTDFLT << 2;
		TCPT_RANGESET(cb->s_rxtcur,
		    ((TCPTV_SRTTBASE >> 2) + (TCPTV_SRTTDFLT << 2)) >> 1,
		    TCPTV_MIN, TCPTV_REXMTMAX);
		nsp->nsp_pcb = (caddr_t) cb;
		break;

	case PRU_DETACH:
		if (nsp == NULL) {
			error = ENOTCONN;
			break;
		}
		if (cb->s_state > TCPS_LISTEN)
			cb = spp_disconnect(cb);
		else
			cb = spp_close(cb);
		break;

	case PRU_BIND:
		error = ns_pcbbind(nsp, nam);
		break;

	case PRU_LISTEN:
		if (nsp->nsp_lport == 0)
			error = ns_pcbbind(nsp, (struct mbuf *)0);
		if (error == 0)
			cb->s_state = TCPS_LISTEN;
		break;

	/*
	 * Initiate connection to peer.
	 * Enter SYN_SENT state, and mark socket as connecting.
	 * Start keep-alive timer, set up prototype header,
	 * send initial system packet requesting connection.
	 */
	case PRU_CONNECT:
		if (nsp->nsp_lport == 0) {
			error = ns_pcbbind(nsp, (struct mbuf *)0);
			if (error)
				break;
		}
		error = ns_pcbconnect(nsp, nam);
		if (error)
			break;
		soisconnecting(so);
		sppstat.spps_connattempt++;
		cb->s_state = TCPS_SYN_SENT;
		cb->s_did = 0;
		spp_template(cb);
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		cb->s_force = 1 + TCPTV_KEEP;
		/*
		 * Other party is required to respond to
		 * the port I send from, but he is not
		 * required to answer from where I am sending to,
		 * so allow wildcarding.
		 * Original port I am sending to is still saved in
		 * cb->s_dport.
		 */
		nsp->nsp_fport = 0;
		error = spp_output(cb, (struct mbuf *) 0);
		break;

	case PRU_CONNECT2:
		error = EOPNOTSUPP;
		break;

	/*
	 * We may decide later to implement connection closing
	 * handshaking at the spp level optionally.
	 * Here is the hook to do it:
	 */
	case PRU_DISCONNECT:
		cb = spp_disconnect(cb);
		break;

	/*
	 * Accept a connection. Essentially all the work is
	 * done at higher levels; just return the address
	 * of the peer, storing through addr.
	 */
	case PRU_ACCEPT: {
		struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);

		nam->m_len = sizeof (struct sockaddr_ns);
		sns->sns_family = AF_NS;
		sns->sns_addr = nsp->nsp_faddr;
		break;
	}

	case PRU_SHUTDOWN:
		socantsendmore(so);
		cb = spp_usrclosed(cb);
		if (cb)
			error = spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * After a receive, possibly send acknowledgment
	 * updating allocation.
	 */
	case PRU_RCVD:
		cb->s_flags |= SF_RVD;
		(void) spp_output(cb, (struct mbuf *) 0);
		cb->s_flags &= ~SF_RVD;
		break;

	case PRU_ABORT:
		(void) spp_drop(cb, ECONNABORTED);
		break;

	case PRU_SENSE:
	case PRU_CONTROL:
		m = NULL;
		error = EOPNOTSUPP;
		break;

	case PRU_RCVOOB:
		if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
		    (so->so_state & SS_RCVATMARK)) {
			m->m_len = 1;
			*mtod(m, caddr_t) = cb->s_iobc;
			break;
		}
		error = EINVAL;
		break;

	case PRU_SENDOOB:
		if (sbspace(&so->so_snd) < -512) {
			error = ENOBUFS;
			break;
		}
		cb->s_oobflags |= SF_SOOB;
		/* fall into */
	case PRU_SEND:
		error = spp_output(cb, m);
		m = NULL;
		break;

	case PRU_SOCKADDR:
		ns_setsockaddr(nsp, nam);
		break;

	case PRU_PEERADDR:
		ns_setpeeraddr(nsp, nam);
		break;

	case PRU_SLOWTIMO:
		cb = spp_timers(cb, (int)nam);
		req |= ((int)nam) << 8;
		break;

	case PRU_FASTTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		error = EOPNOTSUPP;
		break;

	default:
		panic("sp_usrreq");
	}
	if (cb && (so->so_options & SO_DEBUG || traceallspps))
		spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
release:
	if (m != NULL)
		m_freem(m);
	splx(s);
	return (error);
}

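/*
 * spp_usrreq_sp wraps spp_usrreq, turning on the header and
 * packet-interface flags (SF_HI|SF_HO|SF_PI) on a successful attach.
 */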
spp_usrreq_sp(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	int error = spp_usrreq(so, req, m, nam, rights);

	if (req == PRU_ATTACH && error == 0) {
		struct nspcb *nsp = sotonspcb(so);

		((struct sppcb *)nsp->nsp_pcb)->s_flags |=
			(SF_HI | SF_HO | SF_PI);
	}
	return (error);
}

/*
 * Create template to be used to send spp packets on a connection.
 * Called after host entry created, fills
 * in a skeletal spp header (choosing connection id),
 * minimizing the amount of work necessary when the connection is used.
 */
spp_template(cb)
	register struct sppcb *cb;
{
	register struct nspcb *nsp = cb->s_nspcb;
	register struct idp *idp = cb->s_idp;
	register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);

	idp->idp_pt = NSPROTO_SPP;
	idp->idp_sna = nsp->nsp_laddr;
	idp->idp_dna = nsp->nsp_faddr;
	cb->s_sid = htons(spp_iss);
	spp_iss += SPP_ISSINCR/2;
	cb->s_alo = 1;
	cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
	cb->s_ssthresh = cb->s_cwnd;	/* Try to expand fast to full complement
					   of large packets */
	cb->s_cwmx = (sb->sb_mbmax * CUNIT) / (2 * sizeof(struct spidp));
	cb->s_cwmx = MAX(cb->s_cwmx, cb->s_cwnd);
					/* But allow for lots of little packets as well */
}

/*
 * Close a SPP control block:
 *	discard spp control block itself
 *	discard ns protocol control block
 *	wake up any sleepers
 */
struct sppcb *
spp_close(cb)
	register struct sppcb *cb;
{
	register struct spidp_q *s;
	struct nspcb *nsp = cb->s_nspcb;
	struct socket *so = nsp->nsp_socket;
	register struct mbuf *m;

	s = cb->s_q.si_next;
	while (s != &(cb->s_q)) {
		s = s->si_next;
		m = dtom(s->si_prev);
		remque(s->si_prev);
		m_freem(m);
	}
	(void) m_free(dtom(cb->s_idp));
	(void) m_free(dtom(cb));
	nsp->nsp_pcb = 0;
	soisdisconnected(so);
	ns_pcbdetach(nsp);
	sppstat.spps_closed++;
	return ((struct sppcb *)0);
}
/*
 * Someday we may do level 3 handshaking
 * to close a connection or send a xerox style error.
 * For now, just close.
 */
struct sppcb *
spp_usrclosed(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
struct sppcb *
spp_disconnect(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
/*
 * Drop connection, reporting
 * the specified error.
 */
struct sppcb *
spp_drop(cb, errno)
	register struct sppcb *cb;
	int errno;
{
	struct socket *so = cb->s_nspcb->nsp_socket;

	/*
	 * someday, in the xerox world
	 * we will generate error protocol packets
	 * announcing that the socket has gone away.
	 */
	if (TCPS_HAVERCVDSYN(cb->s_state)) {
		sppstat.spps_drops++;
		cb->s_state = TCPS_CLOSED;
		/*(void) tcp_output(cb);*/
	} else
		sppstat.spps_conndrops++;
	so->so_error = errno;
	return (spp_close(cb));
}

spp_abort(nsp)
	struct nspcb *nsp;
{

	(void) spp_close((struct sppcb *)nsp->nsp_pcb);
}

int spp_backoff[TCP_MAXRXTSHIFT+1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
/*
 * Fast timeout routine for processing delayed acks
 */
spp_fasttimo()
{
	register struct nspcb *nsp;
	register struct sppcb *cb;
	int s = splnet();

	nsp = nspcb.nsp_next;
	if (nsp)
	for (; nsp != &nspcb; nsp = nsp->nsp_next)
		if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
		    (cb->s_flags & SF_DELACK)) {
			cb->s_flags &= ~SF_DELACK;
			cb->s_flags |= SF_ACKNOW;
			sppstat.spps_delack++;
			(void) spp_output(cb, (struct mbuf *) 0);
		}
	splx(s);
}

/*
 * spp protocol timeout routine called every 500 ms.
 * Updates the timers in all active pcb's and
 * causes finite state machine actions if timers expire.
 */
spp_slowtimo()
{
	register struct nspcb *ip, *ipnxt;
	register struct sppcb *cb;
	int s = splnet();
	register int i;

	/*
	 * Search through tcb's and update active timers.
	 */
	ip = nspcb.nsp_next;
	if (ip == 0) {
		splx(s);
		return;
	}
	while (ip != &nspcb) {
		cb = nstosppcb(ip);
		ipnxt = ip->nsp_next;
		if (cb == 0)
			goto tpgone;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
				(void) spp_usrreq(cb->s_nspcb->nsp_socket,
				    PRU_SLOWTIMO, (struct mbuf *)0,
				    (struct mbuf *)i, (struct mbuf *)0);
				if (ipnxt->nsp_prev != ip)
					goto tpgone;
			}
		}
		cb->s_idle++;
		if (cb->s_rtt)
			cb->s_rtt++;
tpgone:
		ip = ipnxt;
	}
	spp_iss += SPP_ISSINCR/PR_SLOWHZ;	/* increment iss */
	splx(s);
}
/*
 * SPP timer processing.
 */
struct sppcb *
spp_timers(cb, timer)
	register struct sppcb *cb;
	int timer;
{
	long rexmt;
	int win;

	cb->s_force = 1 + timer;
	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  TCP deletes connection
	 * control block.
	 */
	case TCPT_2MSL:
		printf("spp: TCPT_2MSL went off for no reason\n");
		cb->s_timer[timer] = 0;
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one packet.
	 */
	case TCPT_REXMT:
		if (++cb->s_rxtshift > TCP_MAXRXTSHIFT) {
			cb->s_rxtshift = TCP_MAXRXTSHIFT;
			sppstat.spps_timeoutdrop++;
			cb = spp_drop(cb, ETIMEDOUT);
			break;
		}
		sppstat.spps_rexmttimeo++;
		rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
		rexmt *= spp_backoff[cb->s_rxtshift];
		TCPT_RANGESET(cb->s_rxtcur, rexmt, TCPTV_MIN, TCPTV_REXMTMAX);
		cb->s_timer[TCPT_REXMT] = cb->s_rxtcur;
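		/*
		 * The backoff above scales the usual rtt-based estimate by
		 * spp_backoff[s_rxtshift] and clamps it to
		 * [TCPTV_MIN, TCPTV_REXMTMAX], so successive timeouts
		 * roughly double the interval up to a factor of 64.
		 */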
		/*
		 * If we have backed off fairly far, our srtt
		 * estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (cb->s_rxtshift > TCP_MAXRXTSHIFT / 4) {
			cb->s_rttvar += (cb->s_srtt >> 2);
			cb->s_srtt = 0;
		}
		cb->s_snxt = cb->s_rack;
		/*
		 * If timing a packet, stop the timer.
		 */
		cb->s_rtt = 0;
		/*
		 * See very long discussion in tcp_timer.c about congestion
		 * window and ssthresh
		 */
		win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
		if (win < 2)
			win = 2;
		cb->s_cwnd = CUNIT;
		cb->s_ssthresh = win * CUNIT;
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a probe to be sent.
	 */
	case TCPT_PERSIST:
		sppstat.spps_persisttimeo++;
		spp_setpersist(cb);
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		sppstat.spps_keeptimeo++;
		if (cb->s_state < TCPS_ESTABLISHED)
			goto dropit;
		if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
			if (cb->s_idle >= TCPTV_MAXIDLE)
				goto dropit;
			sppstat.spps_keepprobe++;
			(void) spp_output(cb, (struct mbuf *) 0);
		} else
			cb->s_idle = 0;
		cb->s_timer[TCPT_KEEP] = TCPTV_KEEP;
		break;
	dropit:
		sppstat.spps_keepdrops++;
		cb = spp_drop(cb, ETIMEDOUT);
		break;
	}
	return (cb);
}
#ifndef lint
int SppcbSize = sizeof (struct sppcb);
int NspcbSize = sizeof (struct nspcb);
#endif lint