/*
 * Copyright (c) 1984, 1985, 1986, 1987 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)spp_usrreq.c	7.7 (Berkeley) 05/25/88
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "mbuf.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/route.h"
#include "../netinet/tcp_fsm.h"

#include "ns.h"
#include "ns_pcb.h"
#include "idp.h"
#include "idp_var.h"
#include "ns_error.h"
#include "sp.h"
#include "spidp.h"
#include "spp_timer.h"
#include "spp_var.h"
#include "spp_debug.h"

/*
 * SP protocol implementation.
 */
spp_init()
{

	spp_iss = 1; /* WRONG !! should fish it out of TODR */
}
struct spidp spp_savesi;
int traceallspps = 0;
extern int sppconsdebug;
int spp_hardnosed;
int spp_use_delack = 0;

/*ARGSUSED*/
spp_input(m, nsp, ifp)
	register struct mbuf *m;
	register struct nspcb *nsp;
	struct ifnet *ifp;
{
	register struct sppcb *cb;
	register struct spidp *si = mtod(m, struct spidp *);
	register struct socket *so;
	short ostate;
	int dropsocket = 0;


	sppstat.spps_rcvtotal++;
	if (nsp == 0) {
		panic("No nspcb in spp_input\n");
		return;
	}

	cb = nstosppcb(nsp);
	if (cb == 0) goto bad;

	if (m->m_len < sizeof(*si)) {
		if ((m = m_pullup(m, sizeof(*si))) == 0) {
			sppstat.spps_rcvshort++;
			return;
		}
		si = mtod(m, struct spidp *);
	}
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);
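	/*
	 * The sequence fields stay in host order from here on; they are
	 * put back into network order in spp_output() and again before
	 * the packet is handed to ns_error() at ``dropwithreset'' below.
	 */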
	so = nsp->nsp_socket;
	if (so->so_options & SO_DEBUG || traceallspps) {
		ostate = cb->s_state;
		spp_savesi = *si;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		struct sppcb *ocb = cb;

		so = sonewconn(so);
		if (so == 0) {
			goto drop;
		}
		/*
		 * This is ugly, but ....
		 *
		 * Mark socket as temporary until we're
		 * committed to keeping it.  The code at
		 * ``drop'' and ``dropwithreset'' checks the
		 * flag dropsocket to see if the temporary
		 * socket created here should be discarded.
		 * We mark the socket as discardable until
		 * we're committed to it below in TCPS_LISTEN.
		 */
		dropsocket++;
		nsp = (struct nspcb *)so->so_pcb;
		nsp->nsp_laddr = si->si_dna;
		cb = nstosppcb(nsp);
		cb->s_mtu = ocb->s_mtu;		/* preserve sockopts */
		cb->s_flags = ocb->s_flags;	/* preserve sockopts */
		cb->s_state = TCPS_LISTEN;
	}

	/*
	 * Packet received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	cb->s_idle = 0;
	cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;

	switch (cb->s_state) {

	case TCPS_LISTEN: {
		struct mbuf *am;
		register struct sockaddr_ns *sns;
		struct ns_addr laddr;

		/*
		 * If somebody here was carrying on a conversation
		 * and went away, and his pen pal thinks he can
		 * still talk, we get the misdirected packet.
		 */
		if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
			spp_istat.gonawy++;
			goto dropwithreset;
		}
		am = m_get(M_DONTWAIT, MT_SONAME);
		if (am == NULL)
			goto drop;
		am->m_len = sizeof (struct sockaddr_ns);
		sns = mtod(am, struct sockaddr_ns *);
		sns->sns_family = AF_NS;
		sns->sns_addr = si->si_sna;
		laddr = nsp->nsp_laddr;
		if (ns_nullhost(laddr))
			nsp->nsp_laddr = si->si_dna;
		if (ns_pcbconnect(nsp, am)) {
			nsp->nsp_laddr = laddr;
			(void) m_free(am);
			spp_istat.noconn++;
			goto drop;
		}
		(void) m_free(am);
		spp_template(cb);
		dropsocket = 0;		/* committed to socket */
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
#define THREEWAYSHAKE
#ifdef THREEWAYSHAKE
		cb->s_state = TCPS_SYN_RECEIVED;
		cb->s_force = 1 + SPPT_KEEP;
		sppstat.spps_accepts++;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		}
		break;
	/*
	 * This state means that we have heard a response
	 * to our acceptance of their connection.
	 * It is probably logically unnecessary in this
	 * implementation.
	 */
	case TCPS_SYN_RECEIVED: {
		if (si->si_did != cb->s_sid) {
			spp_istat.wrncon++;
			goto drop;
		}
#endif
		nsp->nsp_fport = si->si_sport;
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		sppstat.spps_accepts++;
		}
		break;
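	/*
	 * Note how the THREEWAYSHAKE define above splices the two cases
	 * together: with it defined (as here), a passive open goes
	 * LISTEN -> SYN_RECEIVED and waits for one more packet before
	 * becoming ESTABLISHED; with it undefined, the LISTEN code falls
	 * straight through to the soisconnected() path.
	 */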

	/*
	 * This state means that we have gotten a response
	 * to our attempt to establish a connection.
	 * We fill in the data from the other side,
	 * telling us which port to respond to, instead of the well-
	 * known one we might have sent to in the first place.
	 * We also require that this is a response to our
	 * connection id.
	 */
	case TCPS_SYN_SENT:
		if (si->si_did != cb->s_sid) {
			spp_istat.notme++;
			goto drop;
		}
		sppstat.spps_connects++;
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
		cb->s_dport = nsp->nsp_fport = si->si_sport;
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_flags |= SF_ACKNOW;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		/* Use roundtrip time of connection request for initial rtt */
		if (cb->s_rtt) {
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
			SPPT_RANGESET(cb->s_rxtcur,
			    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
			    SPPTV_MIN, SPPTV_REXMTMAX);
			cb->s_rtt = 0;
		}
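		/*
		 * As in TCP, s_srtt is kept scaled by 8 and s_rttvar by 4,
		 * so the expression above works out to roughly
		 * srtt + 2 * rttvar; with only the handshake sample to go
		 * on, that is about twice the measured round trip,
		 * clamped between SPPTV_MIN and SPPTV_REXMTMAX.
		 */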
	}
	if (so->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);

	m->m_len -= sizeof (struct idp);
	m->m_off += sizeof (struct idp);

	if (spp_reass(cb, si)) {
		(void) m_freem(m);
	}
	if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
		(void) spp_output(cb, (struct mbuf *)0);
	cb->s_flags &= ~(SF_WIN|SF_RXT);
	return;

dropwithreset:
	if (dropsocket)
		(void) soabort(so);
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);
	ns_error(dtom(si), NS_ERR_NOSOCK, 0);
	if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	return;

drop:
bad:
	if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
	    traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	m_freem(m);
}

int spprexmtthresh = 3;

/*
 * This is structurally similar to the tcp reassembly routine
 * but its function is somewhat different:  It merely queues
 * packets up, and suppresses duplicates.
 */
spp_reass(cb, si)
	register struct sppcb *cb;
	register struct spidp *si;
{
	register struct spidp_q *q;
	register struct mbuf *m;
	register struct socket *so = cb->s_nspcb->nsp_socket;
	char packetp = cb->s_flags & SF_HI;
	int incr;
	char wakeup = 0;

	if (si == SI(0))
		goto present;
	/*
	 * Update our news from them.
	 */
	if (si->si_cc & SP_SA)
		cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
	if (SSEQ_GT(si->si_alo, cb->s_ralo))
		cb->s_flags |= SF_WIN;
	if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
		if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
			sppstat.spps_rcvdupack++;
			/*
			 * If this is a completely duplicate ack
			 * and other conditions hold, we assume
			 * a packet has been dropped and retransmit
			 * it exactly as in tcp_input().
			 */
			if (si->si_ack != cb->s_rack ||
			    si->si_alo != cb->s_ralo)
				cb->s_dupacks = 0;
			else if (++cb->s_dupacks == spprexmtthresh) {
				u_short onxt = cb->s_snxt;
				int cwnd = cb->s_cwnd;

				cb->s_snxt = si->si_ack;
				cb->s_cwnd = CUNIT;
				cb->s_force = 1 + SPPT_REXMT;
				(void) spp_output(cb, (struct mbuf *)0);
				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
				cb->s_rtt = 0;
				if (cwnd >= 4 * CUNIT)
					cb->s_cwnd = cwnd / 2;
				if (SSEQ_GT(onxt, cb->s_snxt))
					cb->s_snxt = onxt;
				return (1);
			}
		} else
			cb->s_dupacks = 0;
		goto update_window;
	}
	cb->s_dupacks = 0;
	/*
	 * If our correspondent acknowledges data we haven't sent,
	 * TCP would drop the packet after acking; we'll be a little
	 * more permissive.
	 */
	if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
		sppstat.spps_rcvacktoomuch++;
		si->si_ack = cb->s_smax + 1;
	}
	sppstat.spps_rcvackpack++;
	/*
	 * If transmit timer is running and timed sequence
	 * number was acked, update smoothed round trip time.
	 * See discussion of algorithm in tcp_input.c
	 */
	if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
		sppstat.spps_rttupdated++;
		if (cb->s_srtt != 0) {
			register short delta;
			delta = cb->s_rtt - (cb->s_srtt >> 3);
			if ((cb->s_srtt += delta) <= 0)
				cb->s_srtt = 1;
			if (delta < 0)
				delta = -delta;
			delta -= (cb->s_rttvar >> 2);
			if ((cb->s_rttvar += delta) <= 0)
				cb->s_rttvar = 1;
		} else {
			/*
			 * No rtt measurement yet
			 */
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
		}
		cb->s_rtt = 0;
		cb->s_rxtshift = 0;
		SPPT_RANGESET(cb->s_rxtcur,
		    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
		    SPPTV_MIN, SPPTV_REXMTMAX);
	}
	/*
	 * If all outstanding data is acked, stop retransmit
	 * timer and remember to restart (more output or persist).
	 * If there is more data to be acked, restart retransmit
	 * timer, using current (possibly backed-off) value;
	 */
	if (si->si_ack == cb->s_smax + 1) {
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_flags |= SF_RXT;
	} else if (cb->s_timer[SPPT_PERSIST] == 0)
		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
	/*
	 * When new data is acked, open the congestion window.
	 * If the window gives us less than ssthresh packets
	 * in flight, open exponentially (maxseg at a time).
	 * Otherwise open linearly (maxseg^2 / cwnd at a time).
	 */
	incr = CUNIT;
	if (cb->s_cwnd > cb->s_ssthresh)
		incr = MAX(incr * incr / cb->s_cwnd, 1);
	cb->s_cwnd = MIN(cb->s_cwnd + incr, cb->s_cwmx);
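	/*
	 * The window is kept in units of CUNIT per packet, so this is
	 * TCP-style slow start / congestion avoidance: below ssthresh
	 * each ack grows the window by one packet's worth (CUNIT);
	 * above it, by roughly one packet per round trip.  s_cwmx caps
	 * the growth.
	 */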
	/*
	 * Trim acked data from output queue.
	 */
	while ((m = so->so_snd.sb_mb) != NULL) {
		if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
			sbdroprecord(&so->so_snd);
		else
			break;
	}
	if ((so->so_snd.sb_flags & SB_WAIT) || so->so_snd.sb_sel)
		sowwakeup(so);
	cb->s_rack = si->si_ack;
update_window:
	if (SSEQ_LT(cb->s_snxt, cb->s_rack))
		cb->s_snxt = cb->s_rack;
	if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
	    (SSEQ_LT(cb->s_swl2, si->si_ack) ||
	     cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
		/* keep track of pure window updates */
		if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
		    && SSEQ_LT(cb->s_ralo, si->si_alo)) {
			sppstat.spps_rcvwinupd++;
			sppstat.spps_rcvdupack--;
		}
		cb->s_ralo = si->si_alo;
		cb->s_swl1 = si->si_seq;
		cb->s_swl2 = si->si_ack;
		cb->s_swnd = (1 + si->si_alo - si->si_ack);
		if (cb->s_swnd > cb->s_smxw)
			cb->s_smxw = cb->s_swnd;
		cb->s_flags |= SF_WIN;
	}
	/*
	 * If this packet number is higher than that which
	 * we have allocated, refuse it, unless urgent.
	 */
	if (SSEQ_GT(si->si_seq, cb->s_alo)) {
		if (si->si_cc & SP_SP) {
			sppstat.spps_rcvwinprobe++;
			return (1);
		} else
			sppstat.spps_rcvpackafterwin++;
		if (si->si_cc & SP_OB) {
			if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
				ns_error(dtom(si), NS_ERR_FULLUP, 0);
				return (0);
			} /* else queue this packet; */
		} else {
			/*register struct socket *so = cb->s_nspcb->nsp_socket;
			if (so->so_state && SS_NOFDREF) {
				ns_error(dtom(si), NS_ERR_NOSOCK, 0);
				(void)spp_close(cb);
			} else
				       would crash system*/
			spp_istat.notyet++;
			ns_error(dtom(si), NS_ERR_FULLUP, 0);
			return (0);
		}
	}
	/*
	 * If this is a system packet, we don't need to
	 * queue it up, and won't update acknowledge #
	 */
	if (si->si_cc & SP_SP) {
		return (1);
	}
	/*
	 * We have already seen this packet, so drop.
	 */
	if (SSEQ_LT(si->si_seq, cb->s_ack)) {
		spp_istat.bdreas++;
		sppstat.spps_rcvduppack++;
		if (si->si_seq == cb->s_ack - 1)
			spp_istat.lstdup++;
		return (1);
	}
	/*
	 * Loop through all packets queued up to insert in
	 * appropriate sequence.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (si->si_seq == SI(q)->si_seq) {
			sppstat.spps_rcvduppack++;
			return (1);
		}
		if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
			sppstat.spps_rcvoopack++;
			break;
		}
	}
	insque(si, q->si_prev);
	/*
	 * If this packet is urgent, inform process
	 */
	if (si->si_cc & SP_OB) {
		cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
		sohasoutofband(so);
		cb->s_oobflags |= SF_IOOB;
	}
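	/*
	 * The loop below delivers data to the user in order: s_ack is
	 * advanced only across consecutively numbered packets, so a hole
	 * in the queue stops delivery (and further acknowledgement) until
	 * the missing packet arrives.  In packet mode (SF_HI) each packet
	 * is passed up as a record with its header; otherwise the sp
	 * header is stripped and the data appended as a byte stream.
	 */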
present:
#define SPINC sizeof(struct sphdr)
	/*
	 * Loop through all packets queued up to update acknowledge
	 * number, and present all acknowledged data to user;
	 * If in packet interface mode, show packet headers.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (SI(q)->si_seq == cb->s_ack) {
			cb->s_ack++;
			m = dtom(q);
			if (SI(q)->si_cc & SP_OB) {
				cb->s_oobflags &= ~SF_IOOB;
				if (so->so_rcv.sb_cc)
					so->so_oobmark = so->so_rcv.sb_cc;
				else
					so->so_state |= SS_RCVATMARK;
			}
			q = q->si_prev;
			remque(q->si_next);
			wakeup = 1;
			sppstat.spps_rcvpack++;
			if (packetp) {
				sbappendrecord(&so->so_rcv, m);
			} else {
				cb->s_rhdr = *mtod(m, struct sphdr *);
				m->m_off += SPINC;
				m->m_len -= SPINC;
				sbappend(&so->so_rcv, m);
			}
		} else
			break;
	}
	if (wakeup)
		sorwakeup(so);
	return (0);
}

spp_ctlinput(cmd, arg)
	int cmd;
	caddr_t arg;
{
	struct ns_addr *na;
	extern u_char nsctlerrmap[];
	extern spp_abort(), spp_quench();
	extern struct nspcb *idp_drop();
	struct ns_errp *errp;
	struct nspcb *nsp;
	struct sockaddr_ns *sns;
	int type;

	if (cmd < 0 || cmd >= PRC_NCMDS)
		return;
	type = NS_ERR_UNREACH_HOST;

	switch (cmd) {

	case PRC_ROUTEDEAD:
		return;

	case PRC_IFDOWN:
	case PRC_HOSTDEAD:
	case PRC_HOSTUNREACH:
		sns = (struct sockaddr_ns *)arg;
		if (sns->sns_family != AF_NS)
			return;
		na = &sns->sns_addr;
		break;

	default:
		errp = (struct ns_errp *)arg;
		na = &errp->ns_err_idp.idp_dna;
		type = errp->ns_err_num;
		type = ntohs((u_short)type);
	}
	switch (type) {

	case NS_ERR_UNREACH_HOST:
		ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
		break;

	case NS_ERR_TOO_BIG:
	case NS_ERR_NOSOCK:
		nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
			NS_WILDCARD);
		if (nsp) {
			if (nsp->nsp_pcb)
				(void) spp_drop((struct sppcb *)nsp->nsp_pcb,
						(int)nsctlerrmap[cmd]);
			else
				(void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
		}
		break;

	case NS_ERR_FULLUP:
		ns_pcbnotify(na, 0, spp_quench, (long) 0);
	}
}
/*
 * When a source quench is received, close congestion window
 * to one packet.  We will gradually open it again as we proceed.
 */
spp_quench(nsp)
	struct nspcb *nsp;
{
	struct sppcb *cb = nstosppcb(nsp);

	if (cb)
		cb->s_cwnd = CUNIT;
}
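/*
 * (The quenched window is grown again, a packet at a time, by the
 * slow-start/congestion-avoidance code in spp_reass() above.)
 */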

#ifdef notdef
int
spp_fixmtu(nsp)
	register struct nspcb *nsp;
{
	register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
	register struct mbuf *m;
	register struct spidp *si;
	struct ns_errp *ep;
	struct sockbuf *sb;
	int badseq, len;
	struct mbuf *firstbad, *m0;

	if (cb) {
		/*
		 * The notification that we have sent
		 * too much is bad news -- we will
		 * have to go through the packets queued up so far,
		 * splitting ones which are too big and
		 * reassigning sequence numbers and checksums.
		 * We should then retransmit all packets from
		 * one above the offending packet to the last one
		 * we had sent (or our allocation),
		 * then the offending one, so that any queued
		 * data at our destination will be discarded.
		 */
		ep = (struct ns_errp *)nsp->nsp_notify_param;
		sb = &nsp->nsp_socket->so_snd;
		cb->s_mtu = ep->ns_err_param;
		badseq = SI(&ep->ns_err_idp)->si_seq;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (si->si_seq == badseq)
				break;
		}
		if (m == 0)
			return;
		firstbad = m;
		/*for (;;) {*/
		/* calculate length */
		for (m0 = m, len = 0; m; m = m->m_next)
			len += m->m_len;
		if (len > cb->s_mtu) {
		}
		/* FINISH THIS
		} */
	}
}
#endif

spp_output(cb, m0)
	register struct sppcb *cb;
	struct mbuf *m0;
{
	struct socket *so = cb->s_nspcb->nsp_socket;
	register struct mbuf *m;
	register struct spidp *si = (struct spidp *) 0;
	register struct sockbuf *sb = &so->so_snd;
	int len = 0, win, rcv_win;
	short span, off;
	u_short alo;
	int error = 0, sendalot;
#ifdef notdef
	int idle;
#endif
	struct mbuf *mprev;
	extern int idpcksum;

	if (m0) {
		int mtu = cb->s_mtu;
		int datalen;
		/*
		 * Make sure that packet isn't too big.
		 */
		for (m = m0; m; m = m->m_next) {
			mprev = m;
			len += m->m_len;
		}
		datalen = (cb->s_flags & SF_HO) ?
				len - sizeof (struct sphdr) : len;
		if (datalen > mtu) {
			if (cb->s_flags & SF_PI) {
				m_freem(m0);
				return (EMSGSIZE);
			} else {
				int oldEM = cb->s_cc & SP_EM;

				cb->s_cc &= ~SP_EM;
				while (len > mtu) {
					m = m_copy(m0, 0, mtu);
					if (m == NULL) {
						error = ENOBUFS;
						goto bad_copy;
					}
					error = spp_output(cb, m);
					if (error) {
					bad_copy:
						cb->s_cc |= oldEM;
						m_freem(m0);
						return (error);
					}
					m_adj(m0, mtu);
					len -= mtu;
				}
				cb->s_cc |= oldEM;
			}
		}
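		/*
		 * Each piece sent above had the end-of-message bit
		 * cleared; the caller's original SP_EM setting (oldEM)
		 * is restored so that the final fragment, sent by the
		 * normal path below, carries it, and a message boundary
		 * is seen only at the end of the original write.
		 */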
		/*
		 * Force length even, by adding a "garbage byte" if
		 * necessary.
		 */
		if (len & 1) {
			m = mprev;
			if (m->m_len + m->m_off < MMAXOFF)
				m->m_len++;
			else {
				struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);

				if (m1 == 0) {
					m_freem(m0);
					return (ENOBUFS);
				}
				m1->m_len = 1;
				m1->m_off = MMAXOFF - 1;
				m->m_next = m1;
			}
		}
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			m_freem(m0);
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Long align so prepended ip headers will work on Gould.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (struct spidp);
		m->m_next = m0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
			register struct sphdr *sh;
			if (m0->m_len < sizeof (*sh)) {
				if ((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
					(void) m_free(m);
					m_freem(m0);
					return (EINVAL);
				}
				m->m_next = m0;
			}
			sh = mtod(m0, struct sphdr *);
			si->si_dt = sh->sp_dt;
			si->si_cc |= sh->sp_cc & SP_EM;
			m0->m_len -= sizeof (*sh);
			m0->m_off += sizeof (*sh);
			len -= sizeof (*sh);
		}
		len += sizeof (*si);
		if (cb->s_oobflags & SF_SOOB) {
			/*
			 * Per jqj@cornell:
			 * make sure OB packets convey exactly 1 byte.
			 * If the packet is 1 byte or larger, we
			 * have already guaranteed there to be at least
			 * one garbage byte for the checksum, and
			 * extra bytes shouldn't hurt!
			 */
			if (len > sizeof(*si)) {
				si->si_cc |= SP_OB;
				len = (1 + sizeof(*si));
			}
		}
		si->si_len = htons((u_short)len);
		/*
		 * queue stuff up for output
		 */
		sbappendrecord(sb, m);
		cb->s_seq++;
	}
#ifdef notdef
	idle = (cb->s_smax == (cb->s_rack - 1));
#endif
again:
	sendalot = 0;
	off = cb->s_snxt - cb->s_rack;
	win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT));

	/*
	 * If in persist timeout with window of 0, send a probe.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, send what we can and go into
	 * transmit state.
	 */
	if (cb->s_force == 1 + SPPT_PERSIST) {
		if (win != 0) {
			cb->s_timer[SPPT_PERSIST] = 0;
			cb->s_rxtshift = 0;
		}
	}
	span = cb->s_seq - cb->s_rack;
	len = MIN(span, win) - off;

	if (len < 0) {
		/*
		 * Window shrank after we went into it.
		 * If window shrank to 0, cancel pending
		 * retransmission and pull s_snxt back
		 * to (closed) window.  We will enter persist
		 * state below.  If the window didn't close completely,
		 * just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			cb->s_timer[SPPT_REXMT] = 0;
			cb->s_snxt = cb->s_rack;
		}
	}
	if (len > 1)
		sendalot = 1;
	rcv_win = sbspace(&so->so_rcv);

	/*
	 * Send if we owe peer an ACK.
	 */
	if (cb->s_oobflags & SF_SOOB) {
		/*
		 * must transmit this out of band packet
		 */
		cb->s_oobflags &= ~ SF_SOOB;
		sendalot = 1;
		sppstat.spps_sndurg++;
		goto found;
	}
	if (cb->s_flags & SF_ACKNOW)
		goto send;
	if (cb->s_state < TCPS_ESTABLISHED)
		goto send;
	/*
	 * Silly window can't happen in spp.
	 * Code from tcp deleted.
	 */
	if (len)
		goto send;
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * packets or at least 35% of the maximum possible window,
	 * then we want to send a window update to peer.
	 */
	if (rcv_win > 0) {
		u_short delta = 1 + cb->s_alo - cb->s_ack;
		int adv = rcv_win - (delta * cb->s_mtu);

		if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
		    (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
			sppstat.spps_sndwinup++;
			cb->s_flags |= SF_ACKNOW;
			goto send;
		}

	}
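	/*
	 * In the test above, "adv" is roughly the receive-buffer space
	 * that has opened up beyond what was last advertised (free bytes
	 * minus the packets still permitted under the old allocation,
	 * converted to bytes at one mtu per packet), so an update goes
	 * out once a couple of packets' worth of new space, or about a
	 * third of the buffer, becomes available.
	 */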
	/*
	 * Many comments from tcp_output.c are appropriate here
	 * including . . .
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise send a probe.
	 */
	if (so->so_snd.sb_cc && cb->s_timer[SPPT_REXMT] == 0 &&
	    cb->s_timer[SPPT_PERSIST] == 0) {
		cb->s_rxtshift = 0;
		spp_setpersist(cb);
	}
	/*
	 * No reason to send a packet, just return.
	 */
	cb->s_outx = 1;
	return (0);

send:
	/*
	 * Find requested packet.
	 */
	si = 0;
	if (len > 0) {
		cb->s_want = cb->s_snxt;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
				break;
		}
	found:
		if (si) {
			if (si->si_seq == cb->s_snxt)
				cb->s_snxt++;
			else
				sppstat.spps_sndvoid++, si = 0;
		}
	}
	/*
	 * update window
	 */
	if (rcv_win < 0)
		rcv_win = 0;
	alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
	if (SSEQ_LT(alo, cb->s_alo))
		alo = cb->s_alo;

	if (si) {
		/*
		 * must make a copy of this packet for
		 * idp_output to monkey with
		 */
		m = m_copy(dtom(si), 0, (int)M_COPYALL);
		if (m == NULL) {
			return (ENOBUFS);
		}
		m0 = m;
		si = mtod(m, struct spidp *);
		if (SSEQ_LT(si->si_seq, cb->s_smax))
			sppstat.spps_sndrexmitpack++;
		else
			sppstat.spps_sndpack++;
	} else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
		/*
		 * Must send an acknowledgement or a probe
		 */
		if (cb->s_force)
			sppstat.spps_sndprobe++;
		if (cb->s_flags & SF_ACKNOW)
			sppstat.spps_sndacks++;
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 * Align beginning of packet to long to prepend
		 * ifp's on loopback, or NSIP encapsulation for fussy cpu's.
		 */
		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
		m->m_len = sizeof (*si);
		m->m_next = 0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		si->si_seq = cb->s_smax + 1;
		si->si_len = htons(sizeof (*si));
		si->si_cc |= SP_SP;
	} else {
		cb->s_outx = 3;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
		return (0);
	}
	/*
	 * Stuff checksum and output datagram.
	 */
	if ((si->si_cc & SP_SP) == 0) {
		if (cb->s_force != (1 + SPPT_PERSIST) ||
		    cb->s_timer[SPPT_PERSIST] == 0) {
			/*
			 * If this is a new packet and we are not currently
			 * timing anything, time this one.
			 */
			if (SSEQ_LT(cb->s_smax, si->si_seq)) {
				cb->s_smax = si->si_seq;
				if (cb->s_rtt == 0) {
					sppstat.spps_segstimed++;
					cb->s_rtseq = si->si_seq;
					cb->s_rtt = 1;
				}
			}
			/*
			 * Set rexmt timer if not currently set,
			 * Initial value for retransmit timer is smoothed
			 * round-trip time + 2 * round-trip time variance.
			 * Initialize shift counter which is used for backoff
			 * of retransmit time.
			 */
			if (cb->s_timer[SPPT_REXMT] == 0 &&
			    cb->s_snxt != cb->s_rack) {
				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
				if (cb->s_timer[SPPT_PERSIST]) {
					cb->s_timer[SPPT_PERSIST] = 0;
					cb->s_rxtshift = 0;
				}
			}
		} else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
			cb->s_smax = si->si_seq;
		}
	} else if (cb->s_state < TCPS_ESTABLISHED) {
		if (cb->s_rtt == 0)
			cb->s_rtt = 1;		/* Time initial handshake */
		if (cb->s_timer[SPPT_REXMT] == 0)
			cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
	}
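	/*
	 * s_rtt is just a coarse tick count: it is set to 1 when the
	 * timed packet (s_rtseq) goes out, bumped every 500 ms by
	 * spp_slowtimo(), and converted into an srtt sample by
	 * spp_reass() when that sequence number is acknowledged.
	 */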
	{
		/*
		 * Do not request acks when we ack their data packets or
		 * when we do a gratuitous window update.
		 */
		if (((si->si_cc & SP_SP) == 0) || cb->s_force)
			si->si_cc |= SP_SA;
		si->si_seq = htons(si->si_seq);
		si->si_alo = htons(alo);
		si->si_ack = htons(cb->s_ack);

		if (idpcksum) {
			si->si_sum = 0;
			len = ntohs(si->si_len);
			if (len & 1)
				len++;
			si->si_sum = ns_cksum(dtom(si), len);
		} else
			si->si_sum = 0xffff;

		cb->s_outx = 4;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);

		if (so->so_options & SO_DONTROUTE)
			error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
		else
			error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
	}
	if (error) {
		return (error);
	}
	sppstat.spps_sndtotal++;
	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	cb->s_force = 0;
	cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
	if (SSEQ_GT(alo, cb->s_alo))
		cb->s_alo = alo;
	if (sendalot)
		goto again;
	cb->s_outx = 5;
	return (0);
}

int spp_do_persist_panics = 0;

spp_setpersist(cb)
	register struct sppcb *cb;
{
	register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
	extern int spp_backoff[];

	if (cb->s_timer[SPPT_REXMT] && spp_do_persist_panics)
		panic("spp_output REXMT");
	/*
	 * Start/restart persistence timer.
	 */
	SPPT_RANGESET(cb->s_timer[SPPT_PERSIST],
	    t*spp_backoff[cb->s_rxtshift],
	    SPPTV_PERSMIN, SPPTV_PERSMAX);
	if (cb->s_rxtshift < SPP_MAXRXTSHIFT)
		cb->s_rxtshift++;
}
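/*
 * The socket options handled below are set and read from user level
 * with [gs]etsockopt() at protocol level NSPROTO_SPP; for instance,
 * something along these lines turns header passing on for a packet-
 * interface socket (sketch only, error handling omitted, "s" and "hdr"
 * are a hypothetical descriptor and header):
 *
 *	int on = 1;
 *	setsockopt(s, NSPROTO_SPP, SO_HEADERS_ON_INPUT, (char *)&on,
 *	    sizeof(on));
 *	setsockopt(s, NSPROTO_SPP, SO_DEFAULT_HEADERS, (char *)&hdr,
 *	    sizeof(struct sphdr));
 */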
/*ARGSUSED*/
spp_ctloutput(req, so, level, name, value)
	int req;
	struct socket *so;
	int name;
	struct mbuf **value;
{
	register struct mbuf *m;
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int mask, error = 0;

	if (level != NSPROTO_SPP) {
		/* This will have to be changed when we do more general
		   stacking of protocols */
		return (idp_ctloutput(req, so, level, name, value));
	}
	if (nsp == NULL) {
		error = EINVAL;
		goto release;
	} else
		cb = nstosppcb(nsp);

	switch (req) {

	case PRCO_GETOPT:
		if (value == NULL)
			return (EINVAL);
		m = m_get(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		switch (name) {

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto get_flags;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		get_flags:
			m->m_len = sizeof(short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_flags & mask;
			break;

		case SO_MTU:
			m->m_len = sizeof(u_short);
			m->m_off = MMAXOFF - sizeof(short);
			*mtod(m, short *) = cb->s_mtu;
			break;

		case SO_LAST_HEADER:
			m->m_len = sizeof(struct sphdr);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_rhdr;
			break;

		case SO_DEFAULT_HEADERS:
			m->m_len = sizeof(struct sphdr);
			m->m_off = MMAXOFF - sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_shdr;
			break;

		default:
			error = EINVAL;
		}
		*value = m;
		break;

	case PRCO_SETOPT:
		if (value == 0 || *value == 0) {
			error = EINVAL;
			break;
		}
		switch (name) {
			int *ok;

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto set_head;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		set_head:
			if (cb->s_flags & SF_PI) {
				ok = mtod(*value, int *);
				if (*ok)
					cb->s_flags |= mask;
				else
					cb->s_flags &= ~mask;
			} else error = EINVAL;
			break;

		case SO_MTU:
			cb->s_mtu = *(mtod(*value, u_short *));
			break;

		case SO_DEFAULT_HEADERS:
			{
				register struct sphdr *sp
					= mtod(*value, struct sphdr *);
				cb->s_dt = sp->sp_dt;
				cb->s_cc = sp->sp_cc & SP_EM;
			}
			break;

		default:
			error = EINVAL;
		}
		m_freem(*value);
		break;
	}
release:
	return (error);
}

/*ARGSUSED*/
spp_usrreq(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int s = splnet();
	int error = 0, ostate;
	struct mbuf *mm;
	register struct sockbuf *sb;

	if (req == PRU_CONTROL)
		return (ns_control(so, (int)m, (caddr_t)nam,
			(struct ifnet *)rights));
	if (rights && rights->m_len) {
		error = EINVAL;
		goto release;
	}
	if (nsp == NULL) {
		if (req != PRU_ATTACH) {
			error = EINVAL;
			goto release;
		}
	} else
		cb = nstosppcb(nsp);

	ostate = cb ? cb->s_state : 0;

	switch (req) {

	case PRU_ATTACH:
		if (nsp != NULL) {
			error = EISCONN;
			break;
		}
		error = ns_pcballoc(so, &nspcb);
		if (error)
			break;
		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
			error = soreserve(so, (u_long) 3072, (u_long) 3072);
			if (error)
				break;
		}
		nsp = sotonspcb(so);

		mm = m_getclr(M_DONTWAIT, MT_PCB);
		sb = &so->so_snd;

		if (mm == NULL) {
			error = ENOBUFS;
			break;
		}
		cb = mtod(mm, struct sppcb *);
		mm = m_getclr(M_DONTWAIT, MT_HEADER);
		if (mm == NULL) {
			(void) m_free(dtom(cb));
			error = ENOBUFS;
			break;
		}
		cb->s_idp = mtod(mm, struct idp *);
		cb->s_state = TCPS_LISTEN;
		cb->s_smax = -1;
		cb->s_swl1 = -1;
		cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
		cb->s_nspcb = nsp;
		cb->s_mtu = 576 - sizeof (struct spidp);
		cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
		cb->s_ssthresh = cb->s_cwnd;
		cb->s_cwmx = sb->sb_mbmax * CUNIT /
				(2 * sizeof (struct spidp));
		/* Above is recomputed when connecting to account
		   for changed buffering or mtu's */
		cb->s_rtt = SPPTV_SRTTBASE;
		cb->s_rttvar = SPPTV_SRTTDFLT << 2;
		SPPT_RANGESET(cb->s_rxtcur,
		    ((SPPTV_SRTTBASE >> 2) + (SPPTV_SRTTDFLT << 2)) >> 1,
		    SPPTV_MIN, SPPTV_REXMTMAX);
		nsp->nsp_pcb = (caddr_t) cb;
		break;

	case PRU_DETACH:
		if (nsp == NULL) {
			error = ENOTCONN;
			break;
		}
		if (cb->s_state > TCPS_LISTEN)
			cb = spp_disconnect(cb);
		else
			cb = spp_close(cb);
		break;

	case PRU_BIND:
		error = ns_pcbbind(nsp, nam);
		break;

	case PRU_LISTEN:
		if (nsp->nsp_lport == 0)
			error = ns_pcbbind(nsp, (struct mbuf *)0);
		if (error == 0)
			cb->s_state = TCPS_LISTEN;
		break;

	/*
	 * Initiate connection to peer.
	 * Enter SYN_SENT state, and mark socket as connecting.
	 * Start keep-alive timer, set up prototype header,
	 * and send initial system packet requesting connection.
	 */
	case PRU_CONNECT:
		if (nsp->nsp_lport == 0) {
			error = ns_pcbbind(nsp, (struct mbuf *)0);
			if (error)
				break;
		}
		error = ns_pcbconnect(nsp, nam);
		if (error)
			break;
		soisconnecting(so);
		sppstat.spps_connattempt++;
		cb->s_state = TCPS_SYN_SENT;
		cb->s_did = 0;
		spp_template(cb);
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		cb->s_force = 1 + SPPTV_KEEP;
		/*
		 * Other party is required to respond to
		 * the port I send from, but he is not
		 * required to answer from where I am sending to,
		 * so allow wildcarding.
		 * The original port I am sending to is still saved in
		 * cb->s_dport.
		 */
		nsp->nsp_fport = 0;
		error = spp_output(cb, (struct mbuf *) 0);
		break;

	case PRU_CONNECT2:
		error = EOPNOTSUPP;
		break;

	/*
	 * We may decide later to implement connection closing
	 * handshaking at the spp level optionally.
	 * Here is the hook to do it:
	 */
	case PRU_DISCONNECT:
		cb = spp_disconnect(cb);
		break;

	/*
	 * Accept a connection.  Essentially all the work is
	 * done at higher levels; just return the address
	 * of the peer, storing through addr.
	 */
	case PRU_ACCEPT: {
		struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);

		nam->m_len = sizeof (struct sockaddr_ns);
		sns->sns_family = AF_NS;
		sns->sns_addr = nsp->nsp_faddr;
		break;
		}

	case PRU_SHUTDOWN:
		socantsendmore(so);
		cb = spp_usrclosed(cb);
		if (cb)
			error = spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * After a receive, possibly send acknowledgment
	 * updating allocation.
	 */
	case PRU_RCVD:
		cb->s_flags |= SF_RVD;
		(void) spp_output(cb, (struct mbuf *) 0);
		cb->s_flags &= ~SF_RVD;
		break;

	case PRU_ABORT:
		(void) spp_drop(cb, ECONNABORTED);
		break;

	case PRU_SENSE:
	case PRU_CONTROL:
		m = NULL;
		error = EOPNOTSUPP;
		break;

	case PRU_RCVOOB:
		if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
		    (so->so_state & SS_RCVATMARK)) {
			m->m_len = 1;
			*mtod(m, caddr_t) = cb->s_iobc;
			break;
		}
		error = EINVAL;
		break;

	case PRU_SENDOOB:
		if (sbspace(&so->so_snd) < -512) {
			error = ENOBUFS;
			break;
		}
		cb->s_oobflags |= SF_SOOB;
		/* fall into */
	case PRU_SEND:
		error = spp_output(cb, m);
		m = NULL;
		break;

	case PRU_SOCKADDR:
		ns_setsockaddr(nsp, nam);
		break;

	case PRU_PEERADDR:
		ns_setpeeraddr(nsp, nam);
		break;

	case PRU_SLOWTIMO:
		cb = spp_timers(cb, (int)nam);
		req |= ((int)nam) << 8;
		break;

	case PRU_FASTTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		error = EOPNOTSUPP;
		break;

	default:
		panic("sp_usrreq");
	}
	if (cb && (so->so_options & SO_DEBUG || traceallspps))
		spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
release:
	if (m != NULL)
		m_freem(m);
	splx(s);
	return (error);
}

spp_usrreq_sp(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	int error = spp_usrreq(so, req, m, nam, rights);

	if (req == PRU_ATTACH && error == 0) {
		struct nspcb *nsp = sotonspcb(so);
		((struct sppcb *)nsp->nsp_pcb)->s_flags |=
					(SF_HI | SF_HO | SF_PI);
	}
	return (error);
}
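
/*
 * spp_usrreq_sp is presumably the pr_usrreq entry used for the packet-
 * oriented flavor of SPP (the protocol switch decides which entry is
 * used); it differs from spp_usrreq only in turning on header passing
 * and the packet interface (SF_HI | SF_HO | SF_PI) at attach time.
 */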

/*
 * Create template to be used to send spp packets on a connection.
 * Called after host entry created, fills
 * in a skeletal spp header (choosing connection id),
 * minimizing the amount of work necessary when the connection is used.
 */
spp_template(cb)
	register struct sppcb *cb;
{
	register struct nspcb *nsp = cb->s_nspcb;
	register struct idp *idp = cb->s_idp;
	register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);

	idp->idp_pt = NSPROTO_SPP;
	idp->idp_sna = nsp->nsp_laddr;
	idp->idp_dna = nsp->nsp_faddr;
	cb->s_sid = htons(spp_iss);
	spp_iss += SPP_ISSINCR/2;
	cb->s_alo = 1;
	cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
	cb->s_ssthresh = cb->s_cwnd; /* Try to expand fast to full complement
					of large packets */
	cb->s_cwmx = (sb->sb_mbmax * CUNIT) / (2 * sizeof(struct spidp));
	cb->s_cwmx = MAX(cb->s_cwmx, cb->s_cwnd);
		/* But allow for lots of little packets as well */
}
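
/*
 * With the defaults set up in PRU_ATTACH (3072-byte socket buffers and
 * an mtu of 576 minus the spidp header, roughly 534 bytes), the initial
 * cwnd works out to about five packets' worth, and ssthresh matches it
 * so the window can open quickly toward s_cwmx.
 */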

/*
 * Close a SPP control block:
 *	discard spp control block itself
 *	discard ns protocol control block
 *	wake up any sleepers
 */
struct sppcb *
spp_close(cb)
	register struct sppcb *cb;
{
	register struct spidp_q *s;
	struct nspcb *nsp = cb->s_nspcb;
	struct socket *so = nsp->nsp_socket;
	register struct mbuf *m;

	s = cb->s_q.si_next;
	while (s != &(cb->s_q)) {
		s = s->si_next;
		m = dtom(s->si_prev);
		remque(s->si_prev);
		m_freem(m);
	}
	(void) m_free(dtom(cb->s_idp));
	(void) m_free(dtom(cb));
	nsp->nsp_pcb = 0;
	soisdisconnected(so);
	ns_pcbdetach(nsp);
	sppstat.spps_closed++;
	return ((struct sppcb *)0);
}
/*
 * Someday we may do level 3 handshaking
 * to close a connection or send a xerox style error.
 * For now, just close.
 */
struct sppcb *
spp_usrclosed(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
struct sppcb *
spp_disconnect(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
/*
 * Drop connection, reporting
 * the specified error.
 */
struct sppcb *
spp_drop(cb, errno)
	register struct sppcb *cb;
	int errno;
{
	struct socket *so = cb->s_nspcb->nsp_socket;

	/*
	 * someday, in the xerox world
	 * we will generate error protocol packets
	 * announcing that the socket has gone away.
	 */
	if (TCPS_HAVERCVDSYN(cb->s_state)) {
		sppstat.spps_drops++;
		cb->s_state = TCPS_CLOSED;
		/*(void) tcp_output(cb);*/
	} else
		sppstat.spps_conndrops++;
	so->so_error = errno;
	return (spp_close(cb));
}

spp_abort(nsp)
	struct nspcb *nsp;
{

	(void) spp_close((struct sppcb *)nsp->nsp_pcb);
}

int	spp_backoff[SPP_MAXRXTSHIFT+1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
/*
 * Fast timeout routine for processing delayed acks
 */
spp_fasttimo()
{
	register struct nspcb *nsp;
	register struct sppcb *cb;
	int s = splnet();

	nsp = nspcb.nsp_next;
	if (nsp)
	for (; nsp != &nspcb; nsp = nsp->nsp_next)
		if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
		    (cb->s_flags & SF_DELACK)) {
			cb->s_flags &= ~SF_DELACK;
			cb->s_flags |= SF_ACKNOW;
			sppstat.spps_delack++;
			(void) spp_output(cb, (struct mbuf *) 0);
		}
	splx(s);
}

/*
 * spp protocol timeout routine called every 500 ms.
 * Updates the timers in all active pcb's and
 * causes finite state machine actions if timers expire.
 */
spp_slowtimo()
{
	register struct nspcb *ip, *ipnxt;
	register struct sppcb *cb;
	int s = splnet();
	register int i;

	/*
	 * Search through tcb's and update active timers.
	 */
	ip = nspcb.nsp_next;
	if (ip == 0) {
		splx(s);
		return;
	}
	while (ip != &nspcb) {
		cb = nstosppcb(ip);
		ipnxt = ip->nsp_next;
		if (cb == 0)
			goto tpgone;
		for (i = 0; i < SPPT_NTIMERS; i++) {
			if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
				(void) spp_usrreq(cb->s_nspcb->nsp_socket,
				    PRU_SLOWTIMO, (struct mbuf *)0,
				    (struct mbuf *)i, (struct mbuf *)0);
				if (ipnxt->nsp_prev != ip)
					goto tpgone;
			}
		}
		cb->s_idle++;
		if (cb->s_rtt)
			cb->s_rtt++;
tpgone:
		ip = ipnxt;
	}
	spp_iss += SPP_ISSINCR/PR_SLOWHZ;		/* increment iss */
	splx(s);
}
/*
 * SPP timer processing.
 */
struct sppcb *
spp_timers(cb, timer)
	register struct sppcb *cb;
	int timer;
{
	long rexmt;
	int win;

	cb->s_force = 1 + timer;
	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  TCP deletes connection
	 * control block.
	 */
	case SPPT_2MSL:
		printf("spp: SPPT_2MSL went off for no reason\n");
		cb->s_timer[timer] = 0;
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one packet.
	 */
	case SPPT_REXMT:
		if (++cb->s_rxtshift > SPP_MAXRXTSHIFT) {
			cb->s_rxtshift = SPP_MAXRXTSHIFT;
			sppstat.spps_timeoutdrop++;
			cb = spp_drop(cb, ETIMEDOUT);
			break;
		}
		sppstat.spps_rexmttimeo++;
		rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
		rexmt *= spp_backoff[cb->s_rxtshift];
		SPPT_RANGESET(cb->s_rxtcur, rexmt, SPPTV_MIN, SPPTV_REXMTMAX);
		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
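		/*
		 * That is, each timeout roughly doubles the interval
		 * (1, 2, 4, ... up to 64 times the base rto of about
		 * srtt + 2 * rttvar), with the result always clamped
		 * to the SPPTV_MIN .. SPPTV_REXMTMAX range.
		 */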
		/*
		 * If we have backed off fairly far, our srtt
		 * estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (cb->s_rxtshift > SPP_MAXRXTSHIFT / 4) {
			cb->s_rttvar += (cb->s_srtt >> 2);
			cb->s_srtt = 0;
		}
		cb->s_snxt = cb->s_rack;
		/*
		 * If timing a packet, stop the timer.
		 */
		cb->s_rtt = 0;
		/*
		 * See very long discussion in tcp_timer.c about congestion
		 * window and ssthresh
		 */
		win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
		if (win < 2)
			win = 2;
		cb->s_cwnd = CUNIT;
		cb->s_ssthresh = win * CUNIT;
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a probe to be sent.
	 */
	case SPPT_PERSIST:
		sppstat.spps_persisttimeo++;
		spp_setpersist(cb);
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case SPPT_KEEP:
		sppstat.spps_keeptimeo++;
		if (cb->s_state < TCPS_ESTABLISHED)
			goto dropit;
		if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
			if (cb->s_idle >= SPPTV_MAXIDLE)
				goto dropit;
			sppstat.spps_keepprobe++;
			(void) spp_output(cb, (struct mbuf *) 0);
		} else
			cb->s_idle = 0;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		break;
	dropit:
		sppstat.spps_keepdrops++;
		cb = spp_drop(cb, ETIMEDOUT);
		break;
	}
	return (cb);
}
#ifndef lint
int SppcbSize = sizeof (struct sppcb);
int NspcbSize = sizeof (struct nspcb);
#endif /* lint */