xref: /csrg-svn/sys/netns/spp_usrreq.c (revision 34856)
1 /*
2  * Copyright (c) 1984, 1985, 1986, 1987 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)spp_usrreq.c	7.8 (Berkeley) 06/29/88
18  */
19 
20 #include "param.h"
21 #include "systm.h"
22 #include "dir.h"
23 #include "user.h"
24 #include "mbuf.h"
25 #include "protosw.h"
26 #include "socket.h"
27 #include "socketvar.h"
28 #include "errno.h"
29 
30 #include "../net/if.h"
31 #include "../net/route.h"
32 #include "../netinet/tcp_fsm.h"
33 
34 #include "ns.h"
35 #include "ns_pcb.h"
36 #include "idp.h"
37 #include "idp_var.h"
38 #include "ns_error.h"
39 #include "sp.h"
40 #include "spidp.h"
41 #include "spp_timer.h"
42 #include "spp_var.h"
43 #include "spp_debug.h"
44 
45 /*
46  * SP protocol implementation.
47  */
48 spp_init()
49 {
50 
51 	spp_iss = 1; /* WRONG !! should fish it out of TODR */
52 }
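/*
 * Debugging and tuning knobs:
 *	spp_savesi	copy of the last input header, kept for spp_trace.
 *	traceallspps	trace every connection, not just those with SO_DEBUG.
 *	spp_hardnosed	reset connection requests that arrive on a listening
 *			socket with a nonzero destination id or sequence
 *			(i.e. misdirected packets from a stale conversation).
 *	spp_use_delack	delay acknowledgements (SF_DELACK) rather than
 *			acking each data packet immediately (SF_ACKNOW).
 */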
53 struct spidp spp_savesi;
54 int traceallspps = 0;
55 extern int sppconsdebug;
56 int spp_hardnosed;
57 int spp_use_delack = 0;
58 
59 /*ARGSUSED*/
60 spp_input(m, nsp, ifp)
61 	register struct mbuf *m;
62 	register struct nspcb *nsp;
63 	struct ifnet *ifp;
64 {
65 	register struct sppcb *cb;
66 	register struct spidp *si = mtod(m, struct spidp *);
67 	register struct socket *so;
68 	short ostate;
69 	int dropsocket = 0;
70 
71 
72 	sppstat.spps_rcvtotal++;
73 	if (nsp == 0) {
74 		panic("No nspcb in spp_input\n");
75 		return;
76 	}
77 
78 	cb = nstosppcb(nsp);
79 	if (cb == 0) goto bad;
80 
81 	if (m->m_len < sizeof(*si)) {
82 		if ((m = m_pullup(m, sizeof(*si))) == 0) {
83 			sppstat.spps_rcvshort++;
84 			return;
85 		}
86 		si = mtod(m, struct spidp *);
87 	}
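	/*
	 * The sequence fields examined below are converted to host byte
	 * order here; they are swapped back at ``dropwithreset'' before
	 * an error packet is generated.
	 */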
88 	si->si_seq = ntohs(si->si_seq);
89 	si->si_ack = ntohs(si->si_ack);
90 	si->si_alo = ntohs(si->si_alo);
91 
92 	so = nsp->nsp_socket;
93 	if (so->so_options & SO_DEBUG || traceallspps) {
94 		ostate = cb->s_state;
95 		spp_savesi = *si;
96 	}
97 	if (so->so_options & SO_ACCEPTCONN) {
98 		struct sppcb *ocb = cb;
99 
100 		so = sonewconn(so);
101 		if (so == 0) {
102 			goto drop;
103 		}
104 		/*
105 		 * This is ugly, but ....
106 		 *
107 		 * Mark socket as temporary until we're
108 		 * committed to keeping it.  The code at
109 		 * ``drop'' and ``dropwithreset'' checks the
110 		 * flag dropsocket to see if the temporary
111 		 * socket created here should be discarded.
112 		 * We mark the socket as discardable until
113 		 * we're committed to it below in TCPS_LISTEN.
114 		 */
115 		dropsocket++;
116 		nsp = (struct nspcb *)so->so_pcb;
117 		nsp->nsp_laddr = si->si_dna;
118 		cb = nstosppcb(nsp);
119 		cb->s_mtu = ocb->s_mtu;		/* preserve sockopts */
120 		cb->s_flags = ocb->s_flags;	/* preserve sockopts */
121 		cb->s_state = TCPS_LISTEN;
122 	}
123 
124 	/*
125 	 * Packet received on connection.
126 	 * Reset idle time and keep-alive timer.
127 	 */
128 	cb->s_idle = 0;
129 	cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
130 
131 	switch (cb->s_state) {
132 
133 	case TCPS_LISTEN:{
134 		struct mbuf *am;
135 		register struct sockaddr_ns *sns;
136 		struct ns_addr laddr;
137 
138 		/*
139 		 * If somebody here was carrying on a conversation
140 		 * and went away, and his pen pal thinks he can
141 		 * still talk, we get the misdirected packet.
142 		 */
143 		if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
144 			spp_istat.gonawy++;
145 			goto dropwithreset;
146 		}
147 		am = m_get(M_DONTWAIT, MT_SONAME);
148 		if (am == NULL)
149 			goto drop;
150 		am->m_len = sizeof (struct sockaddr_ns);
151 		sns = mtod(am, struct sockaddr_ns *);
152 		sns->sns_family = AF_NS;
153 		sns->sns_addr = si->si_sna;
154 		laddr = nsp->nsp_laddr;
155 		if (ns_nullhost(laddr))
156 			nsp->nsp_laddr = si->si_dna;
157 		if (ns_pcbconnect(nsp, am)) {
158 			nsp->nsp_laddr = laddr;
159 			(void) m_free(am);
160 			spp_istat.noconn++;
161 			goto drop;
162 		}
163 		(void) m_free(am);
164 		spp_template(cb);
165 		dropsocket = 0;		/* committed to socket */
166 		cb->s_did = si->si_sid;
167 		cb->s_rack = si->si_ack;
168 		cb->s_ralo = si->si_alo;
169 #define THREEWAYSHAKE
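		/*
		 * With THREEWAYSHAKE defined, a passive open takes three
		 * packets: the listener answers the connection request,
		 * moves to SYN_RECEIVED, and only becomes ESTABLISHED when
		 * the peer acknowledges that answer (the SYN_RECEIVED case
		 * below).  Without it, the listener would go directly to
		 * ESTABLISHED here.
		 */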
170 #ifdef THREEWAYSHAKE
171 		cb->s_state = TCPS_SYN_RECEIVED;
172 		cb->s_force = 1 + SPPT_KEEP;
173 		sppstat.spps_accepts++;
174 		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
175 		}
176 		break;
177 	/*
178 	 * This state means that we have heard a response
179 	 * to our acceptance of their connection.
180 	 * It is probably logically unnecessary in this
181 	 * implementation.
182 	 */
183 	 case TCPS_SYN_RECEIVED: {
184 		if (si->si_did!=cb->s_sid) {
185 			spp_istat.wrncon++;
186 			goto drop;
187 		}
188 #endif
189 		nsp->nsp_fport =  si->si_sport;
190 		cb->s_timer[SPPT_REXMT] = 0;
191 		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
192 		soisconnected(so);
193 		cb->s_state = TCPS_ESTABLISHED;
194 		sppstat.spps_accepts++;
195 		}
196 		break;
197 
198 	/*
199 	 * This state means that we have gotten a response
200 	 * to our attempt to establish a connection.
201 	 * We fill in the data from the other side,
202 	 * telling us which port to respond to, instead of the well-
203 	 * known one we might have sent to in the first place.
204 	 * We also require that this is a response to our
205 	 * connection id.
206 	 */
207 	case TCPS_SYN_SENT:
208 		if (si->si_did!=cb->s_sid) {
209 			spp_istat.notme++;
210 			goto drop;
211 		}
212 		sppstat.spps_connects++;
213 		cb->s_did = si->si_sid;
214 		cb->s_rack = si->si_ack;
215 		cb->s_ralo = si->si_alo;
216 		cb->s_dport = nsp->nsp_fport =  si->si_sport;
217 		cb->s_timer[SPPT_REXMT] = 0;
218 		cb->s_flags |= SF_ACKNOW;
219 		soisconnected(so);
220 		cb->s_state = TCPS_ESTABLISHED;
221 		/* Use roundtrip time of connection request for initial rtt */
222 		if (cb->s_rtt) {
223 			cb->s_srtt = cb->s_rtt << 3;
224 			cb->s_rttvar = cb->s_rtt << 1;
225 			SPPT_RANGESET(cb->s_rxtcur,
226 			    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
227 			    SPPTV_MIN, SPPTV_REXMTMAX);
228 			cb->s_rtt = 0;
229 		}
230 	}
231 	if (so->so_options & SO_DEBUG || traceallspps)
232 		spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);
233 
234 	m->m_len -= sizeof (struct idp);
235 	m->m_off += sizeof (struct idp);
236 
237 	if (spp_reass(cb, si)) {
238 		(void) m_freem(m);
239 	}
240 	if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
241 		(void) spp_output(cb, (struct mbuf *)0);
242 	cb->s_flags &= ~(SF_WIN|SF_RXT);
243 	return;
244 
245 dropwithreset:
246 	if (dropsocket)
247 		(void) soabort(so);
248 	si->si_seq = ntohs(si->si_seq);
249 	si->si_ack = ntohs(si->si_ack);
250 	si->si_alo = ntohs(si->si_alo);
251 	ns_error(dtom(si), NS_ERR_NOSOCK, 0);
252 	if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
253 		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
254 	return;
255 
256 drop:
257 bad:
258 	if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
259             traceallspps)
260 		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
261 	m_freem(m);
262 }
263 
264 int spprexmtthresh = 3;
265 
266 /*
267  * This is structurally similar to the tcp reassembly routine
268  * but its function is somewhat different:  It merely queues
269  * packets up, and suppresses duplicates.
270  */
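/*
 * Returns 0 if the packet has been consumed (queued on cb->s_q or handed
 * to ns_error); returns 1 if the caller should free it (duplicate, system
 * packet, or refused).
 */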
271 spp_reass(cb, si)
272 register struct sppcb *cb;
273 register struct spidp *si;
274 {
275 	register struct spidp_q *q;
276 	register struct mbuf *m;
277 	register struct socket *so = cb->s_nspcb->nsp_socket;
278 	char packetp = cb->s_flags & SF_HI;
279 	int incr;
280 	char wakeup = 0;
281 
282 	if (si == SI(0))
283 		goto present;
284 	/*
285 	 * Update our news from them.
286 	 */
287 	if (si->si_cc & SP_SA)
288 		cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
289 	if (SSEQ_GT(si->si_alo, cb->s_ralo))
290 		cb->s_flags |= SF_WIN;
291 	if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
292 		if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
293 			sppstat.spps_rcvdupack++;
294 			/*
295 			 * If this is a completely duplicate ack
296 			 * and other conditions hold, we assume
297 			 * a packet has been dropped and retransmit
298 			 * it exactly as in tcp_input().
299 			 */
300 			if (si->si_ack != cb->s_rack ||
301 			    si->si_alo != cb->s_ralo)
302 				cb->s_dupacks = 0;
303 			else if (++cb->s_dupacks == spprexmtthresh) {
304 				u_short onxt = cb->s_snxt;
305 				int cwnd = cb->s_cwnd;
306 
307 				cb->s_snxt = si->si_ack;
308 				cb->s_cwnd = CUNIT;
309 				cb->s_force = 1 + SPPT_REXMT;
310 				(void) spp_output(cb, (struct mbuf *)0);
311 				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
312 				cb->s_rtt = 0;
313 				if (cwnd >= 4 * CUNIT)
314 					cb->s_cwnd = cwnd / 2;
315 				if (SSEQ_GT(onxt, cb->s_snxt))
316 					cb->s_snxt = onxt;
317 				return (1);
318 			}
319 		} else
320 			cb->s_dupacks = 0;
321 		goto update_window;
322 	}
323 	cb->s_dupacks = 0;
324 	/*
325 	 * If our correspondent acknowledges data we haven't sent,
326 	 * TCP would drop the packet after acking.  We'll be a little
327 	 * more permissive.
328 	 */
329 	if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
330 		sppstat.spps_rcvacktoomuch++;
331 		si->si_ack = cb->s_smax + 1;
332 	}
333 	sppstat.spps_rcvackpack++;
334 	/*
335 	 * If transmit timer is running and timed sequence
336 	 * number was acked, update smoothed round trip time.
337 	 * See discussion of algorithm in tcp_input.c
338 	 */
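	/*
	 * s_srtt is kept scaled by 8 and s_rttvar (mean deviation) by 4,
	 * so the retransmit value ((s_srtt >> 2) + s_rttvar) >> 1 set
	 * below works out to srtt + 2 * (mean deviation) in slow-timeout
	 * ticks.
	 */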
339 	if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
340 		sppstat.spps_rttupdated++;
341 		if (cb->s_srtt != 0) {
342 			register short delta;
343 			delta = cb->s_rtt - (cb->s_srtt >> 3);
344 			if ((cb->s_srtt += delta) <= 0)
345 				cb->s_srtt = 1;
346 			if (delta < 0)
347 				delta = -delta;
348 			delta -= (cb->s_rttvar >> 2);
349 			if ((cb->s_rttvar += delta) <= 0)
350 				cb->s_rttvar = 1;
351 		} else {
352 			/*
353 			 * No rtt measurement yet
354 			 */
355 			cb->s_srtt = cb->s_rtt << 3;
356 			cb->s_rttvar = cb->s_rtt << 1;
357 		}
358 		cb->s_rtt = 0;
359 		cb->s_rxtshift = 0;
360 		SPPT_RANGESET(cb->s_rxtcur,
361 			((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
362 			SPPTV_MIN, SPPTV_REXMTMAX);
363 	}
364 	/*
365 	 * If all outstanding data is acked, stop retransmit
366 	 * timer and remember to restart (more output or persist).
367 	 * If there is more data to be acked, restart retransmit
368 	 * timer, using current (possibly backed-off) value;
369 	 */
370 	if (si->si_ack == cb->s_smax + 1) {
371 		cb->s_timer[SPPT_REXMT] = 0;
372 		cb->s_flags |= SF_RXT;
373 	} else if (cb->s_timer[SPPT_PERSIST] == 0)
374 		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
375 	/*
376 	 * When new data is acked, open the congestion window.
377 	 * If the window gives us less than ssthresh packets
378 	 * in flight, open exponentially (maxseg at a time).
379 	 * Otherwise open linearly (maxseg^2 / cwnd at a time).
380 	 */
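	/*
	 * (s_cwnd, s_ssthresh and s_cwmx are scaled so that CUNIT
	 * corresponds to one packet; spp_output uses s_cwnd/CUNIT as the
	 * congestion-window packet count.)
	 */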
381 	incr = CUNIT;
382 	if (cb->s_cwnd > cb->s_ssthresh)
383 		incr = MAX(incr * incr / cb->s_cwnd, 1);
384 	cb->s_cwnd = MIN(cb->s_cwnd + incr, cb->s_cwmx);
385 	/*
386 	 * Trim Acked data from output queue.
387 	 */
388 	while ((m = so->so_snd.sb_mb) != NULL) {
389 		if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
390 			sbdroprecord(&so->so_snd);
391 		else
392 			break;
393 	}
394 	if ((so->so_snd.sb_flags & SB_WAIT) || so->so_snd.sb_sel)
395 		 sowwakeup(so);
396 	cb->s_rack = si->si_ack;
397 update_window:
398 	if (SSEQ_LT(cb->s_snxt, cb->s_rack))
399 		cb->s_snxt = cb->s_rack;
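	/*
	 * Update the send window only on newer information: s_swl1/s_swl2
	 * record the seq/ack of the packet last used for a window update,
	 * so an old, reordered packet cannot move the allocation backwards.
	 */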
400 	if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
401 	    (SSEQ_LT(cb->s_swl2, si->si_ack) ||
402 	     cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
403 		/* keep track of pure window updates */
404 		if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
405 		    && SSEQ_LT(cb->s_ralo, si->si_alo)) {
406 			sppstat.spps_rcvwinupd++;
407 			sppstat.spps_rcvdupack--;
408 		}
409 		cb->s_ralo = si->si_alo;
410 		cb->s_swl1 = si->si_seq;
411 		cb->s_swl2 = si->si_ack;
412 		cb->s_swnd = (1 + si->si_alo - si->si_ack);
413 		if (cb->s_swnd > cb->s_smxw)
414 			cb->s_smxw = cb->s_swnd;
415 		cb->s_flags |= SF_WIN;
416 	}
417 	/*
418 	 * If this packet number is higher than what we
419 	 * have allocated, refuse it, unless urgent.
420 	 */
421 	if (SSEQ_GT(si->si_seq, cb->s_alo)) {
422 		if (si->si_cc & SP_SP) {
423 			sppstat.spps_rcvwinprobe++;
424 			return (1);
425 		} else
426 			sppstat.spps_rcvpackafterwin++;
427 		if (si->si_cc & SP_OB) {
428 			if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
429 				ns_error(dtom(si), NS_ERR_FULLUP, 0);
430 				return (0);
431 			} /* else queue this packet; */
432 		} else {
433 			/*register struct socket *so = cb->s_nspcb->nsp_socket;
434 			if (so->so_state && SS_NOFDREF) {
435 				ns_error(dtom(si), NS_ERR_NOSOCK, 0);
436 				(void)spp_close(cb);
437 			} else
438 				       would crash system*/
439 			spp_istat.notyet++;
440 			ns_error(dtom(si), NS_ERR_FULLUP, 0);
441 			return (0);
442 		}
443 	}
444 	/*
445 	 * If this is a system packet, we don't need to
446 	 * queue it up, and won't update acknowledge #
447 	 */
448 	if (si->si_cc & SP_SP) {
449 		return (1);
450 	}
451 	/*
452 	 * We have already seen this packet, so drop.
453 	 */
454 	if (SSEQ_LT(si->si_seq, cb->s_ack)) {
455 		spp_istat.bdreas++;
456 		sppstat.spps_rcvduppack++;
457 		if (si->si_seq == cb->s_ack - 1)
458 			spp_istat.lstdup++;
459 		return (1);
460 	}
461 	/*
462 	 * Loop through all packets queued up to insert in
463 	 * appropriate sequence.
464 	 */
465 	for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
466 		if (si->si_seq == SI(q)->si_seq) {
467 			sppstat.spps_rcvduppack++;
468 			return (1);
469 		}
470 		if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
471 			sppstat.spps_rcvoopack++;
472 			break;
473 		}
474 	}
475 	insque(si, q->si_prev);
476 	/*
477 	 * If this packet is urgent, inform process
478 	 */
479 	if (si->si_cc & SP_OB) {
480 		cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
481 		sohasoutofband(so);
482 		cb->s_oobflags |= SF_IOOB;
483 	}
484 present:
485 #define SPINC sizeof(struct sphdr)
486 	/*
487 	 * Loop through all packets queued up to update acknowledge
488 	 * number, and present all acknowledged data to user;
489 	 * If in packet interface mode, show packet headers.
490 	 */
491 	for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
492 		  if (SI(q)->si_seq == cb->s_ack) {
493 			cb->s_ack++;
494 			m = dtom(q);
495 			if (SI(q)->si_cc & SP_OB) {
496 				cb->s_oobflags &= ~SF_IOOB;
497 				if (so->so_rcv.sb_cc)
498 					so->so_oobmark = so->so_rcv.sb_cc;
499 				else
500 					so->so_state |= SS_RCVATMARK;
501 			}
502 			q = q->si_prev;
503 			remque(q->si_next);
504 			wakeup = 1;
505 			sppstat.spps_rcvpack++;
506 			if (packetp) {
507 				sbappendrecord(&so->so_rcv, m);
508 			} else {
509 				cb->s_rhdr = *mtod(m, struct sphdr *);
510 				m->m_off += SPINC;
511 				m->m_len -= SPINC;
512 				sbappend(&so->so_rcv, m);
513 			}
514 		  } else
515 			break;
516 	}
517 	if (wakeup) sorwakeup(so);
518 	return (0);
519 }
520 
521 spp_ctlinput(cmd, arg)
522 	int cmd;
523 	caddr_t arg;
524 {
525 	struct ns_addr *na;
526 	extern u_char nsctlerrmap[];
527 	extern spp_abort(), spp_quench();
528 	extern struct nspcb *idp_drop();
529 	struct ns_errp *errp;
530 	struct nspcb *nsp;
531 	struct sockaddr_ns *sns;
532 	int type;
533 
534 	if (cmd < 0 || cmd >= PRC_NCMDS)
535 		return;
536 	type = NS_ERR_UNREACH_HOST;
537 
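	/*
	 * PRC_IFDOWN, PRC_HOSTDEAD and PRC_HOSTUNREACH arrive with a
	 * sockaddr_ns as the argument; anything else comes from a received
	 * NS error packet, whose error number selects the action below.
	 */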
538 	switch (cmd) {
539 
540 	case PRC_ROUTEDEAD:
541 		return;
542 
543 	case PRC_IFDOWN:
544 	case PRC_HOSTDEAD:
545 	case PRC_HOSTUNREACH:
546 		sns = (struct sockaddr_ns *)arg;
547 		if (sns->sns_family != AF_NS)
548 			return;
549 		na = &sns->sns_addr;
550 		break;
551 
552 	default:
553 		errp = (struct ns_errp *)arg;
554 		na = &errp->ns_err_idp.idp_dna;
555 		type = errp->ns_err_num;
556 		type = ntohs((u_short)type);
557 	}
558 	switch (type) {
559 
560 	case NS_ERR_UNREACH_HOST:
561 		ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
562 		break;
563 
564 	case NS_ERR_TOO_BIG:
565 	case NS_ERR_NOSOCK:
566 		nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
567 			NS_WILDCARD);
568 		if (nsp) {
569 			if(nsp->nsp_pcb)
570 				(void) spp_drop((struct sppcb *)nsp->nsp_pcb,
571 						(int)nsctlerrmap[cmd]);
572 			else
573 				(void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
574 		}
575 		break;
576 
577 	case NS_ERR_FULLUP:
578 		ns_pcbnotify(na, 0, spp_quench, (long) 0);
579 	}
580 }
581 /*
582  * When a source quench is received, close congestion window
583  * to one packet.  We will gradually open it again as we proceed.
584  */
585 spp_quench(nsp)
586 	struct nspcb *nsp;
587 {
588 	struct sppcb *cb = nstosppcb(nsp);
589 
590 	if (cb)
591 		cb->s_cwnd = CUNIT;
592 }
593 
594 #ifdef notdef
595 int
596 spp_fixmtu(nsp)
597 register struct nspcb *nsp;
598 {
599 	register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
600 	register struct mbuf *m;
601 	register struct spidp *si;
602 	struct ns_errp *ep;
603 	struct sockbuf *sb;
604 	int badseq, len;
605 	struct mbuf *firstbad, *m0;
606 
607 	if (cb) {
608 		/*
609 		 * The notification that we have sent
610 		 * too much is bad news -- we will
611 		 * have to go through the packets queued up so far,
612 		 * splitting ones which are too big and
613 		 * reassigning sequence numbers and checksums.
614 		 * we should then retransmit all packets from
615 		 * one above the offending packet to the last one
616 		 * we had sent (or our allocation)
617 		 * then the offending one so that any queued
618 		 * data at our destination will be discarded.
619 		 */
620 		 ep = (struct ns_errp *)nsp->nsp_notify_param;
621 		 sb = &nsp->nsp_socket->so_snd;
622 		 cb->s_mtu = ep->ns_err_param;
623 		 badseq = SI(&ep->ns_err_idp)->si_seq;
624 		 for (m = sb->sb_mb; m; m = m->m_act) {
625 			si = mtod(m, struct spidp *);
626 			if (si->si_seq == badseq)
627 				break;
628 		 }
629 		 if (m == 0) return;
630 		 firstbad = m;
631 		 /*for (;;) {*/
632 			/* calculate length */
633 			for (m0 = m, len = 0; m ; m = m->m_next)
634 				len += m->m_len;
635 			if (len > cb->s_mtu) {
636 			}
637 		/* FINISH THIS
638 		} */
639 	}
640 }
641 #endif
642 
643 spp_output(cb, m0)
644 	register struct sppcb *cb;
645 	struct mbuf *m0;
646 {
647 	struct socket *so = cb->s_nspcb->nsp_socket;
648 	register struct mbuf *m;
649 	register struct spidp *si = (struct spidp *) 0;
650 	register struct sockbuf *sb = &so->so_snd;
651 	int len = 0, win, rcv_win;
652 	short span, off;
653 	u_short alo;
654 	int error = 0, sendalot;
655 #ifdef notdef
656 	int idle;
657 #endif
658 	struct mbuf *mprev;
659 	extern int idpcksum;
660 
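	/*
	 * If m0 is given it is a new record from the user: frame it with
	 * an spidp header, pad it to an even length, and append it to the
	 * send queue.  Whether or not new data arrived, the code below
	 * then decides what (if anything) to put on the wire.
	 */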
661 	if (m0) {
662 		int mtu = cb->s_mtu;
663 		int datalen;
664 		/*
665 		 * Make sure that packet isn't too big.
666 		 */
667 		for (m = m0; m ; m = m->m_next) {
668 			mprev = m;
669 			len += m->m_len;
670 		}
671 		datalen = (cb->s_flags & SF_HO) ?
672 				len - sizeof (struct sphdr) : len;
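		/*
		 * An oversize packet is an error in packet-interface mode;
		 * otherwise it is split into mtu-sized pieces, each sent by
		 * a recursive call with the end-of-message bit (SP_EM)
		 * suppressed, so that only the final piece carries the
		 * caller's EM setting.
		 */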
673 		if (datalen > mtu) {
674 			if (cb->s_flags & SF_PI) {
675 				m_freem(m0);
676 				return (EMSGSIZE);
677 			} else {
678 				int oldEM = cb->s_cc & SP_EM;
679 
680 				cb->s_cc &= ~SP_EM;
681 				while (len > mtu) {
682 					m = m_copy(m0, 0, mtu);
683 					if (m == NULL) {
684 						error = ENOBUFS;
685 						goto bad_copy;
686 					}
687 					error = spp_output(cb, m);
688 					if (error) {
689 					bad_copy:
690 						cb->s_cc |= oldEM;
691 						m_freem(m0);
692 						return(error);
693 					}
694 					m_adj(m0, mtu);
695 					len -= mtu;
696 				}
697 				cb->s_cc |= oldEM;
698 			}
699 		}
700 		/*
701 		 * Force length even, by adding a "garbage byte" if
702 		 * necessary.
703 		 */
704 		if (len & 1) {
705 			m = mprev;
706 			if (m->m_len + m->m_off < MMAXOFF)
707 				m->m_len++;
708 			else {
709 				struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);
710 
711 				if (m1 == 0) {
712 					m_freem(m0);
713 					return (ENOBUFS);
714 				}
715 				m1->m_len = 1;
716 				m1->m_off = MMAXOFF - 1;
717 				m->m_next = m1;
718 			}
719 		}
720 		m = m_get(M_DONTWAIT, MT_HEADER);
721 		if (m == 0) {
722 			m_freem(m0);
723 			return (ENOBUFS);
724 		}
725 		/*
726 		 * Fill in mbuf with extended SP header
727 		 * and addresses and length put into network format.
728 		 * Long align so prepended ip headers will work on Gould.
729 		 */
730 		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
731 		m->m_len = sizeof (struct spidp);
732 		m->m_next = m0;
733 		si = mtod(m, struct spidp *);
734 		si->si_i = *cb->s_idp;
735 		si->si_s = cb->s_shdr;
736 		if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
737 			register struct sphdr *sh;
738 			if (m0->m_len < sizeof (*sh)) {
739 				if((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
740 					(void) m_free(m);
741 					m_freem(m0);
742 					return (EINVAL);
743 				}
744 				m->m_next = m0;
745 			}
746 			sh = mtod(m0, struct sphdr *);
747 			si->si_dt = sh->sp_dt;
748 			si->si_cc |= sh->sp_cc & SP_EM;
749 			m0->m_len -= sizeof (*sh);
750 			m0->m_off += sizeof (*sh);
751 			len -= sizeof (*sh);
752 		}
753 		len += sizeof(*si);
754 		if (cb->s_oobflags & SF_SOOB) {
755 			/*
756 			 * Per jqj@cornell:
757 			 * make sure OB packets convey exactly 1 byte.
758 			 * If the packet is 1 byte or larger, we
759 			 * have already guaranteed there to be at least
760 			 * one garbage byte for the checksum, and
761 			 * extra bytes shouldn't hurt!
762 			 */
763 			if (len > sizeof(*si)) {
764 				si->si_cc |= SP_OB;
765 				len = (1 + sizeof(*si));
766 			}
767 		}
768 		si->si_len = htons((u_short)len);
769 		/*
770 		 * queue stuff up for output
771 		 */
772 		sbappendrecord(sb, m);
773 		cb->s_seq++;
774 	}
775 #ifdef notdef
776 	idle = (cb->s_smax == (cb->s_rack - 1));
777 #endif
778 again:
779 	sendalot = 0;
780 	off = cb->s_snxt - cb->s_rack;
781 	win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT));
782 
783 	/*
784 	 * If in persist timeout with window of 0, send a probe.
785 	 * Otherwise, if window is small but nonzero
786 	 * and timer expired, send what we can and go into
787 	 * transmit state.
788 	 */
789 	if (cb->s_force == 1 + SPPT_PERSIST) {
790 		if (win != 0) {
791 			cb->s_timer[SPPT_PERSIST] = 0;
792 			cb->s_rxtshift = 0;
793 		}
794 	}
795 	span = cb->s_seq - cb->s_rack;
796 	len = MIN(span, win) - off;
797 
798 	if (len < 0) {
799 		/*
800 		 * Window shrank after we went into it.
801 		 * If window shrank to 0, cancel pending
802 		 * retransmission and pull s_snxt back
803 		 * to (closed) window.  We will enter persist
804 		 * state below.  If the window didn't close completely,
805 		 * just wait for an ACK.
806 		 */
807 		len = 0;
808 		if (win == 0) {
809 			cb->s_timer[SPPT_REXMT] = 0;
810 			cb->s_snxt = cb->s_rack;
811 		}
812 	}
813 	if (len > 1)
814 		sendalot = 1;
815 	rcv_win = sbspace(&so->so_rcv);
816 
817 	/*
818 	 * Send if we owe peer an ACK.
819 	 */
820 	if (cb->s_oobflags & SF_SOOB) {
821 		/*
822 		 * must transmit this out of band packet
823 		 */
824 		cb->s_oobflags &= ~ SF_SOOB;
825 		sendalot = 1;
826 		sppstat.spps_sndurg++;
827 		goto found;
828 	}
829 	if (cb->s_flags & SF_ACKNOW)
830 		goto send;
831 	if (cb->s_state < TCPS_ESTABLISHED)
832 		goto send;
833 	/*
834 	 * Silly window can't happen in spp.
835 	 * Code from tcp deleted.
836 	 */
837 	if (len)
838 		goto send;
839 	/*
840 	 * Compare available window to amount of window
841 	 * known to peer (as advertised window less
842 	 * next expected input.)  If the difference is at least two
843 	 * packets or at least 35% of the maximum possible window,
844 	 * then we want to send a window update to peer.
845 	 */
846 	if (rcv_win > 0) {
847 		u_short delta =  1 + cb->s_alo - cb->s_ack;
848 		int adv = rcv_win - (delta * cb->s_mtu);
849 
850 		if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
851 		    (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
852 			sppstat.spps_sndwinup++;
853 			cb->s_flags |= SF_ACKNOW;
854 			goto send;
855 		}
856 
857 	}
858 	/*
859 	 * Many comments from tcp_output.c are appropriate here
860 	 * including . . .
861 	 * If send window is too small, there is data to transmit, and no
862 	 * retransmit or persist is pending, then go to persist state.
863 	 * If nothing happens soon, send when timer expires:
864 	 * if window is nonzero, transmit what we can,
865 	 * otherwise send a probe.
866 	 */
867 	if (so->so_snd.sb_cc && cb->s_timer[SPPT_REXMT] == 0 &&
868 		cb->s_timer[SPPT_PERSIST] == 0) {
869 			cb->s_rxtshift = 0;
870 			spp_setpersist(cb);
871 	}
872 	/*
873 	 * No reason to send a packet, just return.
874 	 */
875 	cb->s_outx = 1;
876 	return (0);
877 
878 send:
879 	/*
880 	 * Find requested packet.
881 	 */
882 	si = 0;
883 	if (len > 0) {
884 		cb->s_want = cb->s_snxt;
885 		for (m = sb->sb_mb; m; m = m->m_act) {
886 			si = mtod(m, struct spidp *);
887 			if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
888 				break;
889 		}
890 	found:
891 		if (si) {
892 			if (si->si_seq == cb->s_snxt)
893 				cb->s_snxt++;
894 			else
895 				sppstat.spps_sndvoid++, si = 0;
896 		}
897 	}
898 	/*
899 	 * update window
900 	 */
901 	if (rcv_win < 0)
902 		rcv_win = 0;
903 	alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
904 	if (SSEQ_LT(alo, cb->s_alo))
905 		alo = cb->s_alo;
906 
907 	if (si) {
908 		/*
909 		 * must make a copy of this packet for
910 		 * idp_output to monkey with
911 		 */
912 		m = m_copy(dtom(si), 0, (int)M_COPYALL);
913 		if (m == NULL) {
914 			return (ENOBUFS);
915 		}
916 		m0 = m;
917 		si = mtod(m, struct spidp *);
918 		if (SSEQ_LT(si->si_seq, cb->s_smax))
919 			sppstat.spps_sndrexmitpack++;
920 		else
921 			sppstat.spps_sndpack++;
922 	} else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
923 		/*
924 		 * Must send an acknowledgement or a probe
925 		 */
926 		if (cb->s_force)
927 			sppstat.spps_sndprobe++;
928 		if (cb->s_flags & SF_ACKNOW)
929 			sppstat.spps_sndacks++;
930 		m = m_get(M_DONTWAIT, MT_HEADER);
931 		if (m == 0) {
932 			return (ENOBUFS);
933 		}
934 		/*
935 		 * Fill in mbuf with extended SP header
936 		 * and addresses and length put into network format.
937 		 * Align beginning of packet to long to prepend
938 		 * ifp's on loopback, or NSIP encapsulation for fussy cpu's.
939 		 */
940 		m->m_off = MMAXOFF - sizeof (struct spidp) - 2;
941 		m->m_len = sizeof (*si);
942 		m->m_next = 0;
943 		si = mtod(m, struct spidp *);
944 		si->si_i = *cb->s_idp;
945 		si->si_s = cb->s_shdr;
946 		si->si_seq = cb->s_smax + 1;
947 		si->si_len = htons(sizeof (*si));
948 		si->si_cc |= SP_SP;
949 	} else {
950 		cb->s_outx = 3;
951 		if (so->so_options & SO_DEBUG || traceallspps)
952 			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
953 		return (0);
954 	}
955 	/*
956 	 * Stuff checksum and output datagram.
957 	 */
958 	if ((si->si_cc & SP_SP) == 0) {
959 		if (cb->s_force != (1 + SPPT_PERSIST) ||
960 		    cb->s_timer[SPPT_PERSIST] == 0) {
961 			/*
962 			 * If this is a new packet and we are not currently
963 			 * timing anything, time this one.
964 			 */
965 			if (SSEQ_LT(cb->s_smax, si->si_seq)) {
966 				cb->s_smax = si->si_seq;
967 				if (cb->s_rtt == 0) {
968 					sppstat.spps_segstimed++;
969 					cb->s_rtseq = si->si_seq;
970 					cb->s_rtt = 1;
971 				}
972 			}
973 			/*
974 			 * Set rexmt timer if not currently set,
975 			 * Initial value for retransmit timer is smoothed
976 			 * round-trip time + 2 * round-trip time variance.
977 			 * Initialize shift counter which is used for backoff
978 			 * of retransmit time.
979 			 */
980 			if (cb->s_timer[SPPT_REXMT] == 0 &&
981 			    cb->s_snxt != cb->s_rack) {
982 				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
983 				if (cb->s_timer[SPPT_PERSIST]) {
984 					cb->s_timer[SPPT_PERSIST] = 0;
985 					cb->s_rxtshift = 0;
986 				}
987 			}
988 		} else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
989 			cb->s_smax = si->si_seq;
990 		}
991 	} else if (cb->s_state < TCPS_ESTABLISHED) {
992 		if (cb->s_rtt == 0)
993 			cb->s_rtt = 1; /* Time initial handshake */
994 		if (cb->s_timer[SPPT_REXMT] == 0)
995 			cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
996 	}
997 	{
998 		/*
999 		 * Do not request acks when we ack their data packets or
1000 		 * when we do a gratuitous window update.
1001 		 */
1002 		if (((si->si_cc & SP_SP) == 0) || cb->s_force)
1003 				si->si_cc |= SP_SA;
1004 		si->si_seq = htons(si->si_seq);
1005 		si->si_alo = htons(alo);
1006 		si->si_ack = htons(cb->s_ack);
1007 
1008 		if (idpcksum) {
1009 			si->si_sum = 0;
1010 			len = ntohs(si->si_len);
1011 			if (len & 1)
1012 				len++;
1013 			si->si_sum = ns_cksum(dtom(si), len);
1014 		} else
1015 			si->si_sum = 0xffff;
1016 
1017 		cb->s_outx = 4;
1018 		if (so->so_options & SO_DEBUG || traceallspps)
1019 			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
1020 
1021 		if (so->so_options & SO_DONTROUTE)
1022 			error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
1023 		else
1024 			error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
1025 	}
1026 	if (error) {
1027 		return (error);
1028 	}
1029 	sppstat.spps_sndtotal++;
1030 	/*
1031 	 * Data sent (as far as we can tell).
1032 	 * If this advertises a larger window than any other segment,
1033 	 * then remember the size of the advertised window.
1034 	 * Any pending ACK has now been sent.
1035 	 */
1036 	cb->s_force = 0;
1037 	cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
1038 	if (SSEQ_GT(alo, cb->s_alo))
1039 		cb->s_alo = alo;
1040 	if (sendalot)
1041 		goto again;
1042 	cb->s_outx = 5;
1043 	return (0);
1044 }
1045 
1046 int spp_do_persist_panics = 0;
1047 
1048 spp_setpersist(cb)
1049 	register struct sppcb *cb;
1050 {
1051 	register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
1052 	extern int spp_backoff[];
1053 
1054 	if (cb->s_timer[SPPT_REXMT] && spp_do_persist_panics)
1055 		panic("spp_output REXMT");
1056 	/*
1057 	 * Start/restart persistence timer.
1058 	 */
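	/*
	 * The base value t is the current round-trip estimate
	 * (srtt + 2 * rttvar, the same formula used for the retransmit
	 * timer), scaled by the backoff table and clamped to
	 * [SPPTV_PERSMIN, SPPTV_PERSMAX].
	 */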
1059 	SPPT_RANGESET(cb->s_timer[SPPT_PERSIST],
1060 	    t*spp_backoff[cb->s_rxtshift],
1061 	    SPPTV_PERSMIN, SPPTV_PERSMAX);
1062 	if (cb->s_rxtshift < SPP_MAXRXTSHIFT)
1063 		cb->s_rxtshift++;
1064 }
1065 /*ARGSUSED*/
1066 spp_ctloutput(req, so, level, name, value)
1067 	int req;
1068 	struct socket *so;
1069 	int name;
1070 	struct mbuf **value;
1071 {
1072 	register struct mbuf *m;
1073 	struct nspcb *nsp = sotonspcb(so);
1074 	register struct sppcb *cb;
1075 	int mask, error = 0;
1076 
1077 	if (level != NSPROTO_SPP) {
1078 		/* This will have to be changed when we do more general
1079 		   stacking of protocols */
1080 		return (idp_ctloutput(req, so, level, name, value));
1081 	}
1082 	if (nsp == NULL) {
1083 		error = EINVAL;
1084 		goto release;
1085 	} else
1086 		cb = nstosppcb(nsp);
1087 
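	/*
	 * Options at the SPP level:
	 *	SO_HEADERS_ON_INPUT, SO_HEADERS_ON_OUTPUT
	 *		toggle SF_HI/SF_HO, i.e. whether the SP header is
	 *		passed to/expected from the user along with the data
	 *		(packet interface only).
	 *	SO_MTU	maximum data size for this connection.
	 *	SO_LAST_HEADER
	 *		returns the most recently received SP header.
	 *	SO_DEFAULT_HEADERS
	 *		gets the prototype header used on output; setting it
	 *		takes the datastream type and end-of-message bit.
	 */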
1088 	switch (req) {
1089 
1090 	case PRCO_GETOPT:
1091 		if (value == NULL)
1092 			return (EINVAL);
1093 		m = m_get(M_DONTWAIT, MT_DATA);
1094 		if (m == NULL)
1095 			return (ENOBUFS);
1096 		switch (name) {
1097 
1098 		case SO_HEADERS_ON_INPUT:
1099 			mask = SF_HI;
1100 			goto get_flags;
1101 
1102 		case SO_HEADERS_ON_OUTPUT:
1103 			mask = SF_HO;
1104 		get_flags:
1105 			m->m_len = sizeof(short);
1106 			m->m_off = MMAXOFF - sizeof(short);
1107 			*mtod(m, short *) = cb->s_flags & mask;
1108 			break;
1109 
1110 		case SO_MTU:
1111 			m->m_len = sizeof(u_short);
1112 			m->m_off = MMAXOFF - sizeof(short);
1113 			*mtod(m, short *) = cb->s_mtu;
1114 			break;
1115 
1116 		case SO_LAST_HEADER:
1117 			m->m_len = sizeof(struct sphdr);
1118 			m->m_off = MMAXOFF - sizeof(struct sphdr);
1119 			*mtod(m, struct sphdr *) = cb->s_rhdr;
1120 			break;
1121 
1122 		case SO_DEFAULT_HEADERS:
1123 			m->m_len = sizeof(struct sphdr);
1124 			m->m_off = MMAXOFF - sizeof(struct sphdr);
1125 			*mtod(m, struct sphdr *) = cb->s_shdr;
1126 			break;
1127 
1128 		default:
1129 			error = EINVAL;
1130 		}
1131 		*value = m;
1132 		break;
1133 
1134 	case PRCO_SETOPT:
1135 		if (value == 0 || *value == 0) {
1136 			error = EINVAL;
1137 			break;
1138 		}
1139 		switch (name) {
1140 			int *ok;
1141 
1142 		case SO_HEADERS_ON_INPUT:
1143 			mask = SF_HI;
1144 			goto set_head;
1145 
1146 		case SO_HEADERS_ON_OUTPUT:
1147 			mask = SF_HO;
1148 		set_head:
1149 			if (cb->s_flags & SF_PI) {
1150 				ok = mtod(*value, int *);
1151 				if (*ok)
1152 					cb->s_flags |= mask;
1153 				else
1154 					cb->s_flags &= ~mask;
1155 			} else error = EINVAL;
1156 			break;
1157 
1158 		case SO_MTU:
1159 			cb->s_mtu = *(mtod(*value, u_short *));
1160 			break;
1161 
1162 		case SO_DEFAULT_HEADERS:
1163 			{
1164 				register struct sphdr *sp
1165 						= mtod(*value, struct sphdr *);
1166 				cb->s_dt = sp->sp_dt;
1167 				cb->s_cc = sp->sp_cc & SP_EM;
1168 			}
1169 			break;
1170 
1171 		default:
1172 			error = EINVAL;
1173 		}
1174 		m_freem(*value);
1175 		break;
1176 	}
1177 	release:
1178 		return (error);
1179 }
1180 
1181 /*ARGSUSED*/
1182 spp_usrreq(so, req, m, nam, rights)
1183 	struct socket *so;
1184 	int req;
1185 	struct mbuf *m, *nam, *rights;
1186 {
1187 	struct nspcb *nsp = sotonspcb(so);
1188 	register struct sppcb *cb = 0;	/* stays null until a pcb exists */
1189 	int s = splnet();
1190 	int error = 0, ostate;
1191 	struct mbuf *mm;
1192 	register struct sockbuf *sb;
1193 
1194 	if (req == PRU_CONTROL)
1195                 return (ns_control(so, (int)m, (caddr_t)nam,
1196 			(struct ifnet *)rights));
1197 	if (rights && rights->m_len) {
1198 		error = EINVAL;
1199 		goto release;
1200 	}
1201 	if (nsp == NULL) {
1202 		if (req != PRU_ATTACH) {
1203 			error = EINVAL;
1204 			goto release;
1205 		}
1206 	} else
1207 		cb = nstosppcb(nsp);
1208 
1209 	ostate = cb ? cb->s_state : 0;
1210 
1211 	switch (req) {
1212 
1213 	case PRU_ATTACH:
1214 		if (nsp != NULL) {
1215 			error = EISCONN;
1216 			break;
1217 		}
1218 		error = ns_pcballoc(so, &nspcb);
1219 		if (error)
1220 			break;
1221 		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
1222 			error = soreserve(so, (u_long) 3072, (u_long) 3072);
1223 			if (error)
1224 				break;
1225 		}
1226 		nsp = sotonspcb(so);
1227 
1228 		mm = m_getclr(M_DONTWAIT, MT_PCB);
1229 		sb = &so->so_snd;
1230 
1231 		if (mm == NULL) {
1232 			error = ENOBUFS;
1233 			break;
1234 		}
1235 		cb = mtod(mm, struct sppcb *);
1236 		mm = m_getclr(M_DONTWAIT, MT_HEADER);
1237 		if (mm == NULL) {
1238 			(void) m_free(dtom(cb));	/* free pcb mbuf allocated above */
1239 			error = ENOBUFS;
1240 			break;
1241 		}
1242 		cb->s_idp = mtod(mm, struct idp *);
1243 		cb->s_state = TCPS_LISTEN;
1244 		cb->s_smax = -1;
1245 		cb->s_swl1 = -1;
1246 		cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
1247 		cb->s_nspcb = nsp;
1248 		cb->s_mtu = 576 - sizeof (struct spidp);
1249 		cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
1250 		cb->s_ssthresh = cb->s_cwnd;
1251 		cb->s_cwmx = sb->sb_mbmax * CUNIT /
1252 				(2 * sizeof (struct spidp));
1253 		/* Above is recomputed when connecting to account
1254 		   for changed buffering or mtu's */
1255 		cb->s_rtt = SPPTV_SRTTBASE;
1256 		cb->s_rttvar = SPPTV_SRTTDFLT << 2;
1257 		SPPT_RANGESET(cb->s_rxtcur,
1258 		    ((SPPTV_SRTTBASE >> 2) + (SPPTV_SRTTDFLT << 2)) >> 1,
1259 		    SPPTV_MIN, SPPTV_REXMTMAX);
1260 		nsp->nsp_pcb = (caddr_t) cb;
1261 		break;
1262 
1263 	case PRU_DETACH:
1264 		if (nsp == NULL) {
1265 			error = ENOTCONN;
1266 			break;
1267 		}
1268 		if (cb->s_state > TCPS_LISTEN)
1269 			cb = spp_disconnect(cb);
1270 		else
1271 			cb = spp_close(cb);
1272 		break;
1273 
1274 	case PRU_BIND:
1275 		error = ns_pcbbind(nsp, nam);
1276 		break;
1277 
1278 	case PRU_LISTEN:
1279 		if (nsp->nsp_lport == 0)
1280 			error = ns_pcbbind(nsp, (struct mbuf *)0);
1281 		if (error == 0)
1282 			cb->s_state = TCPS_LISTEN;
1283 		break;
1284 
1285 	/*
1286 	 * Initiate connection to peer.
1287 	 * Enter SYN_SENT state, and mark socket as connecting.
1288 	 * Start keep-alive timer, setup prototype header,
1289 	 * Send initial system packet requesting connection.
1290 	 */
1291 	case PRU_CONNECT:
1292 		if (nsp->nsp_lport == 0) {
1293 			error = ns_pcbbind(nsp, (struct mbuf *)0);
1294 			if (error)
1295 				break;
1296 		}
1297 		error = ns_pcbconnect(nsp, nam);
1298 		if (error)
1299 			break;
1300 		soisconnecting(so);
1301 		sppstat.spps_connattempt++;
1302 		cb->s_state = TCPS_SYN_SENT;
1303 		cb->s_did = 0;
1304 		spp_template(cb);
1305 		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
1306 		cb->s_force = 1 + SPPTV_KEEP;
1307 		/*
1308 		 * Other party is required to respond to
1309 		 * the port I send from, but he is not
1310 		 * required to answer from where I am sending to,
1311 		 * so allow wildcarding.
1312 		 * original port I am sending to is still saved in
1313 		 * cb->s_dport.
1314 		 */
1315 		nsp->nsp_fport = 0;
1316 		error = spp_output(cb, (struct mbuf *) 0);
1317 		break;
1318 
1319 	case PRU_CONNECT2:
1320 		error = EOPNOTSUPP;
1321 		break;
1322 
1323 	/*
1324 	 * We may decide later to implement connection closing
1325 	 * handshaking at the spp level optionally.
1326 	 * here is the hook to do it:
1327 	 */
1328 	case PRU_DISCONNECT:
1329 		cb = spp_disconnect(cb);
1330 		break;
1331 
1332 	/*
1333 	 * Accept a connection.  Essentially all the work is
1334 	 * done at higher levels; just return the address
1335 	 * of the peer, storing through addr.
1336 	 */
1337 	case PRU_ACCEPT: {
1338 		struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);
1339 
1340 		nam->m_len = sizeof (struct sockaddr_ns);
1341 		sns->sns_family = AF_NS;
1342 		sns->sns_addr = nsp->nsp_faddr;
1343 		break;
1344 		}
1345 
1346 	case PRU_SHUTDOWN:
1347 		socantsendmore(so);
1348 		cb = spp_usrclosed(cb);
1349 		if (cb)
1350 			error = spp_output(cb, (struct mbuf *) 0);
1351 		break;
1352 
1353 	/*
1354 	 * After a receive, possibly send acknowledgment
1355 	 * updating allocation.
1356 	 */
1357 	case PRU_RCVD:
1358 		cb->s_flags |= SF_RVD;
1359 		(void) spp_output(cb, (struct mbuf *) 0);
1360 		cb->s_flags &= ~SF_RVD;
1361 		break;
1362 
1363 	case PRU_ABORT:
1364 		(void) spp_drop(cb, ECONNABORTED);
1365 		break;
1366 
1367 	case PRU_SENSE:
1368 	case PRU_CONTROL:
1369 		m = NULL;
1370 		error = EOPNOTSUPP;
1371 		break;
1372 
1373 	case PRU_RCVOOB:
1374 		if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
1375 		    (so->so_state & SS_RCVATMARK)) {
1376 			m->m_len = 1;
1377 			*mtod(m, caddr_t) = cb->s_iobc;
1378 			break;
1379 		}
1380 		error = EINVAL;
1381 		break;
1382 
1383 	case PRU_SENDOOB:
1384 		if (sbspace(&so->so_snd) < -512) {
1385 			error = ENOBUFS;
1386 			break;
1387 		}
1388 		cb->s_oobflags |= SF_SOOB;
1389 		/* fall into */
1390 	case PRU_SEND:
1391 		error = spp_output(cb, m);
1392 		m = NULL;
1393 		break;
1394 
1395 	case PRU_SOCKADDR:
1396 		ns_setsockaddr(nsp, nam);
1397 		break;
1398 
1399 	case PRU_PEERADDR:
1400 		ns_setpeeraddr(nsp, nam);
1401 		break;
1402 
1403 	case PRU_SLOWTIMO:
1404 		cb = spp_timers(cb, (int)nam);
1405 		req |= ((int)nam) << 8;	/* record which timer expired, for spp_trace below */
1406 		break;
1407 
1408 	case PRU_FASTTIMO:
1409 	case PRU_PROTORCV:
1410 	case PRU_PROTOSEND:
1411 		error =  EOPNOTSUPP;
1412 		break;
1413 
1414 	default:
1415 		panic("sp_usrreq");
1416 	}
1417 	if (cb && (so->so_options & SO_DEBUG || traceallspps))
1418 		spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
1419 release:
1420 	if (m != NULL)
1421 		m_freem(m);
1422 	splx(s);
1423 	return (error);
1424 }
1425 
1426 spp_usrreq_sp(so, req, m, nam, rights)
1427 	struct socket *so;
1428 	int req;
1429 	struct mbuf *m, *nam, *rights;
1430 {
1431 	int error = spp_usrreq(so, req, m, nam, rights);
1432 
1433 	if (req == PRU_ATTACH && error == 0) {
1434 		struct nspcb *nsp = sotonspcb(so);
1435 		((struct sppcb *)nsp->nsp_pcb)->s_flags |=
1436 					(SF_HI | SF_HO | SF_PI);
1437 	}
1438 	return (error);
1439 }
1440 
1441 /*
1442  * Create template to be used to send spp packets on a connection.
1443  * Called after host entry created, fills
1444  * in a skeletal spp header (choosing connection id),
1445  * minimizing the amount of work necessary when the connection is used.
1446  */
1447 spp_template(cb)
1448 	register struct sppcb *cb;
1449 {
1450 	register struct nspcb *nsp = cb->s_nspcb;
1451 	register struct idp *idp = cb->s_idp;
1452 	register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);
1453 
1454 	idp->idp_pt = NSPROTO_SPP;
1455 	idp->idp_sna = nsp->nsp_laddr;
1456 	idp->idp_dna = nsp->nsp_faddr;
1457 	cb->s_sid = htons(spp_iss);
1458 	spp_iss += SPP_ISSINCR/2;
1459 	cb->s_alo = 1;
1460 	cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
1461 	cb->s_ssthresh = cb->s_cwnd; /* Try to expand fast to full complement
1462 					of large packets */
1463 	cb->s_cwmx = (sb->sb_mbmax * CUNIT) / (2 * sizeof(struct spidp));
1464 	cb->s_cwmx = MAX(cb->s_cwmx, cb->s_cwnd);
1465 		/* But allow for lots of little packets as well */
1466 }
1467 
1468 /*
1469  * Close a SPIP control block:
1470  *	discard spp control block itself
1471  *	discard ns protocol control block
1472  *	wake up any sleepers
1473  */
1474 struct sppcb *
1475 spp_close(cb)
1476 	register struct sppcb *cb;
1477 {
1478 	register struct spidp_q *s;
1479 	struct nspcb *nsp = cb->s_nspcb;
1480 	struct socket *so = nsp->nsp_socket;
1481 	register struct mbuf *m;
1482 
1483 	s = cb->s_q.si_next;
1484 	while (s != &(cb->s_q)) {
1485 		s = s->si_next;
1486 		m = dtom(s->si_prev);
1487 		remque(s->si_prev);
1488 		m_freem(m);
1489 	}
1490 	(void) m_free(dtom(cb->s_idp));
1491 	(void) m_free(dtom(cb));
1492 	nsp->nsp_pcb = 0;
1493 	soisdisconnected(so);
1494 	ns_pcbdetach(nsp);
1495 	sppstat.spps_closed++;
1496 	return ((struct sppcb *)0);
1497 }
1498 /*
1499  *	Someday we may do level 3 handshaking
1500  *	to close a connection or send a xerox style error.
1501  *	For now, just close.
1502  */
1503 struct sppcb *
1504 spp_usrclosed(cb)
1505 	register struct sppcb *cb;
1506 {
1507 	return (spp_close(cb));
1508 }
1509 struct sppcb *
1510 spp_disconnect(cb)
1511 	register struct sppcb *cb;
1512 {
1513 	return (spp_close(cb));
1514 }
1515 /*
1516  * Drop connection, reporting
1517  * the specified error.
1518  */
1519 struct sppcb *
1520 spp_drop(cb, errno)
1521 	register struct sppcb *cb;
1522 	int errno;
1523 {
1524 	struct socket *so = cb->s_nspcb->nsp_socket;
1525 
1526 	/*
1527 	 * someday, in the xerox world
1528 	 * we will generate error protocol packets
1529 	 * announcing that the socket has gone away.
1530 	 */
1531 	if (TCPS_HAVERCVDSYN(cb->s_state)) {
1532 		sppstat.spps_drops++;
1533 		cb->s_state = TCPS_CLOSED;
1534 		/*(void) tcp_output(cb);*/
1535 	} else
1536 		sppstat.spps_conndrops++;
1537 	so->so_error = errno;
1538 	return (spp_close(cb));
1539 }
1540 
1541 spp_abort(nsp)
1542 	struct nspcb *nsp;
1543 {
1544 
1545 	(void) spp_close((struct sppcb *)nsp->nsp_pcb);
1546 }
1547 
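/* Retransmit/persist backoff multipliers, indexed by s_rxtshift. */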
1548 int	spp_backoff[SPP_MAXRXTSHIFT+1] =
1549     { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
1550 /*
1551  * Fast timeout routine for processing delayed acks
1552  */
1553 spp_fasttimo()
1554 {
1555 	register struct nspcb *nsp;
1556 	register struct sppcb *cb;
1557 	int s = splnet();
1558 
1559 	nsp = nspcb.nsp_next;
1560 	if (nsp)
1561 	for (; nsp != &nspcb; nsp = nsp->nsp_next)
1562 		if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
1563 		    (cb->s_flags & SF_DELACK)) {
1564 			cb->s_flags &= ~SF_DELACK;
1565 			cb->s_flags |= SF_ACKNOW;
1566 			sppstat.spps_delack++;
1567 			(void) spp_output(cb, (struct mbuf *) 0);
1568 		}
1569 	splx(s);
1570 }
1571 
1572 /*
1573  * spp protocol timeout routine called every 500 ms.
1574  * Updates the timers in all active pcb's and
1575  * causes finite state machine actions if timers expire.
1576  */
1577 spp_slowtimo()
1578 {
1579 	register struct nspcb *ip, *ipnxt;
1580 	register struct sppcb *cb;
1581 	int s = splnet();
1582 	register int i;
1583 
1584 	/*
1585 	 * Search through nspcb's and update active timers.
1586 	 */
1587 	ip = nspcb.nsp_next;
1588 	if (ip == 0) {
1589 		splx(s);
1590 		return;
1591 	}
1592 	while (ip != &nspcb) {
1593 		cb = nstosppcb(ip);
1594 		ipnxt = ip->nsp_next;
1595 		if (cb == 0)
1596 			goto tpgone;
1597 		for (i = 0; i < SPPT_NTIMERS; i++) {
1598 			if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
1599 				(void) spp_usrreq(cb->s_nspcb->nsp_socket,
1600 				    PRU_SLOWTIMO, (struct mbuf *)0,
1601 				    (struct mbuf *)i, (struct mbuf *)0);
1602 				if (ipnxt->nsp_prev != ip)
1603 					goto tpgone;
1604 			}
1605 		}
1606 		cb->s_idle++;
1607 		if (cb->s_rtt)
1608 			cb->s_rtt++;
1609 tpgone:
1610 		ip = ipnxt;
1611 	}
1612 	spp_iss += SPP_ISSINCR/PR_SLOWHZ;		/* increment iss */
1613 	splx(s);
1614 }
1615 /*
1616  * SPP timer processing.
1617  */
1618 struct sppcb *
1619 spp_timers(cb, timer)
1620 	register struct sppcb *cb;
1621 	int timer;
1622 {
1623 	long rexmt;
1624 	int win;
1625 
1626 	cb->s_force = 1 + timer;
1627 	switch (timer) {
1628 
1629 	/*
1630 	 * 2 MSL timeout in shutdown went off.  TCP deletes connection
1631 	 * control block.
1632 	 */
1633 	case SPPT_2MSL:
1634 		printf("spp: SPPT_2MSL went off for no reason\n");
1635 		cb->s_timer[timer] = 0;
1636 		break;
1637 
1638 	/*
1639 	 * Retransmission timer went off.  Message has not
1640 	 * been acked within retransmit interval.  Back off
1641 	 * to a longer retransmit interval and retransmit one packet.
1642 	 */
1643 	case SPPT_REXMT:
1644 		if (++cb->s_rxtshift > SPP_MAXRXTSHIFT) {
1645 			cb->s_rxtshift = SPP_MAXRXTSHIFT;
1646 			sppstat.spps_timeoutdrop++;
1647 			cb = spp_drop(cb, ETIMEDOUT);
1648 			break;
1649 		}
1650 		sppstat.spps_rexmttimeo++;
1651 		rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
1652 		rexmt *= spp_backoff[cb->s_rxtshift];
1653 		SPPT_RANGESET(cb->s_rxtcur, rexmt, SPPTV_MIN, SPPTV_REXMTMAX);
1654 		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
1655 		/*
1656 		 * If we have backed off fairly far, our srtt
1657 		 * estimate is probably bogus.  Clobber it
1658 		 * so we'll take the next rtt measurement as our srtt;
1659 		 * move the current srtt into rttvar to keep the current
1660 		 * retransmit times until then.
1661 		 */
1662 		if (cb->s_rxtshift > SPP_MAXRXTSHIFT / 4 ) {
1663 			cb->s_rttvar += (cb->s_srtt >> 2);
1664 			cb->s_srtt = 0;
1665 		}
1666 		cb->s_snxt = cb->s_rack;
1667 		/*
1668 		 * If timing a packet, stop the timer.
1669 		 */
1670 		cb->s_rtt = 0;
1671 		/*
1672 		 * See very long discussion in tcp_timer.c about congestion
1673 		 * window and ssthresh
1674 		 */
1675 		win = MIN(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
1676 		if (win < 2)
1677 			win = 2;
1678 		cb->s_cwnd = CUNIT;
1679 		cb->s_ssthresh = win * CUNIT;
1680 		(void) spp_output(cb, (struct mbuf *) 0);
1681 		break;
1682 
1683 	/*
1684 	 * Persistence timer into zero window.
1685 	 * Force a probe to be sent.
1686 	 */
1687 	case SPPT_PERSIST:
1688 		sppstat.spps_persisttimeo++;
1689 		spp_setpersist(cb);
1690 		(void) spp_output(cb, (struct mbuf *) 0);
1691 		break;
1692 
1693 	/*
1694 	 * Keep-alive timer went off; send something
1695 	 * or drop connection if idle for too long.
1696 	 */
1697 	case SPPT_KEEP:
1698 		sppstat.spps_keeptimeo++;
1699 		if (cb->s_state < TCPS_ESTABLISHED)
1700 			goto dropit;
1701 		if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
1702 		    	if (cb->s_idle >= SPPTV_MAXIDLE)
1703 				goto dropit;
1704 			sppstat.spps_keepprobe++;
1705 			(void) spp_output(cb, (struct mbuf *) 0);
1706 		} else
1707 			cb->s_idle = 0;
1708 		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
1709 		break;
1710 	dropit:
1711 		sppstat.spps_keepdrops++;
1712 		cb = spp_drop(cb, ETIMEDOUT);
1713 		break;
1714 	}
1715 	return (cb);
1716 }
1717 #ifndef lint
1718 int SppcbSize = sizeof (struct sppcb);
1719 int NspcbSize = sizeof (struct nspcb);
1720 #endif /* lint */
1721