125202Skarels #ifdef RCSIDENT
225202Skarels static char rcsident[] = "$Header: tcp_procs.c,v 1.32 85/07/31 09:34:27 walsh Exp $";
325202Skarels #endif
425202Skarels
525202Skarels #include "../h/param.h"
625202Skarels #include "../h/systm.h"
725202Skarels #include "../h/mbuf.h"
825202Skarels #include "../h/socket.h"
925202Skarels #include "../h/socketvar.h"
1025202Skarels #include "../h/syslog.h"
1125202Skarels
1225202Skarels #include "../net/if.h"
1325202Skarels #include "../net/route.h"
1425202Skarels
1525202Skarels #include "../bbnnet/in.h"
1625202Skarels #include "../bbnnet/in_pcb.h"
1725202Skarels #include "../bbnnet/in_var.h"
1825202Skarels #include "../bbnnet/net.h"
1925202Skarels #include "../bbnnet/fsm.h"
2025202Skarels #include "../bbnnet/tcp.h"
2125202Skarels #include "../bbnnet/seq.h"
2225202Skarels #include "../bbnnet/ip.h"
2325202Skarels #include "../bbnnet/macros.h"
2425202Skarels #ifdef HMPTRAPS
2525202Skarels #include "../bbnnet/hmp_traps.h"
2625202Skarels #endif
2725202Skarels
2825202Skarels /*
2925202Skarels * TCP finite state machine procedures.
3025202Skarels *
3125202Skarels * Called from finite state machine action routines, these do most of the work
3225202Skarels * of the protocol. They in turn call primitive routines (in tcp_prim) to
3325202Skarels * perform lower level functions.
3425202Skarels */
3525202Skarels
3625202Skarels
3725202Skarels /*
3825202Skarels * This works cooperatively with t_close for freeing up data on receive/send
3925202Skarels * buffers.
4025202Skarels */
4125202Skarels tcp_pcbdisconnect(inp)
4225202Skarels struct inpcb *inp;
4325202Skarels {
4425202Skarels register struct tcpcb *tp;
4525202Skarels
4625202Skarels if (tp = (struct tcpcb *) inp->inp_ppcb)
4725202Skarels {
4825202Skarels inp->inp_ppcb = (caddr_t) NULL;
4925202Skarels
5025202Skarels /*
5125202Skarels * free all data on receive queues
5225202Skarels */
5325202Skarels {
5425202Skarels register struct th *t, *next;
5525202Skarels
5625202Skarels t = tp->t_rcv_next;
5725202Skarels while (t != (struct th *)tp)
5825202Skarels {
5925202Skarels next = t->t_next;
6025202Skarels m_freem(dtom(t));
6125202Skarels t = next;
6225202Skarels }
6325202Skarels }
6425202Skarels {
6525202Skarels register struct mbuf *m, *next;
6625202Skarels
6725202Skarels m = tp->t_rcv_unack;
6825202Skarels while (m != NULL)
6925202Skarels {
7025202Skarels next = m->m_act;
7125202Skarels m_freem(m);
7225202Skarels m = next;
7325202Skarels }
7425202Skarels }
7525202Skarels
7625202Skarels if (tp->oob_data)
7725202Skarels m_freem(tp->oob_data);
7825202Skarels
7925202Skarels if (tp->t_template)
8025202Skarels m_free(dtom(tp->t_template));
8125202Skarels
8225202Skarels m_free(dtom(tp));
8325202Skarels }
8425202Skarels }
8525202Skarels
8625202Skarels /*
8725202Skarels * Delete TCB and free all resources used by the connection. Called after
8825202Skarels * the close protocol is complete.
8925202Skarels */
9025202Skarels t_close(tp, state)
9125202Skarels register struct tcpcb *tp;
9225202Skarels short state;
9325202Skarels {
9425202Skarels register struct inpcb *inp;
9525202Skarels
9625202Skarels /*
9725202Skarels * in_pcbdetach() calls soisdisconnected(), which wakes up the
9825202Skarels * process if it's sleeping. Need only pass user error code if
9925202Skarels * appropriate (like ENETRESET) and hope he'll close the file
10025202Skarels * descriptor. Don't need to clear timers since they're in the
10125202Skarels * tcpcb to be deleted.
10225202Skarels */
10325202Skarels inp = tp->t_in_pcb;
10425202Skarels if (!tp->usr_abort)
10525202Skarels inp->inp_socket->so_error = state;
10625202Skarels in_pcbdetach(inp, tcp_pcbdisconnect);
10725202Skarels }
10825202Skarels
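/*
 * max_ack_skipped bounds how many times in a row send_tcp() may skip sending
 * an otherwise empty ack before one is forced out; it matters only when
 * delayed acks are configured (T_DELACK > 0).  See the delayed-ack logic in
 * send_tcp() below.
 */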
10925202Skarels short max_ack_skipped = 1;
11025202Skarels
11125202Skarels /*
11225202Skarels * We are in a position where, perhaps, we should send a TCP segment (packet).
11325202Skarels * The important decisions are:
11425202Skarels * 1) How big a segment should we send? This is important since most
11525202Skarels * overhead occurs at the packet level (interrupts, queueing,
11625202Skarels * header field checks...) and not at the byte level.
11725202Skarels * 2) Is it worth it to send this packet? Are we sending enough data
11825202Skarels * or would we be better off waiting for some more to queue up?
11925202Skarels *
12025202Skarels  * The above considerations reflect our point of view when called in response
12125202Skarels  * to a user's write request. We are also called on packet arrival in order
12225202Skarels * to send an ack (with piggy-backed data), and to respond to window openings
12325202Skarels * by sending any pent up data.
12425202Skarels *
12525202Skarels * Send a TCP segment. Send data from left window edge of send buffer up to
12625202Skarels * window size or end (whichever is less). Set retransmission timers.
12725202Skarels *
12825202Skarels  * The Ford/Nagle algorithms might be considered here (if data is outstanding,
12925202Skarels  * only send when the packet would be large), but they are aimed primarily at
13025202Skarels  * telnet and conflict with the ideas in the comments below concerning PUSH.
13125202Skarels  * Has the idea of tcp changed since the RFC?
13225202Skarels */
13325202Skarels send_tcp(tp, ctl)
13425202Skarels register struct tcpcb *tp;
13525202Skarels int ctl;
13625202Skarels {
13725202Skarels register sequence last, wind;
13825202Skarels register int snd_flags;
13925202Skarels register int len;
14025202Skarels struct sockbuf *sosnd;
14125202Skarels int forced, error;
14225202Skarels int sendalot;
14325202Skarels
14425202Skarels sosnd = &tp->t_in_pcb->inp_socket->so_snd;
14525202Skarels sendalot = FALSE;
14625202Skarels snd_flags = 0;
14725202Skarels tp->snd_lst = tp->snd_nxt;
14825202Skarels forced = FALSE;
14925202Skarels /*
15025202Skarels * Send SYN if this is first data (ISS)
15125202Skarels */
15225202Skarels if (SEQ_EQ(tp->snd_nxt, tp->iss))
15325202Skarels {
15425202Skarels snd_flags |= T_SYN;
15525202Skarels tp->snd_lst++;
15625202Skarels }
15725202Skarels /*
15825202Skarels * Get seq # of last datum in send buffer
15925202Skarels */
16025202Skarels last = tp->snd_una;
16125202Skarels if (!tp->syn_acked)
16225202Skarels last++; /* don't forget SYN */
16325202Skarels last += sosnd->sb_cc;
16425202Skarels /*
16525202Skarels * If no data to send in buffer, just do FIN check, otherwise see
16625202Skarels * how much we should send in segment.
16725202Skarels */
16825202Skarels if (SEQ_GEQ(tp->snd_nxt, last))
16925202Skarels {
17025202Skarels /*
17125202Skarels 			 * should we send a FIN?  only if we haven't already sent one
17225202Skarels */
17325202Skarels if (tp->snd_fin &&
17425202Skarels (SEQ_EQ(tp->seq_fin, tp->iss) ||
17525202Skarels SEQ_LEQ(tp->snd_nxt, tp->seq_fin)))
17625202Skarels {
17725202Skarels snd_flags |= T_FIN;
17825202Skarels tp->seq_fin = tp->snd_lst++;
17925202Skarels }
18025202Skarels }
18125202Skarels else if (tp->syn_acked)
18225202Skarels {
18325202Skarels /*
18425202Skarels * We can't send more than we have (SYN + data represented
18525202Skarels * by last). Nor can we send more than the other end is
18625202Skarels * prepared to receive (represented by the window in snd_wnd
18725202Skarels * and wind).
18825202Skarels *
18925202Skarels * Only send a segment if there is something in the buffer,
19025202Skarels * and a non-zero window has been received.
19125202Skarels */
19225202Skarels wind = tp->snd_una + tp->snd_wnd;
19325202Skarels tp->snd_lst = SEQ_MIN(last, wind);
19425202Skarels
19525202Skarels /*
19625202Skarels * Make sure the segment is not larger than the remote end
19725202Skarels * can handle. Though they may advertise a 4K window, perhaps
19825202Skarels * they can only fill it 512 bytes at a time due to some
19925202Skarels * buffering or device driver constraint.
20025202Skarels *
20125202Skarels * If we're both on the local net, the maxseg is probably the
20225202Skarels * mtu of the local network, and this will avoid some IP
20325202Skarels * fragmentation.
20425202Skarels *
20525202Skarels 		 * ">=" so that sendalot gets set.
20625202Skarels */
20725202Skarels if ((len = tp->snd_lst - tp->snd_nxt) >= tp->t_maxseg)
20825202Skarels {
20925202Skarels tp->snd_lst -= len - tp->t_maxseg;
21025202Skarels sendalot = TRUE;
21125202Skarels }
21225202Skarels
21325202Skarels /*
21425202Skarels * If we're not on the same net or on similar nets immediately
21525202Skarels * connected by a gateway, the negotiated maxseg may cause
21625202Skarels * fragmentation. Fragmentation per se is not bad, but
21725202Skarels * tinygrams can cause problems and are inefficient. So,
21825202Skarels 		 * send something that, if it fragments, will produce reasonably
21925202Skarels 		 * sized fragments. Avoid excessive fragmentation to reduce the
22025202Skarels 		 * probability that the datagram fails to reassemble.
22125202Skarels */
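		/*
		 * Capping the segment at 3*t_maxfrag below presumably keeps a
		 * single datagram to roughly three fragments, so the chance
		 * that the whole datagram fails to reassemble stays small.
		 */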
22225202Skarels if (tp->t_maxfrag)
22325202Skarels {
22425202Skarels len = tp->t_maxfrag*3;
22525202Skarels if ((tp->snd_lst - tp->snd_nxt) > len)
22625202Skarels {
22725202Skarels tp->snd_lst = tp->snd_nxt + len;
22825202Skarels sendalot = TRUE;
22925202Skarels }
23025202Skarels }
23125202Skarels
23225202Skarels if (SEQ_GT(tp->snd_end, tp->snd_una) &&
23325202Skarels SEQ_LEQ(tp->snd_end, tp->snd_lst))
23425202Skarels /*
23525202Skarels * There is data to send, and it should be PUSHed.
23625202Skarels * PUSHed segments avoid the SWS algorithm since it
23725202Skarels * might delay transmission. PUSHed data MUST go
23825202Skarels * out ASAP. Note: To avoid performance degradation,
23925202Skarels * bulk data transfers should not have PUSH on.
24025202Skarels */
24125202Skarels snd_flags |= T_PUSH;
24225202Skarels else if (tp->snd_wnd > 0)
24325202Skarels {
24425202Skarels /*
24525202Skarels * Avoid the silly window syndrome (sending small
24625202Skarels * packets). Making sure the usable window is at
24725202Skarels * least some % of the offered window ensures we're
24825202Skarels * sending a relatively (for this connection) good
24925202Skarels * sized segment.
25025202Skarels *
25125202Skarels * If sbspace(sosnd) == 0, then the user
25225202Skarels * is blocked for send resources, and we won't be
25325202Skarels * able to send a larger packet later, so send it now.
25425202Skarels * (Hmm, still true? How about the wakeup after we
25525202Skarels * trim the acked data?)
25625202Skarels *
25725202Skarels * SWS and persistence interaction
25825202Skarels * If there is outstanding data, snd_nxt - snd_una
25925202Skarels * will be > 0, we'll have retransmit timers running
26025202Skarels * forcing eventual window updates. If there is
26125202Skarels * no outstanding data, then we'll send some and
26225202Skarels * start up the retransmit timers. So, any time
26325202Skarels * we run through this segment of code instead of
26425202Skarels * the next one, we've got some good timers running.
26525202Skarels */
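			/*
			 * Illustrative numbers only: if sws_qff were 30, then
			 * (given the other conditions below) the small send is
			 * deferred once more than 30% of the offered window is
			 * already outstanding, i.e. once the usable window has
			 * shrunk below 70% of what the peer advertised.
			 */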
26625202Skarels if (!tp->rexmt && !tp->ack_due && !tp->snd_fin &&
26725202Skarels !sendalot &&
26825202Skarels sbspace(sosnd) > 0 &&
26925202Skarels ((100*(tp->snd_nxt-tp->snd_una))/tp->snd_wnd)
27025202Skarels > tp->sws_qff)
27125202Skarels tp->snd_lst = tp->snd_nxt;
27225202Skarels }
27325202Skarels else
27425202Skarels {
27525202Skarels /*
27625202Skarels * We have stuff to send, but can't since the other
27725202Skarels * end can't handle it right now. We start up the
27825202Skarels * persistence timer in case their window opening
27925202Skarels * ack is lost. When the timer goes off, we send
28025202Skarels * a byte to force a window update. Wait for timer
28125202Skarels * in order to give him a chance to deal with the
28225202Skarels * remotely buffered data and send us an update.
28325202Skarels * (We'll get here on acks that stop rxmit timers
28425202Skarels * but that contain zero window since remote user
28525202Skarels * has not picked up data yet.)
28625202Skarels *
28725202Skarels * If we're called due to a write() or packet arrival,
28825202Skarels * this is how we enter the persistence state. If
28925202Skarels * we're called because the persist timer went off,
29025202Skarels * the timer is restarted to keep persisting.
29125202Skarels */
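			/*
			 * The interval chosen below is three smoothed
			 * round-trip times, clamped to the range
			 * [TCP_tvMINPERSIST, TCP_tvMAXPERSIST].
			 */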
29225202Skarels if (tp->t_timers[TPERSIST] == 0)
29325202Skarels tp->t_timers[TPERSIST] = MIN(TCP_tvMAXPERSIST,
29425202Skarels MAX(TCP_tvMINPERSIST, tp->t_srtt*3));
29525202Skarels
29625202Skarels if (tp->force_one)
29725202Skarels {
29825202Skarels /* persist timer went off */
29925202Skarels tp->snd_lst = tp->snd_nxt + 1;
30025202Skarels forced = TRUE;
30125202Skarels }
30225202Skarels }
30325202Skarels
30425202Skarels /* must send FIN and no more data left to send after this */
30525202Skarels
30625202Skarels if (tp->snd_fin && !forced && SEQ_EQ(tp->snd_lst, last) &&
30725202Skarels (SEQ_EQ(tp->seq_fin, tp->iss) ||
30825202Skarels SEQ_LEQ(tp->snd_nxt, tp->seq_fin)))
30925202Skarels {
31025202Skarels snd_flags |= T_FIN;
31125202Skarels tp->seq_fin = tp->snd_lst++;
31225202Skarels }
31325202Skarels }
31425202Skarels
31525202Skarels /*
31625202Skarels * Now, we have determined how large a segment to send if our only
31725202Skarels * purpose is to get data to the other side. If there is something
31825202Skarels * to send, do it and update timers for rexmt.
31925202Skarels */
32025202Skarels len = tp->snd_lst - tp->snd_nxt;
32125202Skarels if (len > 0)
32225202Skarels { /* then SEQ_LT(tp->snd_nxt, tp->snd_lst) */
32325202Skarels
32425202Skarels error = send_pkt (tp, snd_flags, len);
32525202Skarels
32625202Skarels /*
32725202Skarels * SEQ_LEQ(snd_nxt, t_xmt_val): if this is a retransmission
32825202Skarels * of the round-trip-time measuring byte, then remeasure the
32925202Skarels * round trip time. (Keep rtt from drifting upward on lossy
33025202Skarels * networks.)
33125202Skarels *
33225202Skarels * SEQ_GT(snd_una, t_xmt_val): Measure the rtt if the last
33325202Skarels * timed byte has been acked.
33425202Skarels */
33525202Skarels if (tp->syn_acked && (SEQ_LEQ(tp->snd_nxt, tp->t_xmt_val) ||
33625202Skarels SEQ_GT(tp->snd_una, tp->t_xmt_val)))
33725202Skarels {
33825202Skarels if (tp->t_srtt != 0)
33925202Skarels tp->t_timers[TXMT] = 0;
34025202Skarels tp->t_xmt_val = tp->snd_nxt;
34125202Skarels }
34225202Skarels
34325202Skarels /*
34425202Skarels * If the window was full, and we're just forcing a byte
34525202Skarels * out to try to get a new window, then don't use
34625202Skarels * retransmission timeouts. The other side can take as
34725202Skarels * long as it wants to process the data it's currently got.
34825202Skarels */
34925202Skarels if (! forced)
35025202Skarels {
35125202Skarels /*
35225202Skarels 			 * Set timers for retransmission. If we already have
35325202Skarels 			 * some outstanding data, don't reset the timer. Consider
35425202Skarels 			 * the case where we send one byte every 1/4 second and
35525202Skarels 			 * only the first byte is lost: we would have to wait until
35625202Skarels 			 * the window filled before the retransmission timer started
35725202Skarels 			 * to count down and go off.
35825202Skarels */
35925202Skarels if (tp->t_timers[TREXMT] == 0)
36025202Skarels tp->t_timers[TREXMT] = tp->t_rxmitime;
36125202Skarels
36225202Skarels if (tp->t_timers[TREXMTTL] == 0)
36325202Skarels tp->t_timers[TREXMTTL] = tp->t_rttltimeo;
36425202Skarels
36525202Skarels /*
36625202Skarels * and remember that next segment out begins
36725202Skarels * further into the stream if this one got out.
36825202Skarels */
36925202Skarels if (! error)
37025202Skarels tp->snd_nxt = tp->snd_lst;
37125202Skarels }
37225202Skarels
37325202Skarels #if T_DELACK > 0
37425202Skarels t_cancel(tp, TDELACK);
37525202Skarels tp->force_ack = FALSE;
37625202Skarels tp->ack_skipped = 0;
37725202Skarels #endif
37825202Skarels tp->ack_due = FALSE;
37925202Skarels tp->snd_hi = SEQ_MAX(tp->snd_lst, tp->snd_hi);
38025202Skarels if (!error)
38125202Skarels return(TRUE);
38225202Skarels }
38325202Skarels
38425202Skarels /*
38525202Skarels * If ctl, make sure to send something so ACK gets through. Attempt
38625202Skarels * to reduce ACK traffic by delaying ACKs with no data slightly.
38725202Skarels * Naive ack traffic can account for about 10% of what the receiving
38825202Skarels * tcp is doing.
38925202Skarels *
39025202Skarels * Bidirectional connection (telnet) => ack piggy backs application's
39125202Skarels * response.
39225202Skarels *
39325202Skarels * Unidirectional connection (ftp) => advertise large enough window
39425202Skarels * so that either #skipped (tp->ack_skipped) or our estimate of what he
39525202Skarels  * thinks the window is causes an ack. The estimate assumes most packets get
39625202Skarels * through. This also assumes that the sender buffers enough to take
39725202Skarels * advantage of the estimated usable window, so we'll assume a minimum
39825202Skarels * send buffer provided by his operating system. (Remember, his OS has
39925202Skarels * to buffer it until we ack it.)
40025202Skarels *
40125202Skarels * So, test assumes his send buffer > MINTCPBUF bytes large
40225202Skarels * and his silly window algorithm cuts in at < 50% of window.
40325202Skarels *
40425202Skarels * Use of the fasttimeout facility is a possibility.
40525202Skarels */
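	/*
	 * Concretely, when delayed acks are configured (T_DELACK > 0), the
	 * test below acks immediately if an ack has been explicitly forced,
	 * if max_ack_skipped acks in a row have been skipped, or if the
	 * unacknowledged receive backlog exceeds the smaller of MINTCPBUF
	 * and half the receive window; otherwise the ack waits on the
	 * TDELACK timer.
	 */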
40625202Skarels if (ctl == TCP_CTL)
40725202Skarels {
40825202Skarels #if T_DELACK > 0
40925202Skarels if (tp->force_ack ||
41025202Skarels (tp->ack_skipped >= max_ack_skipped) ||
41125202Skarels ((tp->rcv_nxt - tp->lastack) > MIN(MINTCPBUF, tp->rcv_wnd>>1)))
41225202Skarels {
41325202Skarels (void) send_pkt(tp, 0, 0);
41425202Skarels t_cancel(tp, TDELACK);
41525202Skarels tp->force_ack = FALSE;
41625202Skarels tp->ack_skipped = 0;
41725202Skarels tp->ack_due = FALSE;
41825202Skarels tcpstat.t_ackonly ++;
41925202Skarels }
42025202Skarels else
42125202Skarels {
42225202Skarels tp->ack_skipped ++;
42325202Skarels if (tp->t_timers[TDELACK] == 0)
42425202Skarels tp->t_timers[TDELACK] = T_DELACK;
42525202Skarels }
42625202Skarels #else
42725202Skarels (void) send_pkt(tp, 0, 0);
42825202Skarels tp->ack_due = FALSE;
42925202Skarels tcpstat.t_ackonly ++;
43025202Skarels #endif
43125202Skarels }
43225202Skarels return(FALSE);
43325202Skarels }
43425202Skarels
43525202Skarels /*
43625202Skarels * Process incoming ACKs. Remove data from send queue up to acknowledgement.
43725202Skarels * Also handles round-trip timer for retransmissions and acknowledgement of
43825202Skarels * SYN, and clears the urgent flag if required.
43925202Skarels */
44025202Skarels
44125202Skarels #ifdef BBNPING
44225202Skarels #define BBNPING_RESET(inp, len) \
44325202Skarels if (len > 0){ \
44425202Skarels /* \
44525202Skarels * We've sent him NEW data, perhaps by a gateway, that he \
44625202Skarels * has successfully received. If that's the case, then \
44725202Skarels * we know the route works and we don't have to ping that \
44825202Skarels * gateway. \
44925202Skarels * \
45025202Skarels * see check_ping() \
45125202Skarels */ \
45225202Skarels register struct rtentry *rt; \
45325202Skarels \
45425202Skarels if (rt = inp->inp_route.ro_rt) \
45525202Skarels if (rt->rt_flags & RTF_GATEWAY) \
45625202Skarels rt->irt_pings = (-1); \
45725202Skarels }
45825202Skarels #else
45925202Skarels #define BBNPING_RESET(x,y) /* */
46025202Skarels #endif
46125202Skarels
46225202Skarels #ifdef MBUF_DEBUG
46325202Skarels #define LENCHECK \
46425202Skarels if ((len > sosnd->sb_cc) || (len < 0)){ \
46525202Skarels printf("len %d sb_cc %d flags 0x%x state %d\n", \
46625202Skarels len, sosnd->sb_cc, n->t_flags, tp->t_state); \
46725202Skarels if (len < 0) \
46825202Skarels len = 0; \
46925202Skarels else \
47025202Skarels len = sosnd->sb_cc; \
47125202Skarels }
47225202Skarels #else
47325202Skarels #define LENCHECK /* */
47425202Skarels #endif
47525202Skarels
47625202Skarels #define smooth(tp) (((75*(tp)->t_timers[TXMT]) + (125*(tp)->t_srtt)) / 200)
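/*
 * smooth() folds the latest round-trip measurement (t_timers[TXMT]) into the
 * running estimate: new srtt = (75*sample + 125*srtt) / 200, i.e. 3/8 new
 * sample and 5/8 old estimate.  For example, with srtt == 40 ticks and a new
 * sample of 20 ticks, the result is (75*20 + 125*40) / 200 == 32.
 */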
47725202Skarels
47825202Skarels #define RCV_ACK(tp, n) \
47925202Skarels { \
48025202Skarels register struct inpcb *inp; \
48125202Skarels register struct sockbuf *sosnd; \
48225202Skarels register len; \
48325202Skarels \
48425202Skarels inp = tp->t_in_pcb; \
48525202Skarels sosnd = &inp->inp_socket->so_snd; \
48625202Skarels len = n->t_ackno - tp->snd_una; \
48725202Skarels \
48825202Skarels tp->snd_una = n->t_ackno; \
48925202Skarels if (SEQ_GT(tp->snd_una, tp->snd_nxt)) \
49025202Skarels tp->snd_nxt = tp->snd_una; \
49125202Skarels \
49225202Skarels /* \
49325202Skarels * if urgent data has been acked, reset urgent flag \
49425202Skarels */ \
49525202Skarels \
49625202Skarels if (tp->snd_urg && SEQ_GEQ(tp->snd_una, tp->snd_urp)) \
49725202Skarels tp->snd_urg = FALSE; \
49825202Skarels \
49925202Skarels if (tp->syn_acked) { \
50025202Skarels 	/* if the timed byte has been acknowledged, use the measured time to \
50125202Skarels 	   update the smoothed round trip time: an exponentially \
50225202Skarels 	   weighted average, 5/8 old estimate and 3/8 new sample */ \
50325202Skarels \
50425202Skarels if (SEQ_GEQ(tp->snd_una, tp->t_xmt_val)) { \
50525202Skarels if (tp->t_srtt == 0) \
50625202Skarels tp->t_srtt = tp->t_timers[TXMT]; \
50725202Skarels else \
50825202Skarels tp->t_srtt = smooth(tp); \
50925202Skarels tp->t_rxmitime = MIN(TCP_tvRXMAX, \
51025202Skarels MAX(TCP_tvRXMIN, (3*tp->t_srtt)/2)); \
51125202Skarels } \
51225202Skarels } else { \
51325202Skarels /* handle ack of opening syn (tell user) */ \
51425202Skarels \
51525202Skarels if (SEQ_GT(tp->snd_una, tp->iss)) { \
51625202Skarels tp->syn_acked = TRUE; \
51725202Skarels len--; /* ignore SYN */ \
51825202Skarels t_cancel(tp, TINIT); /* cancel init timer */ \
51925202Skarels } \
52025202Skarels } \
52125202Skarels \
52225202Skarels /* remove acknowledged data from send buff */ \
52325202Skarels if (ack_fin(tp, n)) \
52425202Skarels len --; \
52525202Skarels LENCHECK \
52625202Skarels sbdrop (sosnd, len); \
52725202Skarels BBNPING_RESET(inp, len) \
52825202Skarels sbwakeup (sosnd); /* wakeup iff > x% of buffering avail? */ \
52925202Skarels \
53025202Skarels /* handle ack of closing fin */ \
53125202Skarels \
53225202Skarels if (SEQ_NEQ(tp->seq_fin, tp->iss) && SEQ_GT(tp->snd_una, tp->seq_fin)) \
53325202Skarels tp->snd_fin = FALSE; \
53425202Skarels t_cancel(tp, TREXMT); /* cancel retransmit timer */ \
53525202Skarels t_cancel(tp, TREXMTTL); /* cancel retransmit too long timer */ \
53625202Skarels tp->cancelled = TRUE; \
53725202Skarels }
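/*
 * RCV_ACK is a macro rather than a function, presumably to avoid a procedure
 * call on every acknowledged segment; it is expanded exactly once, from
 * rcv_tcp() below.
 */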
53825202Skarels
53925202Skarels
54025202Skarels /*
54125202Skarels * Process incoming segments
54225202Skarels */
54325202Skarels rcv_tcp(tp, n, ctl)
54425202Skarels register struct tcpcb *tp;
54525202Skarels register struct th *n;
54625202Skarels int ctl;
54725202Skarels {
54825202Skarels int sentsomedata;
54925202Skarels
55025202Skarels tp->dropped_txt = FALSE;
55125202Skarels tp->ack_due = FALSE;
55225202Skarels tp->new_window = FALSE;
55325202Skarels /*
55425202Skarels * Process SYN
55525202Skarels */
55625202Skarels if (!tp->syn_rcvd && n->t_flags&T_SYN)
55725202Skarels {
55825202Skarels tp->snd_wl = tp->rcv_urp = tp->irs = n->t_seq;
55925202Skarels tp->rcv_urpend = tp->rcv_urp -1;
56025202Skarels tp->rcv_nxt = n->t_seq + 1;
56125202Skarels tp->syn_rcvd = TRUE;
56225202Skarels tp->ack_due = TRUE;
56325202Skarels }
56425202Skarels
56525202Skarels if (tp->syn_rcvd)
56625202Skarels {
56725202Skarels /*
56825202Skarels * Process ACK if data not already acked previously. (Take
56925202Skarels * ACKed data off send queue, and reset rexmt timers).
57025202Skarels */
57125202Skarels if (n->t_flags&T_ACK && SEQ_GT(n->t_ackno, tp->snd_una))
57225202Skarels RCV_ACK(tp, n)
57325202Skarels
57425202Skarels /*
57525202Skarels * Check for new window. rcv_ack did not change syn_rcvd.
57625202Skarels */
57725202Skarels if (SEQ_GEQ(n->t_seq, tp->snd_wl))
57825202Skarels {
57925202Skarels tp->snd_wl = n->t_seq;
58025202Skarels tp->snd_wnd = n->t_win;
58125202Skarels tp->new_window = TRUE;
58225202Skarels t_cancel(tp, TPERSIST); /* cancel persist timer */
58325202Skarels }
58425202Skarels }
58525202Skarels
58625202Skarels /*
58725202Skarels * For data packets only (vs. ctl), process data and URG.
58825202Skarels */
58925202Skarels if (ctl == TCP_DATA)
59025202Skarels {
59125202Skarels /*
59225202Skarels * Remember how much urgent data for present_data
59325202Skarels */
59425202Skarels if (n->t_flags & T_URG)
59525202Skarels {
59625202Skarels /*
59725202Skarels 			 * if last <= urpend, then this is a retransmission
59825202Skarels * bytes [n->t_seq ... last] are urgent
59925202Skarels */
60025202Skarels register sequence last;
60125202Skarels
60225202Skarels last = n->t_seq + n->t_urp;
60325202Skarels if (SEQ_GT(last, tp->rcv_urpend))
60425202Skarels {
60525202Skarels /*
60625202Skarels * Can only remember one contiguous region.
60725202Skarels */
60825202Skarels if (SEQ_GT(n->t_seq, tp->rcv_urpend+1))
60925202Skarels {
61025202Skarels struct socket *so;
61125202Skarels
61225202Skarels tp->rcv_urp = n->t_seq;
61325202Skarels if (tp->oob_data)
61425202Skarels {
61525202Skarels m_freem(tp->oob_data);
61625202Skarels tp->oob_data = NULL;
61725202Skarels }
61825202Skarels
61925202Skarels so = tp->t_in_pcb->inp_socket;
62025202Skarels so->so_oobmark = so->so_rcv.sb_cc +
62125202Skarels (tp->rcv_urp-tp->rcv_nxt);
62225202Skarels if (so->so_oobmark == 0)
62325202Skarels so->so_state |= SS_RCVATMARK;
62425202Skarels }
62525202Skarels tp->rcv_urpend = last;
62625202Skarels }
62725202Skarels }
62825202Skarels
62925202Skarels if (n->t_len != 0)
63025202Skarels rcv_text(tp, n); /* accept and sequence data */
63125202Skarels
63225202Skarels /*
63325202Skarels * Delay extraction of out-of-band data until
63425202Skarels * present_data() so don't have to worry about
63525202Skarels * duplication...
63625202Skarels */
63725202Skarels
63825202Skarels #ifdef bsd41
63925202Skarels /*
64025202Skarels * Process PUSH, mark end of data chain.
64125202Skarels *
64225202Skarels * Not done in 4.2. TCP is a byte stream, without record
64325202Skarels 		 * boundaries, so don't have to mark for sbappend(), which
64425202Skarels * preserves marks, and soreceive(), which terminates reads
64525202Skarels * at marks. Data IS pushed nevertheless since soreceive
64625202Skarels * gives the user all that is available and returns.
64725202Skarels */
64825202Skarels if (n->t_flags&T_PUSH && !tp->dropped_txt &&
64925202Skarels tp->t_rcv_prev != (struct th *)tp)
65025202Skarels {
65125202Skarels
65225202Skarels /* Find last mbuf on received data chain and mark */
65325202Skarels
65425202Skarels m = dtom(tp->t_rcv_prev);
65525202Skarels if (m != NULL)
65625202Skarels {
65725202Skarels while (m->m_next != NULL)
65825202Skarels m = m->m_next;
65925202Skarels m->m_act = (struct mbuf *) 1;
66025202Skarels }
66125202Skarels }
66225202Skarels #endif
66325202Skarels }
66425202Skarels /*
66525202Skarels * Process FIN, check for duplicates and make sure all data is in.
66625202Skarels */
66725202Skarels if (n->t_flags&T_FIN && !tp->dropped_txt)
66825202Skarels {
66925202Skarels if (tp->fin_rcvd)
67025202Skarels tp->ack_due = TRUE;
67125202Skarels else
67225202Skarels {
67325202Skarels /*
67425202Skarels * Check if we really have FIN
67525202Skarels * (rcv buf filled in, no drops)
67625202Skarels */
67725202Skarels register sequence last;
67825202Skarels
67925202Skarels last = firstempty(tp);
68025202Skarels if ((tp->t_rcv_prev == (struct th *)tp &&
68125202Skarels SEQ_EQ(last, t_end(n)+1)) ||
68225202Skarels SEQ_EQ(last, t_end(tp->t_rcv_prev)+1))
68325202Skarels {
68425202Skarels tp->fin_rcvd = TRUE;
68525202Skarels uwake(tp->t_in_pcb);
68625202Skarels }
68725202Skarels /*
68825202Skarels * If FIN, then set to ACK: incr rcv_nxt, since FIN
68925202Skarels * occupies sequence space
69025202Skarels */
69125202Skarels if (tp->fin_rcvd && SEQ_GEQ(tp->rcv_nxt, last))
69225202Skarels {
69325202Skarels tp->rcv_nxt = last + 1;
69425202Skarels tp->ack_due = TRUE;
69525202Skarels }
69625202Skarels }
69725202Skarels }
69825202Skarels /*
69925202Skarels * If ACK required or rcv window has changed, try to send something.
70025202Skarels */
70125202Skarels sentsomedata = FALSE;
70225202Skarels if (tp->ack_due)
70325202Skarels sentsomedata = send_tcp(tp, TCP_CTL);
70425202Skarels else if (tp->new_window)
70525202Skarels sentsomedata = send_tcp(tp, TCP_DATA);
70625202Skarels /*
70725202Skarels * tp->cancelled => retransmit, rttl timers are now zero
70825202Skarels *
70925202Skarels 	 * If we didn't send any data, the retransmit and rttl timers may not
71025202Skarels 	 * be running. If we still have unACKed data and we turned off
71125202Skarels 	 * the timers above, then ensure the timers are running.
71225202Skarels */
71325202Skarels if (!sentsomedata && is_unacked(tp) && tp->cancelled)
71425202Skarels {
71525202Skarels tp->t_timers[TREXMT] = tp->t_rxmitime;
71625202Skarels tp->t_timers[TREXMTTL] = tp->t_rttltimeo;
71725202Skarels tp->cancelled = FALSE;
71825202Skarels }
71925202Skarels }
72025202Skarels
72125202Skarels #undef BBNPING_RESET
72225202Skarels #undef LENCHECK
72325202Skarels
72425202Skarels /*
72525202Skarels * Process incoming data. Put the segments on sequencing queue in order,
72625202Skarels * taking care of overlaps and duplicates. Data is removed from sequence
72725202Skarels * queue by present_data when sequence is complete (no holes at top).
72825202Skarels * Drop data that falls outside buffer quota if tight for space. Otherwise,
72925202Skarels * process and recycle data held in tcp_input.
73025202Skarels */
73125202Skarels rcv_text(tp, t)
73225202Skarels register struct tcpcb *tp;
73325202Skarels register struct th *t;
73425202Skarels {
73525202Skarels register i;
73625202Skarels register struct sockbuf *sorcv;
73725202Skarels register struct mbuf *m;
73825202Skarels register struct th *q;
73925202Skarels struct th *p;
74025202Skarels struct mbuf *n;
74125202Skarels struct th *savq;
74225202Skarels int j, oldkeep;
74325202Skarels sequence last;
74425202Skarels
74525202Skarels /* throw away any data we have already received */
74625202Skarels
74725202Skarels if ((i = tp->rcv_nxt - t->t_seq) > 0)
74825202Skarels {
74925202Skarels if (i < t->t_len)
75025202Skarels {
75125202Skarels t->t_seq += i;
75225202Skarels t->t_len -= i;
75325202Skarels m_adj(dtom(t), i);
75425202Skarels }
75525202Skarels else
75625202Skarels {
75725202Skarels tp->t_olddata++;
75825202Skarels tp->ack_due = TRUE; /* send ack just in case */
75925202Skarels #ifdef HMPTRAPS
76025202Skarels /* hmp_trap(T_TCP_DUP, (caddr_t)0,0); */
76125202Skarels #endif
76225202Skarels return;
76325202Skarels }
76425202Skarels }
76525202Skarels
76625202Skarels last = t_end(t); /* last seq # in incoming seg */
76725202Skarels
76825202Skarels /* # buffers available to con */
76925202Skarels
77025202Skarels sorcv = &tp->t_in_pcb->inp_socket->so_rcv;
77125202Skarels i = sbspace(sorcv);
77225202Skarels if (i < 0)
77325202Skarels i = 0;
77425202Skarels
77525202Skarels /* enough resources to process segment? used to walk mbuf chain to
77625202Skarels * count up data bytes. let's be smart and use t_len */
77725202Skarels
77825202Skarels j = t->t_len;
77925202Skarels if (j > i)
78025202Skarels {
78125202Skarels
78225202Skarels 		/* if segment precedes top of sequencing queue, try to take
78325202Skarels buffers from bottom of queue */
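		/*
		 * That is: the new segment fills in (part of) the hole at the
		 * front of the out-of-order queue, so it is worth freeing
		 * fragments from the tail of the queue; the peer will have to
		 * retransmit whatever is dropped here anyway.
		 */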
78425202Skarels
78525202Skarels q = tp->t_rcv_next;
78625202Skarels if (q != (struct th *)tp && SEQ_LT(tp->rcv_nxt, q->t_seq) &&
78725202Skarels SEQ_LT(t->t_seq, q->t_seq))
78825202Skarels
78925202Skarels for (p = tp->t_rcv_prev; i < j && p != (struct th *)tp;)
79025202Skarels {
79125202Skarels savq = p->t_prev;
79225202Skarels TCP_DEQ(p, tp);
79325202Skarels #ifdef HMPTRAPS
79425202Skarels /* hmp_trap(T_TCP_UDROP, (caddr_t)0,0); */
79525202Skarels #endif
79625202Skarels for (m = dtom(p); m != NULL; m = m_free(m))
79725202Skarels i += m->m_len;
79825202Skarels p = savq;
79925202Skarels }
80025202Skarels
80125202Skarels /* if still not enough room, drop text from end of new segment */
80225202Skarels
80325202Skarels if (j > i)
80425202Skarels {
80525202Skarels
80625202Skarels for (m = dtom(t); i > 0 && m != NULL; m = m->m_next)
80725202Skarels i -= m->m_len;
80825202Skarels
80925202Skarels while (m != NULL)
81025202Skarels {
81125202Skarels t->t_len -= m->m_len;
81225202Skarels last -= m->m_len;
81325202Skarels m->m_len = 0;
81425202Skarels m = m->m_next;
81525202Skarels }
81625202Skarels tp->dropped_txt = TRUE;
81725202Skarels #ifdef HMPTRAPS
81825202Skarels /* hmp_trap(T_TCP_RDROP, (caddr_t)0,0); */
81925202Skarels #endif
82025202Skarels if (SEQ_LT(last, t->t_seq))
82125202Skarels return;
82225202Skarels }
82325202Skarels }
82425202Skarels
82525202Skarels /* merge incoming data into the sequence queue */
82625202Skarels
82725202Skarels q = tp->t_rcv_next; /* -> top of sequencing queue */
82825202Skarels
82925202Skarels 	/* skip frags that end before the new segment begins (no overlap) */
83025202Skarels
83125202Skarels while ((q != (struct th *)tp) && SEQ_GT(t->t_seq, t_end(q)))
83225202Skarels q = q->t_next;
83325202Skarels
83425202Skarels if (q == (struct th *)tp)
83525202Skarels { /* frag at end of chain */
83625202Skarels
83725202Skarels if (SEQ_GEQ(last, tp->rcv_nxt))
83825202Skarels {
83925202Skarels tcp_net_keep = TRUE;
84025202Skarels TCP_ENQ(t, tp->t_rcv_prev, tp);
84125202Skarels }
84225202Skarels
84325202Skarels }
84425202Skarels else
84525202Skarels {
84625202Skarels
84725202Skarels #ifdef HMPTRAPS
84825202Skarels /* we've received an out-of-order packet: trap! */
84925202Skarels
85025202Skarels /* hmp_trap(T_TCP_ORDER, (caddr_t)0,0); */
85125202Skarels
85225202Skarels #endif
85325202Skarels /* frag doesn't overlap any on chain */
85425202Skarels
85525202Skarels if (SEQ_LT(last, q->t_seq))
85625202Skarels {
85725202Skarels tcp_net_keep = TRUE;
85825202Skarels TCP_ENQ(t, q->t_prev, tp);
85925202Skarels
86025202Skarels /* new overlaps beginning of next frag only */
86125202Skarels
86225202Skarels }
86325202Skarels else if (SEQ_LT(last, t_end(q)))
86425202Skarels {
86525202Skarels if ((i = last - q->t_seq + 1) < t->t_len)
86625202Skarels {
86725202Skarels t->t_len -= i;
86825202Skarels m_adj(dtom(t), -i);
86925202Skarels tcp_net_keep = TRUE;
87025202Skarels TCP_ENQ(t, q->t_prev, tp);
87125202Skarels }
87225202Skarels
87325202Skarels /* new overlaps end of previous frag */
87425202Skarels
87525202Skarels }
87625202Skarels else
87725202Skarels {
87825202Skarels savq = q;
87925202Skarels if (SEQ_LEQ(t->t_seq, q->t_seq))
88025202Skarels { /* complete cover */
88125202Skarels savq = q->t_prev;
88225202Skarels TCP_DEQ(q, tp);
88325202Skarels m_freem(dtom(q));
88425202Skarels
88525202Skarels }
88625202Skarels else
88725202Skarels { /* overlap */
88825202Skarels if ((i = t_end(q) - t->t_seq + 1) < t->t_len)
88925202Skarels {
89025202Skarels t->t_seq += i;
89125202Skarels t->t_len -= i;
89225202Skarels m_adj(dtom(t), i);
89325202Skarels }
89425202Skarels else
89525202Skarels t->t_len = 0;
89625202Skarels }
89725202Skarels
89825202Skarels /* new overlaps at beginning of successor frags */
89925202Skarels
90025202Skarels q = savq->t_next;
90125202Skarels while ((q != (struct th *)tp) && (t->t_len != 0) &&
90225202Skarels SEQ_LEQ(q->t_seq, last))
90325202Skarels
90425202Skarels /* complete cover */
90525202Skarels
90625202Skarels if (SEQ_LEQ(t_end(q), last))
90725202Skarels {
90825202Skarels p = q->t_next;
90925202Skarels TCP_DEQ(q, tp);
91025202Skarels m_freem(dtom(q));
91125202Skarels q = p;
91225202Skarels }
91325202Skarels else
91425202Skarels { /* overlap */
91525202Skarels if ((i = last-q->t_seq+1) < t->t_len)
91625202Skarels {
91725202Skarels t->t_len -= i;
91825202Skarels m_adj(dtom(t), -i);
91925202Skarels }
92025202Skarels else
92125202Skarels t->t_len = 0;
92225202Skarels break;
92325202Skarels }
92425202Skarels
92525202Skarels /* enqueue whatever is left of new before successors */
92625202Skarels
92725202Skarels if (t->t_len != 0)
92825202Skarels {
92925202Skarels tcp_net_keep = TRUE;
93025202Skarels TCP_ENQ(t, savq, tp);
93125202Skarels }
93225202Skarels }
93325202Skarels }
93425202Skarels
93525202Skarels /* set to ack completed data (no gaps) */
93625202Skarels
93725202Skarels FIRSTEMPTY(tp, tp->rcv_nxt);
93825202Skarels tp->ack_due = TRUE;
93925202Skarels
94025202Skarels /* if any room remaining in rcv buf, take any unprocessed
94125202Skarels messages and schedule for later processing */
94225202Skarels
94325202Skarels if ((m = tp->t_rcv_unack) != NULL && (i = sbspace(sorcv)) > 0)
94425202Skarels do
94525202Skarels {
94625202Skarels
94725202Skarels /* schedule work request */
94825202Skarels
94925202Skarels t = mtod(m, struct th *);
95025202Skarels j = (t->t_off << TCP_OFFSHIFT) + sizeof(struct ip);
95125202Skarels m->m_off += j;
95225202Skarels m->m_len -= j;
95325202Skarels tp->t_rcv_unack = m->m_act;
95425202Skarels m->m_act = (struct mbuf *)0;
95525202Skarels oldkeep = tcp_net_keep;
95625202Skarels tcpstat.t_unack++;
95725202Skarels w_alloc(INRECV, 0, tp, t);
95825202Skarels tcp_net_keep = oldkeep;
95925202Skarels
96025202Skarels /* remaining buffer space */
96125202Skarels
96225202Skarels for (n = m; n != NULL; n = n->m_next)
96325202Skarels i -= n->m_len;
96425202Skarels }
96525202Skarels while ((m = tp->t_rcv_unack) != NULL && i > 0);
96625202Skarels }
96725202Skarels
96825202Skarels /*
96925202Skarels * Send a reset segment
97025202Skarels */
97125202Skarels send_rst(tp, n)
97225202Skarels register struct tcpcb *tp;
97325202Skarels register struct th *n;
97425202Skarels {
97525202Skarels register struct inpcb *inp;
97625202Skarels struct in_addr src, dst;
97725202Skarels u_short port;
97825202Skarels int temp_rst;
97925202Skarels
98025202Skarels /* don't send a reset in response to a reset */
98125202Skarels
98225202Skarels if (n->t_flags&T_RST || (inp = tp->t_in_pcb) == NULL)
98325202Skarels return;
98425202Skarels
98525202Skarels tp->snd_rst = TRUE;
98625202Skarels temp_rst = FALSE;
98725202Skarels if (n->t_flags&T_ACK)
98825202Skarels tp->snd_nxt = n->t_ackno;
98925202Skarels
99025202Skarels /* if reset required from "wildcard" listener, take addresses and
99125202Skarels port from incoming packet */
99225202Skarels
99325202Skarels if (inp->inp_laddr.s_addr == 0 || inp->inp_faddr.s_addr == 0 ||
99425202Skarels inp->inp_fport == 0)
99525202Skarels {
99625202Skarels src = inp->inp_laddr;
99725202Skarels dst = inp->inp_faddr;
99825202Skarels port = inp->inp_fport;
99925202Skarels inp->inp_laddr = n->t_d;
100025202Skarels inp->inp_faddr = n->t_s;
100125202Skarels inp->inp_fport = n->t_src;
100225202Skarels tp->t_template = tcp_template(tp);
100325202Skarels temp_rst = TRUE;
100425202Skarels }
100525202Skarels tp->syn_rcvd = FALSE;
100625202Skarels if (tp->t_template)
100725202Skarels (void) send_pkt(tp, 0, 0);
100825202Skarels else
100925202Skarels printf("send_rst: no template\n");
101025202Skarels tp->ack_due = FALSE;
101125202Skarels tp->snd_rst = FALSE;
101225202Skarels #if T_DELACK > 0
101325202Skarels tp->force_ack = FALSE;
101425202Skarels t_cancel(tp, TDELACK);
101525202Skarels tp->ack_skipped = 0;
101625202Skarels #endif
101725202Skarels
101825202Skarels /* restore "wildcard" addresses */
101925202Skarels
102025202Skarels if (temp_rst)
102125202Skarels {
102225202Skarels inp->inp_laddr = src;
102325202Skarels inp->inp_faddr = dst;
102425202Skarels inp->inp_fport = port;
102525202Skarels tp->snd_nxt = tp->iss;
102625202Skarels if (inp->inp_route.ro_rt != NULL)
102725202Skarels {
102825202Skarels rtfree(inp->inp_route.ro_rt);
102925202Skarels inp->inp_route.ro_rt = NULL;
103025202Skarels }
103125202Skarels if (tp->t_template)
103225202Skarels {
103325202Skarels m_free(dtom(tp->t_template));
103425202Skarels tp->t_template = NULL;
103525202Skarels }
103625202Skarels }
103725202Skarels }
103825202Skarels
103925202Skarels struct mbuf *extract_oob(tp, mp, sorcv)
104025202Skarels struct tcpcb *tp;
104125202Skarels struct mbuf *mp;
104225202Skarels struct sockbuf *sorcv;
104325202Skarels {
104425202Skarels struct socket *so;
104525202Skarels struct mbuf *top, *here, *m;
104625202Skarels int off, len, tmp;
104725202Skarels
104825202Skarels m = mp;
104925202Skarels so = tp->t_in_pcb->inp_socket;
105025202Skarels /*
105125202Skarels 	 * skip over bytes that precede the out-of-band data.
105225202Skarels */
105325202Skarels if ((off = so->so_oobmark - sorcv->sb_cc) < 0)
105425202Skarels {
1055*25210Skarels log(LOG_INFO, "extract_oob: neg off\n");
105625202Skarels tp->rcv_urpend = tp->rcv_urp = tp->irs;
105725202Skarels return (mp);
105825202Skarels }
105925202Skarels
106025202Skarels while (m && (off > 0))
106125202Skarels {
106225202Skarels if (m->m_len <= off)
106325202Skarels {
106425202Skarels off -= m->m_len;
106525202Skarels m = m->m_next;
106625202Skarels }
106725202Skarels else
106825202Skarels break;
106925202Skarels }
107025202Skarels
107125202Skarels if (!m)
107225202Skarels return (mp);
107325202Skarels
107425202Skarels /*
107525202Skarels 	 * copy out-of-band data, removing it from the input stream.
107625202Skarels */
107725202Skarels len = tp->rcv_urpend - tp->rcv_urp + 1; /* # urgent bytes */
107825202Skarels top = here = NULL;
107925202Skarels while (m && (len > 0))
108025202Skarels {
108125202Skarels char *p;
108225202Skarels struct mbuf *newm;
108325202Skarels int dropped;
108425202Skarels
108525202Skarels tmp = MIN(m->m_len - off, len);
108625202Skarels /* tmp == # urgent bytes in this mbuf */
108725202Skarels len -= tmp;
108825202Skarels tp->rcv_urp += tmp;
108925202Skarels
109025202Skarels p = mtod(m, caddr_t) + off; /* points at first urgent byte */
109125202Skarels dropped = FALSE;
109225202Skarels
109325202Skarels while (tmp > 0)
109425202Skarels {
109525202Skarels unsigned nbytes;
109625202Skarels
109725202Skarels /* in case this mbuf uses pages */
109825202Skarels nbytes = MIN(tmp, MLEN);
109925202Skarels
110025202Skarels if (! dropped)
110125202Skarels {
110225202Skarels if (newm = m_get(M_WAIT, MT_DATA))
110325202Skarels {
110425202Skarels bcopy (p, mtod(newm, char *), nbytes);
110525202Skarels newm->m_len = nbytes;
110625202Skarels
110725202Skarels if (!top)
110825202Skarels top = here = newm;
110925202Skarels else
111025202Skarels {
111125202Skarels here->m_next = newm;
111225202Skarels here = here->m_next;
111325202Skarels }
111425202Skarels }
111525202Skarels else
111625202Skarels /* potential unreliability */
111725202Skarels dropped = TRUE;
111825202Skarels }
111925202Skarels
112025202Skarels bcopy(p+nbytes, p, (unsigned)(m->m_len -off -nbytes));
112125202Skarels m->m_len -= nbytes;
112225202Skarels tmp -= nbytes;
112325202Skarels }
112425202Skarels
112525202Skarels if (m->m_len <= 0)
112625202Skarels {
112725202Skarels /*
112825202Skarels * So soreceive never sees a zero length mbuf
112925202Skarels * with m_act set. (PUSHED URGENT data packet)
113025202Skarels */
113125202Skarels if (m == mp)
113225202Skarels mp = m = m_free(m);
113325202Skarels else
113425202Skarels m = m_free(m);
113525202Skarels }
113625202Skarels else
113725202Skarels m = m->m_next;
113825202Skarels
113925202Skarels off = 0;
114025202Skarels }
114125202Skarels
114225202Skarels if (top)
114325202Skarels {
114425202Skarels if (tp->oob_data)
114525202Skarels m_cat (tp->oob_data, top);
114625202Skarels else
114725202Skarels tp->oob_data = top;
114825202Skarels sohasoutofband(so);
114925202Skarels }
115025202Skarels
115125202Skarels return (mp);
115225202Skarels }
115325202Skarels
115425202Skarels /*
115525202Skarels  * Accept data for the user to receive. Moves the data of sequenced tcp
115625202Skarels  * segments from the sequencing queue to the user's receive queue (in the
115725202Skarels * ucb). Observes locking on receive queue.
115825202Skarels */
115925202Skarels present_data(tp)
116025202Skarels register struct tcpcb *tp;
116125202Skarels {
116225202Skarels PRESENT_DATA(tp)
116325202Skarels }