/*	$NetBSD: kttcp.c,v 1.42 2018/12/22 14:28:56 maxv Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden and Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kttcp.c -- provides kernel support for network throughput testing,
 *            see kttcp(4)
 */
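
/*
 * A minimal userland sketch of how this driver is driven (an illustration
 * only, not part of the kernel code; it assumes the usual /dev/kttcp device
 * node, an already-connected TCP socket descriptor "s", and omits error
 * handling).  The struct and ioctl numbers come from <dev/kttcpio.h>:
 *
 *	struct kttcp_io_args kio;
 *	int kfd;
 *
 *	memset(&kio, 0, sizeof(kio));
 *	kio.kio_socket = s;
 *	kio.kio_totalsize = 16ULL << 20;
 *
 *	kfd = open("/dev/kttcp", O_RDWR);
 *	(void)ioctl(kfd, KTTCP_IO_SEND, &kio);
 *
 * On return, kio.kio_bytesdone and kio.kio_elapsed report how much data
 * was moved and how long it took; the peer issues KTTCP_IO_RECV on its
 * own connected socket.
 */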

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kttcp.c,v 1.42 2018/12/22 14:28:56 maxv Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signal.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/kttcpio.h>

#include "ioconf.h"

static int kttcp_send(struct lwp *l, struct kttcp_io_args *);
static int kttcp_recv(struct lwp *l, struct kttcp_io_args *);
static int kttcp_sosend(struct socket *, unsigned long long,
			unsigned long long *, struct lwp *, int);
static int kttcp_soreceive(struct socket *, unsigned long long,
			   unsigned long long *, struct lwp *, int *);

dev_type_ioctl(kttcpioctl);

const struct cdevsw kttcp_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = kttcpioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

void
kttcpattach(int count)
{
	/* Do nothing. */
}

int
kttcpioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	int error;

	if ((flag & FWRITE) == 0)
		return EPERM;

	switch (cmd) {
	case KTTCP_IO_SEND:
		error = kttcp_send(l, (struct kttcp_io_args *) data);
		break;

	case KTTCP_IO_RECV:
		error = kttcp_recv(l, (struct kttcp_io_args *) data);
		break;

	default:
		return EINVAL;
	}

	return error;
}

static int
kttcp_send(struct lwp *l, struct kttcp_io_args *kio)
{
	struct socket *so;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
		return EINVAL;

	if ((error = fd_getsock(kio->kio_socket, &so)) != 0)
		return error;

	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_sosend(so, len, &done, l, 0);
		len -= done;
	} while (error == 0 && len > 0);

	fd_putfile(kio->kio_socket);

	microtime(&t1);
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

static int
kttcp_recv(struct lwp *l, struct kttcp_io_args *kio)
{
	struct socket *so;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	done = 0;	/* XXX gcc */

	if (kio->kio_totalsize > KTTCP_MAX_XMIT)
		return EINVAL;

	if ((error = fd_getsock(kio->kio_socket, &so)) != 0)
		return error;
	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_soreceive(so, len, &done, l, NULL);
		len -= done;
	} while (error == 0 && len > 0 && done > 0);

	fd_putfile(kio->kio_socket);

	microtime(&t1);
	if (error == EPIPE)
		error = 0;
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}
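
/*
 * A sketch (illustration only, not part of this driver) of how a caller
 * might turn the values filled in above into a throughput figure, given
 * that kio_elapsed is the struct timeval produced by timersub():
 *
 *	double secs = kio.kio_elapsed.tv_sec +
 *	    kio.kio_elapsed.tv_usec / 1e6;
 *	double mbit_per_sec = (kio.kio_bytesdone * 8.0) / (secs * 1e6);
 */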

#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Slightly changed version of sosend(): no data is copied in from
 * userland; mbufs are allocated and sized, but their contents are left
 * as-is, which is all a throughput test needs.
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
	     unsigned long long *done, struct lwp *l, int flags)
{
	struct mbuf **mp, *m, *top;
	long space, len, mlen;
	int error, dontroute, atomic;
	long long resid;

	atomic = sosendallatonce(so);
	resid = slen;
	top = NULL;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
#define	snderr(errno)	{ error = errno; goto release; }
	solock(so);
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				snderr(ENOTCONN);
			} else {
				snderr(EDESTADDRREQ);
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			SBLASTRECORDCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			SBLASTMBUFCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			sounlock(so);
			do {
				if (top == 0) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m_reset_rcvif(m);
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = lmin(MCLBYTES, resid);
#else
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
#endif
					space -= len;
				} else {
nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						m_align(m, len);
				}
				resid -= len;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			solock(so);

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			if (flags & MSG_OOB)
				error = (*so->so_proto->pr_usrreqs->pr_sendoob)(so,
				    top, NULL);
			else
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    top, NULL, NULL, l);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	sounlock(so);
	if (top)
		m_freem(top);
	*done = slen - resid;
#if 0
	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}

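/*
 * Slightly changed version of soreceive(): there is no uio to copy the
 * data into, so received mbufs are simply consumed and their lengths
 * accounted, which is all a throughput test needs.
 */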
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct lwp *l, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset, moff, type;
	long long orig_resid, resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			resid -= uimin(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	solock(so);
 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		return (error);
	}
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error) {
			sounlock(so);
			return (error);
		}
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			m = so->so_rcv.sb_mb = m_free(m);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			m = so->so_rcv.sb_mb = m_free(m);
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise the data is simply consumed in place and the
		 * mbufs freed; unlike soreceive() there is no uio to copy
		 * the data into.
		 * The sockbuf must be consistent here (pointing to the
		 * current mbuf and to the next record) whenever we drop
		 * the socket lock; we must note any additions to the
		 * sockbuf when we reacquire it.
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp) {
					sounlock(so);
					*mp = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
				(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
			}
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
			(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
		}
	}
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}
694