/*	$NetBSD: kttcp.c,v 1.18 2006/05/18 18:45:48 mrg Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden and Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kttcp.c --
 *
 *	This module provides kernel support for testing network
 *	throughput from the perspective of the kernel.  It is
 *	similar in spirit to the classic ttcp network benchmark
 *	program, the main difference being that with kttcp, the
 *	kernel is the source and sink of the data.
 *
 *	Testing like this is useful for a few reasons:
 *
 *	1. This allows us to know what kind of performance we can
 *	   expect from network applications that run in the kernel
 *	   space, such as the NFS server or the NFS client.  These
 *	   applications don't have to move the data to/from userspace,
 *	   and so benchmark programs which run in userspace don't
 *	   give us an accurate model.
 *
 *	2. Since data received is just thrown away, the receiver
 *	   is very fast.  This can provide better exercise for the
 *	   sender at the other end.
 *
 *	3. Since the NetBSD kernel currently uses a run-to-completion
 *	   scheduling model, kttcp provides a benchmark model where
 *	   preemption of the benchmark program is not an issue.
 */
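
/*
 * For illustration, a minimal userland sketch of how the ioctl interface
 * below might be driven.  This is not part of the driver: the device
 * path "/dev/kttcp" and the surrounding error handling are assumptions,
 * while the kttcp_io_args fields (kio_socket, kio_totalsize,
 * kio_bytesdone, kio_elapsed) and the KTTCP_IO_SEND command match the
 * ones used in this file.  kio_socket carries the descriptor of an
 * already-connected socket.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/kttcpio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void
 *	kttcp_send_test(int tcp_sock, unsigned long long nbytes)
 *	{
 *		struct kttcp_io_args kio;
 *		int kfd;
 *
 *		kfd = open("/dev/kttcp", O_RDWR);
 *		if (kfd == -1)
 *			err(1, "open /dev/kttcp");
 *
 *		memset(&kio, 0, sizeof(kio));
 *		kio.kio_socket = tcp_sock;
 *		kio.kio_totalsize = nbytes;
 *		if (ioctl(kfd, KTTCP_IO_SEND, &kio) == -1)
 *			err(1, "KTTCP_IO_SEND");
 *
 *		printf("%llu bytes in %lld.%06ld seconds\n",
 *		    kio.kio_bytesdone,
 *		    (long long)kio.kio_elapsed.tv_sec,
 *		    (long)kio.kio_elapsed.tv_usec);
 *		close(kfd);
 *	}
 */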

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kttcp.c,v 1.18 2006/05/18 18:45:48 mrg Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signal.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/sa.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/kttcpio.h>

static int kttcp_send(struct lwp *l, struct kttcp_io_args *);
static int kttcp_recv(struct lwp *l, struct kttcp_io_args *);
static int kttcp_sosend(struct socket *, unsigned long long,
			unsigned long long *, struct lwp *, int);
static int kttcp_soreceive(struct socket *, unsigned long long,
			   unsigned long long *, struct lwp *, int *);

void	kttcpattach(int);

dev_type_ioctl(kttcpioctl);

const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

void
kttcpattach(int count)
{
	/* Do nothing. */
}

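/*
 * Dispatch KTTCP_IO_SEND and KTTCP_IO_RECV requests made on the kttcp
 * device; the device must have been opened for writing.
 */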
int
kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	int error;

	if ((flag & FWRITE) == 0)
		return EPERM;

	switch (cmd) {
	case KTTCP_IO_SEND:
		error = kttcp_send(l, (struct kttcp_io_args *) data);
		break;

	case KTTCP_IO_RECV:
		error = kttcp_recv(l, (struct kttcp_io_args *) data);
		break;

	default:
		return EINVAL;
	}

	return error;
}

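/*
 * kttcp_send: transmit kio->kio_totalsize bytes of kernel-generated data
 * on the socket named by kio->kio_socket, then record the elapsed time
 * and the number of bytes actually sent back into *kio.
 */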
static int
kttcp_send(struct lwp *l, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, l);
		return EFTYPE;
	}

	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_sosend((struct socket *)fp->f_data, len,
		    &done, l, 0);
		len -= done;
	} while (error == 0 && len > 0);

	FILE_UNUSE(fp, l);

	microtime(&t1);
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

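/*
 * kttcp_recv: receive up to kio->kio_totalsize bytes from the socket
 * named by kio->kio_socket, discarding the data as it arrives, then
 * record the elapsed time and byte count back into *kio.
 */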
static int
kttcp_recv(struct lwp *l, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	done = 0;	/* XXX gcc */

	if (kio->kio_totalsize > KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, l);
		return EBADF;
	}
	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_soreceive((struct socket *)fp->f_data,
		    len, &done, l, NULL);
		len -= done;
	} while (error == 0 && len > 0 && done > 0);

	FILE_UNUSE(fp, l);

	microtime(&t1);
	if (error == EPIPE)
		error = 0;
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Slightly changed version of sosend(): there is no uio to copy data in
 * from, so the mbufs are handed to the protocol containing whatever the
 * allocator left in them.
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
	     unsigned long long *done, struct lwp *l, int flags)
{
	struct mbuf **mp, *m, *top;
	long space, len, mlen;
	int error, s, dontroute, atomic;
	long long resid;

	atomic = sosendallatonce(so);
	resid = slen;
	top = NULL;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	/* WRS XXX - are we doing per-lwp or per-proc stats? */
	l->l_proc->p_stats->p_ru.ru_msgsnd++;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		s = splsoftnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			SBLASTRECORDCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			SBLASTMBUFCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		do {
			do {
				if (top == 0) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = lmin(MCLBYTES, resid);
#else
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
#endif
					space -= len;
				} else {
nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				resid -= len;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			s = splsoftnet();

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, NULL, NULL, l);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			splx(s);

			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	if (top)
		m_freem(top);
	*done = slen - resid;
#if 0
	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}

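/*
 * Slightly changed version of soreceive(): there is no uio to copy the
 * received data out to, so the data is simply dropped (the mbufs are
 * freed) once it has been counted.
 */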
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
		unsigned long long *done, struct lwp *l, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset, moff, type;
	long long orig_resid, resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, NULL);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, NULL);

 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_proc->p_stats->p_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise the data is simply discarded (there is no
		 * uio to copy it out to) and the mbufs are freed.
		 * The sockbuf must be consistent (sb_mb points to the
		 * current mbuf, which points to the next record) when
		 * we drop priority; we must note any additions to the
		 * sockbuf when we block interrupts again.
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination occurs.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
				    (struct mbuf *)(long)flags, NULL, NULL);
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, NULL);
	}
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	splx(s);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}