xref: /netbsd-src/sys/dev/kttcp.c (revision b8c616269f5ebf18ab2e35cb8099d683130a177c)
1 /*	$NetBSD: kttcp.c,v 1.7 2003/01/18 09:45:08 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Frank van der Linden and Jason R. Thorpe for
8  * Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed for the NetBSD Project by
21  *	Wasabi Systems, Inc.
22  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23  *    or promote products derived from this software without specific prior
24  *    written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * kttcp.c --
41  *
42  *	This module provides kernel support for testing network
43  *	throughput from the perspective of the kernel.  It is
44  *	similar in spirit to the classic ttcp network benchmark
45  *	program, the main difference being that with kttcp, the
46  *	kernel is the source and sink of the data.
47  *
48  *	Testing like this is useful for a few reasons:
49  *
50  *	1. This allows us to know what kind of performance we can
51  *	   expect from network applications that run in the kernel
52  *	   space, such as the NFS server or the NFS client.  These
53  *	   applications don't have to move the data to/from userspace,
54  *	   and so benchmark programs which run in userspace don't
55  *	   give us an accurate model.
56  *
57  *	2. Since data received is just thrown away, the receiver
58  *	   is very fast.  This can provide better exercise for the
59  *	   sender at the other end.
60  *
61  *	3. Since the NetBSD kernel currently uses a run-to-completion
62  *	   scheduling model, kttcp provides a benchmark model where
63  *	   preemption of the benchmark program is not an issue.
64  */
65 
66 #include <sys/param.h>
67 #include <sys/types.h>
68 #include <sys/ioctl.h>
69 #include <sys/file.h>
70 #include <sys/filedesc.h>
71 #include <sys/conf.h>
72 #include <sys/systm.h>
73 #include <sys/protosw.h>
74 #include <sys/proc.h>
75 #include <sys/resourcevar.h>
76 #include <sys/signal.h>
77 #include <sys/socketvar.h>
78 #include <sys/socket.h>
79 #include <sys/mbuf.h>
80 #include <sys/sa.h>
81 #include <sys/mount.h>
82 #include <sys/syscallargs.h>
83 
84 #include <dev/kttcpio.h>
85 
86 static int kttcp_send(struct proc *p, struct kttcp_io_args *);
87 static int kttcp_recv(struct proc *p, struct kttcp_io_args *);
88 static int kttcp_sosend(struct socket *, unsigned long long,
89 			unsigned long long *, struct proc *, int);
90 static int kttcp_soreceive(struct socket *, unsigned long long,
91 			   unsigned long long *, struct proc *, int *);
92 
93 void	kttcpattach(int);
94 
95 dev_type_ioctl(kttcpioctl);
96 
/*
 * Character device switch for /dev/kttcp.  The device is ioctl-only:
 * open/close always succeed, reads/writes are rejected, and everything
 * else is stubbed out.
 */
const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};
101 
/*
 * kttcpattach --
 *	Pseudo-device attach routine.  kttcp keeps no per-instance
 *	state, so there is nothing to initialize here.
 */
void
kttcpattach(int count)
{
	/* Nothing to do. */
}
107 
108 int
109 kttcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
110 {
111 	int error;
112 
113 	if ((flag & FWRITE) == 0)
114 		return EPERM;
115 
116 	switch (cmd) {
117 	case KTTCP_IO_SEND:
118 		error = kttcp_send(p, (struct kttcp_io_args *) data);
119 		break;
120 
121 	case KTTCP_IO_RECV:
122 		error = kttcp_recv(p, (struct kttcp_io_args *) data);
123 		break;
124 
125 	default:
126 		return EINVAL;
127 	}
128 
129 	return error;
130 }
131 
132 static int
133 kttcp_send(struct proc *p, struct kttcp_io_args *kio)
134 {
135 	struct file *fp;
136 	int error;
137 	struct timeval t0, t1;
138 	unsigned long long len, done;
139 
140 	if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
141 		return EINVAL;
142 
143 	fp = fd_getfile(p->p_fd, kio->kio_socket);
144 	if (fp == NULL)
145 		return EBADF;
146 	if (fp->f_type != DTYPE_SOCKET)
147 		return EFTYPE;
148 
149 	len = kio->kio_totalsize;
150 	microtime(&t0);
151 	do {
152 		error = kttcp_sosend((struct socket *)fp->f_data, len,
153 		    &done, p, 0);
154 		len -= done;
155 	} while (error == 0 && len > 0);
156 	microtime(&t1);
157 	if (error != 0)
158 		return error;
159 	timersub(&t1, &t0, &kio->kio_elapsed);
160 
161 	kio->kio_bytesdone = kio->kio_totalsize - len;
162 
163 	return 0;
164 }
165 
166 static int
167 kttcp_recv(struct proc *p, struct kttcp_io_args *kio)
168 {
169 	struct file *fp;
170 	int error;
171 	struct timeval t0, t1;
172 	unsigned long long len, done;
173 
174 	if (kio->kio_totalsize > KTTCP_MAX_XMIT)
175 		return EINVAL;
176 
177 	fp = fd_getfile(p->p_fd, kio->kio_socket);
178 	if (fp == NULL || fp->f_type != DTYPE_SOCKET)
179 		return EBADF;
180 	len = kio->kio_totalsize;
181 	microtime(&t0);
182 	do {
183 		error = kttcp_soreceive((struct socket *)fp->f_data,
184 		    len, &done, p, NULL);
185 		len -= done;
186 	} while (error == 0 && len > 0 && done > 0);
187 	microtime(&t1);
188 	if (error == EPIPE)
189 		error = 0;
190 	if (error != 0)
191 		return error;
192 	timersub(&t1, &t0, &kio->kio_elapsed);
193 
194 	kio->kio_bytesdone = kio->kio_totalsize - len;
195 
196 	return 0;
197 }
198 
/* Map MSG_DONTWAIT onto the non-blocking flavor of sblock(). */
#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
200 
201 /*
202  * Slightly changed version of sosend()
203  */
204 static int
205 kttcp_sosend(struct socket *so, unsigned long long slen,
206 	     unsigned long long *done, struct proc *p, int flags)
207 {
208 	struct mbuf **mp, *m, *top;
209 	long space, len, mlen;
210 	int error, s, dontroute, atomic;
211 	long long resid;
212 
213 	atomic = sosendallatonce(so);
214 	resid = slen;
215 	top = NULL;
216 	/*
217 	 * In theory resid should be unsigned.
218 	 * However, space must be signed, as it might be less than 0
219 	 * if we over-committed, and we must use a signed comparison
220 	 * of space and resid.  On the other hand, a negative resid
221 	 * causes us to loop sending 0-length segments to the protocol.
222 	 */
223 	if (resid < 0) {
224 		error = EINVAL;
225 		goto out;
226 	}
227 	dontroute =
228 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
229 	    (so->so_proto->pr_flags & PR_ATOMIC);
230 	p->p_stats->p_ru.ru_msgsnd++;
231 #define	snderr(errno)	{ error = errno; splx(s); goto release; }
232 
233  restart:
234 	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
235 		goto out;
236 	do {
237 		s = splsoftnet();
238 		if (so->so_state & SS_CANTSENDMORE)
239 			snderr(EPIPE);
240 		if (so->so_error) {
241 			error = so->so_error;
242 			so->so_error = 0;
243 			splx(s);
244 			goto release;
245 		}
246 		if ((so->so_state & SS_ISCONNECTED) == 0) {
247 			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
248 				if ((so->so_state & SS_ISCONFIRMING) == 0)
249 					snderr(ENOTCONN);
250 			} else
251 				snderr(EDESTADDRREQ);
252 		}
253 		space = sbspace(&so->so_snd);
254 		if (flags & MSG_OOB)
255 			space += 1024;
256 		if ((atomic && resid > so->so_snd.sb_hiwat))
257 			snderr(EMSGSIZE);
258 		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
259 			if (so->so_state & SS_NBIO)
260 				snderr(EWOULDBLOCK);
261 			SBLASTRECORDCHK(&so->so_rcv,
262 			    "kttcp_soreceive sbwait 1");
263 			SBLASTMBUFCHK(&so->so_rcv,
264 			    "kttcp_soreceive sbwait 1");
265 			sbunlock(&so->so_snd);
266 			error = sbwait(&so->so_snd);
267 			splx(s);
268 			if (error)
269 				goto out;
270 			goto restart;
271 		}
272 		splx(s);
273 		mp = &top;
274 		do {
275 			do {
276 				if (top == 0) {
277 					MGETHDR(m, M_WAIT, MT_DATA);
278 					mlen = MHLEN;
279 					m->m_pkthdr.len = 0;
280 					m->m_pkthdr.rcvif = (struct ifnet *)0;
281 				} else {
282 					MGET(m, M_WAIT, MT_DATA);
283 					mlen = MLEN;
284 				}
285 				if (resid >= MINCLSIZE && space >= MCLBYTES) {
286 					MCLGET(m, M_WAIT);
287 					if ((m->m_flags & M_EXT) == 0)
288 						goto nopages;
289 					mlen = MCLBYTES;
290 #ifdef	MAPPED_MBUFS
291 					len = lmin(MCLBYTES, resid);
292 #else
293 					if (atomic && top == 0) {
294 						len = lmin(MCLBYTES - max_hdr,
295 						    resid);
296 						m->m_data += max_hdr;
297 					} else
298 						len = lmin(MCLBYTES, resid);
299 #endif
300 					space -= len;
301 				} else {
302 nopages:
303 					len = lmin(lmin(mlen, resid), space);
304 					space -= len;
305 					/*
306 					 * For datagram protocols, leave room
307 					 * for protocol headers in first mbuf.
308 					 */
309 					if (atomic && top == 0 && len < mlen)
310 						MH_ALIGN(m, len);
311 				}
312 				resid -= len;
313 				m->m_len = len;
314 				*mp = m;
315 				top->m_pkthdr.len += len;
316 				if (error)
317 					goto release;
318 				mp = &m->m_next;
319 				if (resid <= 0) {
320 					if (flags & MSG_EOR)
321 						top->m_flags |= M_EOR;
322 					break;
323 				}
324 			} while (space > 0 && atomic);
325 
326 			s = splsoftnet();
327 
328 			if (so->so_state & SS_CANTSENDMORE)
329 				snderr(EPIPE);
330 
331 			if (dontroute)
332 				so->so_options |= SO_DONTROUTE;
333 			if (resid > 0)
334 				so->so_state |= SS_MORETOCOME;
335 			error = (*so->so_proto->pr_usrreq)(so,
336 			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
337 			    top, NULL, NULL, p);
338 			if (dontroute)
339 				so->so_options &= ~SO_DONTROUTE;
340 			if (resid > 0)
341 				so->so_state &= ~SS_MORETOCOME;
342 			splx(s);
343 
344 			top = 0;
345 			mp = &top;
346 			if (error)
347 				goto release;
348 		} while (resid && space > 0);
349 	} while (resid);
350 
351  release:
352 	sbunlock(&so->so_snd);
353  out:
354 	if (top)
355 		m_freem(top);
356 	*done = slen - resid;
357 #if 0
358 	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
359 #endif
360 	return (error);
361 }
362 
363 static int
364 kttcp_soreceive(struct socket *so, unsigned long long slen,
365 		unsigned long long *done, struct proc *p, int *flagsp)
366 {
367 	struct mbuf *m, **mp;
368 	int flags, len, error, s, offset, moff, type;
369 	long long orig_resid, resid;
370 	struct protosw	*pr;
371 	struct mbuf *nextrecord;
372 
373 	pr = so->so_proto;
374 	mp = NULL;
375 	type = 0;
376 	resid = orig_resid = slen;
377 	if (flagsp)
378 		flags = *flagsp &~ MSG_EOR;
379 	else
380  		flags = 0;
381 	if (flags & MSG_OOB) {
382 		m = m_get(M_WAIT, MT_DATA);
383 		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
384 		    (struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0,
385 		    (struct proc *)0);
386 		if (error)
387 			goto bad;
388 		do {
389 			resid -= min(resid, m->m_len);
390 			m = m_free(m);
391 		} while (resid && error == 0 && m);
392  bad:
393 		if (m)
394 			m_freem(m);
395 		return (error);
396 	}
397 	if (mp)
398 		*mp = (struct mbuf *)0;
399 	if (so->so_state & SS_ISCONFIRMING && resid)
400 		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
401 		    (struct mbuf *)0, (struct mbuf *)0, (struct proc *)0);
402 
403  restart:
404 	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
405 		return (error);
406 	s = splsoftnet();
407 
408 	m = so->so_rcv.sb_mb;
409 	/*
410 	 * If we have less data than requested, block awaiting more
411 	 * (subject to any timeout) if:
412 	 *   1. the current count is less than the low water mark,
413 	 *   2. MSG_WAITALL is set, and it is possible to do the entire
414 	 *	receive operation at once if we block (resid <= hiwat), or
415 	 *   3. MSG_DONTWAIT is not set.
416 	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
417 	 * we have to do the receive in sections, and thus risk returning
418 	 * a short count if a timeout or signal occurs after we start.
419 	 */
420 	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
421 	    so->so_rcv.sb_cc < resid) &&
422 	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
423 	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
424 	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
425 #ifdef DIAGNOSTIC
426 		if (m == 0 && so->so_rcv.sb_cc)
427 			panic("receive 1");
428 #endif
429 		if (so->so_error) {
430 			if (m)
431 				goto dontblock;
432 			error = so->so_error;
433 			if ((flags & MSG_PEEK) == 0)
434 				so->so_error = 0;
435 			goto release;
436 		}
437 		if (so->so_state & SS_CANTRCVMORE) {
438 			if (m)
439 				goto dontblock;
440 			else
441 				goto release;
442 		}
443 		for (; m; m = m->m_next)
444 			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
445 				m = so->so_rcv.sb_mb;
446 				goto dontblock;
447 			}
448 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
449 		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
450 			error = ENOTCONN;
451 			goto release;
452 		}
453 		if (resid == 0)
454 			goto release;
455 		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
456 			error = EWOULDBLOCK;
457 			goto release;
458 		}
459 		sbunlock(&so->so_rcv);
460 		error = sbwait(&so->so_rcv);
461 		splx(s);
462 		if (error)
463 			return (error);
464 		goto restart;
465 	}
466  dontblock:
467 	/*
468 	 * On entry here, m points to the first record of the socket buffer.
469 	 * While we process the initial mbufs containing address and control
470 	 * info, we save a copy of m->m_nextpkt into nextrecord.
471 	 */
472 #ifdef notyet /* XXXX */
473 	if (uio->uio_procp)
474 		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
475 #endif
476 	KASSERT(m == so->so_rcv.sb_mb);
477 	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
478 	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
479 	nextrecord = m->m_nextpkt;
480 	if (pr->pr_flags & PR_ADDR) {
481 #ifdef DIAGNOSTIC
482 		if (m->m_type != MT_SONAME)
483 			panic("receive 1a");
484 #endif
485 		orig_resid = 0;
486 		if (flags & MSG_PEEK) {
487 			m = m->m_next;
488 		} else {
489 			sbfree(&so->so_rcv, m);
490 			MFREE(m, so->so_rcv.sb_mb);
491 			m = so->so_rcv.sb_mb;
492 		}
493 	}
494 	while (m && m->m_type == MT_CONTROL && error == 0) {
495 		if (flags & MSG_PEEK) {
496 			m = m->m_next;
497 		} else {
498 			sbfree(&so->so_rcv, m);
499 			MFREE(m, so->so_rcv.sb_mb);
500 			m = so->so_rcv.sb_mb;
501 		}
502 	}
503 
504 	/*
505 	 * If m is non-NULL, we have some data to read.  From now on,
506 	 * make sure to keep sb_lastrecord consistent when working on
507 	 * the last packet on the chain (nextrecord == NULL) and we
508 	 * change m->m_nextpkt.
509 	 */
510 	if (m) {
511 		if ((flags & MSG_PEEK) == 0) {
512 			m->m_nextpkt = nextrecord;
513 			/*
514 			 * If nextrecord == NULL (this is a single chain),
515 			 * then sb_lastrecord may not be valid here if m
516 			 * was changed earlier.
517 			 */
518 			if (nextrecord == NULL) {
519 				KASSERT(so->so_rcv.sb_mb == m);
520 				so->so_rcv.sb_lastrecord = m;
521 			}
522 		}
523 		type = m->m_type;
524 		if (type == MT_OOBDATA)
525 			flags |= MSG_OOB;
526 	} else {
527 		if ((flags & MSG_PEEK) == 0) {
528 			KASSERT(so->so_rcv.sb_mb == m);
529 			so->so_rcv.sb_mb = nextrecord;
530 			SB_EMPTY_FIXUP(&so->so_rcv);
531 		}
532 	}
533 	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
534 	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");
535 
536 	moff = 0;
537 	offset = 0;
538 	while (m && resid > 0 && error == 0) {
539 		if (m->m_type == MT_OOBDATA) {
540 			if (type != MT_OOBDATA)
541 				break;
542 		} else if (type == MT_OOBDATA)
543 			break;
544 #ifdef DIAGNOSTIC
545 		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
546 			panic("receive 3");
547 #endif
548 		so->so_state &= ~SS_RCVATMARK;
549 		len = resid;
550 		if (so->so_oobmark && len > so->so_oobmark - offset)
551 			len = so->so_oobmark - offset;
552 		if (len > m->m_len - moff)
553 			len = m->m_len - moff;
554 		/*
555 		 * If mp is set, just pass back the mbufs.
556 		 * Otherwise copy them out via the uio, then free.
557 		 * Sockbuf must be consistent here (points to current mbuf,
558 		 * it points to next record) when we drop priority;
559 		 * we must note any additions to the sockbuf when we
560 		 * block interrupts again.
561 		 */
562 		resid -= len;
563 		if (len == m->m_len - moff) {
564 			if (m->m_flags & M_EOR)
565 				flags |= MSG_EOR;
566 			if (flags & MSG_PEEK) {
567 				m = m->m_next;
568 				moff = 0;
569 			} else {
570 				nextrecord = m->m_nextpkt;
571 				sbfree(&so->so_rcv, m);
572 				if (mp) {
573 					*mp = m;
574 					mp = &m->m_next;
575 					so->so_rcv.sb_mb = m = m->m_next;
576 					*mp = (struct mbuf *)0;
577 				} else {
578 					MFREE(m, so->so_rcv.sb_mb);
579 					m = so->so_rcv.sb_mb;
580 				}
581 				/*
582 				 * If m != NULL, we also know that
583 				 * so->so_rcv.sb_mb != NULL.
584 				 */
585 				KASSERT(so->so_rcv.sb_mb == m);
586 				if (m) {
587 					m->m_nextpkt = nextrecord;
588 					if (nextrecord == NULL)
589 						so->so_rcv.sb_lastrecord = m;
590 				} else {
591 					so->so_rcv.sb_mb = nextrecord;
592 					SB_EMPTY_FIXUP(&so->so_rcv);
593 				}
594 				SBLASTRECORDCHK(&so->so_rcv,
595 				    "kttcp_soreceive 3");
596 				SBLASTMBUFCHK(&so->so_rcv,
597 				    "kttcp_soreceive 3");
598 			}
599 		} else {
600 			if (flags & MSG_PEEK)
601 				moff += len;
602 			else {
603 				if (mp)
604 					*mp = m_copym(m, 0, len, M_WAIT);
605 				m->m_data += len;
606 				m->m_len -= len;
607 				so->so_rcv.sb_cc -= len;
608 			}
609 		}
610 		if (so->so_oobmark) {
611 			if ((flags & MSG_PEEK) == 0) {
612 				so->so_oobmark -= len;
613 				if (so->so_oobmark == 0) {
614 					so->so_state |= SS_RCVATMARK;
615 					break;
616 				}
617 			} else {
618 				offset += len;
619 				if (offset == so->so_oobmark)
620 					break;
621 			}
622 		}
623 		if (flags & MSG_EOR)
624 			break;
625 		/*
626 		 * If the MSG_WAITALL flag is set (for non-atomic socket),
627 		 * we must not quit until "uio->uio_resid == 0" or an error
628 		 * termination.  If a signal/timeout occurs, return
629 		 * with a short count but without error.
630 		 * Keep sockbuf locked against other readers.
631 		 */
632 		while (flags & MSG_WAITALL && m == 0 && resid > 0 &&
633 		    !sosendallatonce(so) && !nextrecord) {
634 			if (so->so_error || so->so_state & SS_CANTRCVMORE)
635 				break;
636 			/*
637 			 * If we are peeking and the socket receive buffer is
638 			 * full, stop since we can't get more data to peek at.
639 			 */
640 			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
641 				break;
642 			/*
643 			 * If we've drained the socket buffer, tell the
644 			 * protocol in case it needs to do something to
645 			 * get it filled again.
646 			 */
647 			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
648 				(*pr->pr_usrreq)(so, PRU_RCVD,
649 				    (struct mbuf *)0,
650 				    (struct mbuf *)(long)flags,
651 				    (struct mbuf *)0,
652 				    (struct proc *)0);
653 			SBLASTRECORDCHK(&so->so_rcv,
654 			    "kttcp_soreceive sbwait 2");
655 			SBLASTMBUFCHK(&so->so_rcv,
656 			    "kttcp_soreceive sbwait 2");
657 			error = sbwait(&so->so_rcv);
658 			if (error) {
659 				sbunlock(&so->so_rcv);
660 				splx(s);
661 				return (0);
662 			}
663 			if ((m = so->so_rcv.sb_mb) != NULL)
664 				nextrecord = m->m_nextpkt;
665 		}
666 	}
667 
668 	if (m && pr->pr_flags & PR_ATOMIC) {
669 		flags |= MSG_TRUNC;
670 		if ((flags & MSG_PEEK) == 0)
671 			(void) sbdroprecord(&so->so_rcv);
672 	}
673 	if ((flags & MSG_PEEK) == 0) {
674 		if (m == 0) {
675 			/*
676 			 * First part is an SB_EMPTY_FIXUP().  Second part
677 			 * makes sure sb_lastrecord is up-to-date if
678 			 * there is still data in the socket buffer.
679 			 */
680 			so->so_rcv.sb_mb = nextrecord;
681 			if (so->so_rcv.sb_mb == NULL) {
682 				so->so_rcv.sb_mbtail = NULL;
683 				so->so_rcv.sb_lastrecord = NULL;
684 			} else if (nextrecord->m_nextpkt == NULL)
685 				so->so_rcv.sb_lastrecord = nextrecord;
686 		}
687 		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
688 		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
689 		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
690 			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
691 			    (struct mbuf *)(long)flags, (struct mbuf *)0,
692 			    (struct proc *)0);
693 	}
694 	if (orig_resid == resid && orig_resid &&
695 	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
696 		sbunlock(&so->so_rcv);
697 		splx(s);
698 		goto restart;
699 	}
700 
701 	if (flagsp)
702 		*flagsp |= flags;
703  release:
704 	sbunlock(&so->so_rcv);
705 	splx(s);
706 	*done = slen - resid;
707 #if 0
708 	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
709 #endif
710 	return (error);
711 }
712