/*	$NetBSD: kttcp.c,v 1.25 2008/02/06 21:57:54 ad Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden and Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * kttcp.c -- provides kernel support for network throughput testing,
 * see kttcp(4).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kttcp.c,v 1.25 2008/02/06 21:57:54 ad Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signal.h>
#include <sys/socketvar.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/kttcpio.h>

static int kttcp_send(struct lwp *l, struct kttcp_io_args *);
static int kttcp_recv(struct lwp *l, struct kttcp_io_args *);
static int kttcp_sosend(struct socket *, unsigned long long,
    unsigned long long *, struct lwp *, int);
static int kttcp_soreceive(struct socket *, unsigned long long,
    unsigned long long *, struct lwp *, int *);

void	kttcpattach(int);

dev_type_ioctl(kttcpioctl);

const struct cdevsw kttcp_cdevsw = {
	nullopen, nullclose, noread, nowrite, kttcpioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER
};

void
kttcpattach(int count)
{
	/* Do nothing. */
}

int
kttcpioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	int error;

	if ((flag & FWRITE) == 0)
		return EPERM;

	switch (cmd) {
	case KTTCP_IO_SEND:
		error = kttcp_send(l, (struct kttcp_io_args *) data);
		break;

	case KTTCP_IO_RECV:
		error = kttcp_recv(l, (struct kttcp_io_args *) data);
		break;

	default:
		return EINVAL;
	}

	return error;
}

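/*
 * Illustrative userland sketch of driving the ioctls above (not part of
 * the driver; it assumes the /dev/kttcp node described in kttcp(4), a
 * connected TCP socket `tcp_fd', and omits error handling):
 *
 *	struct kttcp_io_args kio;
 *	int kttcp_fd;
 *
 *	memset(&kio, 0, sizeof(kio));
 *	kio.kio_socket = tcp_fd;
 *	kio.kio_totalsize = 16ULL * 1024 * 1024;
 *	kttcp_fd = open("/dev/kttcp", O_RDWR);
 *	if (ioctl(kttcp_fd, KTTCP_IO_SEND, &kio) == 0)
 *		printf("%llu bytes in %lld.%06ld s\n",
 *		    kio.kio_bytesdone,
 *		    (long long)kio.kio_elapsed.tv_sec,
 *		    (long)kio.kio_elapsed.tv_usec);
 */
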
static int
kttcp_send(struct lwp *l, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	if (kio->kio_totalsize >= KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, l);
		return EFTYPE;
	}

	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_sosend((struct socket *)fp->f_data, len,
		    &done, l, 0);
		len -= done;
	} while (error == 0 && len > 0);

	FILE_UNUSE(fp, l);

	microtime(&t1);
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

static int
kttcp_recv(struct lwp *l, struct kttcp_io_args *kio)
{
	struct file *fp;
	int error;
	struct timeval t0, t1;
	unsigned long long len, done;

	done = 0;	/* XXX gcc */

	if (kio->kio_totalsize > KTTCP_MAX_XMIT)
		return EINVAL;

	fp = fd_getfile(l->l_proc->p_fd, kio->kio_socket);
	if (fp == NULL)
		return EBADF;
	FILE_USE(fp);
	if (fp->f_type != DTYPE_SOCKET) {
		FILE_UNUSE(fp, l);
		return EFTYPE;
	}
	len = kio->kio_totalsize;
	microtime(&t0);
	do {
		error = kttcp_soreceive((struct socket *)fp->f_data,
		    len, &done, l, NULL);
		len -= done;
	} while (error == 0 && len > 0 && done > 0);

	FILE_UNUSE(fp, l);

	microtime(&t1);
	if (error == EPIPE)
		error = 0;
	if (error != 0)
		return error;
	timersub(&t1, &t0, &kio->kio_elapsed);

	kio->kio_bytesdone = kio->kio_totalsize - len;

	return 0;
}

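/*
 * Illustrative sketch (not part of the driver): given a struct
 * kttcp_io_args filled in by one of the routines above, a caller can
 * compute the ttcp-style throughput figure, e.g.
 *
 *	double secs = kio.kio_elapsed.tv_sec +
 *	    kio.kio_elapsed.tv_usec / 1e6;
 *	double mbit_per_sec = (kio.kio_bytesdone * 8.0) / (secs * 1e6);
 */
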
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Slightly changed version of sosend()
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct lwp *l, int flags)
{
	struct mbuf **mp, *m, *top;
	long space, len, mlen;
	int error, s, dontroute, atomic;
	long long resid;

	atomic = sosendallatonce(so);
	resid = slen;
	top = NULL;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	/* WRS XXX - are we doing per-lwp or per-proc stats? */
	l->l_proc->p_stats->p_ru.ru_msgsnd++;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		s = splsoftnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0)
					snderr(ENOTCONN);
			} else
				snderr(EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
			if (so->so_nbio)
				snderr(EWOULDBLOCK);
			SBLASTRECORDCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			SBLASTMBUFCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		do {
			do {
				if (top == 0) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = lmin(MCLBYTES, resid);
#else
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
#endif
					space -= len;
				} else {
 nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				resid -= len;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			s = splsoftnet();

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, NULL, NULL, l);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			splx(s);

			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	if (top)
		m_freem(top);
	*done = slen - resid;
#if 0
	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}

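/*
 * Slightly changed version of soreceive(): there is no uio and no copy
 * to user space; data taken off the receive buffer is simply dropped
 * after accounting, since only throughput is being measured.
 */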
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct lwp *l, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, s, offset, moff, type;
	long long orig_resid, resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, NULL);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, NULL);

 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	s = splsoftnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 * 1. the current count is less than the low water mark,
	 * 2. MSG_WAITALL is set, and it is possible to do the entire
	 *    receive operation at once if we block (resid <= hiwat), or
	 * 3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (so->so_nbio || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_proc->p_stats->p_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise discard the data and free the mbufs
		 * (there is no uio to copy them out to here).
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
580 */ 581 KASSERT(so->so_rcv.sb_mb == m); 582 if (m) { 583 m->m_nextpkt = nextrecord; 584 if (nextrecord == NULL) 585 so->so_rcv.sb_lastrecord = m; 586 } else { 587 so->so_rcv.sb_mb = nextrecord; 588 SB_EMPTY_FIXUP(&so->so_rcv); 589 } 590 SBLASTRECORDCHK(&so->so_rcv, 591 "kttcp_soreceive 3"); 592 SBLASTMBUFCHK(&so->so_rcv, 593 "kttcp_soreceive 3"); 594 } 595 } else { 596 if (flags & MSG_PEEK) 597 moff += len; 598 else { 599 if (mp) 600 *mp = m_copym(m, 0, len, M_WAIT); 601 m->m_data += len; 602 m->m_len -= len; 603 so->so_rcv.sb_cc -= len; 604 } 605 } 606 if (so->so_oobmark) { 607 if ((flags & MSG_PEEK) == 0) { 608 so->so_oobmark -= len; 609 if (so->so_oobmark == 0) { 610 so->so_state |= SS_RCVATMARK; 611 break; 612 } 613 } else { 614 offset += len; 615 if (offset == so->so_oobmark) 616 break; 617 } 618 } 619 if (flags & MSG_EOR) 620 break; 621 /* 622 * If the MSG_WAITALL flag is set (for non-atomic socket), 623 * we must not quit until "uio->uio_resid == 0" or an error 624 * termination. If a signal/timeout occurs, return 625 * with a short count but without error. 626 * Keep sockbuf locked against other readers. 627 */ 628 while (flags & MSG_WAITALL && m == NULL && resid > 0 && 629 !sosendallatonce(so) && !nextrecord) { 630 if (so->so_error || so->so_state & SS_CANTRCVMORE) 631 break; 632 /* 633 * If we are peeking and the socket receive buffer is 634 * full, stop since we can't get more data to peek at. 635 */ 636 if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0) 637 break; 638 /* 639 * If we've drained the socket buffer, tell the 640 * protocol in case it needs to do something to 641 * get it filled again. 642 */ 643 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 644 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, 645 (struct mbuf *)(long)flags, NULL, NULL); 646 SBLASTRECORDCHK(&so->so_rcv, 647 "kttcp_soreceive sbwait 2"); 648 SBLASTMBUFCHK(&so->so_rcv, 649 "kttcp_soreceive sbwait 2"); 650 error = sbwait(&so->so_rcv); 651 if (error) { 652 sbunlock(&so->so_rcv); 653 splx(s); 654 return (0); 655 } 656 if ((m = so->so_rcv.sb_mb) != NULL) 657 nextrecord = m->m_nextpkt; 658 } 659 } 660 661 if (m && pr->pr_flags & PR_ATOMIC) { 662 flags |= MSG_TRUNC; 663 if ((flags & MSG_PEEK) == 0) 664 (void) sbdroprecord(&so->so_rcv); 665 } 666 if ((flags & MSG_PEEK) == 0) { 667 if (m == NULL) { 668 /* 669 * First part is an SB_EMPTY_FIXUP(). Second part 670 * makes sure sb_lastrecord is up-to-date if 671 * there is still data in the socket buffer. 672 */ 673 so->so_rcv.sb_mb = nextrecord; 674 if (so->so_rcv.sb_mb == NULL) { 675 so->so_rcv.sb_mbtail = NULL; 676 so->so_rcv.sb_lastrecord = NULL; 677 } else if (nextrecord->m_nextpkt == NULL) 678 so->so_rcv.sb_lastrecord = nextrecord; 679 } 680 SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4"); 681 SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4"); 682 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 683 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, 684 (struct mbuf *)(long)flags, NULL, NULL); 685 } 686 if (orig_resid == resid && orig_resid && 687 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 688 sbunlock(&so->so_rcv); 689 splx(s); 690 goto restart; 691 } 692 693 if (flagsp) 694 *flagsp |= flags; 695 release: 696 sbunlock(&so->so_rcv); 697 splx(s); 698 *done = slen - resid; 699 #if 0 700 printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid); 701 #endif 702 return (error); 703 } 704