/*
 * Copyright (c) 1982, 1986, 1989, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * sendfile(2) and related extensions:
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
 */

#include "opt_ktrace.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/kern_syscall.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/sfbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/mount.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/file2.h>
#include <sys/signalvar.h>
#include <sys/serialize.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>
#include <net/netmsg2.h>

#ifdef SCTP
#include <netinet/sctp_peeloff.h>
#endif /* SCTP */

extern int use_soaccept_pred_fast;
extern int use_sendfile_async;

/*
 * System call interface to the socket abstraction.
 */

extern struct fileops socketops;

/*
 * socket_args(int domain, int type, int protocol)
 */
int
kern_socket(int domain, int type, int protocol, int *res)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct socket *so;
	struct file *fp;
	int fd, error;

	KKASSERT(td->td_lwp);

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	error = socreate(domain, &so, type, protocol, td);
	if (error) {
		fsetfd(fdp, NULL, fd);
	} else {
		fp->f_type = DTYPE_SOCKET;
		fp->f_flag = FREAD | FWRITE;
		fp->f_ops = &socketops;
		fp->f_data = so;
		*res = fd;
		fsetfd(fdp, fp, fd);
	}
	fdrop(fp);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_socket(struct socket_args *uap)
{
	int error;

	error = kern_socket(uap->domain, uap->type, uap->protocol,
			    &uap->sysmsg_iresult);

	return (error);
}

int
kern_bind(int s, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = sobind((struct socket *)fp->f_data, sa, td);
	fdrop(fp);
	return (error);
}

/*
 * bind_args(int s, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_bind(struct bind_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_bind(uap->s, sa);
	kfree(sa, M_SONAME);

	return (error);
}

int
kern_listen(int s, int backlog)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = solisten((struct socket *)fp->f_data, backlog, td);
	fdrop(fp);
	return(error);
}

/*
 * listen_args(int s, int backlog)
 *
 * MPALMOSTSAFE
 */
int
sys_listen(struct listen_args *uap)
{
	int error;

	error = kern_listen(uap->s, uap->backlog);
	return (error);
}

/*
 * Returns the accepted socket as well.
 *
 * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs, the
 *	  pool token is absolutely required to avoid a sofree() race,
 *	  as well as to avoid tailq handling races.
 */
static boolean_t
soaccept_predicate(struct netmsg_so_notify *msg)
{
	struct socket *head = msg->base.nm_so;
	struct socket *so;

	if (head->so_error != 0) {
		msg->base.lmsg.ms_error = head->so_error;
		return (TRUE);
	}
	lwkt_getpooltoken(head);
	if (!TAILQ_EMPTY(&head->so_comp)) {
		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
		so = TAILQ_FIRST(&head->so_comp);
		TAILQ_REMOVE(&head->so_comp, so, so_list);
		head->so_qlen--;
		soclrstate(so, SS_COMP);
		so->so_head = NULL;
		soreference(so);

		lwkt_relpooltoken(head);

		msg->base.lmsg.ms_error = 0;
		msg->base.nm_so = so;
		return (TRUE);
	}
	lwkt_relpooltoken(head);
	if (head->so_state & SS_CANTRCVMORE) {
		msg->base.lmsg.ms_error = ECONNABORTED;
		return (TRUE);
	}
	if (msg->nm_fflags & FNONBLOCK) {
		msg->base.lmsg.ms_error = EWOULDBLOCK;
		return (TRUE);
	}

	return (FALSE);
}

/*
 * The second argument to kern_accept() is a handle to a struct sockaddr.
 * This allows kern_accept() to return a pointer to an allocated struct
 * sockaddr which must be freed later with kfree().  The caller must
 * initialize *name to NULL.
 */
int
kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *lfp = NULL;
	struct file *nfp = NULL;
	struct sockaddr *sa;
	struct socket *head, *so;
	struct netmsg_so_notify msg;
	int fd;
	u_int fflag;		/* type must match fp->f_flag */
	int error, tmp;

	*res = -1;
	if (name && namelen && *namelen < 0)
		return (EINVAL);

	error = holdsock(td->td_proc->p_fd, s, &lfp);
	if (error)
		return (error);

	error = falloc(td->td_lwp, &nfp, &fd);
	if (error) {		/* Probably ran out of file descriptors. */
		fdrop(lfp);
		return (error);
	}
	head = (struct socket *)lfp->f_data;
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}

	if (fflags & O_FBLOCKING)
		fflags |= lfp->f_flag & ~FNONBLOCK;
	else if (fflags & O_FNONBLOCKING)
		fflags |= lfp->f_flag | FNONBLOCK;
	else
		fflags = lfp->f_flag;

	if (use_soaccept_pred_fast) {
		boolean_t pred;

		/* Initialize necessary parts for soaccept_predicate() */
		netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
		msg.nm_fflags = fflags;

		lwkt_getpooltoken(head);
		pred = soaccept_predicate(&msg);
		lwkt_relpooltoken(head);

		if (pred) {
			error = msg.base.lmsg.ms_error;
			if (error)
				goto done;
			else
				goto accepted;
		}
	}

	/* optimize for uniprocessor case later XXX JH */
	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
			      0, netmsg_so_notify, netmsg_so_notify_doabort);
	msg.nm_predicate = soaccept_predicate;
	msg.nm_fflags = fflags;
	msg.nm_etype = NM_REVENT;
	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
	if (error)
		goto done;

accepted:
	/*
	 * At this point we have the connection that's ready to be accepted.
	 *
	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
	 *	 to eat the ref and turn it into a descriptor.
	 */
	so = msg.base.nm_so;

	fflag = lfp->f_flag;

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);

	if (head->so_sigio != NULL)
		fsetown(fgetown(&head->so_sigio), &so->so_sigio);

	nfp->f_type = DTYPE_SOCKET;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_data = so;
	/* Sync socket nonblocking/async state with file flags */
	tmp = fflag & FNONBLOCK;
	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
	tmp = fflag & FASYNC;
	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);

	sa = NULL;
	if (so->so_faddr != NULL) {
		sa = so->so_faddr;
		so->so_faddr = NULL;

		soaccept_generic(so);
		error = 0;
	} else {
		error = soaccept(so, &sa);
	}

	/*
	 * Set the returned name and namelen as applicable.  Set the returned
	 * namelen to 0 for older code which might ignore the return value
	 * from accept.
	 */
	if (error == 0) {
		if (sa && name && namelen) {
			if (*namelen > sa->sa_len)
				*namelen = sa->sa_len;
			*name = sa;
		} else {
			if (sa)
				kfree(sa, M_SONAME);
		}
	}

done:
	/*
	 * If an error occurred clear the reserved descriptor, else associate
	 * nfp with it.
	 *
	 * Note that *res is normally ignored if an error is returned but
	 * a syscall message will still have access to the result code.
	 */
	if (error) {
		fsetfd(fdp, NULL, fd);
	} else {
		*res = fd;
		fsetfd(fdp, nfp, fd);
	}
	fdrop(nfp);
	fdrop(lfp);
	return (error);
}

/*
 * accept(int s, caddr_t name, int *anamelen)
 *
 * MPALMOSTSAFE
 */
int
sys_accept(struct accept_args *uap)
{
	struct sockaddr *sa = NULL;
	int sa_len;
	int error;

	if (uap->name) {
		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
		if (error)
			return (error);

		error = kern_accept(uap->s, 0, &sa, &sa_len,
				    &uap->sysmsg_iresult);

		if (error == 0)
			error = copyout(sa, uap->name, sa_len);
		if (error == 0) {
			error = copyout(&sa_len, uap->anamelen,
					sizeof(*uap->anamelen));
		}
		if (sa)
			kfree(sa, M_SONAME);
	} else {
		error = kern_accept(uap->s, 0, NULL, 0,
				    &uap->sysmsg_iresult);
	}
	return (error);
}

/*
 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
 *
 * MPALMOSTSAFE
 */
int
sys_extaccept(struct extaccept_args *uap)
{
	struct sockaddr *sa = NULL;
	int sa_len;
	int error;
	int fflags = uap->flags & O_FMASK;

	if (uap->name) {
		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
		if (error)
			return (error);

		error = kern_accept(uap->s, fflags, &sa, &sa_len,
				    &uap->sysmsg_iresult);

		if (error == 0)
			error = copyout(sa, uap->name, sa_len);
		if (error == 0) {
			error = copyout(&sa_len, uap->anamelen,
					sizeof(*uap->anamelen));
		}
		if (sa)
			kfree(sa, M_SONAME);
	} else {
		error = kern_accept(uap->s, fflags, NULL, 0,
				    &uap->sysmsg_iresult);
	}
	return (error);
}

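/*
 * Illustrative userland use of extaccept(2); this sketch is not part of
 * the kernel build and assumes the userland prototype matches the
 * argument list documented above.  The per-call O_FBLOCKING or
 * O_FNONBLOCKING flag overrides the listen socket's own blocking mode
 * for just this operation:
 *
 *	struct sockaddr_storage ss;
 *	int slen = sizeof(ss);
 *	int fd;
 *
 *	fd = extaccept(ls, O_FNONBLOCKING, (struct sockaddr *)&ss, &slen);
 *	if (fd < 0 && errno == EWOULDBLOCK)
 *		... no completed connection yet, poll/kqueue and retry ...
 */
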
/*
 * Returns TRUE if predicate satisfied.
 */
static boolean_t
soconnected_predicate(struct netmsg_so_notify *msg)
{
	struct socket *so = msg->base.nm_so;

	/* check predicate */
	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
		msg->base.lmsg.ms_error = so->so_error;
		return (TRUE);
	}

	return (FALSE);
}

int
kern_connect(int s, int fflags, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	int error, interrupted = 0;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	so = (struct socket *)fp->f_data;

	if (fflags & O_FBLOCKING)
		/* fflags &= ~FNONBLOCK; */;
	else if (fflags & O_FNONBLOCKING)
		fflags |= FNONBLOCK;
	else
		fflags = fp->f_flag;

	if (so->so_state & SS_ISCONNECTING) {
		error = EALREADY;
		goto done;
	}
	error = soconnect(so, sa, td);
	if (error)
		goto bad;
	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		struct netmsg_so_notify msg;

		netmsg_init_abortable(&msg.base, so,
				      &curthread->td_msgport,
				      0,
				      netmsg_so_notify,
				      netmsg_so_notify_doabort);
		msg.nm_predicate = soconnected_predicate;
		msg.nm_etype = NM_REVENT;
		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
		if (error == EINTR || error == ERESTART)
			interrupted = 1;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
bad:
	if (!interrupted)
		soclrstate(so, SS_ISCONNECTING);
	if (error == ERESTART)
		error = EINTR;
done:
	fdrop(fp);
	return (error);
}

/*
 * connect_args(int s, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_connect(struct connect_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_connect(uap->s, 0, sa);
	kfree(sa, M_SONAME);

	return (error);
}

/*
 * extconnect_args(int s, int fflags, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_extconnect(struct extconnect_args *uap)
{
	struct sockaddr *sa;
	int error;
	int fflags = uap->flags & O_FMASK;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_connect(uap->s, fflags, sa);
	kfree(sa, M_SONAME);

	return (error);
}

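/*
 * Note on the nonblocking paths in kern_connect() above: with FNONBLOCK
 * in effect (via the file flags or O_FNONBLOCKING) the syscall returns
 * EINPROGRESS while the connection is still being established.  A
 * typical userland pattern (illustrative only, not part of this file)
 * is to wait for writability and then fetch the deferred result with
 * getsockopt(SO_ERROR):
 *
 *	if (connect(s, sa, salen) < 0 && errno == EINPROGRESS) {
 *		int soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		... wait for s to become writable via poll/kqueue ...
 *		getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len);
 *	}
 */
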
int
kern_socketpair(int domain, int type, int protocol, int *sv)
{
	struct thread *td = curthread;
	struct filedesc *fdp;
	struct file *fp1, *fp2;
	struct socket *so1, *so2;
	int fd1, fd2, error;

	fdp = td->td_proc->p_fd;
	error = socreate(domain, &so1, type, protocol, td);
	if (error)
		return (error);
	error = socreate(domain, &so2, type, protocol, td);
	if (error)
		goto free1;
	error = falloc(td->td_lwp, &fp1, &fd1);
	if (error)
		goto free2;
	sv[0] = fd1;
	fp1->f_data = so1;
	error = falloc(td->td_lwp, &fp2, &fd2);
	if (error)
		goto free3;
	fp2->f_data = so2;
	sv[1] = fd2;
	error = soconnect2(so1, so2);
	if (error)
		goto free4;
	if (type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
		if (error)
			goto free4;
	}
	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
	fp1->f_ops = fp2->f_ops = &socketops;
	fsetfd(fdp, fp1, fd1);
	fsetfd(fdp, fp2, fd2);
	fdrop(fp1);
	fdrop(fp2);
	return (error);
free4:
	fsetfd(fdp, NULL, fd2);
	fdrop(fp2);
free3:
	fsetfd(fdp, NULL, fd1);
	fdrop(fp1);
free2:
	(void)soclose(so2, 0);
free1:
	(void)soclose(so1, 0);
	return (error);
}

/*
 * socketpair(int domain, int type, int protocol, int *rsv)
 */
int
sys_socketpair(struct socketpair_args *uap)
{
	int error, sockv[2];

	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);

	if (error == 0) {
		error = copyout(sockv, uap->rsv, sizeof(sockv));

		if (error != 0) {
			kern_close(sockv[0]);
			kern_close(sockv[1]);
		}
	}

	return (error);
}

int
kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
	     struct mbuf *control, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	struct file *fp;
	size_t len;
	int error;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	so = (struct socket *)fp->f_data;
	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
		if (fp->f_flag & FNONBLOCK)
			flags |= MSG_FNONBLOCKING;
	}
	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE && !(flags & MSG_NOSIGNAL))
			lwpsignal(p, lp, SIGPIPE);
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;
	fdrop(fp);
	return (error);
}

/*
 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
 *
 * MPALMOSTSAFE
 */
int
sys_sendto(struct sendto_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	struct sockaddr *sa = NULL;
	int error;

	if (uap->to) {
		error = getsockaddr(&sa, uap->to, uap->tolen);
		if (error)
			return (error);
	}
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
			     &uap->sysmsg_szresult);

	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * sendmsg_args(int s, caddr_t msg, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_sendmsg(struct sendmsg_args *uap)
{
	struct thread *td = curthread;
	struct msghdr msg;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct sockaddr *sa = NULL;
	struct mbuf *control = NULL;
	int error;

	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
	if (error)
		return (error);

	/*
	 * Conditionally copyin msg.msg_name.
	 */
	if (msg.msg_name) {
		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
		if (error)
			return (error);
	}

	/*
	 * Populate auio.
	 */
	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
			     &auio.uio_resid);
	if (error)
		goto cleanup2;
	auio.uio_iov = iov;
	auio.uio_iovcnt = msg.msg_iovlen;
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/*
	 * Conditionally copyin msg.msg_control.
	 */
	if (msg.msg_control) {
		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
		    msg.msg_controllen > MLEN) {
			error = EINVAL;
			goto cleanup;
		}
		control = m_get(MB_WAIT, MT_CONTROL);
		if (control == NULL) {
			error = ENOBUFS;
			goto cleanup;
		}
		control->m_len = msg.msg_controllen;
		error = copyin(msg.msg_control, mtod(control, caddr_t),
			       msg.msg_controllen);
		if (error) {
			m_free(control);
			goto cleanup;
		}
	}

	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
			     &uap->sysmsg_szresult);

cleanup:
	iovec_free(&iov, aiov);
cleanup2:
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
 * Don't forget to kfree() and m_freem() these if they are returned.
 */
int
kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
	     struct mbuf **control, int *flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	size_t len;
	int error;
	int lflags;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy(auio->uio_iov, ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	so = (struct socket *)fp->f_data;

	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
		if (fp->f_flag & FNONBLOCK) {
			if (flags) {
				*flags |= MSG_FNONBLOCKING;
			} else {
				lflags = MSG_FNONBLOCKING;
				flags = &lflags;
			}
		}
	}

	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;
	fdrop(fp);
	return (error);
}

/*
 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
 *		 caddr_t from, int *fromlenaddr)
 *
 * MPALMOSTSAFE
 */
int
sys_recvfrom(struct recvfrom_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	struct sockaddr *sa = NULL;
	int error, fromlen;

	if (uap->from && uap->fromlenaddr) {
		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
		if (error)
			return (error);
		if (fromlen < 0)
			return (EINVAL);
	} else {
		fromlen = 0;
	}
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
			     &uap->flags, &uap->sysmsg_szresult);

	if (error == 0 && uap->from) {
		/* note: sa may still be NULL */
		if (sa) {
			fromlen = MIN(fromlen, sa->sa_len);
			error = copyout(sa, uap->from, fromlen);
		} else {
			fromlen = 0;
		}
		if (error == 0) {
			error = copyout(&fromlen, uap->fromlenaddr,
					sizeof(fromlen));
		}
	}
	if (sa)
		kfree(sa, M_SONAME);

	return (error);
}

/*
 * recvmsg_args(int s, struct msghdr *msg, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_recvmsg(struct recvmsg_args *uap)
{
	struct thread *td = curthread;
	struct msghdr msg;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct mbuf *m, *control = NULL;
	struct sockaddr *sa = NULL;
	caddr_t ctlbuf;
	socklen_t *ufromlenp, *ucontrollenp;
	int error, fromlen, controllen, len, flags, *uflagsp;

	/*
	 * This copyin handles everything except the iovec.
	 */
	error = copyin(uap->msg, &msg, sizeof(msg));
	if (error)
		return (error);

	if (msg.msg_name && msg.msg_namelen < 0)
		return (EINVAL);
	if (msg.msg_control && msg.msg_controllen < 0)
		return (EINVAL);

	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
		    msg_namelen));
	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
		       msg_controllen));
	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
		  msg_flags));

	/*
	 * Populate auio.
	 */
	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = msg.msg_iovlen;
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	flags = uap->flags;

	error = kern_recvmsg(uap->s,
			     (msg.msg_name ? &sa : NULL), &auio,
			     (msg.msg_control ? &control : NULL), &flags,
			     &uap->sysmsg_szresult);

	/*
	 * Conditionally copyout the name and populate the namelen field.
	 */
	if (error == 0 && msg.msg_name) {
		/* note: sa may still be NULL */
		if (sa != NULL) {
			fromlen = MIN(msg.msg_namelen, sa->sa_len);
			error = copyout(sa, msg.msg_name, fromlen);
		} else {
			fromlen = 0;
		}
		if (error == 0)
			error = copyout(&fromlen, ufromlenp,
					sizeof(*ufromlenp));
	}

	/*
	 * Copyout msg.msg_control and msg.msg_controllen.
	 */
	if (error == 0 && msg.msg_control) {
		len = msg.msg_controllen;
		m = control;
		ctlbuf = (caddr_t)msg.msg_control;

		while(m && len > 0) {
			unsigned int tocopy;

			if (len >= m->m_len) {
				tocopy = m->m_len;
			} else {
				msg.msg_flags |= MSG_CTRUNC;
				tocopy = len;
			}

			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
			if (error)
				goto cleanup;

			ctlbuf += tocopy;
			len -= tocopy;
			m = m->m_next;
		}
		controllen = ctlbuf - (caddr_t)msg.msg_control;
		error = copyout(&controllen, ucontrollenp,
				sizeof(*ucontrollenp));
	}

	if (error == 0)
		error = copyout(&flags, uflagsp, sizeof(*uflagsp));

cleanup:
	if (sa)
		kfree(sa, M_SONAME);
	iovec_free(&iov, aiov);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
 * in kernel pointer instead of a userland pointer.  This allows us
 * to manipulate socket options in the emulation code.
 */
int
kern_setsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
		return (EINVAL);
	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sosetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp);
	return (error);
}

/*
 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
 *
 * MPALMOSTSAFE
 */
int
sys_setsockopt(struct setsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error;

	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_valsize = uap->valsize;
	sopt.sopt_td = td;
	sopt.sopt_val = NULL;

	if (sopt.sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);
	if (uap->val) {
		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
		if (error)
			goto out;
	}

	error = kern_setsockopt(uap->s, &sopt);
out:
	if (uap->val)
		kfree(sopt.sopt_val, M_TEMP);
	return(error);
}

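/*
 * In-kernel callers (e.g. syscall emulation code) can drive
 * kern_setsockopt() directly by pointing sopt_val at kernel memory and
 * leaving sopt_td NULL, as described above.  A minimal sketch, using
 * only fields that appear in this file (illustrative, not an existing
 * caller):
 *
 *	struct sockopt sopt;
 *	int on = 1;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &on;
 *	sopt.sopt_valsize = sizeof(on);
 *	sopt.sopt_td = NULL;
 *	error = kern_setsockopt(fd, &sopt);
 *
 * sopt_td == NULL is what marks sopt_val as a kernel pointer; a
 * userland-backed request would set sopt_td to the current thread, as
 * sys_setsockopt() above does.
 */
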
/*
 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
 * in kernel pointer instead of a userland pointer.  This allows us
 * to manipulate socket options in the emulation code.
 */
int
kern_getsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
		return (EINVAL);
	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sogetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp);
	return (error);
}

/*
 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
 *
 * MPALMOSTSAFE
 */
int
sys_getsockopt(struct getsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error, valsize;

	if (uap->val) {
		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
		if (error)
			return (error);
	} else {
		valsize = 0;
	}

	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_valsize = valsize;
	sopt.sopt_td = td;
	sopt.sopt_val = NULL;

	if (sopt.sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);
	if (uap->val) {
		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
		if (error)
			goto out;
	}

	error = kern_getsockopt(uap->s, &sopt);
	if (error)
		goto out;
	valsize = sopt.sopt_valsize;
	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
	if (error)
		goto out;
	if (uap->val)
		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
out:
	if (uap->val)
		kfree(sopt.sopt_val, M_TEMP);
	return (error);
}

/*
 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
 * This allows kern_getsockname() to return a pointer to an allocated struct
 * sockaddr which must be freed later with kfree().  The caller must
 * initialize *name to NULL.
 */
int
kern_getsockname(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	error = so_pru_sockaddr(so, &sa);
	if (error == 0) {
		if (sa == NULL) {
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;
		}
	}

	fdrop(fp);
	return (error);
}

/*
 * getsockname_args(int fdes, caddr_t asa, int *alen)
 *
 * Get socket name.
 *
 * MPALMOSTSAFE
 */
int
sys_getsockname(struct getsockname_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getsockname(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
 * This allows kern_getpeername() to return a pointer to an allocated struct
 * sockaddr which must be freed later with kfree().  The caller must
 * initialize *name to NULL.
 */
int
kern_getpeername(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		fdrop(fp);
		return (ENOTCONN);
	}
	error = so_pru_peeraddr(so, &sa);
	if (error == 0) {
		if (sa == NULL) {
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;
		}
	}

	fdrop(fp);
	return (error);
}

/*
 * getpeername_args(int fdes, caddr_t asa, int *alen)
 *
 * Get name of peer for connected socket.
 *
 * MPALMOSTSAFE
 */
int
sys_getpeername(struct getpeername_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getpeername(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

int
getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
{
	struct sockaddr *sa;
	int error;

	*namp = NULL;
	if (len > SOCK_MAXADDRLEN)
		return ENAMETOOLONG;
	if (len < offsetof(struct sockaddr, sa_data[0]))
		return EDOM;
	sa = kmalloc(len, M_SONAME, M_WAITOK);
	error = copyin(uaddr, sa, len);
	if (error) {
		kfree(sa, M_SONAME);
	} else {
#if BYTE_ORDER != BIG_ENDIAN
		/*
		 * The bind(), connect(), and sendto() syscalls were not
		 * versioned for COMPAT_43.  Thus, this check must stay.
		 */
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		sa->sa_len = len;
		*namp = sa;
	}
	return error;
}

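/*
 * Callers of getsockaddr() own the returned buffer and must release it
 * with kfree(sa, M_SONAME) once the address is no longer needed, as
 * sys_bind() and sys_connect() above do:
 *
 *	error = getsockaddr(&sa, uap->name, uap->namelen);
 *	if (error)
 *		return (error);
 *	error = kern_bind(uap->s, sa);
 *	kfree(sa, M_SONAME);
 */
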
/*
 * Detach a mapped page and release resources back to the system.
 * We must release our wiring and if the object is ripped out
 * from under the vm_page we become responsible for freeing the
 * page.
 *
 * MPSAFE
 */
static void
sf_buf_mfree(void *arg)
{
	struct sf_buf *sf = arg;
	vm_page_t m;

	m = sf_buf_page(sf);
	if (sf_buf_free(sf)) {
		/* sf invalid now */
		vm_page_busy_wait(m, FALSE, "sockpgf");
		vm_page_unwire(m, 0);
		if (m->wire_count == 0 && m->object == NULL) {
			vm_page_free(m);
		} else {
			vm_page_wakeup(m);
		}
	}
}

/*
 * sendfile(2).
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *		struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into *sbytes.
 *
 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
 * the headers to count against the remaining bytes to be sent from
 * the file descriptor.  We may wish to implement a compatibility syscall
 * in the future.
 *
 * MPALMOSTSAFE
 */
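/*
 * Illustrative userland call (not part of the kernel build), sending an
 * entire file and retrieving the byte count; the signature is as
 * documented above:
 *
 *	off_t sbytes;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) < 0)
 *		err(1, "sendfile");
 *
 * nbytes == 0 requests "send until EOF", and *sbytes also counts any
 * header/trailer bytes written via the optional sf_hdtr.
 */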
int
sys_sendfile(struct sendfile_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp = NULL;
	struct sf_hdtr hdtr;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct uio auio;
	struct mbuf *mheader = NULL;
	size_t hbytes = 0;
	size_t tbytes;
	off_t hdtr_size = 0;
	off_t sbytes;
	int error;

	KKASSERT(p);

	/*
	 * Do argument checking. Must be a regular file in, stream
	 * type and connected socket out, positive offset.
	 */
	fp = holdfp(p->p_fd, uap->fd, FREAD);
	if (fp == NULL) {
		return (EBADF);
	}
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp);
		return (EINVAL);
	}
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	fdrop(fp);

	/*
	 * If specified, get the pointer to the sf_hdtr struct for
	 * any headers/trailers.
	 */
	if (uap->hdtr) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error)
			goto done;
		/*
		 * Send any headers.
		 */
		if (hdtr.headers) {
			error = iovec_copyin(hdtr.headers, &iov, aiov,
					     hdtr.hdr_cnt, &hbytes);
			if (error)
				goto done;
			auio.uio_iov = iov;
			auio.uio_iovcnt = hdtr.hdr_cnt;
			auio.uio_offset = 0;
			auio.uio_segflg = UIO_USERSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_td = td;
			auio.uio_resid = hbytes;

			mheader = m_uiomove(&auio);

			iovec_free(&iov, aiov);
			if (mheader == NULL)
				goto done;
		}
	}

	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
			      &sbytes, uap->flags);
	if (error)
		goto done;

	/*
	 * Send trailers. Wimp out and use writev(2).
	 */
	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
		error = iovec_copyin(hdtr.trailers, &iov, aiov,
				     hdtr.trl_cnt, &auio.uio_resid);
		if (error)
			goto done;
		auio.uio_iov = iov;
		auio.uio_iovcnt = hdtr.trl_cnt;
		auio.uio_offset = 0;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);

		iovec_free(&iov, aiov);
		if (error)
			goto done;
		hdtr_size += tbytes;	/* trailer bytes successfully sent */
	}

done:
	if (vp)
		vrele(vp);
	if (uap->sbytes != NULL) {
		sbytes += hdtr_size;
		copyout(&sbytes, uap->sbytes, sizeof(off_t));
	}
	return (error);
}

int
kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
	      struct mbuf *mheader, off_t *sbytes, int flags)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vm_object *obj;
	struct socket *so;
	struct file *fp;
	struct mbuf *m, *mp;
	struct sf_buf *sf;
	struct vm_page *pg;
	off_t off, xfsize;
	off_t hbytes = 0;
	int error = 0;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto done0;
	}
	if ((obj = vp->v_object) == NULL) {
		error = EINVAL;
		goto done0;
	}
	error = holdsock(p->p_fd, sfd, &fp);
	if (error)
		goto done0;
	so = (struct socket *)fp->f_data;
	if (so->so_type != SOCK_STREAM) {
		error = EINVAL;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto done;
	}
	if (offset < 0) {
		error = EINVAL;
		goto done;
	}

	*sbytes = 0;
	/*
	 * Protect against multiple writers to the socket.
	 */
	ssb_lock(&so->so_snd, M_WAITOK);

	/*
	 * Loop through the pages in the file, starting with the requested
	 * offset. Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 */
	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
		vm_pindex_t pindex;
		vm_offset_t pgoff;
		int space;

		pindex = OFF_TO_IDX(off);
retry_lookup:
		/*
		 * Calculate the amount to transfer. Not to exceed a page,
		 * the EOF, or the passed in nbytes.
		 */
		xfsize = vp->v_filesize - off;
		if (xfsize > PAGE_SIZE)
			xfsize = PAGE_SIZE;
		pgoff = (vm_offset_t)(off & PAGE_MASK);
		if (PAGE_SIZE - pgoff < xfsize)
			xfsize = PAGE_SIZE - pgoff;
		if (nbytes && xfsize > (nbytes - *sbytes))
			xfsize = nbytes - *sbytes;
		if (xfsize <= 0)
			break;
		/*
		 * Optimize the non-blocking case by looking at the socket
		 * space before going to the extra work of constituting
		 * the sf_buf.
		 */
		if ((fp->f_flag & FNONBLOCK) &&
		    ssb_space_prealloc(&so->so_snd) <= 0) {
			if (so->so_state & SS_CANTSENDMORE)
				error = EPIPE;
			else
				error = EAGAIN;
			ssb_unlock(&so->so_snd);
			goto done;
		}
		/*
		 * Attempt to look up the page.
		 *
		 * Allocate if not found, wait and loop if busy, then
		 * wire the page.  critical section protection is
		 * required to maintain the object association (an
		 * interrupt can free the page) through to the
		 * vm_page_wire() call.
		 */
		vm_object_hold(obj);
		pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
		if (error) {
			vm_page_sleep_busy(pg, TRUE, "sfpbsy");
			vm_object_drop(obj);
			goto retry_lookup;
		}
		if (pg == NULL) {
			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
						       VM_ALLOC_NULL_OK);
			if (pg == NULL) {
				vm_wait(0);
				vm_object_drop(obj);
				goto retry_lookup;
			}
		}
		vm_page_wire(pg);
		vm_object_drop(obj);

		/*
		 * If page is not valid for what we need, initiate I/O
		 */

		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
			struct uio auio;
			struct iovec aiov;
			int bsize;

			/*
			 * Ensure that our page is still around when the I/O
			 * completes.
			 */
			vm_page_io_start(pg);
			vm_page_wakeup(pg);

			/*
			 * Get the page from backing store.
			 */
			bsize = vp->v_mount->mnt_stat.f_iosize;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			aiov.iov_base = 0;
			aiov.iov_len = MAXBSIZE;
			auio.uio_resid = MAXBSIZE;
			auio.uio_offset = trunc_page(off);
			auio.uio_segflg = UIO_NOCOPY;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_READ(vp, &auio,
					 IO_VMIO | ((MAXBSIZE / bsize) << 16),
					 td->td_ucred);
			vn_unlock(vp);
			vm_page_flag_clear(pg, PG_ZERO);
			vm_page_busy_wait(pg, FALSE, "sockpg");
			vm_page_io_finish(pg);
			if (error) {
				vm_page_unwire(pg, 0);
				vm_page_wakeup(pg);
				vm_page_try_to_free(pg);
				ssb_unlock(&so->so_snd);
				goto done;
			}
		}


		/*
		 * Get a sendfile buf. We usually wait as long as necessary,
		 * but this wait can be interrupted.
		 */
		if ((sf = sf_buf_alloc(pg)) == NULL) {
			vm_page_unwire(pg, 0);
			vm_page_wakeup(pg);
			vm_page_try_to_free(pg);
			ssb_unlock(&so->so_snd);
			error = EINTR;
			goto done;
		}
		vm_page_wakeup(pg);

		/*
		 * Get an mbuf header and set it up as having external storage.
		 */
		MGETHDR(m, MB_WAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sf_buf_free(sf);
			ssb_unlock(&so->so_snd);
			goto done;
		}

		m->m_ext.ext_free = sf_buf_mfree;
		m->m_ext.ext_ref = sf_buf_ref;
		m->m_ext.ext_arg = sf;
		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
		m->m_ext.ext_size = PAGE_SIZE;
		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
		m->m_flags |= M_EXT;
		m->m_pkthdr.len = m->m_len = xfsize;
		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);

		if (mheader != NULL) {
			hbytes = mheader->m_pkthdr.len;
			mheader->m_pkthdr.len += m->m_pkthdr.len;
			m_cat(mheader, m);
			m = mheader;
			mheader = NULL;
		} else
			hbytes = 0;

		/*
		 * Add the buffer to the socket buffer chain.
		 */
		crit_enter();
retry_space:
		/*
		 * Make sure that the socket is still able to take more data.
		 * CANTSENDMORE being true usually means that the connection
		 * was closed. so_error is true when an error was sensed after
		 * a previous send.
		 * The state is checked after the page mapping and buffer
		 * allocation above since those operations may block and make
		 * any socket checks stale. From this point forward, nothing
		 * blocks before the pru_send (or more accurately, any blocking
		 * results in a loop back to here to re-check).
		 */
		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
			} else {
				error = so->so_error;
				so->so_error = 0;
			}
			m_freem(m);
			ssb_unlock(&so->so_snd);
			crit_exit();
			goto done;
		}
		/*
		 * Wait for socket space to become available. We do this just
		 * after checking the connection state above in order to avoid
		 * a race condition with ssb_wait().
		 */
		space = ssb_space_prealloc(&so->so_snd);
		if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
			if (fp->f_flag & FNONBLOCK) {
				m_freem(m);
				ssb_unlock(&so->so_snd);
				crit_exit();
				error = EAGAIN;
				goto done;
			}
			error = ssb_wait(&so->so_snd);
			/*
			 * An error from ssb_wait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error) {
				m_freem(m);
				ssb_unlock(&so->so_snd);
				crit_exit();
				goto done;
			}
			goto retry_space;
		}

		for (mp = m; mp != NULL; mp = mp->m_next)
			ssb_preallocstream(&so->so_snd, mp);
		if (use_sendfile_async)
			error = so_pru_senda(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, NULL, NULL, td);

		crit_exit();
		if (error) {
			ssb_unlock(&so->so_snd);
			goto done;
		}
	}
	if (mheader != NULL) {
		*sbytes += mheader->m_pkthdr.len;

		for (mp = mheader; mp != NULL; mp = mp->m_next)
			ssb_preallocstream(&so->so_snd, mp);
		if (use_sendfile_async)
			error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, mheader, NULL, NULL, td);

		mheader = NULL;
	}
	ssb_unlock(&so->so_snd);

done:
	fdrop(fp);
done0:
	if (mheader != NULL)
		m_freem(mheader);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sctp_peeloff(struct sctp_peeloff_args *uap)
{
#ifdef SCTP
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *lfp = NULL;
	struct file *nfp = NULL;
	int error;
	struct socket *head, *so;
	caddr_t assoc_id;
	int fd;
	short fflag;		/* type must match fp->f_flag */

	assoc_id = uap->name;
	error = holdsock(td->td_proc->p_fd, uap->sd, &lfp);
	if (error)
		return (error);

	crit_enter();
	head = (struct socket *)lfp->f_data;
	error = sctp_can_peel_off(head, assoc_id);
	if (error) {
		crit_exit();
		goto done;
	}
	/*
	 * At this point we know we have an assoc to pull, so we proceed
	 * to get the fd set up.  This may block, but that is ok.
	 */

	fflag = lfp->f_flag;
	error = falloc(td->td_lwp, &nfp, &fd);
	if (error) {
		/*
		 * Probably ran out of file descriptors. Put the
		 * unaccepted connection back onto the queue and
		 * do another wakeup so some other process might
		 * have a chance at it.
		 */
		crit_exit();
		goto done;
	}
	uap->sysmsg_iresult = fd;

	so = sctp_get_peeloff(head, assoc_id, &error);
	if (so == NULL) {
		/*
		 * Either someone else peeled it off OR
		 * we can't get a socket.
		 */
		goto noconnection;
	}
	soreference(so);			/* reference needed */
	soclrstate(so, SS_NOFDREF | SS_COMP);	/* when clearing NOFDREF */
	so->so_head = NULL;
	if (head->so_sigio != NULL)
		fsetown(fgetown(&head->so_sigio), &so->so_sigio);

	nfp->f_type = DTYPE_SOCKET;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_data = so;

noconnection:
	/*
	 * Assign the file pointer to the reserved descriptor, or clear
	 * the reserved descriptor if an error occurred.
	 */
	if (error)
		fsetfd(fdp, NULL, fd);
	else
		fsetfd(fdp, nfp, fd);
	crit_exit();
	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp);
	fdrop(lfp);
	return (error);
#else /* SCTP */
	return(EOPNOTSUPP);
#endif /* SCTP */
}