/*
 * Copyright (c) 1982, 1986, 1989, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * sendfile(2) and related extensions:
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
 * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
 */

#include "opt_ktrace.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/kern_syscall.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/sfbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/mount.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/file2.h>
#include <sys/signalvar.h>
#include <sys/serialize.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>
#include <net/netmsg2.h>

#ifdef SCTP
#include <netinet/sctp_peeloff.h>
#endif /* SCTP */

extern int use_soaccept_pred_fast;

/*
 * System call interface to the socket abstraction.
 */

extern struct fileops socketops;

/*
 * socket_args(int domain, int type, int protocol)
 */
int
kern_socket(int domain, int type, int protocol, int *res)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct socket *so;
	struct file *fp;
	int fd, error;

	KKASSERT(td->td_lwp);

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return (error);
	error = socreate(domain, &so, type, protocol, td);
	if (error) {
		fsetfd(fdp, NULL, fd);
	} else {
		fp->f_type = DTYPE_SOCKET;
		fp->f_flag = FREAD | FWRITE;
		fp->f_ops = &socketops;
		fp->f_data = so;
		*res = fd;
		fsetfd(fdp, fp, fd);
	}
	fdrop(fp);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_socket(struct socket_args *uap)
{
	int error;

	error = kern_socket(uap->domain, uap->type, uap->protocol,
			    &uap->sysmsg_iresult);

	return (error);
}

int
kern_bind(int s, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = sobind((struct socket *)fp->f_data, sa, td);
	fdrop(fp);
	return (error);
}

/*
 * bind_args(int s, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_bind(struct bind_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_bind(uap->s, sa);
	kfree(sa, M_SONAME);

	return (error);
}

int
kern_listen(int s, int backlog)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	KKASSERT(p);
	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	error = solisten((struct socket *)fp->f_data, backlog, td);
	fdrop(fp);
	return(error);
}

/*
 * listen_args(int s, int backlog)
 *
 * MPALMOSTSAFE
 */
int
sys_listen(struct listen_args *uap)
{
	int error;

	error = kern_listen(uap->s, uap->backlog);
	return (error);
}

/*
 * Returns the accepted socket as well.
 *
 * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs, the
 *	  pool token is absolutely required to avoid a sofree() race,
 *	  as well as to avoid tailq handling races.
 */
static boolean_t
soaccept_predicate(struct netmsg_so_notify *msg)
{
	struct socket *head = msg->base.nm_so;
	struct socket *so;

	if (head->so_error != 0) {
		msg->base.lmsg.ms_error = head->so_error;
		return (TRUE);
	}
	lwkt_getpooltoken(head);
	if (!TAILQ_EMPTY(&head->so_comp)) {
		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
		so = TAILQ_FIRST(&head->so_comp);
		TAILQ_REMOVE(&head->so_comp, so, so_list);
		head->so_qlen--;
		soclrstate(so, SS_COMP);
		so->so_head = NULL;
		soreference(so);

		lwkt_relpooltoken(head);

		msg->base.lmsg.ms_error = 0;
		msg->base.nm_so = so;
		return (TRUE);
	}
	lwkt_relpooltoken(head);
	if (head->so_state & SS_CANTRCVMORE) {
		msg->base.lmsg.ms_error = ECONNABORTED;
		return (TRUE);
	}
	if (msg->nm_fflags & FNONBLOCK) {
		msg->base.lmsg.ms_error = EWOULDBLOCK;
		return (TRUE);
	}

	return (FALSE);
}

/*
 * The second argument to kern_accept() is a handle to a struct sockaddr.
 * This allows kern_accept() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *lfp = NULL;
	struct file *nfp = NULL;
	struct sockaddr *sa;
	struct socket *head, *so;
	struct netmsg_so_notify msg;
	int fd;
	u_int fflag;		/* type must match fp->f_flag */
	int error, tmp;

	*res = -1;
	if (name && namelen && *namelen < 0)
		return (EINVAL);

	error = holdsock(td->td_proc->p_fd, s, &lfp);
	if (error)
		return (error);

	error = falloc(td->td_lwp, &nfp, &fd);
	if (error) {		/* Probably ran out of file descriptors. */
		fdrop(lfp);
		return (error);
	}
	head = (struct socket *)lfp->f_data;
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}

	if (fflags & O_FBLOCKING)
		fflags |= lfp->f_flag & ~FNONBLOCK;
	else if (fflags & O_FNONBLOCKING)
		fflags |= lfp->f_flag | FNONBLOCK;
	else
		fflags = lfp->f_flag;

	if (use_soaccept_pred_fast) {
		boolean_t pred;

		/* Initialize necessary parts for soaccept_predicate() */
		netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
		msg.nm_fflags = fflags;

		lwkt_getpooltoken(head);
		pred = soaccept_predicate(&msg);
		lwkt_relpooltoken(head);

		if (pred) {
			error = msg.base.lmsg.ms_error;
			if (error)
				goto done;
			else
				goto accepted;
		}
	}

	/* optimize for uniprocessor case later XXX JH */
	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
			      0, netmsg_so_notify, netmsg_so_notify_doabort);
	msg.nm_predicate = soaccept_predicate;
	msg.nm_fflags = fflags;
	msg.nm_etype = NM_REVENT;
	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
	if (error)
		goto done;

accepted:
	/*
	 * At this point we have the connection that's ready to be accepted.
	 *
	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
	 *	 to eat the ref and turn it into a descriptor.
	 */
	so = msg.base.nm_so;

	fflag = lfp->f_flag;

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);

	if (head->so_sigio != NULL)
		fsetown(fgetown(&head->so_sigio), &so->so_sigio);

	nfp->f_type = DTYPE_SOCKET;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_data = so;
	/* Sync socket nonblocking/async state with file flags */
	tmp = fflag & FNONBLOCK;
	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
	tmp = fflag & FASYNC;
	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);

	sa = NULL;
	if (so->so_faddr != NULL) {
		sa = so->so_faddr;
		so->so_faddr = NULL;

		soaccept_generic(so);
		error = 0;
	} else {
		error = soaccept(so, &sa);
	}

	/*
	 * Set the returned name and namelen as applicable.  Set the returned
	 * namelen to 0 for older code which might ignore the return value
	 * from accept.
	 */
	if (error == 0) {
		if (sa && name && namelen) {
			if (*namelen > sa->sa_len)
				*namelen = sa->sa_len;
			*name = sa;
		} else {
			if (sa)
				kfree(sa, M_SONAME);
		}
	}

done:
	/*
	 * If an error occurred, clear the reserved descriptor; else associate
	 * nfp with it.
	 *
	 * Note that *res is normally ignored if an error is returned but
	 * a syscall message will still have access to the result code.
	 */
	if (error) {
		fsetfd(fdp, NULL, fd);
	} else {
		*res = fd;
		fsetfd(fdp, nfp, fd);
	}
	fdrop(nfp);
	fdrop(lfp);
	return (error);
}

/*
 * accept(int s, caddr_t name, int *anamelen)
 *
 * MPALMOSTSAFE
 */
int
sys_accept(struct accept_args *uap)
{
	struct sockaddr *sa = NULL;
	int sa_len;
	int error;

	if (uap->name) {
		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
		if (error)
			return (error);

		error = kern_accept(uap->s, 0, &sa, &sa_len,
				    &uap->sysmsg_iresult);

		if (error == 0)
			error = copyout(sa, uap->name, sa_len);
		if (error == 0) {
			error = copyout(&sa_len, uap->anamelen,
					sizeof(*uap->anamelen));
		}
		if (sa)
			kfree(sa, M_SONAME);
	} else {
		error = kern_accept(uap->s, 0, NULL, 0,
				    &uap->sysmsg_iresult);
	}
	return (error);
}

/*
 * extaccept(int s, int fflags, caddr_t name, int *anamelen)
 *
 * MPALMOSTSAFE
 */
int
sys_extaccept(struct extaccept_args *uap)
{
	struct sockaddr *sa = NULL;
	int sa_len;
	int error;
	int fflags = uap->flags & O_FMASK;

	if (uap->name) {
		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
		if (error)
			return (error);

		error = kern_accept(uap->s, fflags, &sa, &sa_len,
				    &uap->sysmsg_iresult);

		if (error == 0)
			error = copyout(sa, uap->name, sa_len);
		if (error == 0) {
			error = copyout(&sa_len, uap->anamelen,
					sizeof(*uap->anamelen));
		}
		if (sa)
			kfree(sa, M_SONAME);
	} else {
		error = kern_accept(uap->s, fflags, NULL, 0,
				    &uap->sysmsg_iresult);
	}
	return (error);
}

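/*
 * Illustrative sketch of the kern_accept() sockaddr-handle convention
 * described above: the caller initializes the handle to NULL and, on
 * success, owns the returned storage.  This fragment is not compiled;
 * example_accept_once() and listen_fd are placeholder names.
 */
#if 0
static int
example_accept_once(int listen_fd, int *new_fd)
{
	struct sockaddr *sa = NULL;	/* must start out NULL */
	int sa_len = sizeof(struct sockaddr_storage);
	int error;

	error = kern_accept(listen_fd, 0, &sa, &sa_len, new_fd);
	if (error == 0 && sa != NULL) {
		/* at most sa_len bytes of *sa are valid */
		kfree(sa, M_SONAME);
	}
	return (error);
}
#endif
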
/*
 * Returns TRUE if predicate satisfied.
 */
static boolean_t
soconnected_predicate(struct netmsg_so_notify *msg)
{
	struct socket *so = msg->base.nm_so;

	/* check predicate */
	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
		msg->base.lmsg.ms_error = so->so_error;
		return (TRUE);
	}

	return (FALSE);
}

int
kern_connect(int s, int fflags, struct sockaddr *sa)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	int error, interrupted = 0;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	so = (struct socket *)fp->f_data;

	if (fflags & O_FBLOCKING)
		/* fflags &= ~FNONBLOCK; */;
	else if (fflags & O_FNONBLOCKING)
		fflags |= FNONBLOCK;
	else
		fflags = fp->f_flag;

	if (so->so_state & SS_ISCONNECTING) {
		error = EALREADY;
		goto done;
	}
	error = soconnect(so, sa, td);
	if (error)
		goto bad;
	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		struct netmsg_so_notify msg;

		netmsg_init_abortable(&msg.base, so,
				      &curthread->td_msgport,
				      0,
				      netmsg_so_notify,
				      netmsg_so_notify_doabort);
		msg.nm_predicate = soconnected_predicate;
		msg.nm_etype = NM_REVENT;
		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
		if (error == EINTR || error == ERESTART)
			interrupted = 1;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
bad:
	if (!interrupted)
		soclrstate(so, SS_ISCONNECTING);
	if (error == ERESTART)
		error = EINTR;
done:
	fdrop(fp);
	return (error);
}

/*
 * connect_args(int s, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_connect(struct connect_args *uap)
{
	struct sockaddr *sa;
	int error;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_connect(uap->s, 0, sa);
	kfree(sa, M_SONAME);

	return (error);
}

/*
 * extconnect_args(int s, int fflags, caddr_t name, int namelen)
 *
 * MPALMOSTSAFE
 */
int
sys_extconnect(struct extconnect_args *uap)
{
	struct sockaddr *sa;
	int error;
	int fflags = uap->flags & O_FMASK;

	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		return (error);
	error = kern_connect(uap->s, fflags, sa);
	kfree(sa, M_SONAME);

	return (error);
}

int
kern_socketpair(int domain, int type, int protocol, int *sv)
{
	struct thread *td = curthread;
	struct filedesc *fdp;
	struct file *fp1, *fp2;
	struct socket *so1, *so2;
	int fd1, fd2, error;

	fdp = td->td_proc->p_fd;
	error = socreate(domain, &so1, type, protocol, td);
	if (error)
		return (error);
	error = socreate(domain, &so2, type, protocol, td);
	if (error)
		goto free1;
	error = falloc(td->td_lwp, &fp1, &fd1);
	if (error)
		goto free2;
	sv[0] = fd1;
	fp1->f_data = so1;
	error = falloc(td->td_lwp, &fp2, &fd2);
	if (error)
		goto free3;
	fp2->f_data = so2;
	sv[1] = fd2;
	error = soconnect2(so1, so2);
	if (error)
		goto free4;
	if (type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
		if (error)
			goto free4;
	}
	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
	fp1->f_ops = fp2->f_ops = &socketops;
	fsetfd(fdp, fp1, fd1);
	fsetfd(fdp, fp2, fd2);
	fdrop(fp1);
	fdrop(fp2);
	return (error);
free4:
	fsetfd(fdp, NULL, fd2);
	fdrop(fp2);
free3:
	fsetfd(fdp, NULL, fd1);
	fdrop(fp1);
free2:
	(void)soclose(so2, 0);
free1:
	(void)soclose(so1, 0);
	return (error);
}

/*
 * socketpair(int domain, int type, int protocol, int *rsv)
 */
int
sys_socketpair(struct socketpair_args *uap)
{
	int error, sockv[2];

	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);

	if (error == 0) {
		error = copyout(sockv, uap->rsv, sizeof(sockv));

		if (error != 0) {
			kern_close(sockv[0]);
			kern_close(sockv[1]);
		}
	}

	return (error);
}

int
kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
	     struct mbuf *control, int flags, size_t *res)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	struct file *fp;
	size_t len;
	int error;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	so = (struct socket *)fp->f_data;
	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
		if (fp->f_flag & FNONBLOCK)
			flags |= MSG_FNONBLOCKING;
	}
	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE && !(flags & MSG_NOSIGNAL))
			lwpsignal(p, lp, SIGPIPE);
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;
	fdrop(fp);
	return (error);
}

/*
 * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
 *
 * MPALMOSTSAFE
 */
int
sys_sendto(struct sendto_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	struct sockaddr *sa = NULL;
	int error;

	if (uap->to) {
		error = getsockaddr(&sa, uap->to, uap->tolen);
		if (error)
			return (error);
	}
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
			     &uap->sysmsg_szresult);

	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * sendmsg_args(int s, caddr_t msg, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_sendmsg(struct sendmsg_args *uap)
{
	struct thread *td = curthread;
	struct msghdr msg;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct sockaddr *sa = NULL;
	struct mbuf *control = NULL;
	int error;

	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
	if (error)
		return (error);

	/*
	 * Conditionally copyin msg.msg_name.
	 */
	if (msg.msg_name) {
		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
		if (error)
			return (error);
	}

	/*
	 * Populate auio.
	 */
	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
			     &auio.uio_resid);
	if (error)
		goto cleanup2;
	auio.uio_iov = iov;
	auio.uio_iovcnt = msg.msg_iovlen;
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/*
	 * Conditionally copyin msg.msg_control.
	 */
	if (msg.msg_control) {
		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
		    msg.msg_controllen > MLEN) {
			error = EINVAL;
			goto cleanup;
		}
		control = m_get(MB_WAIT, MT_CONTROL);
		if (control == NULL) {
			error = ENOBUFS;
			goto cleanup;
		}
		control->m_len = msg.msg_controllen;
		error = copyin(msg.msg_control, mtod(control, caddr_t),
			       msg.msg_controllen);
		if (error) {
			m_free(control);
			goto cleanup;
		}
	}

	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
			     &uap->sysmsg_szresult);

cleanup:
	iovec_free(&iov, aiov);
cleanup2:
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
 * null, it returns a dynamically allocated struct sockaddr and an mbuf.
 * Don't forget to FREE() and m_free() these if they are returned.
 */
int
kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
	     struct mbuf **control, int *flags, size_t *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	size_t len;
	int error;
	int lflags;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);

		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
		bcopy(auio->uio_iov, ktriov, iovlen);
		ktruio = *auio;
	}
#endif
	len = auio->uio_resid;
	so = (struct socket *)fp->f_data;

	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
		if (fp->f_flag & FNONBLOCK) {
			if (flags) {
				*flags |= MSG_FNONBLOCKING;
			} else {
				lflags = MSG_FNONBLOCKING;
				flags = &lflags;
			}
		}
	}

	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
	if (error) {
		if (auio->uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio->uio_resid;
			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
		}
		kfree(ktriov, M_TEMP);
	}
#endif
	if (error == 0)
		*res = len - auio->uio_resid;
	fdrop(fp);
	return (error);
}

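/*
 * Illustrative sketch of the kern_recvmsg() ownership rules described
 * above: when non-NULL handles are passed in, the returned sockaddr and
 * control mbuf belong to the caller and must be released.  Not compiled;
 * example_recv_once() and sock_fd are placeholder names, and the uio is
 * assumed to be fully initialized by the caller.
 */
#if 0
static int
example_recv_once(int sock_fd, struct uio *auio)
{
	struct sockaddr *sa = NULL;
	struct mbuf *control = NULL;
	int flags = 0;
	size_t bytes;
	int error;

	error = kern_recvmsg(sock_fd, &sa, auio, &control, &flags, &bytes);
	if (sa != NULL)
		kfree(sa, M_SONAME);
	if (control != NULL)
		m_freem(control);
	return (error);
}
#endif
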
/*
 * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
 *		 caddr_t from, int *fromlenaddr)
 *
 * MPALMOSTSAFE
 */
int
sys_recvfrom(struct recvfrom_args *uap)
{
	struct thread *td = curthread;
	struct uio auio;
	struct iovec aiov;
	struct sockaddr *sa = NULL;
	int error, fromlen;

	if (uap->from && uap->fromlenaddr) {
		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
		if (error)
			return (error);
		if (fromlen < 0)
			return (EINVAL);
	} else {
		fromlen = 0;
	}
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = uap->len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
			     &uap->flags, &uap->sysmsg_szresult);

	if (error == 0 && uap->from) {
		/* note: sa may still be NULL */
		if (sa) {
			fromlen = MIN(fromlen, sa->sa_len);
			error = copyout(sa, uap->from, fromlen);
		} else {
			fromlen = 0;
		}
		if (error == 0) {
			error = copyout(&fromlen, uap->fromlenaddr,
					sizeof(fromlen));
		}
	}
	if (sa)
		kfree(sa, M_SONAME);

	return (error);
}

/*
 * recvmsg_args(int s, struct msghdr *msg, int flags)
 *
 * MPALMOSTSAFE
 */
int
sys_recvmsg(struct recvmsg_args *uap)
{
	struct thread *td = curthread;
	struct msghdr msg;
	struct uio auio;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct mbuf *m, *control = NULL;
	struct sockaddr *sa = NULL;
	caddr_t ctlbuf;
	socklen_t *ufromlenp, *ucontrollenp;
	int error, fromlen, controllen, len, flags, *uflagsp;

	/*
	 * This copyin handles everything except the iovec.
	 */
	error = copyin(uap->msg, &msg, sizeof(msg));
	if (error)
		return (error);

	if (msg.msg_name && msg.msg_namelen < 0)
		return (EINVAL);
	if (msg.msg_control && msg.msg_controllen < 0)
		return (EINVAL);

	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
		    msg_namelen));
	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
		       msg_controllen));
	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
		  msg_flags));

	/*
	 * Populate auio.
	 */
	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
			     &auio.uio_resid);
	if (error)
		return (error);
	auio.uio_iov = iov;
	auio.uio_iovcnt = msg.msg_iovlen;
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;

	flags = uap->flags;

	error = kern_recvmsg(uap->s,
			     (msg.msg_name ? &sa : NULL), &auio,
			     (msg.msg_control ? &control : NULL), &flags,
			     &uap->sysmsg_szresult);

	/*
	 * Conditionally copyout the name and populate the namelen field.
	 */
	if (error == 0 && msg.msg_name) {
		/* note: sa may still be NULL */
		if (sa != NULL) {
			fromlen = MIN(msg.msg_namelen, sa->sa_len);
			error = copyout(sa, msg.msg_name, fromlen);
		} else {
			fromlen = 0;
		}
		if (error == 0)
			error = copyout(&fromlen, ufromlenp,
					sizeof(*ufromlenp));
	}

	/*
	 * Copyout msg.msg_control and msg.msg_controllen.
	 */
	if (error == 0 && msg.msg_control) {
		len = msg.msg_controllen;
		m = control;
		ctlbuf = (caddr_t)msg.msg_control;

		while(m && len > 0) {
			unsigned int tocopy;

			if (len >= m->m_len) {
				tocopy = m->m_len;
			} else {
				msg.msg_flags |= MSG_CTRUNC;
				tocopy = len;
			}

			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
			if (error)
				goto cleanup;

			ctlbuf += tocopy;
			len -= tocopy;
			m = m->m_next;
		}
		controllen = ctlbuf - (caddr_t)msg.msg_control;
		error = copyout(&controllen, ucontrollenp,
				sizeof(*ucontrollenp));
	}

	if (error == 0)
		error = copyout(&flags, uflagsp, sizeof(*uflagsp));

cleanup:
	if (sa)
		kfree(sa, M_SONAME);
	iovec_free(&iov, aiov);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
 * in kernel pointer instead of a userland pointer.  This allows us
 * to manipulate socket options in the emulation code.
 */
int
kern_setsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
		return (EINVAL);
	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sosetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp);
	return (error);
}

/*
 * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
 *
 * MPALMOSTSAFE
 */
int
sys_setsockopt(struct setsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error;

	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_valsize = uap->valsize;
	sopt.sopt_td = td;
	sopt.sopt_val = NULL;

	if (sopt.sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);
	if (uap->val) {
		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
		if (error)
			goto out;
	}

	error = kern_setsockopt(uap->s, &sopt);
out:
	if (uap->val)
		kfree(sopt.sopt_val, M_TEMP);
	return(error);
}

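/*
 * Illustrative sketch of the sopt_td == NULL convention described above
 * kern_setsockopt(): with a NULL thread pointer, sopt_val is interpreted
 * as a kernel address, so an option can be set from a kernel buffer.
 * Not compiled; example_set_reuseaddr() and sock_fd are placeholders.
 */
#if 0
static int
example_set_reuseaddr(int sock_fd)
{
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &on;		/* kernel pointer ... */
	sopt.sopt_valsize = sizeof(on);
	sopt.sopt_td = NULL;		/* ... because sopt_td is NULL */

	return (kern_setsockopt(sock_fd, &sopt));
}
#endif
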
/*
 * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
 * in kernel pointer instead of a userland pointer.  This allows us
 * to manipulate socket options in the emulation code.
 */
int
kern_getsockopt(int s, struct sockopt *sopt)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
		return (EFAULT);
	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
		return (EINVAL);
	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);

	error = sogetopt((struct socket *)fp->f_data, sopt);
	fdrop(fp);
	return (error);
}

/*
 * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
 *
 * MPALMOSTSAFE
 */
int
sys_getsockopt(struct getsockopt_args *uap)
{
	struct thread *td = curthread;
	struct sockopt sopt;
	int error, valsize;

	if (uap->val) {
		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
		if (error)
			return (error);
	} else {
		valsize = 0;
	}

	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_valsize = valsize;
	sopt.sopt_td = td;
	sopt.sopt_val = NULL;

	if (sopt.sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
		return (EINVAL);
	if (uap->val) {
		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
		if (error)
			goto out;
	}

	error = kern_getsockopt(uap->s, &sopt);
	if (error)
		goto out;
	valsize = sopt.sopt_valsize;
	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
	if (error)
		goto out;
	if (uap->val)
		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
out:
	if (uap->val)
		kfree(sopt.sopt_val, M_TEMP);
	return (error);
}

/*
 * The second argument to kern_getsockname() is a handle to a struct sockaddr.
 * This allows kern_getsockname() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_getsockname(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	error = so_pru_sockaddr(so, &sa);
	if (error == 0) {
		if (sa == NULL) {
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;
		}
	}

	fdrop(fp);
	return (error);
}

/*
 * getsockname_args(int fdes, caddr_t asa, int *alen)
 *
 * Get socket name.
 *
 * MPALMOSTSAFE
 */
int
sys_getsockname(struct getsockname_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getsockname(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

/*
 * The second argument to kern_getpeername() is a handle to a struct sockaddr.
 * This allows kern_getpeername() to return a pointer to an allocated struct
 * sockaddr which must be freed later with FREE().  The caller must
 * initialize *name to NULL.
 */
int
kern_getpeername(int s, struct sockaddr **name, int *namelen)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct socket *so;
	struct sockaddr *sa = NULL;
	int error;

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	if (*namelen < 0) {
		fdrop(fp);
		return (EINVAL);
	}
	so = (struct socket *)fp->f_data;
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		fdrop(fp);
		return (ENOTCONN);
	}
	error = so_pru_peeraddr(so, &sa);
	if (error == 0) {
		if (sa == NULL) {
			*namelen = 0;
		} else {
			*namelen = MIN(*namelen, sa->sa_len);
			*name = sa;
		}
	}

	fdrop(fp);
	return (error);
}

/*
 * getpeername_args(int fdes, caddr_t asa, int *alen)
 *
 * Get name of peer for connected socket.
 *
 * MPALMOSTSAFE
 */
int
sys_getpeername(struct getpeername_args *uap)
{
	struct sockaddr *sa = NULL;
	int error, sa_len;

	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
	if (error)
		return (error);

	error = kern_getpeername(uap->fdes, &sa, &sa_len);

	if (error == 0)
		error = copyout(sa, uap->asa, sa_len);
	if (error == 0)
		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
	if (sa)
		kfree(sa, M_SONAME);
	return (error);
}

int
getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
{
	struct sockaddr *sa;
	int error;

	*namp = NULL;
	if (len > SOCK_MAXADDRLEN)
		return ENAMETOOLONG;
	if (len < offsetof(struct sockaddr, sa_data[0]))
		return EDOM;
	sa = kmalloc(len, M_SONAME, M_WAITOK);
	error = copyin(uaddr, sa, len);
	if (error) {
		kfree(sa, M_SONAME);
	} else {
#if BYTE_ORDER != BIG_ENDIAN
		/*
		 * The bind(), connect(), and sendto() syscalls were not
		 * versioned for COMPAT_43.  Thus, this check must stay.
		 */
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		sa->sa_len = len;
		*namp = sa;
	}
	return error;
}

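/*
 * For reference, a sketch of the historic 4.3BSD sockaddr layout that the
 * COMPAT_43 check in getsockaddr() still accommodates (not compiled; the
 * struct names are illustrative only).  The old structure began with a
 * 16-bit sa_family and had no sa_len, so on a little-endian machine an
 * old-style caller leaves what is now read as sa_family zero while the
 * low byte of its family value lands where sa_len lives today.
 */
#if 0
struct sockaddr_43_layout {	/* historic layout, illustrative name */
	u_short	sa_family;	/* address family, no length byte */
	char	sa_data[14];
};

struct sockaddr_layout {	/* modern layout assumed by this file */
	u_char	sa_len;		/* total length */
	u_char	sa_family;	/* address family */
	char	sa_data[14];
};
#endif
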
/*
 * Detach a mapped page and release resources back to the system.
 * We must release our wiring and if the object is ripped out
 * from under the vm_page we become responsible for freeing the
 * page.
 *
 * MPSAFE
 */
static void
sf_buf_mfree(void *arg)
{
	struct sf_buf *sf = arg;
	vm_page_t m;

	m = sf_buf_page(sf);
	if (sf_buf_free(sf)) {
		/* sf invalid now */
		vm_page_busy_wait(m, FALSE, "sockpgf");
		vm_page_unwire(m, 0);
		vm_page_wakeup(m);
		if (m->wire_count == 0 && m->object == NULL)
			vm_page_try_to_free(m);
	}
}

/*
 * sendfile(2).
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *		struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into *sbytes.
 *
 * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
 * the headers to count against the remaining bytes to be sent from
 * the file descriptor.  We may wish to implement a compatibility syscall
 * in the future.
 *
 * MPALMOSTSAFE
 */
int
sys_sendfile(struct sendfile_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp = NULL;
	struct sf_hdtr hdtr;
	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
	struct uio auio;
	struct mbuf *mheader = NULL;
	size_t hbytes = 0;
	size_t tbytes;
	off_t hdtr_size = 0;
	off_t sbytes;
	int error;

	KKASSERT(p);

	/*
	 * Do argument checking.  Must be a regular file in, stream
	 * type and connected socket out, positive offset.
	 */
	fp = holdfp(p->p_fd, uap->fd, FREAD);
	if (fp == NULL) {
		return (EBADF);
	}
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp);
		return (EINVAL);
	}
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	fdrop(fp);

	/*
	 * If specified, get the pointer to the sf_hdtr struct for
	 * any headers/trailers.
	 */
	if (uap->hdtr) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error)
			goto done;
		/*
		 * Send any headers.
		 */
		if (hdtr.headers) {
			error = iovec_copyin(hdtr.headers, &iov, aiov,
					     hdtr.hdr_cnt, &hbytes);
			if (error)
				goto done;
			auio.uio_iov = iov;
			auio.uio_iovcnt = hdtr.hdr_cnt;
			auio.uio_offset = 0;
			auio.uio_segflg = UIO_USERSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_td = td;
			auio.uio_resid = hbytes;

			mheader = m_uiomove(&auio);

			iovec_free(&iov, aiov);
			if (mheader == NULL)
				goto done;
		}
	}

	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
			      &sbytes, uap->flags);
	if (error)
		goto done;

	/*
	 * Send trailers.  Wimp out and use writev(2).
	 */
	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
		error = iovec_copyin(hdtr.trailers, &iov, aiov,
				     hdtr.trl_cnt, &auio.uio_resid);
		if (error)
			goto done;
		auio.uio_iov = iov;
		auio.uio_iovcnt = hdtr.trl_cnt;
		auio.uio_offset = 0;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);

		iovec_free(&iov, aiov);
		if (error)
			goto done;
		hdtr_size += tbytes;	/* trailer bytes successfully sent */
	}

done:
	if (vp)
		vrele(vp);
	if (uap->sbytes != NULL) {
		sbytes += hdtr_size;
		copyout(&sbytes, uap->sbytes, sizeof(off_t));
	}
	return (error);
}

int
kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
	      struct mbuf *mheader, off_t *sbytes, int flags)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vm_object *obj;
	struct socket *so;
	struct file *fp;
	struct mbuf *m;
	struct sf_buf *sf;
	struct vm_page *pg;
	off_t off, xfsize;
	off_t hbytes = 0;
	int error = 0;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto done0;
	}
	if ((obj = vp->v_object) == NULL) {
		error = EINVAL;
		goto done0;
	}
	error = holdsock(p->p_fd, sfd, &fp);
	if (error)
		goto done0;
	so = (struct socket *)fp->f_data;
	if (so->so_type != SOCK_STREAM) {
		error = EINVAL;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto done;
	}
	if (offset < 0) {
		error = EINVAL;
		goto done;
	}

	*sbytes = 0;
	/*
	 * Protect against multiple writers to the socket.
	 */
	ssb_lock(&so->so_snd, M_WAITOK);

	/*
	 * Loop through the pages in the file, starting with the requested
	 * offset.  Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 */
	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
		vm_pindex_t pindex;
		vm_offset_t pgoff;

		pindex = OFF_TO_IDX(off);
retry_lookup:
		/*
		 * Calculate the amount to transfer.  Not to exceed a page,
		 * the EOF, or the passed in nbytes.
		 */
		xfsize = vp->v_filesize - off;
		if (xfsize > PAGE_SIZE)
			xfsize = PAGE_SIZE;
		pgoff = (vm_offset_t)(off & PAGE_MASK);
		if (PAGE_SIZE - pgoff < xfsize)
			xfsize = PAGE_SIZE - pgoff;
		if (nbytes && xfsize > (nbytes - *sbytes))
			xfsize = nbytes - *sbytes;
		if (xfsize <= 0)
			break;
		/*
		 * Optimize the non-blocking case by looking at the socket space
		 * before going to the extra work of constituting the sf_buf.
		 */
		if ((fp->f_flag & FNONBLOCK) && ssb_space(&so->so_snd) <= 0) {
			if (so->so_state & SS_CANTSENDMORE)
				error = EPIPE;
			else
				error = EAGAIN;
			ssb_unlock(&so->so_snd);
			goto done;
		}
		/*
		 * Attempt to look up the page.
		 *
		 * Allocate if not found, wait and loop if busy, then
		 * wire the page.  critical section protection is
		 * required to maintain the object association (an
		 * interrupt can free the page) through to the
		 * vm_page_wire() call.
		 */
		vm_object_hold(obj);
		pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
		if (error) {
			vm_page_sleep_busy(pg, TRUE, "sfpbsy");
			vm_object_drop(obj);
			goto retry_lookup;
		}
		if (pg == NULL) {
			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
							VM_ALLOC_NULL_OK);
			if (pg == NULL) {
				vm_wait(0);
				vm_object_drop(obj);
				goto retry_lookup;
			}
		}
		vm_page_wire(pg);
		vm_object_drop(obj);

		/*
		 * If page is not valid for what we need, initiate I/O
		 */

		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
			struct uio auio;
			struct iovec aiov;
			int bsize;

			/*
			 * Ensure that our page is still around when the I/O
			 * completes.
			 */
			vm_page_io_start(pg);
			vm_page_wakeup(pg);

			/*
			 * Get the page from backing store.
			 */
			bsize = vp->v_mount->mnt_stat.f_iosize;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			aiov.iov_base = 0;
			aiov.iov_len = MAXBSIZE;
			auio.uio_resid = MAXBSIZE;
			auio.uio_offset = trunc_page(off);
			auio.uio_segflg = UIO_NOCOPY;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_READ(vp, &auio,
					 IO_VMIO | ((MAXBSIZE / bsize) << 16),
					 td->td_ucred);
			vn_unlock(vp);
			vm_page_flag_clear(pg, PG_ZERO);
			vm_page_busy_wait(pg, FALSE, "sockpg");
			vm_page_io_finish(pg);
			if (error) {
				vm_page_unwire(pg, 0);
				vm_page_wakeup(pg);
				vm_page_try_to_free(pg);
				ssb_unlock(&so->so_snd);
				goto done;
			}
		}


		/*
		 * Get a sendfile buf.  We usually wait as long as necessary,
		 * but this wait can be interrupted.
		 */
		if ((sf = sf_buf_alloc(pg)) == NULL) {
			vm_page_unwire(pg, 0);
			vm_page_wakeup(pg);
			vm_page_try_to_free(pg);
			ssb_unlock(&so->so_snd);
			error = EINTR;
			goto done;
		}
		vm_page_wakeup(pg);

		/*
		 * Get an mbuf header and set it up as having external storage.
		 */
		MGETHDR(m, MB_WAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sf_buf_free(sf);
			ssb_unlock(&so->so_snd);
			goto done;
		}

		m->m_ext.ext_free = sf_buf_mfree;
		m->m_ext.ext_ref = sf_buf_ref;
		m->m_ext.ext_arg = sf;
		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
		m->m_ext.ext_size = PAGE_SIZE;
		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
		m->m_flags |= M_EXT;
		m->m_pkthdr.len = m->m_len = xfsize;
		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);

		if (mheader != NULL) {
			hbytes = mheader->m_pkthdr.len;
			mheader->m_pkthdr.len += m->m_pkthdr.len;
			m_cat(mheader, m);
			m = mheader;
			mheader = NULL;
		} else
			hbytes = 0;

		/*
		 * Add the buffer to the socket buffer chain.
		 */
		crit_enter();
retry_space:
		/*
		 * Make sure that the socket is still able to take more data.
		 * CANTSENDMORE being true usually means that the connection
		 * was closed.  so_error is true when an error was sensed after
		 * a previous send.
		 * The state is checked after the page mapping and buffer
		 * allocation above since those operations may block and make
		 * any socket checks stale.  From this point forward, nothing
		 * blocks before the pru_send (or more accurately, any blocking
		 * results in a loop back to here to re-check).
		 */
		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
			} else {
				error = so->so_error;
				so->so_error = 0;
			}
			m_freem(m);
			ssb_unlock(&so->so_snd);
			crit_exit();
			goto done;
		}
		/*
		 * Wait for socket space to become available.  We do this just
		 * after checking the connection state above in order to avoid
		 * a race condition with ssb_wait().
		 */
		if (ssb_space(&so->so_snd) < so->so_snd.ssb_lowat) {
			if (fp->f_flag & FNONBLOCK) {
				m_freem(m);
				ssb_unlock(&so->so_snd);
				crit_exit();
				error = EAGAIN;
				goto done;
			}
			error = ssb_wait(&so->so_snd);
			/*
			 * An error from ssb_wait usually indicates that we've
			 * been interrupted by a signal.  If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error) {
				m_freem(m);
				ssb_unlock(&so->so_snd);
				crit_exit();
				goto done;
			}
			goto retry_space;
		}
		error = so_pru_senda(so, 0, m, NULL, NULL, td);
		crit_exit();
		if (error) {
			ssb_unlock(&so->so_snd);
			goto done;
		}
	}
	if (mheader != NULL) {
		*sbytes += mheader->m_pkthdr.len;
		error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
		mheader = NULL;
	}
	ssb_unlock(&so->so_snd);

done:
	fdrop(fp);
done0:
	if (mheader != NULL)
		m_freem(mheader);
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sctp_peeloff(struct sctp_peeloff_args *uap)
{
#ifdef SCTP
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *lfp = NULL;
	struct file *nfp = NULL;
	int error;
	struct socket *head, *so;
	caddr_t assoc_id;
	int fd;
	short fflag;		/* type must match fp->f_flag */

	assoc_id = uap->name;
	error = holdsock(td->td_proc->p_fd, uap->sd, &lfp);
	if (error)
		return (error);

	crit_enter();
	head = (struct socket *)lfp->f_data;
	error = sctp_can_peel_off(head, assoc_id);
	if (error) {
		crit_exit();
		goto done;
	}
	/*
	 * At this point we know we have an assoc to peel off, so we
	 * proceed to set up the fd.  This may block but that is ok.
	 */

	fflag = lfp->f_flag;
	error = falloc(td->td_lwp, &nfp, &fd);
	if (error) {
		/*
		 * Probably ran out of file descriptors.  Put the
		 * unaccepted connection back onto the queue and
		 * do another wakeup so some other process might
		 * have a chance at it.
		 */
		crit_exit();
		goto done;
	}
	uap->sysmsg_iresult = fd;

	so = sctp_get_peeloff(head, assoc_id, &error);
	if (so == NULL) {
		/*
		 * Either someone else peeled it off OR
		 * we can't get a socket.
		 */
		goto noconnection;
	}
	soreference(so);			/* reference needed */
	soclrstate(so, SS_NOFDREF | SS_COMP);	/* when clearing NOFDREF */
	so->so_head = NULL;
	if (head->so_sigio != NULL)
		fsetown(fgetown(&head->so_sigio), &so->so_sigio);

	nfp->f_type = DTYPE_SOCKET;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_data = so;

noconnection:
	/*
	 * Assign the file pointer to the reserved descriptor, or clear
	 * the reserved descriptor if an error occurred.
	 */
	if (error)
		fsetfd(fdp, NULL, fd);
	else
		fsetfd(fdp, nfp, fd);
	crit_exit();
	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp);
	fdrop(lfp);
	return (error);
#else /* SCTP */
	return(EOPNOTSUPP);
#endif /* SCTP */
}
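
/*
 * Illustrative userland sketch of the sendfile(2) interface documented
 * above sys_sendfile(): send an entire file preceded by a single header
 * iovec and collect the byte count in *sbytes.  This is not kernel code
 * and is not compiled; example_send_file(), file_fd and sock_fd are
 * placeholder names.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
example_send_file(int file_fd, int sock_fd)
{
	static char header[] = "HTTP/1.0 200 OK\r\n\r\n";
	struct iovec hdr_iov;
	struct sf_hdtr hdtr;
	off_t sbytes;

	hdr_iov.iov_base = header;
	hdr_iov.iov_len = sizeof(header) - 1;

	hdtr.headers = &hdr_iov;
	hdtr.hdr_cnt = 1;
	hdtr.trailers = NULL;
	hdtr.trl_cnt = 0;

	/* nbytes == 0 means "send until EOF"; offset 0 starts at the top */
	return (sendfile(file_fd, sock_fd, 0, 0, &hdtr, &sbytes, 0));
}
#endif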