/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.8 2003/07/30 00:19:14 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */
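
/*
 * Illustrative userspace sketch (an assumption for documentation only, not
 * part of this file or its build): both write modes described above are
 * transparent to applications.  A single write() of at least PIPE_MINDIRECT
 * bytes is a candidate for the direct, page-wiring path, while smaller
 * writes go through the kernel's circular buffer; the reader simply sees a
 * byte stream either way.  The 64KB size below is only assumed to exceed
 * PIPE_MINDIRECT.
 *
 *	int fds[2];
 *	static char big[65536];		(assumed >= PIPE_MINDIRECT)
 *	char small[64];			(always buffered)
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], big, sizeof(big));	(may use direct write)
 *		write(fds[1], small, sizeof(small));	(normal buffered write)
 *	}
 */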

/*
 * interfaces to the outside world
 */
static int pipe_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));
static int pipe_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));
static int pipe_close __P((struct file *fp, struct thread *td));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
		struct thread *td));
static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct thread *td));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct thread *td));

static struct fileops pipeops = {
	NULL,	/* port */
	0,	/* autoq */
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };


/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers, we cannot, of course limit
 * the amount of kva for pipes in general though.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeclose __P((struct pipe *cpipe));
static void pipe_free_kmem __P((struct pipe *cpipe));
static int pipe_create __P((struct pipe **cpipep));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *cpipe));
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif
static int pipespace __P((struct pipe *cpipe, int size));

static vm_zone_t pipe_zone;

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);
	fdp = p->p_fd;

	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	fhold(rf);
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd2);
	if (error) {
		if (fdp->fd_ofiles[fd1] == rf) {
			fdp->fd_ofiles[fd1] = NULL;
			fdrop(rf, td);
		}
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	fdrop(rf, td);

	return (0);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = zalloc(pipe_zone);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = NULL;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, (catch ? PCATCH : 0), "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.  We
			 * will either break out with an error or we will sleep and
			 * relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PCATCH,
				    "piperd", 0)) == 0) {
					error = pipelock(rpipe, 1);
				}
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    (caddr_t) wpipe->pipe_buffer.buffer, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct thread *td)
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe,1)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return(error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA)) &&
		    (uio->uio_iov->iov_len >= PIPE_MINDIRECT)) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe,1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct thread *td)
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);

	}
	return (ENOTTY);
}
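
/*
 * Illustrative userspace sketch (an assumption for documentation only, not
 * part of this file or its build): the FIONREAD ioctl above reports how many
 * bytes are ready on the read side of a pipe, whether they currently sit in
 * the circular buffer or in a pending direct write.
 *
 *	int avail;
 *
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes available\n", avail);
 */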

int
pipe_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}

static int
pipe_stat(struct file *fp, struct stat *ub, struct thread *td)
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(struct file *fp, struct thread *td)
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != NULL) {
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(struct pipe *cpipe)
{
	struct pipe *ppipe;

	if (cpipe) {

		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
			tsleep(cpipe, 0, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			ppipe->pipe_peer = NULL;
		}
		/*
		 * free resources
		 */
		pipe_free_kmem(cpipe);
		zfree(pipe_zone, cpipe);
	}
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EBADF);
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}