/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ FILTEROP_ISFD, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ FILTEROP_ISFD, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
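
/*
 * Note on the buffer indices used throughout this file: pipe_buffer.rindex
 * and pipe_buffer.windex are free-running unsigned counters.  The number of
 * bytes pending in the FIFO is (windex - rindex), and the physical offset
 * into the buffer is obtained by masking an index with
 * (pipe_buffer.size - 1), so the index arithmetic assumes the buffer size
 * is always a power of 2.
 */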

/*
 * Default pipe buffer size(s).  This can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a pipe read blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a pipe write blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
	CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipewakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipewakeup(struct pipe *cpipe)
{
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_kq.ki_note))
		KNOTE(&cpipe->pipe_kq.ki_note, 0);
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
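
/*
 * The serialization state in *ipp (pipe_rip / pipe_wip) as implemented
 * below: 0 means no uio is in progress, 1 means a uio is in progress,
 * and -1 means a uio is in progress with another thread sleeping in
 * pipe_start_uio() waiting for it to finish.
 */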
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 *
 * MPSAFE
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
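
/*
 * Illustrative userland usage (a sketch, not part of the kernel proper):
 * the syscall above is what backs a pipe(2) call such as
 *
 *	int fds[2];
 *	char c;
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);	-- fds[1] conventionally the write end
 *		read(fds[0], &c, 1);	-- fds[0] conventionally the read end
 *	}
 */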

/*
 * Allocate kva for the pipe's circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer,
				    size, PAGE_SIZE,
				    1, VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock, 1);
	lwkt_token_init(&cpipe->pipe_wlock, 1);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	size_t nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rpipe->pipe_rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&rpipe->pipe_wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update the last access time.
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}
		lwkt_gettoken(&rpipe->pipe_wlock);
		pipewakeup(rpipe);
		lwkt_reltoken(&rpipe->pipe_wlock);
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&wpipe->pipe_rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on an
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll/kq.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipewakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&wpipe->pipe_rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&wpipe->pipe_rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&wpipe->pipe_rlock);
			}
		}
		lwkt_gettoken(&wpipe->pipe_rlock);
		pipewakeup(wpipe);
		lwkt_reltoken(&wpipe->pipe_rlock);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll/kq.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wpipe->pipe_wlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&mpipe->pipe_rlock);
	lwkt_gettoken(&mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&mpipe->pipe_wlock);
	lwkt_reltoken(&mpipe->pipe_rlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipewakeup(rpipe);
	pipewakeup(wpipe);

	lwkt_reltoken(&wpipe->pipe_wlock);
	lwkt_reltoken(&wpipe->pipe_rlock);
	lwkt_reltoken(&rpipe->pipe_wlock);
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			  (vm_offset_t)cpipe->pipe_buffer.buffer,
			  cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select/poll/kq, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipewakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipewakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_kq.ki_note))
			KNOTE(&ppipe->pipe_kq.ki_note, 0);
		lwkt_reltoken(&ppipe->pipe_wlock);
		lwkt_reltoken(&ppipe->pipe_rlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe->pipe_wlock);
	lwkt_reltoken(&cpipe->pipe_rlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPSAFE
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer == NULL) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	knote_insert(&cpipe->pipe_kq.ki_note, kn);

	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	knote_remove(&cpipe->pipe_kq.ki_note, kn);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	/* XXX RACE */
	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}

	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *wpipe = (struct pipe *)kn->kn_fp->f_data;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;

	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}