/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, u_int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, u_int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
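
/*
 * Example (illustrative sketch only, not compiled into this file): from
 * userland the two descriptors returned by sys_pipe() above form the usual
 * unidirectional pipe, with sysmsg_fds[0] as the read end and
 * sysmsg_fds[1] as the write end.  A minimal sketch using the standard
 * pipe(2), write(2) and read(2) interfaces:
 */
#if 0
#include <unistd.h>

static void
pipe_usage_example(void)
{
	int fds[2];
	char buf[4];

	if (pipe(fds) == 0) {
		write(fds[1], "data", 4);	/* write end is fds[1] */
		read(fds[0], buf, 4);		/* read end is fds[0] */
		close(fds[0]);
		close(fds[1]);
	}
}
#endif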

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}
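
/*
 * Illustrative note (an assumption consistent with the index arithmetic
 * used throughout this file): the buffer size passed to pipespace() is a
 * power of two, so rindex and windex can run freely as unsigned counters.
 * Masking yields the byte offset into the buffer and unsigned subtraction
 * yields the fill level even after the counters wrap, e.g. (hypothetical
 * values for a 16384-byte buffer):
 */
#if 0
	u_int size = 16384;			/* power of 2 */
	u_int rindex = 16380;			/* free-running read counter */
	u_int windex = 16390;			/* free-running write counter */

	u_int offset = rindex & (size - 1);	/* 16380: next read offset */
	u_int filled = windex - rindex;		/* 10 bytes buffered */
	u_int space  = size - filled;		/* 16374 bytes of free space */
#endif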

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int orig_resid;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	/*
	 * Degenerate case
	 */
	orig_resid = uio->uio_resid;
	if (orig_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;
	while (uio->uio_resid) {
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			if (nsize > (u_int)uio->uio_resid)
				nsize = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
	}
	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in the buffer, wake up any writers
	 * waiting in select/poll.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
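
/*
 * Illustrative sketch (a distillation, not an additional code path): both
 * the read and write paths above use the same "check, lock, re-check"
 * idiom so that the second token is only acquired when a peer is actually
 * blocked.  Reduced to its essentials, waking a blocked writer looks like:
 */
#if 0
	if (pipe->pipe_state & PIPE_WANTW) {		/* cheap test, rlock only */
		lwkt_gettoken(&wlock, &pipe->pipe_wlock);
		if (pipe->pipe_state & PIPE_WANTW) {	/* re-check under both tokens */
			pipe->pipe_state &= ~PIPE_WANTW;
			lwkt_reltoken(&wlock);
			wakeup(pipe);
		} else {
			lwkt_reltoken(&wlock);		/* lost the race, nothing to do */
		}
	}
#endif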

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on an
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			if (space > (u_int)uio->uio_resid)
				space = (u_int)uio->uio_resid;
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both segments in one
			 * go so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			pipeselwakeup(wpipe);
			++wpipe->pipe_wantwcnt;
			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
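
/*
 * Example (illustrative userland sketch, not compiled into this file):
 * FIONREAD reports the bytes currently buffered (windex - rindex), and
 * poll(2) reports the write side writable while at least PIPE_BUF bytes
 * of space remain, matching pipe_ioctl() and pipe_poll() above.
 */
#if 0
#include <sys/ioctl.h>
#include <poll.h>
#include <unistd.h>

static void
pipe_ioctl_poll_example(int pfd[2])
{
	struct pollfd pfds[1];
	int nbytes = 0;

	write(pfd[1], "hello", 5);
	ioctl(pfd[0], FIONREAD, &nbytes);	/* nbytes should now be 5 */

	pfds[0].fd = pfd[1];
	pfds[0].events = POLLOUT;
	poll(pfds, 1, 0);			/* POLLOUT set while space >= PIPE_BUF */
}
#endif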

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;
		wpipe->pipe_state |= PIPE_WEOF;
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		pipeselwakeup(rpipe);
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_WEOF;
		rpipe->pipe_state |= PIPE_REOF;
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		pipeselwakeup(wpipe);
		error = 0;
		break;
	}

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}
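
/*
 * Example (illustrative userland sketch, not compiled into this file):
 * registering the filters above via kqueue(2).  EVFILT_READ fires once
 * data is buffered (kn_data > 0) or the write side hits EOF; EVFILT_WRITE
 * fires while at least PIPE_BUF bytes of space remain.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

static void
pipe_kqueue_example(int pfd[2])
{
	struct kevent kev[2], ev;
	int kq = kqueue();

	EV_SET(&kev[0], pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	EV_SET(&kev[1], pfd[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	kevent(kq, kev, 2, NULL, 0, NULL);	/* register both filters */

	write(pfd[1], "x", 1);
	kevent(kq, NULL, 0, &ev, 1, NULL);	/* returns a ready event */
	close(kq);
}
#endif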

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}