/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
	CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	size_t nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note, however, that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
		if (rpipe->pipe_state & PIPE_SEL) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			pipeselwakeup(rpipe);
			lwkt_reltoken(&wlock);
		}
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 * We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both pieces in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
		if (wpipe->pipe_state & PIPE_SEL) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			pipeselwakeup(wpipe);
			lwkt_reltoken(&rlock);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * poll for events (helper)
 */
static int
pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events)
{
	int revents = 0;
	u_int space;

	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF)) {
		revents |= POLLHUP;
	}
	return (revents);
}

/*
 * Poll for events from file pointer.
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	revents = pipe_poll_events(rpipe, wpipe, events);
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
			lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
			lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);
		}
		revents = pipe_poll_events(rpipe, wpipe, events);
		if (revents == 0) {
			if (events & (POLLIN | POLLRDNORM)) {
				selrecord(curthread, &rpipe->pipe_sel);
				rpipe->pipe_state |= PIPE_SEL;
			}

			if (events & (POLLOUT | POLLWRNORM)) {
				selrecord(curthread, &wpipe->pipe_sel);
				wpipe->pipe_state |= PIPE_SEL;
			}
		}
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_reltoken(&rpipe_rlock);
			lwkt_reltoken(&rpipe_wlock);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_reltoken(&wpipe_rlock);
			lwkt_reltoken(&wpipe_wlock);
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
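
/*
 * Illustrative userland sketch (not part of the original file): a minimal
 * example, under stated assumptions, of the semantics the code above
 * implements -- a non-blocking read on an empty pipe failing with EAGAIN
 * (the nbio path in pipe_read()), an atomic <= PIPE_BUF write, and FIONREAD
 * reporting windex - rindex (pipe_ioctl()).  Only standard POSIX/BSD
 * userland interfaces are used; the block is wrapped in "#if 0" so this
 * kernel source still compiles unchanged.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	int avail;
	char buf[64];

	if (pipe(fds) < 0)		/* fds[0] = read side, fds[1] = write side */
		return (1);

	/* mark the read side non-blocking; pipe_read() returns EAGAIN when empty */
	fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_NONBLOCK);

	if (read(fds[0], buf, sizeof(buf)) < 0)
		printf("empty pipe: non-blocking read failed as expected\n");

	write(fds[1], "hello", 5);	/* <= PIPE_BUF, therefore atomic */
	ioctl(fds[0], FIONREAD, &avail);/* reports windex - rindex, i.e. 5 */
	printf("FIONREAD reports %d bytes\n", avail);

	close(fds[0]);
	close(fds[1]);
	return (0);
}
#endif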