/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
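
/*
 * The fileops table above is what ties a pipe into the generic descriptor
 * layer: a read(2) or write(2) on a pipe descriptor is dispatched through
 * fp->f_ops, i.e. fo_read -> pipe_read and fo_write -> pipe_write.  As a
 * rough sketch only (the real dispatch lives in the generic file code,
 * not here):
 *
 *	error = fp->f_ops->fo_read(fp, &auio, fp->f_cred, flags);
 */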

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  These can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
	CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline int
pipeseltest(struct pipe *cpipe)
{
	return ((cpipe->pipe_state & PIPE_SEL) ||
		((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) ||
		SLIST_FIRST(&cpipe->pipe_sel.si_note));
}

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
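
/*
 * The *ipp word (pipe_rip / pipe_wip) acts as a tiny in-progress flag for
 * the serialization described above.  Reading the two routines below, the
 * convention appears to be:
 *
 *	 0	no uio in progress
 *	 1	a uio is in progress, nobody waiting
 *	-1	a uio is in progress and at least one thread is sleeping
 *		on ipp waiting for it to finish
 *
 * pipe_end_uio() only issues a wakeup() when the value went negative, so
 * the uncontended case stays wakeup-free.
 */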
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 *
 * MPSAFE
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer,
				    size, PAGE_SIZE,
				    1, VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock, 1);
	lwkt_token_init(&cpipe->pipe_wlock, 1);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	size_t nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rpipe->pipe_rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
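		/*
		 * The buffer indices are free-running unsigned counters;
		 * only the masked value addresses the buffer.  For example
		 * (sizes illustrative only), with a 16384-byte buffer,
		 * windex = 70000 and rindex = 65000:
		 *
		 *	size   = windex - rindex              = 5000
		 *	offset = rindex & (pipe_buffer.size-1) = 65000 & 16383
		 *	                                        = 15848
		 *
		 * Unsigned wraparound keeps the subtraction correct even
		 * after the counters roll over, as long as no more than
		 * pipe_buffer.size bytes are ever outstanding.
		 */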
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&rpipe->pipe_wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}
		if (pipeseltest(rpipe)) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			pipeselwakeup(rpipe);
			lwkt_reltoken(&rpipe->pipe_wlock);
		}
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);
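
	/*
	 * A note on atomicity: a write whose original size is <= PIPE_BUF
	 * is never split across a buffer-full boundary.  The loop below
	 * forces space to 0 in that case rather than doing a partial
	 * copy, so a small write either fits entirely or the writer
	 * blocks (or gets EAGAIN when non-blocking).  Larger writes may
	 * be delivered to the reader in pieces.
	 */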

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&wpipe->pipe_rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&wpipe->pipe_rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&wpipe->pipe_rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&wpipe->pipe_rlock);
			}
		}
		if (pipeseltest(wpipe)) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			pipeselwakeup(wpipe);
			lwkt_reltoken(&wpipe->pipe_rlock);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wpipe->pipe_wlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&mpipe->pipe_rlock);
	lwkt_gettoken(&mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&mpipe->pipe_wlock);
	lwkt_reltoken(&mpipe->pipe_rlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * poll for events (helper)
 */
static int
pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events)
{
	int revents = 0;
	u_int space;

	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF)) {
		revents |= POLLHUP;
	}
	return (revents);
}

/*
 * Poll for events from file pointer.
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	revents = pipe_poll_events(rpipe, wpipe, events);
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_gettoken(&rpipe->pipe_rlock);
			lwkt_gettoken(&rpipe->pipe_wlock);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			lwkt_gettoken(&wpipe->pipe_wlock);
		}
		revents = pipe_poll_events(rpipe, wpipe, events);
		if (revents == 0) {
			if (events & (POLLIN | POLLRDNORM)) {
				selrecord(curthread, &rpipe->pipe_sel);
				rpipe->pipe_state |= PIPE_SEL;
			}

			if (events & (POLLOUT | POLLWRNORM)) {
				selrecord(curthread, &wpipe->pipe_sel);
				wpipe->pipe_state |= PIPE_SEL;
			}
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_reltoken(&wpipe->pipe_wlock);
			lwkt_reltoken(&wpipe->pipe_rlock);
		}
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			lwkt_reltoken(&rpipe->pipe_rlock);
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&wpipe->pipe_wlock);
	lwkt_reltoken(&wpipe->pipe_rlock);
	lwkt_reltoken(&rpipe->pipe_wlock);
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe->pipe_wlock);
		lwkt_reltoken(&ppipe->pipe_rlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe->pipe_wlock);
	lwkt_reltoken(&cpipe->pipe_rlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		/* release the mplock acquired above before bailing out */
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

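	/*
	 * As with filt_piperead(), the index snapshot below is taken
	 * without holding the pipe tokens, which is what the XXX RACE
	 * markers refer to: the computed space can be momentarily stale.
	 * For an event filter a stale answer only results in a spurious
	 * or retried event, which appears to be the accepted trade-off.
	 */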
	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
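
/*
 * Userland view of the semantics implemented above (illustrative only):
 *
 *	int fds[2];
 *	char buf[PIPE_BUF];
 *
 *	pipe(fds);			 // fds[0] read side, fds[1] write side
 *	write(fds[1], buf, sizeof(buf)); // <= PIPE_BUF, delivered atomically
 *	read(fds[0], buf, sizeof(buf));	 // drains the FIFO
 *	close(fds[1]);			 // peer sees PIPE_WEOF/PIPE_REOF
 *	read(fds[0], buf, sizeof(buf));	 // returns 0 (EOF), see PIPE_REOF
 *
 * With O_NONBLOCK set on the descriptor, a read from an empty pipe or a
 * write into a full one returns EAGAIN instead of sleeping in "piperd" /
 * "pipewr" above.
 */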