/*	$OpenBSD: sys_pipe.c,v 1.38 2001/09/19 20:50:58 mickey Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/event.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <uvm/uvm_extern.h>

#include <sys/pipe.h>

/*
 * interfaces to the outside world
 */
int	pipe_read __P((struct file *, off_t *, struct uio *, struct ucred *));
int	pipe_write __P((struct file *, off_t *, struct uio *, struct ucred *));
int	pipe_close __P((struct file *, struct proc *));
int	pipe_select __P((struct file *, int which, struct proc *));
int	pipe_kqfilter __P((struct file *fp, struct knote *kn));
int	pipe_ioctl __P((struct file *, u_long, caddr_t, struct proc *));
int	pipe_stat __P((struct file *fp, struct stat *ub, struct proc *p));

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_select, pipe_kqfilter,
	pipe_stat, pipe_close
};

void	filt_pipedetach(struct knote *kn);
int	filt_piperead(struct knote *kn, long hint);
int	filt_pipewrite(struct knote *kn, long hint);

struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
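/*
 * MINPIPESIZE is the read side's low-water mark: once fewer bytes than
 * this remain buffered, pipe_read() re-wakes a writer that blocked on a
 * full buffer (the "write blocking hysteresis" handling below).
 */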
#define MINPIPESIZE	(PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
int nbigpipe;
static int amountpipekva;

struct pool pipe_pool;

void	pipeclose __P((struct pipe *));
void	pipeinit __P((struct pipe *));
static __inline int pipelock __P((struct pipe *));
static __inline void pipeunlock __P((struct pipe *));
static __inline void pipeselwakeup __P((struct pipe *));
void	pipespace __P((struct pipe *));

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
sys_opipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	rpipe = pool_get(&pipe_pool, PR_WAITOK);
	pipeinit(rpipe);
	wpipe = pool_get(&pipe_pool, PR_WAITOK);
	pipeinit(wpipe);

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_ops = &pipeops;
	rf->f_data = (caddr_t)rpipe;
	retval[0] = fd;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_ops = &pipeops;
	wf->f_data = (caddr_t)wpipe;
	retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	return (0);
free3:
	ffree(rf);
	fdremove(fdp, retval[0]);
free2:
	(void)pipeclose(wpipe);
	(void)pipeclose(rpipe);
	return (error);
}

/*
 * Allocate kva for pipe circular buffer, the space is pageable
 */
void
pipespace(cpipe)
	struct pipe *cpipe;
{
	cpipe->pipe_buffer.buffer = (caddr_t) uvm_km_valloc(kernel_map,
	    cpipe->pipe_buffer.size);
	if (cpipe->pipe_buffer.buffer == NULL)
		panic("pipespace: out of kvm");

	amountpipekva += cpipe->pipe_buffer.size;
}

/*
 * initialize and allocate VM and memory for pipe
 */
void
pipeinit(cpipe)
	struct pipe *cpipe;
{

	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	cpipe->pipe_buffer.size = PIPE_SIZE;

	/* Buffer kva gets dynamically allocated */
	cpipe->pipe_buffer.buffer = NULL;
	/* cpipe->pipe_buffer.object = invalid */

	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;
	microtime(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
	cpipe->pipe_pgid = NO_PID;
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe)
	struct pipe *cpipe;
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		if ((error = tsleep(cpipe, PRIBIO|PCATCH, "pipelk", 0)))
			return error;
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return 0;
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_pgid != NO_PID)
		gsignal(cpipe->pipe_pgid, SIGIO);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}
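/*
 * Read from a pipe: copy out whatever is buffered, up to the size of the
 * request, waking a blocked writer once enough space has been freed.
 * With nothing buffered we return what was read so far, report EOF,
 * fail with EAGAIN on a non-blocking descriptor, or sleep in "piperd"
 * until the write side produces data.
 */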
/* ARGSUSED */
int
pipe_read(fp, poff, uio, cred)
	struct file *fp;
	off_t *poff;
	struct uio *uio;
	struct ucred *cred;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	int size;

	error = pipelock(rpipe);
	if (error)
		goto unlocked_error;

	++rpipe->pipe_busy;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			if (error) {
				break;
			}
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
		} else {
			/*
			 * detect EOF condition
			 */
			if (rpipe->pipe_state & PIPE_EOF) {
				/* XXX error = ? */
				break;
			}

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK)
				error = EAGAIN;
			else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PRIBIO|PCATCH, "piperd", 0)) == 0)
					error = pipelock(rpipe);
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		microtime(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return error;
}

int
pipe_write(fp, poff, uio, cred)
	struct file *fp;
	off_t *poff;
	struct uio *uio;
	struct ucred *cred;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return EPIPE;
	}
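	/*
	 * (Only EPIPE is returned here; the SIGPIPE mentioned above is
	 * expected to be posted by the generic write path when it sees
	 * this error come back.)
	 */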
	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if (wpipe->pipe_buffer.buffer) {
			amountpipekva -= wpipe->pipe_buffer.size;
			uvm_km_free(kernel_map,
			    (vaddr_t)wpipe->pipe_buffer.buffer,
			    wpipe->pipe_buffer.size);
		}

		wpipe->pipe_buffer.in = 0;
		wpipe->pipe_buffer.out = 0;
		wpipe->pipe_buffer.cnt = 0;
		wpipe->pipe_buffer.size = BIG_PIPE_SIZE;
		wpipe->pipe_buffer.buffer = NULL;
		++nbigpipe;
	}

	if (wpipe->pipe_buffer.buffer == NULL) {
		if ((error = pipelock(wpipe)) == 0) {
			pipespace(wpipe);
			pipeunlock(wpipe);
		} else {
			return error;
		}
	}

	++wpipe->pipe_busy;
	orig_resid = uio->uio_resid;

retrywrite:
	while (uio->uio_resid) {
		int space;

		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 &&
		    (wpipe->pipe_buffer.cnt < wpipe->pipe_buffer.size)) {
			if ((error = pipelock(wpipe)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				    segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");
#endif

					error = uiomove(&wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
#ifdef DIAGNOSTIC
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
#endif
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");
#endif
				}
				pipeunlock(wpipe);
			}
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
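			/*
			 * The buffer is full here, or too full to accept
			 * this <= PIPE_BUF write atomically.  Waking the
			 * reader before we ourselves block below keeps the
			 * two sides from sleeping on each other forever.
			 */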

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up selects.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, (PRIBIO + 1)|PCATCH,
			    "pipewr", 0);
			if (error)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) &&
	    (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE))
		error = 0;

	if (error == 0)
		microtime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return error;
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

	case SIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case SIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);

	}
	return (ENOTTY);
}

int
pipe_select(fp, which, p)
	struct file *fp;
	int which;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;

	wpipe = rpipe->pipe_peer;
	switch (which) {

	case FREAD:
		if ((rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF)) {
			return (1);
		}
		selrecord(p, &rpipe->pipe_sel);
		rpipe->pipe_state |= PIPE_SEL;
		break;

	case FWRITE:
		if ((wpipe == NULL) ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) {
			return (1);
		}
		selrecord(p, &wpipe->pipe_sel);
		wpipe->pipe_state |= PIPE_SEL;
		break;

	case 0:
		if ((rpipe->pipe_state & PIPE_EOF) ||
		    (wpipe == NULL) ||
		    (wpipe->pipe_state & PIPE_EOF)) {
			return (1);
		}

		selrecord(p, &rpipe->pipe_sel);
		rpipe->pipe_state |= PIPE_SEL;
		break;
	}
	return (0);
}

int
pipe_stat(fp, ub, p)
	struct file *fp;
	struct stat *ub;
	struct proc *p;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
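	/*
	 * i.e. round st_size up to a whole number of st_blksize blocks;
	 * even a single buffered byte counts as one block.
	 */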
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return 0;
}

/* ARGSUSED */
int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	pipeclose(cpipe);
	fp->f_data = NULL;
	return 0;
}

/*
 * shutdown the pipe
 */
void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (cpipe) {
		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT|PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		if (cpipe->pipe_buffer.buffer) {
			if (cpipe->pipe_buffer.size > PIPE_SIZE)
				--nbigpipe;
			amountpipekva -= cpipe->pipe_buffer.size;
			uvm_km_free(kernel_map,
			    (vaddr_t)cpipe->pipe_buffer.buffer,
			    cpipe->pipe_buffer.size);
		}
		pool_put(&pipe_pool, cpipe);
	}
}

int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		SLIST_INSERT_HEAD(&rpipe->pipe_sel.si_note, kn, kn_selnext);
		break;
	case EVFILT_WRITE:
		if (wpipe == NULL)
			return (1);
		kn->kn_fop = &pipe_wfiltops;
		SLIST_INSERT_HEAD(&wpipe->pipe_sel.si_note, kn, kn_selnext);
		break;
	default:
		return (1);
	}

	return (0);
}

void
filt_pipedetach(struct knote *kn)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		SLIST_REMOVE(&rpipe->pipe_sel.si_note, kn, knote, kn_selnext);
		break;
	case EVFILT_WRITE:
		if (wpipe == NULL)
			return;
		SLIST_REMOVE(&wpipe->pipe_sel.si_note, kn, knote, kn_selnext);
		break;
	}
}

/*ARGSUSED*/
int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

	return (kn->kn_data >= PIPE_BUF);
}
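/*
 * Called during kernel startup to create the pool that pipe structures
 * are allocated from (the pool_get() calls in sys_opipe() above).
 */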
void
pipe_init()
{
	pool_init(&pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_PIPE);
}