xref: /netbsd-src/sys/kern/sys_pipe.c (revision aaf4ece63a859a04e37cf3a7229b5fab0157cc06)
1 /*	$NetBSD: sys_pipe.c,v 1.70 2005/12/24 19:12:23 perry Exp $	*/
2 
3 /*-
4  * Copyright (c) 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Paul Kranenburg.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 1996 John S. Dyson
41  * All rights reserved.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice immediately at the beginning of the file, without modification,
48  *    this list of conditions, and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  * 3. Absolutely no warranty of function or purpose is made by the author
53  *    John S. Dyson.
54  * 4. Modifications may be freely made to this file if the above conditions
55  *    are met.
56  *
57  * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.95 2002/03/09 22:06:31 alfred Exp $
58  */
59 
60 /*
61  * This file contains a high-performance replacement for the socket-based
62  * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
63  * all features of sockets, but does do everything that pipes normally
64  * do.
65  *
66  * Adaptation for NetBSD UVM, including the uvm_loan() based direct write, was
67  * written by Jaromir Dolecek.
68  */
69 
70 /*
71  * This code has two modes of operation, a small write mode and a large
72  * write mode.  The small write mode acts like conventional pipes with
73  * a kernel buffer.  If a write is smaller than PIPE_MINDIRECT, the
74  * "normal" pipe buffering is done.  Writes of at least PIPE_MINDIRECT are
75  * mapped read-only into the kernel address space (in PIPE_DIRECT_CHUNK
76  * pieces) using the UVM page loan facility, so that the receiving process
77  * can copy the data directly from the sending process's pages.
78  *
79  * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
80  * happen for small transfers so that the system will not spend all of
81  * its time context switching.  PIPE_SIZE is constrained by the
82  * amount of kernel virtual memory.
83  */
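
/*
 * Illustrative sketch of how pipe_write() below picks a mode (not the
 * exact code; the real test also requires that loan kva be available):
 *
 *	if (uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
 *	    (fp->f_flag & FNONBLOCK) == 0)
 *		(void)pipe_direct_write(fp, wpipe, uio);   page-loan path
 *	else
 *		copy into wpipe->pipe_buffer;              buffered path
 */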
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.70 2005/12/24 19:12:23 perry Exp $");
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/proc.h>
91 #include <sys/fcntl.h>
92 #include <sys/file.h>
93 #include <sys/filedesc.h>
94 #include <sys/filio.h>
95 #include <sys/kernel.h>
96 #include <sys/lock.h>
97 #include <sys/ttycom.h>
98 #include <sys/stat.h>
99 #include <sys/malloc.h>
100 #include <sys/poll.h>
101 #include <sys/signalvar.h>
102 #include <sys/vnode.h>
103 #include <sys/uio.h>
105 #include <sys/select.h>
106 #include <sys/mount.h>
107 #include <sys/sa.h>
108 #include <sys/syscallargs.h>
109 #include <uvm/uvm.h>
110 #include <sys/sysctl.h>
112 
113 #include <sys/pipe.h>
114 
115 /*
116  * Avoid microtime(9); it's slow.  We don't guard the read from time(9)
117  * with splclock(9) since we don't actually need to be THAT sure the access
118  * is atomic.
119  */
120 #define PIPE_TIMESTAMP(tvp)	(*(tvp) = time)
121 
122 
123 /*
124  * Use this define if you want to disable *fancy* VM things.  Expect an
125  * approx 30% decrease in transfer rate.
126  */
127 /* #define PIPE_NODIRECT */
128 
129 /*
130  * interfaces to the outside world
131  */
132 static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
133 		struct ucred *cred, int flags);
134 static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
135 		struct ucred *cred, int flags);
136 static int pipe_close(struct file *fp, struct lwp *l);
137 static int pipe_poll(struct file *fp, int events, struct lwp *l);
138 static int pipe_kqfilter(struct file *fp, struct knote *kn);
139 static int pipe_stat(struct file *fp, struct stat *sb, struct lwp *l);
140 static int pipe_ioctl(struct file *fp, u_long cmd, void *data,
141 		struct lwp *l);
142 
143 static const struct fileops pipeops = {
144 	pipe_read, pipe_write, pipe_ioctl, fnullop_fcntl, pipe_poll,
145 	pipe_stat, pipe_close, pipe_kqfilter
146 };
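
/*
 * A read(2), write(2), poll(2), etc. on a pipe descriptor is dispatched
 * through this table via fp->f_ops (roughly, fo_read is pipe_read); see
 * sys_pipe() below, where f_ops is pointed at pipeops.
 */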
147 
148 /*
149  * Default pipe buffer size(s); these can be fairly large now because pipe
150  * space is pageable.  The pipe code will try to maintain locality of
151  * reference for performance reasons, so small amounts of outstanding I/O
152  * will not wipe the cache.
153  */
154 #define MINPIPESIZE (PIPE_SIZE/3)
155 #define MAXPIPESIZE (2*PIPE_SIZE/3)
156 
157 /*
158  * Maximum amount of kva for pipes -- this is a soft limit, but
159  * it is there so that we do not exhaust it on large systems.
160  */
161 #define MAXPIPEKVA (8*1024*1024)
162 static int maxpipekva = MAXPIPEKVA;
163 
164 /*
165  * Limit for direct transfers; we cannot, of course, limit
166  * the amount of kva for pipes in general.
167  */
168 #define LIMITPIPEKVA (16*1024*1024)
169 static int limitpipekva = LIMITPIPEKVA;
170 
171 /*
172  * Limit the number of "big" pipes
173  */
174 #define LIMITBIGPIPES  32
175 static int maxbigpipes = LIMITBIGPIPES;
176 static int nbigpipe = 0;
177 
178 /*
179  * Amount of KVA consumed by pipe buffers.
180  */
181 static int amountpipekva = 0;
182 
183 MALLOC_DEFINE(M_PIPE, "pipe", "Pipe structures");
184 
185 static void pipeclose(struct file *fp, struct pipe *pipe);
186 static void pipe_free_kmem(struct pipe *pipe);
187 static int pipe_create(struct pipe **pipep, int allockva);
188 static int pipelock(struct pipe *pipe, int catch);
189 static inline void pipeunlock(struct pipe *pipe);
190 static void pipeselwakeup(struct pipe *pipe, struct pipe *sigp, int code);
191 #ifndef PIPE_NODIRECT
192 static int pipe_direct_write(struct file *fp, struct pipe *wpipe,
193     struct uio *uio);
194 #endif
195 static int pipespace(struct pipe *pipe, int size);
196 
197 #ifndef PIPE_NODIRECT
198 static int pipe_loan_alloc(struct pipe *, int);
199 static void pipe_loan_free(struct pipe *);
200 #endif /* PIPE_NODIRECT */
201 
202 static POOL_INIT(pipe_pool, sizeof(struct pipe), 0, 0, 0, "pipepl",
203     &pool_allocator_nointr);
204 
205 /*
206  * The pipe system call for the DTYPE_PIPE type of pipes
207  */
208 
209 /* ARGSUSED */
210 int
211 sys_pipe(struct lwp *l, void *v, register_t *retval)
212 {
213 	struct file *rf, *wf;
214 	struct pipe *rpipe, *wpipe;
215 	int fd, error;
216 	struct proc *p;
217 
218 	p = l->l_proc;
219 	rpipe = wpipe = NULL;
220 	if (pipe_create(&rpipe, 1) || pipe_create(&wpipe, 0)) {
221 		pipeclose(NULL, rpipe);
222 		pipeclose(NULL, wpipe);
223 		return (ENFILE);
224 	}
225 
226 	/*
227 	 * Note: the file structure returned from falloc() is marked
228 	 * as 'larval' initially. Unless we mark it as 'mature' by
229 	 * FILE_SET_MATURE(), any attempt to do anything with it would
230 	 * return EBADF, including e.g. dup(2) or close(2). This avoids
231 	 * file descriptor races if we block in the second falloc().
232 	 */
233 
234 	error = falloc(p, &rf, &fd);
235 	if (error)
236 		goto free2;
237 	retval[0] = fd;
238 	rf->f_flag = FREAD;
239 	rf->f_type = DTYPE_PIPE;
240 	rf->f_data = (caddr_t)rpipe;
241 	rf->f_ops = &pipeops;
242 
243 	error = falloc(p, &wf, &fd);
244 	if (error)
245 		goto free3;
246 	retval[1] = fd;
247 	wf->f_flag = FWRITE;
248 	wf->f_type = DTYPE_PIPE;
249 	wf->f_data = (caddr_t)wpipe;
250 	wf->f_ops = &pipeops;
251 
252 	rpipe->pipe_peer = wpipe;
253 	wpipe->pipe_peer = rpipe;
254 
255 	FILE_SET_MATURE(rf);
256 	FILE_SET_MATURE(wf);
257 	FILE_UNUSE(rf, l);
258 	FILE_UNUSE(wf, l);
259 	return (0);
260 free3:
261 	FILE_UNUSE(rf, l);
262 	ffree(rf);
263 	fdremove(p->p_fd, retval[0]);
264 free2:
265 	pipeclose(NULL, wpipe);
266 	pipeclose(NULL, rpipe);
267 
268 	return (error);
269 }
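
/*
 * Illustrative userland view of the two descriptors set up above (not
 * part of this file): retval[0] is the read end and retval[1] the write
 * end, as with pipe(2).
 *
 *	int fds[2];
 *	char c;
 *
 *	if (pipe(fds) == -1)
 *		err(1, "pipe");
 *	(void)write(fds[1], "x", 1);	write end (FWRITE above)
 *	(void)read(fds[0], &c, 1);	read end (FREAD above)
 */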
270 
271 /*
272  * Allocate kva for the pipe circular buffer; the space is pageable.
273  * This routine will 'realloc' the size of a pipe safely: if the allocation
274  * fails, it retains the old buffer and returns ENOMEM.
276  */
277 static int
278 pipespace(struct pipe *pipe, int size)
279 {
280 	caddr_t buffer;
281 	/*
282 	 * Allocate pageable virtual address space. Physical memory is
283 	 * allocated on demand.
284 	 */
285 	buffer = (caddr_t) uvm_km_alloc(kernel_map, round_page(size), 0,
286 	    UVM_KMF_PAGEABLE);
287 	if (buffer == NULL)
288 		return (ENOMEM);
289 
290 	/* free old resources if we're resizing */
291 	pipe_free_kmem(pipe);
292 	pipe->pipe_buffer.buffer = buffer;
293 	pipe->pipe_buffer.size = size;
294 	pipe->pipe_buffer.in = 0;
295 	pipe->pipe_buffer.out = 0;
296 	pipe->pipe_buffer.cnt = 0;
297 	amountpipekva += pipe->pipe_buffer.size;
298 	return (0);
299 }
300 
301 /*
302  * Initialize and allocate VM and memory for pipe.
303  */
304 static int
305 pipe_create(struct pipe **pipep, int allockva)
306 {
307 	struct pipe *pipe;
308 	int error;
309 
310 	pipe = *pipep = pool_get(&pipe_pool, PR_WAITOK);
311 
312 	/* Initialize */
313 	memset(pipe, 0, sizeof(struct pipe));
314 	pipe->pipe_state = PIPE_SIGNALR;
315 
316 	PIPE_TIMESTAMP(&pipe->pipe_ctime);
317 	pipe->pipe_atime = pipe->pipe_ctime;
318 	pipe->pipe_mtime = pipe->pipe_ctime;
319 	simple_lock_init(&pipe->pipe_slock);
320 
321 	if (allockva && (error = pipespace(pipe, PIPE_SIZE)))
322 		return (error);
323 
324 	return (0);
325 }
326 
327 
328 /*
329  * Lock a pipe for I/O, blocking other access.
330  * Called with the pipe spin lock held.
331  * Returns with the pipe spin lock released on success.
332  */
333 static int
334 pipelock(struct pipe *pipe, int catch)
335 {
336 
337 	LOCK_ASSERT(simple_lock_held(&pipe->pipe_slock));
338 
339 	while (pipe->pipe_state & PIPE_LOCKFL) {
340 		int error;
341 		const int pcatch = catch ? PCATCH : 0;
342 
343 		pipe->pipe_state |= PIPE_LWANT;
344 		error = ltsleep(pipe, PSOCK | pcatch, "pipelk", 0,
345 		    &pipe->pipe_slock);
346 		if (error != 0)
347 			return error;
348 	}
349 
350 	pipe->pipe_state |= PIPE_LOCKFL;
351 	simple_unlock(&pipe->pipe_slock);
352 
353 	return 0;
354 }
355 
356 /*
357  * unlock a pipe I/O lock
358  */
359 static inline void
360 pipeunlock(struct pipe *pipe)
361 {
362 
363 	KASSERT(pipe->pipe_state & PIPE_LOCKFL);
364 
365 	pipe->pipe_state &= ~PIPE_LOCKFL;
366 	if (pipe->pipe_state & PIPE_LWANT) {
367 		pipe->pipe_state &= ~PIPE_LWANT;
368 		wakeup(pipe);
369 	}
370 }
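
/*
 * Sketch of how the I/O paths below pair the two locks (see pipe_read()
 * and pipe_write() for the real sequences):
 *
 *	PIPE_LOCK(pipe);		take the spin lock
 *	error = pipelock(pipe, 1);	long-term lock; drops spin lock
 *	...				move data, possibly sleeping
 *	PIPE_LOCK(pipe);		retake the spin lock
 *	pipeunlock(pipe);		release the long-term lock
 *	PIPE_UNLOCK(pipe);		release the spin lock
 */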
371 
372 /*
373  * Select/poll wakeup.  This also sends SIGIO to the peer connected to
374  * the 'sigp' side of the pipe.
375  */
376 static void
377 pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
378 {
379 	int band;
380 
381 	selnotify(&selp->pipe_sel, NOTE_SUBMIT);
382 
383 	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
384 		return;
385 
386 	switch (code) {
387 	case POLL_IN:
388 		band = POLLIN|POLLRDNORM;
389 		break;
390 	case POLL_OUT:
391 		band = POLLOUT|POLLWRNORM;
392 		break;
393 	case POLL_HUP:
394 		band = POLLHUP;
395 		break;
396 #if POLL_HUP != POLL_ERR
397 	case POLL_ERR:
398 		band = POLLERR;
399 		break;
400 #endif
401 	default:
402 		band = 0;
403 #ifdef DIAGNOSTIC
404 		printf("bad siginfo code %d in pipe notification.\n", code);
405 #endif
406 		break;
407 	}
408 
409 	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
410 }
411 
412 /* ARGSUSED */
413 static int
414 pipe_read(struct file *fp, off_t *offset, struct uio *uio, struct ucred *cred,
415     int flags)
416 {
417 	struct pipe *rpipe = (struct pipe *) fp->f_data;
418 	struct pipebuf *bp = &rpipe->pipe_buffer;
419 	int error;
420 	size_t nread = 0;
421 	size_t size;
422 	size_t ocnt;
423 
424 	PIPE_LOCK(rpipe);
425 	++rpipe->pipe_busy;
426 	ocnt = bp->cnt;
427 
428 again:
429 	error = pipelock(rpipe, 1);
430 	if (error)
431 		goto unlocked_error;
432 
433 	while (uio->uio_resid) {
434 		/*
435 		 * normal pipe buffer receive
436 		 */
437 		if (bp->cnt > 0) {
438 			size = bp->size - bp->out;
439 			if (size > bp->cnt)
440 				size = bp->cnt;
441 			if (size > uio->uio_resid)
442 				size = uio->uio_resid;
443 
444 			error = uiomove(&bp->buffer[bp->out], size, uio);
445 			if (error)
446 				break;
447 
448 			bp->out += size;
449 			if (bp->out >= bp->size)
450 				bp->out = 0;
451 
452 			bp->cnt -= size;
453 
454 			/*
455 			 * If there is no more to read in the pipe, reset
456 			 * its pointers to the beginning.  This improves
457 			 * cache hit stats.
458 			 */
459 			if (bp->cnt == 0) {
460 				bp->in = 0;
461 				bp->out = 0;
462 			}
463 			nread += size;
464 #ifndef PIPE_NODIRECT
465 		} else if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
466 			/*
467 			 * Direct copy, bypassing a kernel buffer.
468 			 */
469 			caddr_t	va;
470 
471 			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);
472 
473 			size = rpipe->pipe_map.cnt;
474 			if (size > uio->uio_resid)
475 				size = uio->uio_resid;
476 
477 			va = (caddr_t) rpipe->pipe_map.kva +
478 			    rpipe->pipe_map.pos;
479 			error = uiomove(va, size, uio);
480 			if (error)
481 				break;
482 			nread += size;
483 			rpipe->pipe_map.pos += size;
484 			rpipe->pipe_map.cnt -= size;
485 			if (rpipe->pipe_map.cnt == 0) {
486 				PIPE_LOCK(rpipe);
487 				rpipe->pipe_state &= ~PIPE_DIRECTR;
488 				wakeup(rpipe);
489 				PIPE_UNLOCK(rpipe);
490 			}
491 #endif
492 		} else {
493 			/*
494 			 * Break if some data was read.
495 			 */
496 			if (nread > 0)
497 				break;
498 
499 			PIPE_LOCK(rpipe);
500 
501 			/*
502 			 * detect EOF condition
503 			 * read returns 0 on EOF, no need to set error
504 			 */
505 			if (rpipe->pipe_state & PIPE_EOF) {
506 				PIPE_UNLOCK(rpipe);
507 				break;
508 			}
509 
510 			/*
511 			 * don't block on non-blocking I/O
512 			 */
513 			if (fp->f_flag & FNONBLOCK) {
514 				PIPE_UNLOCK(rpipe);
515 				error = EAGAIN;
516 				break;
517 			}
518 
519 			/*
520 			 * Unlock the pipe buffer for our remaining processing.
521 			 * We will either break out with an error or we will
522 			 * sleep and relock to loop.
523 			 */
524 			pipeunlock(rpipe);
525 
526 			/*
527 			 * The PIPE_DIRECTR flag is not under the control
528 			 * of the long-term lock (see pipe_direct_write()),
529 			 * so re-check now while holding the spin lock.
530 			 */
531 			if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
532 				goto again;
533 
534 			/*
535 			 * We want to read more, wake up select/poll.
536 			 */
537 			pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_IN);
538 
539 			/*
540 			 * If the "write-side" is blocked, wake it up now.
541 			 */
542 			if (rpipe->pipe_state & PIPE_WANTW) {
543 				rpipe->pipe_state &= ~PIPE_WANTW;
544 				wakeup(rpipe);
545 			}
546 
547 			/* Now wait until the pipe is filled */
548 			rpipe->pipe_state |= PIPE_WANTR;
549 			error = ltsleep(rpipe, PSOCK | PCATCH,
550 					"piperd", 0, &rpipe->pipe_slock);
551 			if (error != 0)
552 				goto unlocked_error;
553 			goto again;
554 		}
555 	}
556 
557 	if (error == 0)
558 		PIPE_TIMESTAMP(&rpipe->pipe_atime);
559 
560 	PIPE_LOCK(rpipe);
561 	pipeunlock(rpipe);
562 
563 unlocked_error:
564 	--rpipe->pipe_busy;
565 
566 	/*
567 	 * PIPE_WANTCLOSE processing only makes sense if pipe_busy is 0.
568 	 */
569 	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANTCLOSE)) {
570 		rpipe->pipe_state &= ~(PIPE_WANTCLOSE|PIPE_WANTW);
571 		wakeup(rpipe);
572 	} else if (bp->cnt < MINPIPESIZE) {
573 		/*
574 		 * Handle write blocking hysteresis.
575 		 */
576 		if (rpipe->pipe_state & PIPE_WANTW) {
577 			rpipe->pipe_state &= ~PIPE_WANTW;
578 			wakeup(rpipe);
579 		}
580 	}
581 
582 	/*
583 	 * If anything was read off the buffer, signal the writer that it is
584 	 * possible to write more data.  Also send the signal if we are here
585 	 * for the first time after the last write.
586 	 */
587 	if ((bp->size - bp->cnt) >= PIPE_BUF
588 	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
589 		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
590 		rpipe->pipe_state &= ~PIPE_SIGNALR;
591 	}
592 
593 	PIPE_UNLOCK(rpipe);
594 	return (error);
595 }
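
/*
 * Worked example of the read-size computation above, with assumed
 * values bp->size = 16384, bp->out = 16000, bp->cnt = 1000 and
 * uio_resid = 4096: the contiguous span is 16384 - 16000 = 384 bytes,
 * smaller than both cnt and resid, so 384 bytes are copied, 'out'
 * wraps to 0, and the next loop iteration copies the remaining 616
 * bytes from the start of the buffer.
 */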
596 
597 #ifndef PIPE_NODIRECT
598 /*
599  * Allocate structure for loan transfer.
600  */
601 static int
602 pipe_loan_alloc(struct pipe *wpipe, int npages)
603 {
604 	vsize_t len;
605 
606 	len = (vsize_t)npages << PAGE_SHIFT;
607 	wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
608 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
609 	if (wpipe->pipe_map.kva == 0)
610 		return (ENOMEM);
611 
612 	amountpipekva += len;
613 	wpipe->pipe_map.npages = npages;
614 	wpipe->pipe_map.pgs = malloc(npages * sizeof(struct vm_page *), M_PIPE,
615 	    M_WAITOK);
616 	return (0);
617 }
618 
619 /*
620  * Free resources allocated for loan transfer.
621  */
622 static void
623 pipe_loan_free(struct pipe *wpipe)
624 {
625 	vsize_t len;
626 
627 	len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
628 	uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
629 	wpipe->pipe_map.kva = 0;
630 	amountpipekva -= len;
631 	free(wpipe->pipe_map.pgs, M_PIPE);
632 	wpipe->pipe_map.pgs = NULL;
633 }
634 
635 /*
636  * NetBSD direct write, using uvm_loan() mechanism.
637  * This implements the direct (page-loan) write mechanism.  Note that only
638  * a direct write OR a normal pipe write can be pending at any given time.
639  * If there are any characters in the pipe buffer, the direct write will
640  * be deferred until the receiving process grabs all of the bytes from
641  * the pipe buffer.  Then the direct mapping write is set up.
642  *
643  * Called with the long-term pipe lock held.
644  */
645 static int
646 pipe_direct_write(struct file *fp, struct pipe *wpipe, struct uio *uio)
647 {
648 	int error, npages, j;
649 	struct vm_page **pgs;
650 	vaddr_t bbase, kva, base, bend;
651 	vsize_t blen, bcnt;
652 	voff_t bpos;
653 
654 	KASSERT(wpipe->pipe_map.cnt == 0);
655 
656 	/*
657 	 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
658 	 * not aligned to PAGE_SIZE.
659 	 */
660 	bbase = (vaddr_t)uio->uio_iov->iov_base;
661 	base = trunc_page(bbase);
662 	bend = round_page(bbase + uio->uio_iov->iov_len);
663 	blen = bend - base;
664 	bpos = bbase - base;
665 
666 	if (blen > PIPE_DIRECT_CHUNK) {
667 		blen = PIPE_DIRECT_CHUNK;
668 		bend = base + blen;
669 		bcnt = PIPE_DIRECT_CHUNK - bpos;
670 	} else {
671 		bcnt = uio->uio_iov->iov_len;
672 	}
673 	npages = blen >> PAGE_SHIFT;
674 
675 	/*
676 	 * Free the old kva if we need more pages than we have
677 	 * allocated.
678 	 */
679 	if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
680 		pipe_loan_free(wpipe);
681 
682 	/* Allocate new kva. */
683 	if (wpipe->pipe_map.kva == 0) {
684 		error = pipe_loan_alloc(wpipe, npages);
685 		if (error)
686 			return (error);
687 	}
688 
689 	/* Loan the write buffer memory from writer process */
690 	pgs = wpipe->pipe_map.pgs;
691 	error = uvm_loan(&uio->uio_lwp->l_proc->p_vmspace->vm_map, base, blen,
692 			 pgs, UVM_LOAN_TOPAGE);
693 	if (error) {
694 		pipe_loan_free(wpipe);
695 		return (ENOMEM); /* so that the caller falls back to ordinary write */
696 	}
697 
698 	/* Enter the loaned pages to kva */
699 	kva = wpipe->pipe_map.kva;
700 	for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
701 		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
702 	}
703 	pmap_update(pmap_kernel());
704 
705 	/* Now we can put the pipe in direct write mode */
706 	wpipe->pipe_map.pos = bpos;
707 	wpipe->pipe_map.cnt = bcnt;
708 	wpipe->pipe_state |= PIPE_DIRECTW;
709 
710 	/*
711 	 * But before we can let someone do a direct read,
712 	 * we have to wait until the pipe is drained.
713 	 */
714 
715 	/* Release the pipe lock while we wait */
716 	PIPE_LOCK(wpipe);
717 	pipeunlock(wpipe);
718 
719 	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
720 		if (wpipe->pipe_state & PIPE_WANTR) {
721 			wpipe->pipe_state &= ~PIPE_WANTR;
722 			wakeup(wpipe);
723 		}
724 
725 		wpipe->pipe_state |= PIPE_WANTW;
726 		error = ltsleep(wpipe, PSOCK | PCATCH, "pipdwc", 0,
727 				&wpipe->pipe_slock);
728 		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
729 			error = EPIPE;
730 	}
731 
732 	/* Pipe is drained; the next read will come off the direct buffer */
733 	wpipe->pipe_state |= PIPE_DIRECTR;
734 
735 	/* Wait until the reader is done */
736 	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
737 		if (wpipe->pipe_state & PIPE_WANTR) {
738 			wpipe->pipe_state &= ~PIPE_WANTR;
739 			wakeup(wpipe);
740 		}
741 		pipeselwakeup(wpipe, wpipe, POLL_IN);
742 		error = ltsleep(wpipe, PSOCK | PCATCH, "pipdwt", 0,
743 				&wpipe->pipe_slock);
744 		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
745 			error = EPIPE;
746 	}
747 
748 	/* Take pipe out of direct write mode */
749 	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);
750 
751 	/* Acquire the pipe lock and cleanup */
752 	(void)pipelock(wpipe, 0);
753 	if (pgs != NULL) {
754 		pmap_kremove(wpipe->pipe_map.kva, blen);
755 		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
756 	}
757 	if (error || amountpipekva > maxpipekva)
758 		pipe_loan_free(wpipe);
759 
760 	if (error) {
761 		pipeselwakeup(wpipe, wpipe, POLL_ERR);
762 
763 		/*
764 		 * If nothing was read from what we offered, return the error
765 		 * straight away.  Otherwise update the uio resid first.  The
766 		 * caller will deal with the error condition, returning a short
767 		 * write, an error, or restarting the write(2) as appropriate.
768 		 */
769 		if (wpipe->pipe_map.cnt == bcnt) {
770 			wpipe->pipe_map.cnt = 0;
771 			wakeup(wpipe);
772 			return (error);
773 		}
774 
775 		bcnt -= wpipe->pipe_map.cnt;
776 	}
777 
778 	uio->uio_resid -= bcnt;
779 	/* uio_offset not updated, not set/used for write(2) */
780 	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
781 	uio->uio_iov->iov_len -= bcnt;
782 	if (uio->uio_iov->iov_len == 0) {
783 		uio->uio_iov++;
784 		uio->uio_iovcnt--;
785 	}
786 
787 	wpipe->pipe_map.cnt = 0;
788 	return (error);
789 }
790 #endif /* !PIPE_NODIRECT */
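
/*
 * Summary of the direct-write handshake implemented above, in sketch
 * form (the writer runs pipe_direct_write(), the reader pipe_read()):
 *
 *	writer					reader
 *	------					------
 *	uvm_loan() user pages, map into kva
 *	set PIPE_DIRECTW
 *	wait for pipe_buffer to drain
 *	set PIPE_DIRECTR, sleep
 *						copy from pipe_map.kva
 *						clear PIPE_DIRECTR, wakeup
 *	clear PIPE_DIRECTW/PIPE_DIRECTR
 *	unmap and uvm_unloan() the pages
 */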
791 
792 static int
793 pipe_write(struct file *fp, off_t *offset, struct uio *uio, struct ucred *cred,
794     int flags)
795 {
796 	struct pipe *wpipe, *rpipe;
797 	struct pipebuf *bp;
798 	int error;
799 
800 	/* We want to write to our peer */
801 	rpipe = (struct pipe *) fp->f_data;
802 
803 retry:
804 	error = 0;
805 	PIPE_LOCK(rpipe);
806 	wpipe = rpipe->pipe_peer;
807 
808 	/*
809 	 * Detect loss of pipe read side, issue SIGPIPE if lost.
810 	 */
811 	if (wpipe == NULL)
812 		error = EPIPE;
813 	else if (simple_lock_try(&wpipe->pipe_slock) == 0) {
814 		/* Deal with race for peer */
815 		PIPE_UNLOCK(rpipe);
816 		goto retry;
817 	} else if ((wpipe->pipe_state & PIPE_EOF) != 0) {
818 		PIPE_UNLOCK(wpipe);
819 		error = EPIPE;
820 	}
821 
822 	PIPE_UNLOCK(rpipe);
823 	if (error != 0)
824 		return (error);
825 
826 	++wpipe->pipe_busy;
827 
828 	/* Acquire the long-term pipe lock */
829 	if ((error = pipelock(wpipe, 1)) != 0) {
830 		--wpipe->pipe_busy;
831 		if (wpipe->pipe_busy == 0
832 		    && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
833 			wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
834 			wakeup(wpipe);
835 		}
836 		PIPE_UNLOCK(wpipe);
837 		return (error);
838 	}
839 
840 	bp = &wpipe->pipe_buffer;
841 
842 	/*
843 	 * If it is advantageous to resize the pipe buffer, do so.
844 	 */
845 	if ((uio->uio_resid > PIPE_SIZE) &&
846 	    (nbigpipe < maxbigpipes) &&
847 #ifndef PIPE_NODIRECT
848 	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
849 #endif
850 	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {
851 
852 		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
853 			nbigpipe++;
854 	}
855 
856 	while (uio->uio_resid) {
857 		size_t space;
858 
859 #ifndef PIPE_NODIRECT
860 		/*
861 		 * Pipe buffered writes cannot be coincident with
862 		 * direct writes.  Also, only one direct write can be
863 		 * in progress at any one time.  We wait until the currently
864 		 * executing direct write is completed before continuing.
865 		 *
866 		 * We break out if a signal occurs or the reader goes away.
867 		 */
868 		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
869 			PIPE_LOCK(wpipe);
870 			if (wpipe->pipe_state & PIPE_WANTR) {
871 				wpipe->pipe_state &= ~PIPE_WANTR;
872 				wakeup(wpipe);
873 			}
874 			pipeunlock(wpipe);
875 			error = ltsleep(wpipe, PSOCK | PCATCH,
876 					"pipbww", 0, &wpipe->pipe_slock);
877 
878 			(void)pipelock(wpipe, 0);
879 			if (wpipe->pipe_state & PIPE_EOF)
880 				error = EPIPE;
881 		}
882 		if (error)
883 			break;
884 
885 		/*
886 		 * If the transfer is large, we can gain performance if
887 		 * we do process-to-process copies directly.
888 		 * If the write is non-blocking, we don't use the
889 		 * direct write mechanism.
890 		 *
891 		 * The direct write mechanism will detect the reader going
892 		 * away on us.
893 		 */
894 		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
895 		    (fp->f_flag & FNONBLOCK) == 0 &&
896 		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
897 			error = pipe_direct_write(fp, wpipe, uio);
898 
899 			/*
900 			 * Break out if an error occurred, unless it's ENOMEM.
901 			 * ENOMEM means we failed to allocate some resources
902 			 * for the direct write, so we just fall back to ordinary
903 			 * write.  If the direct write was successful,
904 			 * process the rest of the data via ordinary write.
905 			 */
906 			if (error == 0)
907 				continue;
908 
909 			if (error != ENOMEM)
910 				break;
911 		}
912 #endif /* !PIPE_NODIRECT */
913 
914 		space = bp->size - bp->cnt;
915 
916 		/* Writes of size <= PIPE_BUF must be atomic. */
917 		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
918 			space = 0;
919 
920 		if (space > 0) {
921 			int size;	/* Transfer size */
922 			int segsize;	/* first segment to transfer */
923 
924 			/*
925 			 * Transfer size is minimum of uio transfer
926 			 * and free space in pipe buffer.
927 			 */
928 			if (space > uio->uio_resid)
929 				size = uio->uio_resid;
930 			else
931 				size = space;
932 			/*
933 			 * First segment to transfer is minimum of
934 			 * transfer size and contiguous space in
935 			 * pipe buffer.  If first segment to transfer
936 			 * is less than the transfer size, we've got
937 			 * a wraparound in the buffer.
938 			 */
939 			segsize = bp->size - bp->in;
940 			if (segsize > size)
941 				segsize = size;
942 
943 			/* Transfer first segment */
944 			error = uiomove(&bp->buffer[bp->in], segsize, uio);
945 
946 			if (error == 0 && segsize < size) {
947 				/*
948 				 * Transfer remaining part now, to
949 				 * support atomic writes.  Wraparound
950 				 * happened.
951 				 */
952 #ifdef DEBUG
953 				if (bp->in + segsize != bp->size)
954 					panic("Expected pipe buffer wraparound disappeared");
955 #endif
956 
957 				error = uiomove(&bp->buffer[0],
958 						size - segsize, uio);
959 			}
960 			if (error)
961 				break;
962 
963 			bp->in += size;
964 			if (bp->in >= bp->size) {
965 #ifdef DEBUG
966 				if (bp->in != size - segsize + bp->size)
967 					panic("Expected wraparound bad");
968 #endif
969 				bp->in = size - segsize;
970 			}
971 
972 			bp->cnt += size;
973 #ifdef DEBUG
974 			if (bp->cnt > bp->size)
975 				panic("Pipe buffer overflow");
976 #endif
977 		} else {
978 			/*
979 			 * If the "read-side" has been blocked, wake it up now.
980 			 */
981 			PIPE_LOCK(wpipe);
982 			if (wpipe->pipe_state & PIPE_WANTR) {
983 				wpipe->pipe_state &= ~PIPE_WANTR;
984 				wakeup(wpipe);
985 			}
986 			PIPE_UNLOCK(wpipe);
987 
988 			/*
989 			 * don't block on non-blocking I/O
990 			 */
991 			if (fp->f_flag & FNONBLOCK) {
992 				error = EAGAIN;
993 				break;
994 			}
995 
996 			/*
997 			 * We have no more space and have something to offer,
998 			 * wake up select/poll.
999 			 */
1000 			if (bp->cnt)
1001 				pipeselwakeup(wpipe, wpipe, POLL_OUT);
1002 
1003 			PIPE_LOCK(wpipe);
1004 			pipeunlock(wpipe);
1005 			wpipe->pipe_state |= PIPE_WANTW;
1006 			error = ltsleep(wpipe, PSOCK | PCATCH, "pipewr", 0,
1007 					&wpipe->pipe_slock);
1008 			(void)pipelock(wpipe, 0);
1009 			if (error != 0)
1010 				break;
1011 			/*
1012 			 * If read side wants to go away, we just issue a signal
1013 			 * to ourselves.
1014 			 */
1015 			if (wpipe->pipe_state & PIPE_EOF) {
1016 				error = EPIPE;
1017 				break;
1018 			}
1019 		}
1020 	}
1021 
1022 	PIPE_LOCK(wpipe);
1023 	--wpipe->pipe_busy;
1024 	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANTCLOSE)) {
1025 		wpipe->pipe_state &= ~(PIPE_WANTCLOSE | PIPE_WANTR);
1026 		wakeup(wpipe);
1027 	} else if (bp->cnt > 0) {
1028 		/*
1029 		 * If we have put any characters in the buffer, we wake up
1030 		 * the reader.
1031 		 */
1032 		if (wpipe->pipe_state & PIPE_WANTR) {
1033 			wpipe->pipe_state &= ~PIPE_WANTR;
1034 			wakeup(wpipe);
1035 		}
1036 	}
1037 
1038 	/*
1039 	 * Don't return EPIPE if I/O was successful
1040 	 */
1041 	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
1042 		error = 0;
1043 
1044 	if (error == 0)
1045 		PIPE_TIMESTAMP(&wpipe->pipe_mtime);
1046 
1047 	/*
1048 	 * We have something to offer, wake up select/poll.
1049 	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
1050 	 * is only done synchronously), so check only wpipe->pipe_buffer.cnt
1051 	 */
1052 	if (bp->cnt)
1053 		pipeselwakeup(wpipe, wpipe, POLL_OUT);
1054 
1055 	/*
1056 	 * Arrange for next read(2) to do a signal.
1057 	 */
1058 	wpipe->pipe_state |= PIPE_SIGNALR;
1059 
1060 	pipeunlock(wpipe);
1061 	PIPE_UNLOCK(wpipe);
1062 	return (error);
1063 }
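
/*
 * Userland consequence of the PIPE_BUF handling above (illustrative;
 * 'logfd' is a hypothetical pipe write end): writes no larger than
 * PIPE_BUF are never interleaved with other writers' data, so
 *
 *	char rec[PIPE_BUF];
 *	...
 *	(void)write(logfd, rec, sizeof(rec));	stored as one unit
 *
 * while larger writes may be split up and interleaved.
 */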
1064 
1065 /*
1066  * we implement a very minimal set of ioctls for compatibility with sockets.
1067  */
1068 int
1069 pipe_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
1070 {
1071 	struct pipe *pipe = (struct pipe *)fp->f_data;
1072 	struct proc *p = l->l_proc;
1073 
1074 	switch (cmd) {
1075 
1076 	case FIONBIO:
1077 		return (0);
1078 
1079 	case FIOASYNC:
1080 		PIPE_LOCK(pipe);
1081 		if (*(int *)data) {
1082 			pipe->pipe_state |= PIPE_ASYNC;
1083 		} else {
1084 			pipe->pipe_state &= ~PIPE_ASYNC;
1085 		}
1086 		PIPE_UNLOCK(pipe);
1087 		return (0);
1088 
1089 	case FIONREAD:
1090 		PIPE_LOCK(pipe);
1091 #ifndef PIPE_NODIRECT
1092 		if (pipe->pipe_state & PIPE_DIRECTW)
1093 			*(int *)data = pipe->pipe_map.cnt;
1094 		else
1095 #endif
1096 			*(int *)data = pipe->pipe_buffer.cnt;
1097 		PIPE_UNLOCK(pipe);
1098 		return (0);
1099 
1100 	case FIONWRITE:
1101 		/* Look at other side */
1102 		pipe = pipe->pipe_peer;
1103 		PIPE_LOCK(pipe);
1104 #ifndef PIPE_NODIRECT
1105 		if (pipe->pipe_state & PIPE_DIRECTW)
1106 			*(int *)data = pipe->pipe_map.cnt;
1107 		else
1108 #endif
1109 			*(int *)data = pipe->pipe_buffer.cnt;
1110 		PIPE_UNLOCK(pipe);
1111 		return (0);
1112 
1113 	case FIONSPACE:
1114 		/* Look at other side */
1115 		pipe = pipe->pipe_peer;
1116 		PIPE_LOCK(pipe);
1117 #ifndef PIPE_NODIRECT
1118 		/*
1119 		 * If we're in direct-mode, we don't really have a
1120 		 * send queue, and any other write will block. Thus
1121 		 * zero seems like the best answer.
1122 		 */
1123 		if (pipe->pipe_state & PIPE_DIRECTW)
1124 			*(int *)data = 0;
1125 		else
1126 #endif
1127 			*(int *)data = pipe->pipe_buffer.size -
1128 					pipe->pipe_buffer.cnt;
1129 		PIPE_UNLOCK(pipe);
1130 		return (0);
1131 
1132 	case TIOCSPGRP:
1133 	case FIOSETOWN:
1134 		return fsetown(p, &pipe->pipe_pgid, cmd, data);
1135 
1136 	case TIOCGPGRP:
1137 	case FIOGETOWN:
1138 		return fgetown(p, pipe->pipe_pgid, cmd, data);
1139 
1140 	}
1141 	return (EPASSTHROUGH);
1142 }
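
/*
 * Illustrative userland use of the FIONREAD case above (not part of
 * this file); 'fd' is assumed to be the read end of a pipe:
 *
 *	int nbytes;
 *
 *	if (ioctl(fd, FIONREAD, &nbytes) == 0)
 *		printf("%d bytes buffered\n", nbytes);
 */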
1143 
1144 int
1145 pipe_poll(struct file *fp, int events, struct lwp *l)
1146 {
1147 	struct pipe *rpipe = (struct pipe *)fp->f_data;
1148 	struct pipe *wpipe;
1149 	int eof = 0;
1150 	int revents = 0;
1151 
1152 retry:
1153 	PIPE_LOCK(rpipe);
1154 	wpipe = rpipe->pipe_peer;
1155 	if (wpipe != NULL && simple_lock_try(&wpipe->pipe_slock) == 0) {
1156 		/* Deal with race for peer */
1157 		PIPE_UNLOCK(rpipe);
1158 		goto retry;
1159 	}
1160 
1161 	if (events & (POLLIN | POLLRDNORM))
1162 		if ((rpipe->pipe_buffer.cnt > 0) ||
1163 #ifndef PIPE_NODIRECT
1164 		    (rpipe->pipe_state & PIPE_DIRECTR) ||
1165 #endif
1166 		    (rpipe->pipe_state & PIPE_EOF))
1167 			revents |= events & (POLLIN | POLLRDNORM);
1168 
1169 	eof |= (rpipe->pipe_state & PIPE_EOF);
1170 	PIPE_UNLOCK(rpipe);
1171 
1172 	if (wpipe == NULL)
1173 		revents |= events & (POLLOUT | POLLWRNORM);
1174 	else {
1175 		if (events & (POLLOUT | POLLWRNORM))
1176 			if ((wpipe->pipe_state & PIPE_EOF) || (
1177 #ifndef PIPE_NODIRECT
1178 			     (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1179 #endif
1180 			     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1181 				revents |= events & (POLLOUT | POLLWRNORM);
1182 
1183 		eof |= (wpipe->pipe_state & PIPE_EOF);
1184 		PIPE_UNLOCK(wpipe);
1185 	}
1186 
1187 	if (wpipe == NULL || eof)
1188 		revents |= POLLHUP;
1189 
1190 	if (revents == 0) {
1191 		if (events & (POLLIN | POLLRDNORM))
1192 			selrecord(l, &rpipe->pipe_sel);
1193 
1194 		if (events & (POLLOUT | POLLWRNORM))
1195 			selrecord(l, &wpipe->pipe_sel);
1196 	}
1197 
1198 	return (revents);
1199 }
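
/*
 * Illustrative poll(2) usage serviced by the routine above (not part
 * of this file); 'rfd' is assumed to be a pipe read end:
 *
 *	struct pollfd pfd = { .fd = rfd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP) != 0)
 *		...;	the write side has gone away
 */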
1200 
1201 static int
1202 pipe_stat(struct file *fp, struct stat *ub, struct lwp *l)
1203 {
1204 	struct pipe *pipe = (struct pipe *)fp->f_data;
1205 
1206 	memset((caddr_t)ub, 0, sizeof(*ub));
1207 	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1208 	ub->st_blksize = pipe->pipe_buffer.size;
1209 	if (ub->st_blksize == 0 && pipe->pipe_peer)
1210 		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
1211 	ub->st_size = pipe->pipe_buffer.cnt;
1212 	ub->st_blocks = (ub->st_size) ? 1 : 0;
1213 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
1214 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1215 	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1216 	ub->st_uid = fp->f_cred->cr_uid;
1217 	ub->st_gid = fp->f_cred->cr_gid;
1218 	/*
1219 	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1220 	 * XXX (st_dev, st_ino) should be unique.
1221 	 */
1222 	return (0);
1223 }
1224 
1225 /* ARGSUSED */
1226 static int
1227 pipe_close(struct file *fp, struct lwp *l)
1228 {
1229 	struct pipe *pipe = (struct pipe *)fp->f_data;
1230 
1231 	fp->f_data = NULL;
1232 	pipeclose(fp, pipe);
1233 	return (0);
1234 }
1235 
1236 static void
1237 pipe_free_kmem(struct pipe *pipe)
1238 {
1239 
1240 	if (pipe->pipe_buffer.buffer != NULL) {
1241 		if (pipe->pipe_buffer.size > PIPE_SIZE)
1242 			--nbigpipe;
1243 		amountpipekva -= pipe->pipe_buffer.size;
1244 		uvm_km_free(kernel_map,
1245 			(vaddr_t)pipe->pipe_buffer.buffer,
1246 			pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
1247 		pipe->pipe_buffer.buffer = NULL;
1248 	}
1249 #ifndef PIPE_NODIRECT
1250 	if (pipe->pipe_map.kva != 0) {
1251 		pipe_loan_free(pipe);
1252 		pipe->pipe_map.cnt = 0;
1253 		pipe->pipe_map.kva = 0;
1254 		pipe->pipe_map.pos = 0;
1255 		pipe->pipe_map.npages = 0;
1256 	}
1257 #endif /* !PIPE_NODIRECT */
1258 }
1259 
1260 /*
1261  * Shut down the pipe.
1262  */
1263 static void
1264 pipeclose(struct file *fp, struct pipe *pipe)
1265 {
1266 	struct pipe *ppipe;
1267 
1268 	if (pipe == NULL)
1269 		return;
1270 
1271 retry:
1272 	PIPE_LOCK(pipe);
1273 
1274 	pipeselwakeup(pipe, pipe, POLL_HUP);
1275 
1276 	/*
1277 	 * If the other side is blocked, wake it up saying that
1278 	 * we want to close it down.
1279 	 */
1280 	pipe->pipe_state |= PIPE_EOF;
1281 	while (pipe->pipe_busy) {
1282 		wakeup(pipe);
1283 		pipe->pipe_state |= PIPE_WANTCLOSE;
1284 		ltsleep(pipe, PSOCK, "pipecl", 0, &pipe->pipe_slock);
1285 	}
1286 
1287 	/*
1288 	 * Disconnect from peer
1289 	 */
1290 	if ((ppipe = pipe->pipe_peer) != NULL) {
1291 		/* Deal with race for peer */
1292 		if (simple_lock_try(&ppipe->pipe_slock) == 0) {
1293 			PIPE_UNLOCK(pipe);
1294 			goto retry;
1295 		}
1296 		pipeselwakeup(ppipe, ppipe, POLL_HUP);
1297 
1298 		ppipe->pipe_state |= PIPE_EOF;
1299 		wakeup(ppipe);
1300 		ppipe->pipe_peer = NULL;
1301 		PIPE_UNLOCK(ppipe);
1302 	}
1303 
1304 	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
1305 
1306 	PIPE_UNLOCK(pipe);
1307 
1308 	/*
1309 	 * free resources
1310 	 */
1311 	pipe_free_kmem(pipe);
1312 	pool_put(&pipe_pool, pipe);
1313 }
1314 
1315 static void
1316 filt_pipedetach(struct knote *kn)
1317 {
1318 	struct pipe *pipe = (struct pipe *)kn->kn_fp->f_data;
1319 
1320 	switch(kn->kn_filter) {
1321 	case EVFILT_WRITE:
1322 		/* need the peer structure, not our own */
1323 		pipe = pipe->pipe_peer;
1324 		/* XXXSMP: race for peer */
1325 
1326 		/* if reader end already closed, just return */
1327 		if (pipe == NULL)
1328 			return;
1329 
1330 		break;
1331 	default:
1332 		/* nothing to do */
1333 		break;
1334 	}
1335 
1336 #ifdef DIAGNOSTIC
1337 	if (kn->kn_hook != pipe)
1338 		panic("filt_pipedetach: inconsistent knote");
1339 #endif
1340 
1341 	PIPE_LOCK(pipe);
1342 	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
1343 	PIPE_UNLOCK(pipe);
1344 }
1345 
1346 /*ARGSUSED*/
1347 static int
1348 filt_piperead(struct knote *kn, long hint)
1349 {
1350 	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1351 	struct pipe *wpipe = rpipe->pipe_peer;
1352 
1353 	if ((hint & NOTE_SUBMIT) == 0)
1354 		PIPE_LOCK(rpipe);
1355 	kn->kn_data = rpipe->pipe_buffer.cnt;
1356 	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1357 		kn->kn_data = rpipe->pipe_map.cnt;
1358 
1359 	/* XXXSMP: race for peer */
1360 	if ((rpipe->pipe_state & PIPE_EOF) ||
1361 	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1362 		kn->kn_flags |= EV_EOF;
1363 		if ((hint & NOTE_SUBMIT) == 0)
1364 			PIPE_UNLOCK(rpipe);
1365 		return (1);
1366 	}
1367 	if ((hint & NOTE_SUBMIT) == 0)
1368 		PIPE_UNLOCK(rpipe);
1369 	return (kn->kn_data > 0);
1370 }
1371 
1372 /*ARGSUSED*/
1373 static int
1374 filt_pipewrite(struct knote *kn, long hint)
1375 {
1376 	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1377 	struct pipe *wpipe = rpipe->pipe_peer;
1378 
1379 	if ((hint & NOTE_SUBMIT) == 0)
1380 		PIPE_LOCK(rpipe);
1381 	/* XXXSMP: race for peer */
1382 	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1383 		kn->kn_data = 0;
1384 		kn->kn_flags |= EV_EOF;
1385 		if ((hint & NOTE_SUBMIT) == 0)
1386 			PIPE_UNLOCK(rpipe);
1387 		return (1);
1388 	}
1389 	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1390 	if (wpipe->pipe_state & PIPE_DIRECTW)
1391 		kn->kn_data = 0;
1392 
1393 	if ((hint & NOTE_SUBMIT) == 0)
1394 		PIPE_UNLOCK(rpipe);
1395 	return (kn->kn_data >= PIPE_BUF);
1396 }
1397 
1398 static const struct filterops pipe_rfiltops =
1399 	{ 1, NULL, filt_pipedetach, filt_piperead };
1400 static const struct filterops pipe_wfiltops =
1401 	{ 1, NULL, filt_pipedetach, filt_pipewrite };
1402 
1403 /*ARGSUSED*/
1404 static int
1405 pipe_kqfilter(struct file *fp, struct knote *kn)
1406 {
1407 	struct pipe *pipe;
1408 
1409 	pipe = (struct pipe *)kn->kn_fp->f_data;
1410 	switch (kn->kn_filter) {
1411 	case EVFILT_READ:
1412 		kn->kn_fop = &pipe_rfiltops;
1413 		break;
1414 	case EVFILT_WRITE:
1415 		kn->kn_fop = &pipe_wfiltops;
1416 		/* XXXSMP: race for peer */
1417 		pipe = pipe->pipe_peer;
1418 		if (pipe == NULL) {
1419 			/* other end of pipe has been closed */
1420 			return (EBADF);
1421 		}
1422 		break;
1423 	default:
1424 		return (1);
1425 	}
1426 	kn->kn_hook = pipe;
1427 
1428 	PIPE_LOCK(pipe);
1429 	SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
1430 	PIPE_UNLOCK(pipe);
1431 	return (0);
1432 }
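
/*
 * Illustrative kevent(2) registration handled by the filters above (not
 * part of this file); 'kq' is assumed to come from kqueue() and 'rfd'
 * to be a pipe read end:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */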
1433 
1434 /*
1435  * Handle pipe sysctls.
1436  */
1437 SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
1438 {
1439 
1440 	sysctl_createv(clog, 0, NULL, NULL,
1441 		       CTLFLAG_PERMANENT,
1442 		       CTLTYPE_NODE, "kern", NULL,
1443 		       NULL, 0, NULL, 0,
1444 		       CTL_KERN, CTL_EOL);
1445 	sysctl_createv(clog, 0, NULL, NULL,
1446 		       CTLFLAG_PERMANENT,
1447 		       CTLTYPE_NODE, "pipe",
1448 		       SYSCTL_DESCR("Pipe settings"),
1449 		       NULL, 0, NULL, 0,
1450 		       CTL_KERN, KERN_PIPE, CTL_EOL);
1451 
1452 	sysctl_createv(clog, 0, NULL, NULL,
1453 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1454 		       CTLTYPE_INT, "maxkvasz",
1455 		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
1456 				    "used for pipes"),
1457 		       NULL, 0, &maxpipekva, 0,
1458 		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
1459 	sysctl_createv(clog, 0, NULL, NULL,
1460 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1461 		       CTLTYPE_INT, "maxloankvasz",
1462 		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
1463 		       NULL, 0, &limitpipekva, 0,
1464 		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
1465 	sysctl_createv(clog, 0, NULL, NULL,
1466 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1467 		       CTLTYPE_INT, "maxbigpipes",
1468 		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
1469 		       NULL, 0, &maxbigpipes, 0,
1470 		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
1471 	sysctl_createv(clog, 0, NULL, NULL,
1472 		       CTLFLAG_PERMANENT,
1473 		       CTLTYPE_INT, "nbigpipes",
1474 		       SYSCTL_DESCR("Number of \"big\" pipes"),
1475 		       NULL, 0, &nbigpipe, 0,
1476 		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
1477 	sysctl_createv(clog, 0, NULL, NULL,
1478 		       CTLFLAG_PERMANENT,
1479 		       CTLTYPE_INT, "kvasize",
1480 		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
1481 				    "buffers"),
1482 		       NULL, 0, &amountpipekva, 0,
1483 		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
1484 }
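
/*
 * The nodes created above live under kern.pipe and can be inspected or
 * tuned from userland, e.g. (illustrative):
 *
 *	$ sysctl kern.pipe.kvasize
 *	$ sysctl -w kern.pipe.maxbigpipes=64
 */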
1485