/*	$NetBSD: sys_pipe.c,v 1.143 2017/12/26 08:30:58 kamil Exp $	*/

/*-
 * Copyright (c) 2003, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used.  It does not support all features of
 * sockets, but does everything that pipes normally do.
 *
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If a write is smaller than PIPE_MINDIRECT, the
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the sender's buffer is mapped read-only into the
 * kernel address space using the UVM page loan facility, and the receiving
 * process copies the data directly from the sender's loaned pages.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
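
/*
 * Illustrative sketch (userland view, not part of the kernel build):
 * the two modes are invisible to applications; only the transfer size
 * differs.  Assuming the usual PIPE_MINDIRECT threshold, something like
 * the following takes the buffered path for the first write and, with
 * direct I/O enabled, is a candidate for the loan-based path on the
 * second (headers and error handling omitted):
 *
 *	char small[512], big[64 * 1024];
 *	int fds[2];
 *
 *	if (pipe(fds) == -1)
 *		err(EXIT_FAILURE, "pipe");
 *	(void)write(fds[1], small, sizeof(small));	(buffered copy)
 *	(void)write(fds[1], big, sizeof(big));		(loaned pages)
 */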

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.143 2017/12/26 08:30:58 kamil Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/kmem.h>	/* kmem_alloc/kmem_free in the !PIPE_NODIRECT path */
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/select.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/pipe.h>

#include <uvm/uvm_extern.h>

/*
 * Use this to disable direct I/O and decrease the code size:
 * #define PIPE_NODIRECT
 */

/* XXX Disabled for now; rare hangs switching between direct/buffered */
#define PIPE_NODIRECT

static int	pipe_read(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_write(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int	pipe_close(file_t *);
static int	pipe_poll(file_t *, int);
static int	pipe_kqfilter(file_t *, struct knote *);
static int	pipe_stat(file_t *, struct stat *);
static int	pipe_ioctl(file_t *, u_long, void *);
static void	pipe_restart(file_t *);

static const struct fileops pipeops = {
	.fo_name = "pipe",
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = pipe_poll,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_kqfilter = pipe_kqfilter,
	.fo_restart = pipe_restart,
};

/*
 * Default pipe buffer size(s).  These can be fairly large now because
 * pipe space is pageable.  The pipe code will try to maintain locality
 * of reference for performance reasons, so small amounts of outstanding
 * I/O will not wipe the cache.
 */
#define	MINPIPESIZE	(PIPE_SIZE / 3)
#define	MAXPIPESIZE	(2 * PIPE_SIZE / 3)
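
/*
 * Worked example (assuming the usual PIPE_SIZE of 16384 bytes from
 * <sys/pipe.h>): MINPIPESIZE = 16384 / 3 = 5461 and
 * MAXPIPESIZE = 2 * 16384 / 3 = 10922.  pipe_read() wakes blocked
 * writers once fewer than MINPIPESIZE bytes remain buffered, which
 * keeps the ring buffer's working set small.
 */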

/*
 * Maximum amount of kva for pipes -- this is a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define	MAXPIPEKVA	(8 * 1024 * 1024)
static u_int	maxpipekva = MAXPIPEKVA;

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define	LIMITPIPEKVA	(16 * 1024 * 1024)
static u_int	limitpipekva = LIMITPIPEKVA;

/*
 * Limit the number of "big" pipes.
 */
#define	LIMITBIGPIPES	32
static u_int	maxbigpipes = LIMITBIGPIPES;
static u_int	nbigpipe = 0;

/*
 * Amount of KVA consumed by pipe buffers.
 */
static u_int	amountpipekva = 0;

static void	pipeclose(struct pipe *);
static void	pipe_free_kmem(struct pipe *);
static int	pipe_create(struct pipe **, pool_cache_t);
static int	pipelock(struct pipe *, bool);
static inline void pipeunlock(struct pipe *);
static void	pipeselwakeup(struct pipe *, struct pipe *, int);
#ifndef PIPE_NODIRECT
static int	pipe_direct_write(file_t *, struct pipe *, struct uio *);
#endif
static int	pipespace(struct pipe *, int);
static int	pipe_ctor(void *, void *, int);
static void	pipe_dtor(void *, void *);

#ifndef PIPE_NODIRECT
static int	pipe_loan_alloc(struct pipe *, int);
static void	pipe_loan_free(struct pipe *);
#endif /* !PIPE_NODIRECT */

static pool_cache_t	pipe_wr_cache;
static pool_cache_t	pipe_rd_cache;

void
pipe_init(void)
{

	/* Writer side is not automatically allocated KVA. */
	pipe_wr_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipewr",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, NULL);
	KASSERT(pipe_wr_cache != NULL);

	/* Reader side gets preallocated KVA. */
	pipe_rd_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "piperd",
	    NULL, IPL_NONE, pipe_ctor, pipe_dtor, (void *)1);
	KASSERT(pipe_rd_cache != NULL);
}

static int
pipe_ctor(void *arg, void *obj, int flags)
{
	struct pipe *pipe;
	vaddr_t va;

	pipe = obj;

	memset(pipe, 0, sizeof(struct pipe));
	if (arg != NULL) {
		/* Preallocate space. */
		va = uvm_km_alloc(kernel_map, PIPE_SIZE, 0,
		    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
		KASSERT(va != 0);
		pipe->pipe_kmem = va;
		atomic_add_int(&amountpipekva, PIPE_SIZE);
	}
	cv_init(&pipe->pipe_rcv, "pipe_rd");
	cv_init(&pipe->pipe_wcv, "pipe_wr");
	cv_init(&pipe->pipe_draincv, "pipe_drn");
	cv_init(&pipe->pipe_lkcv, "pipe_lk");
	selinit(&pipe->pipe_sel);
	pipe->pipe_state = PIPE_SIGNALR;

	return 0;
}

static void
pipe_dtor(void *arg, void *obj)
{
	struct pipe *pipe;

	pipe = obj;

	cv_destroy(&pipe->pipe_rcv);
	cv_destroy(&pipe->pipe_wcv);
	cv_destroy(&pipe->pipe_draincv);
	cv_destroy(&pipe->pipe_lkcv);
	seldestroy(&pipe->pipe_sel);
	if (pipe->pipe_kmem != 0) {
		uvm_km_free(kernel_map, pipe->pipe_kmem, PIPE_SIZE,
		    UVM_KMF_PAGEABLE);
		atomic_add_int(&amountpipekva, -PIPE_SIZE);
	}
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */
int
pipe1(struct lwp *l, int *fildes, int flags)
{
	struct pipe *rpipe, *wpipe;
	file_t *rf, *wf;
	int fd, error;
	proc_t *p;

	if (flags & ~(O_CLOEXEC|O_NONBLOCK|O_NOSIGPIPE))
		return EINVAL;
	p = curproc;
	rpipe = wpipe = NULL;
	if ((error = pipe_create(&rpipe, pipe_rd_cache)) ||
	    (error = pipe_create(&wpipe, pipe_wr_cache))) {
		goto free2;
	}
	rpipe->pipe_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	wpipe->pipe_lock = rpipe->pipe_lock;
	mutex_obj_hold(wpipe->pipe_lock);

	error = fd_allocfile(&rf, &fd);
	if (error)
		goto free2;
	fildes[0] = fd;

	error = fd_allocfile(&wf, &fd);
	if (error)
		goto free3;
	fildes[1] = fd;

	rf->f_flag = FREAD | flags;
	rf->f_type = DTYPE_PIPE;
	rf->f_pipe = rpipe;
	rf->f_ops = &pipeops;
	fd_set_exclose(l, fildes[0], (flags & O_CLOEXEC) != 0);

	wf->f_flag = FWRITE | flags;
	wf->f_type = DTYPE_PIPE;
	wf->f_pipe = wpipe;
	wf->f_ops = &pipeops;
	fd_set_exclose(l, fildes[1], (flags & O_CLOEXEC) != 0);

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	fd_affix(p, rf, fildes[0]);
	fd_affix(p, wf, fildes[1]);
	return (0);
free3:
	fd_abort(p, rf, fildes[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);

	return (error);
}
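
/*
 * Usage sketch (userland, illustrative): the flag check above mirrors
 * what pipe2(2) accepts on NetBSD, so the following creates a
 * non-blocking, close-on-exec pipe pair:
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1)
 *		err(EXIT_FAILURE, "pipe2");
 *	(fds[0] is the read side, fds[1] the write side)
 */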

/*
 * Allocate kva for the pipe's circular buffer; the space is pageable.
 * This routine will safely 'realloc' the size of a pipe: if allocation
 * fails, the old buffer is retained and ENOMEM is returned.
 */
static int
pipespace(struct pipe *pipe, int size)
{
	void *buffer;

	/*
	 * Allocate pageable virtual address space.  Physical memory is
	 * allocated on demand.
	 */
	if (size == PIPE_SIZE && pipe->pipe_kmem != 0) {
		buffer = (void *)pipe->pipe_kmem;
	} else {
		buffer = (void *)uvm_km_alloc(kernel_map, round_page(size),
		    0, UVM_KMF_PAGEABLE);
		if (buffer == NULL)
			return (ENOMEM);
		atomic_add_int(&amountpipekva, size);
	}

	/* Free old resources if we are resizing. */
	pipe_free_kmem(pipe);
	pipe->pipe_buffer.buffer = buffer;
	pipe->pipe_buffer.size = size;
	pipe->pipe_buffer.in = 0;
	pipe->pipe_buffer.out = 0;
	pipe->pipe_buffer.cnt = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe.
 */
static int
pipe_create(struct pipe **pipep, pool_cache_t cache)
{
	struct pipe *pipe;
	int error;

	pipe = pool_cache_get(cache, PR_WAITOK);
	KASSERT(pipe != NULL);
	*pipep = pipe;
	error = 0;
	getnanotime(&pipe->pipe_btime);
	pipe->pipe_atime = pipe->pipe_mtime = pipe->pipe_btime;
	pipe->pipe_lock = NULL;
	if (cache == pipe_rd_cache) {
		error = pipespace(pipe, PIPE_SIZE);
	} else {
		pipe->pipe_buffer.buffer = NULL;
		pipe->pipe_buffer.size = 0;
		pipe->pipe_buffer.in = 0;
		pipe->pipe_buffer.out = 0;
		pipe->pipe_buffer.cnt = 0;
	}
	return error;
}

/*
 * Lock a pipe for I/O, blocking other access.
 * Called with the pipe mutex held.
 */
static int
pipelock(struct pipe *pipe, bool catch_p)
{
	int error;

	KASSERT(mutex_owned(pipe->pipe_lock));

	while (pipe->pipe_state & PIPE_LOCKFL) {
		pipe->pipe_state |= PIPE_LWANT;
		if (catch_p) {
			error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
			if (error != 0)
				return error;
		} else
			cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
	}

	pipe->pipe_state |= PIPE_LOCKFL;

	return 0;
}

/*
 * Unlock a pipe I/O lock.
 */
static inline void
pipeunlock(struct pipe *pipe)
{

	KASSERT(pipe->pipe_state & PIPE_LOCKFL);

	pipe->pipe_state &= ~PIPE_LOCKFL;
	if (pipe->pipe_state & PIPE_LWANT) {
		pipe->pipe_state &= ~PIPE_LWANT;
		cv_broadcast(&pipe->pipe_lkcv);
	}
}
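
/*
 * Locking sketch (a reading aid): the short-term mutex (pipe_lock,
 * shared by both ends) protects the state flags, while PIPE_LOCKFL acts
 * as a long-term I/O lock that stays held across sleeps and uiomove().
 * Callers in this file follow the pattern below, condensed from
 * pipe_read():
 *
 *	mutex_enter(lock);
 *	error = pipelock(pipe, true);	(long-term lock, interruptible)
 *	...
 *	mutex_exit(lock);		(drop the mutex around uiomove())
 *	error = uiomove(..., uio);
 *	mutex_enter(lock);
 *	...
 *	pipeunlock(pipe);		(wakes any PIPE_LWANT waiters)
 *	mutex_exit(lock);
 */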

/*
 * Select/poll wakeup.  This also sends SIGIO to the peer connected to
 * the 'sigpipe' side of the pipe.
 */
static void
pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
{
	int band;

	switch (code) {
	case POLL_IN:
		band = POLLIN|POLLRDNORM;
		break;
	case POLL_OUT:
		band = POLLOUT|POLLWRNORM;
		break;
	case POLL_HUP:
		band = POLLHUP;
		break;
	case POLL_ERR:
		band = POLLERR;
		break;
	default:
		band = 0;
#ifdef DIAGNOSTIC
		printf("bad siginfo code %d in pipe notification.\n", code);
#endif
		break;
	}

	selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);

	if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
		return;

	fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
}

static int
pipe_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *rpipe = fp->f_pipe;
	struct pipebuf *bp = &rpipe->pipe_buffer;
	kmutex_t *lock = rpipe->pipe_lock;
	int error;
	size_t nread = 0;
	size_t size;
	size_t ocnt;
	unsigned int wakeup_state = 0;

	mutex_enter(lock);
	++rpipe->pipe_busy;
	ocnt = bp->cnt;

again:
	error = pipelock(rpipe, true);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * Normal pipe buffer receive.
		 */
		if (bp->cnt > 0) {
			size = bp->size - bp->out;
			if (size > bp->cnt)
				size = bp->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->out, size, uio);
			mutex_enter(lock);
			if (error)
				break;

			bp->out += size;
			if (bp->out >= bp->size)
				bp->out = 0;

			bp->cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (bp->cnt == 0) {
				bp->in = 0;
				bp->out = 0;
			}
			nread += size;
			continue;
		}

#ifndef PIPE_NODIRECT
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
			struct pipemapping * const rmap = &rpipe->pipe_map;
			/*
			 * Direct copy, bypassing a kernel buffer.
			 */
			void *va;
			u_int gen;

			KASSERT(rpipe->pipe_state & PIPE_DIRECTW);

			size = rmap->cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			va = (char *)rmap->kva + rmap->pos;
			gen = rmap->egen;
			mutex_exit(lock);

			/*
			 * Consume emap and read the data from loaned pages.
			 */
			uvm_emap_consume(gen);
			error = uiomove(va, size, uio);

			mutex_enter(lock);
			if (error)
				break;
			nread += size;
			rmap->pos += size;
			rmap->cnt -= size;
			if (rmap->cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTR;
				cv_broadcast(&rpipe->pipe_wcv);
			}
			continue;
		}
#endif
		/*
		 * Break if some data was read.
		 */
		if (nread > 0)
			break;

		/*
		 * Detect EOF condition.
		 * Read returns 0 on EOF, no need to set error.
		 */
		if (rpipe->pipe_state & PIPE_EOF)
			break;

		/*
		 * Don't block on non-blocking I/O.
		 */
		if (fp->f_flag & FNONBLOCK) {
			error = EAGAIN;
			break;
		}

		/*
		 * Unlock the pipe buffer for our remaining processing.
		 * We will either break out with an error or we will
		 * sleep and relock to loop.
		 */
		pipeunlock(rpipe);

		/*
		 * Re-check to see if more direct writes are pending.
		 */
		if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
			goto again;

#if 1   /* XXX (dsl) I'm sure these aren't needed here ... */
		/*
		 * We want to read more, wake up select/poll.
		 */
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);

		/*
		 * If the "write-side" is blocked, wake it up now.
		 */
		cv_broadcast(&rpipe->pipe_wcv);
#endif

		if (wakeup_state & PIPE_RESTART) {
			error = ERESTART;
			goto unlocked_error;
		}

		/* Now wait until the pipe is filled */
		error = cv_wait_sig(&rpipe->pipe_rcv, lock);
		if (error != 0)
			goto unlocked_error;
		wakeup_state = rpipe->pipe_state;
		goto again;
	}

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);
	pipeunlock(rpipe);

unlocked_error:
	--rpipe->pipe_busy;
	if (rpipe->pipe_busy == 0) {
		rpipe->pipe_state &= ~PIPE_RESTART;
		cv_broadcast(&rpipe->pipe_draincv);
	}
	if (bp->cnt < MINPIPESIZE) {
		cv_broadcast(&rpipe->pipe_wcv);
	}

	/*
	 * If anything was read off the buffer, signal to the writer it's
	 * possible to write more data. Also send signal if we are here for the
	 * first time after last write.
	 */
	if ((bp->size - bp->cnt) >= PIPE_BUF
	    && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
		pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
		rpipe->pipe_state &= ~PIPE_SIGNALR;
	}

	mutex_exit(lock);
	return (error);
}
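
/*
 * Ring-buffer arithmetic, worked through: pipe_buffer is a circular
 * buffer described by 'in' (next byte to fill), 'out' (next byte to
 * read) and 'cnt' (bytes currently buffered).  For example, with
 * size = 16384, in = 100, out = 16300 and cnt = 184, a reader first
 * copies the 84 contiguous bytes at offsets 16300..16383, wraps 'out'
 * to 0, then copies the remaining 100 bytes; 'cnt' drops to 0 and both
 * indices are reset to the start of the buffer to improve cache reuse.
 */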

#ifndef PIPE_NODIRECT
/*
 * Allocate structure for loan transfer.
 */
static int
pipe_loan_alloc(struct pipe *wpipe, int npages)
{
	struct pipemapping * const wmap = &wpipe->pipe_map;
	const vsize_t len = ptoa(npages);

	atomic_add_int(&amountpipekva, len);
	wmap->kva = uvm_km_alloc(kernel_map, len, 0,
	    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (wmap->kva == 0) {
		atomic_add_int(&amountpipekva, -len);
		return (ENOMEM);
	}

	wmap->npages = npages;
	wmap->pgs = kmem_alloc(npages * sizeof(struct vm_page *), KM_SLEEP);
	return (0);
}

/*
 * Free resources allocated for loan transfer.
 */
static void
pipe_loan_free(struct pipe *wpipe)
{
	struct pipemapping * const wmap = &wpipe->pipe_map;
	const vsize_t len = ptoa(wmap->npages);

	uvm_emap_remove(wmap->kva, len);	/* XXX */
	uvm_km_free(kernel_map, wmap->kva, len, UVM_KMF_VAONLY);
	wmap->kva = 0;
	atomic_add_int(&amountpipekva, -len);
	kmem_free(wmap->pgs, wmap->npages * sizeof(struct vm_page *));
	wmap->pgs = NULL;
#if 0
	wmap->npages = 0;
	wmap->pos = 0;
	wmap->cnt = 0;
#endif
}

/*
 * NetBSD direct write, using uvm_loan() mechanism.
 * This implements the direct pipe write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 *
 * Called with the long-term pipe lock held.
 */
static int
pipe_direct_write(file_t *fp, struct pipe *wpipe, struct uio *uio)
{
	struct pipemapping * const wmap = &wpipe->pipe_map;
	kmutex_t * const lock = wpipe->pipe_lock;
	struct vm_page **pgs;
	vaddr_t bbase, base, bend;
	vsize_t blen, bcnt;
	int error, npages;
	voff_t bpos;
	u_int starting_color;

	KASSERT(mutex_owned(wpipe->pipe_lock));
	KASSERT(wmap->cnt == 0);

	mutex_exit(lock);

	/*
	 * Handle first PIPE_DIRECT_CHUNK bytes of buffer. Deal with buffers
	 * not aligned to PAGE_SIZE.
	 */
	bbase = (vaddr_t)uio->uio_iov->iov_base;
	base = trunc_page(bbase);
	bend = round_page(bbase + uio->uio_iov->iov_len);
	blen = bend - base;
	bpos = bbase - base;

	if (blen > PIPE_DIRECT_CHUNK) {
		blen = PIPE_DIRECT_CHUNK;
		bend = base + blen;
		bcnt = PIPE_DIRECT_CHUNK - bpos;
	} else {
		bcnt = uio->uio_iov->iov_len;
	}
	npages = atop(blen);
	starting_color = atop(base) & uvmexp.colormask;

	/*
	 * Free the old kva if we need more pages than we have
	 * allocated.
	 */
	if (wmap->kva != 0 && starting_color + npages > wmap->npages)
		pipe_loan_free(wpipe);

	/* Allocate new kva. */
	if (wmap->kva == 0) {
		error = pipe_loan_alloc(wpipe, starting_color + npages);
		if (error) {
			mutex_enter(lock);
			return (error);
		}
	}

	/* Loan the write buffer memory from writer process */
	pgs = wmap->pgs + starting_color;
	error = uvm_loan(&uio->uio_vmspace->vm_map, base, blen,
			 pgs, UVM_LOAN_TOPAGE);
	if (error) {
		pipe_loan_free(wpipe);
		mutex_enter(lock);
		/* ENOMEM makes the caller fall back to ordinary write */
		return (ENOMEM);
	}

	/* Enter the loaned pages to KVA, produce new emap generation number. */
	uvm_emap_enter(wmap->kva + ptoa(starting_color), pgs, npages);
	wmap->egen = uvm_emap_produce();

	/* Now we can put the pipe in direct write mode */
	wmap->pos = bpos + ptoa(starting_color);
	wmap->cnt = bcnt;

	/*
	 * But before we can let someone do a direct read, we
	 * have to wait until the pipe is drained.  Release the
	 * pipe lock while we wait.
	 */
	mutex_enter(lock);
	wpipe->pipe_state |= PIPE_DIRECTW;
	pipeunlock(wpipe);

	while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Pipe is drained; the next read will come off the direct buffer */
	wpipe->pipe_state |= PIPE_DIRECTR;

	/* Wait until the reader is done */
	while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
		cv_broadcast(&wpipe->pipe_rcv);
		pipeselwakeup(wpipe, wpipe, POLL_IN);
		error = cv_wait_sig(&wpipe->pipe_wcv, lock);
		if (error == 0 && wpipe->pipe_state & PIPE_EOF)
			error = EPIPE;
	}

	/* Take pipe out of direct write mode */
	wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);

	/* Acquire the pipe lock and cleanup */
	(void)pipelock(wpipe, false);
	mutex_exit(lock);

	if (pgs != NULL) {
		/* XXX: uvm_emap_remove */
		uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
	}
	if (error || amountpipekva > maxpipekva)
		pipe_loan_free(wpipe);

	mutex_enter(lock);
	if (error) {
		pipeselwakeup(wpipe, wpipe, POLL_ERR);

		/*
		 * If nothing was read from what we offered, return error
		 * straight on. Otherwise update uio resid first. Caller
		 * will deal with the error condition, returning short
		 * write, error, or restarting the write(2) as appropriate.
		 */
		if (wmap->cnt == bcnt) {
			wmap->cnt = 0;
			cv_broadcast(&wpipe->pipe_wcv);
			return (error);
		}

		bcnt -= wmap->cnt;	/* subtract what the reader left */
	}

	uio->uio_resid -= bcnt;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
	uio->uio_iov->iov_len -= bcnt;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	wmap->cnt = 0;
	return (error);
}
#endif /* !PIPE_NODIRECT */
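
/*
 * Direct-write life cycle, summarized (a reading aid; the authoritative
 * sequence is pipe_direct_write() above):
 *
 *	1. uvm_loan() the writer's pages (UVM_LOAN_TOPAGE).
 *	2. uvm_emap_enter() maps them at wmap->kva; uvm_emap_produce()
 *	   stamps a generation number that the reader must consume.
 *	3. PIPE_DIRECTW is set; the writer sleeps until the kernel
 *	   buffer drains, then PIPE_DIRECTR is set so that pipe_read()
 *	   copies straight from the loaned pages.
 *	4. When the reader finishes, the writer unloans the pages and,
 *	   on error or KVA pressure, frees the loan KVA.
 */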

static int
pipe_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct pipe *wpipe, *rpipe;
	struct pipebuf *bp;
	kmutex_t *lock;
	int error;
	unsigned int wakeup_state = 0;

	/* We want to write to our peer */
	rpipe = fp->f_pipe;
	lock = rpipe->pipe_lock;
	error = 0;

	mutex_enter(lock);
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
		mutex_exit(lock);
		return EPIPE;
	}
	++wpipe->pipe_busy;

	/* Acquire the long-term pipe lock */
	if ((error = pipelock(wpipe, true)) != 0) {
		--wpipe->pipe_busy;
		if (wpipe->pipe_busy == 0) {
			wpipe->pipe_state &= ~PIPE_RESTART;
			cv_broadcast(&wpipe->pipe_draincv);
		}
		mutex_exit(lock);
		return (error);
	}

	bp = &wpipe->pipe_buffer;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < maxbigpipes) &&
#ifndef PIPE_NODIRECT
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
	    (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {

		if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
			atomic_inc_uint(&nbigpipe);
	}

	while (uio->uio_resid) {
		size_t space;

#ifndef PIPE_NODIRECT
		/*
		 * Pipe buffered writes cannot coincide with direct
		 * writes.  Also, only one direct write can be in
		 * progress at any one time.  We wait until the currently
		 * executing direct write is completed before continuing.
		 *
		 * We break out if a signal occurs or the reader goes away.
		 */
		while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
			cv_broadcast(&wpipe->pipe_rcv);
			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, false);
			if (wpipe->pipe_state & PIPE_EOF)
				error = EPIPE;
		}
		if (error)
			break;

		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
			error = pipe_direct_write(fp, wpipe, uio);

			/*
			 * Break out if error occurred, unless it's ENOMEM.
			 * ENOMEM means we failed to allocate some resources
			 * for direct write, so we just fall back to ordinary
			 * write. If the direct write was successful,
			 * process rest of data via ordinary write.
			 */
			if (error == 0)
				continue;

			if (error != ENOMEM)
				break;
		}
#endif /* !PIPE_NODIRECT */

		space = bp->size - bp->cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = bp->size - bp->in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			mutex_exit(lock);
			error = uiomove((char *)bp->buffer + bp->in, segsize,
			    uio);

			if (error == 0 && segsize < size) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
				KASSERT(bp->in + segsize == bp->size);
				error = uiomove(bp->buffer,
				    size - segsize, uio);
			}
			mutex_enter(lock);
			if (error)
				break;

			bp->in += size;
			if (bp->in >= bp->size) {
				KASSERT(bp->in == size - segsize + bp->size);
				bp->in = size - segsize;
			}

			bp->cnt += size;
			KASSERT(bp->cnt <= bp->size);
			wakeup_state = 0;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			cv_broadcast(&wpipe->pipe_rcv);

			/*
			 * Don't block on non-blocking I/O.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			if (bp->cnt)
				pipeselwakeup(wpipe, wpipe, POLL_IN);

			if (wakeup_state & PIPE_RESTART) {
				error = ERESTART;
				break;
			}

			pipeunlock(wpipe);
			error = cv_wait_sig(&wpipe->pipe_wcv, lock);
			(void)pipelock(wpipe, false);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
			wakeup_state = wpipe->pipe_state;
		}
	}

	--wpipe->pipe_busy;
	if (wpipe->pipe_busy == 0) {
		wpipe->pipe_state &= ~PIPE_RESTART;
		cv_broadcast(&wpipe->pipe_draincv);
	}
	if (bp->cnt > 0) {
		cv_broadcast(&wpipe->pipe_rcv);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
		error = 0;

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 * wpipe->pipe_map.cnt is always 0 at this point (direct write is
	 * only done synchronously), so check only wpipe->pipe_buffer.cnt.
	 */
	if (bp->cnt)
		pipeselwakeup(wpipe, wpipe, POLL_IN);

	/*
	 * Arrange for next read(2) to do a signal.
	 */
	wpipe->pipe_state |= PIPE_SIGNALR;

	pipeunlock(wpipe);
	mutex_exit(lock);
	return (error);
}
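
/*
 * Illustrative note on the PIPE_BUF clamp above (userland view, not
 * compiled here): because writes of size <= PIPE_BUF are never split,
 * several processes can safely interleave small records on one pipe,
 * with wfd standing in for any shared pipe write descriptor:
 *
 *	char rec[PIPE_BUF];
 *	(fill rec with one complete record)
 *	(void)write(wfd, rec, sizeof(rec));
 *
 * Each record arrives contiguously; larger writes may be interleaved
 * with data from other writers.
 */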

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct pipe *pipe = fp->f_pipe;
	kmutex_t *lock = pipe->pipe_lock;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		mutex_enter(lock);
		if (*(int *)data) {
			pipe->pipe_state |= PIPE_ASYNC;
		} else {
			pipe->pipe_state &= ~PIPE_ASYNC;
		}
		mutex_exit(lock);
		return (0);

	case FIONREAD:
		mutex_enter(lock);
#ifndef PIPE_NODIRECT
		if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
		else
#endif
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONWRITE:
		/* Look at other side; it may already be gone. */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)
			*(int *)data = 0;
#ifndef PIPE_NODIRECT
		else if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = pipe->pipe_map.cnt;
#endif
		else
			*(int *)data = pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case FIONSPACE:
		/* Look at other side; it may already be gone. */
		mutex_enter(lock);
		pipe = pipe->pipe_peer;
		if (pipe == NULL)
			*(int *)data = 0;
#ifndef PIPE_NODIRECT
		/*
		 * If we're in direct-mode, we don't really have a
		 * send queue, and any other write will block. Thus
		 * zero seems like the best answer.
		 */
		else if (pipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = 0;
#endif
		else
			*(int *)data = pipe->pipe_buffer.size -
			    pipe->pipe_buffer.cnt;
		mutex_exit(lock);
		return (0);

	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&pipe->pipe_pgid, cmd, data);

	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(pipe->pipe_pgid, cmd, data);

	}
	return (EPASSTHROUGH);
}
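
/*
 * Usage sketch (userland, illustrative): FIONREAD reports how many
 * bytes are buffered for reading, which maps to pipe_buffer.cnt above;
 * rfd stands in for a pipe read descriptor:
 *
 *	int nbytes;
 *
 *	if (ioctl(rfd, FIONREAD, &nbytes) == -1)
 *		err(EXIT_FAILURE, "ioctl");
 *	printf("%d bytes ready\n", nbytes);
 */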

static int
pipe_poll(file_t *fp, int events)
{
	struct pipe *rpipe = fp->f_pipe;
	struct pipe *wpipe;
	int eof = 0;
	int revents = 0;

	mutex_enter(rpipe->pipe_lock);
	wpipe = rpipe->pipe_peer;

	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_buffer.cnt > 0) ||
#ifndef PIPE_NODIRECT
		    (rpipe->pipe_state & PIPE_DIRECTR) ||
#endif
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	eof |= (rpipe->pipe_state & PIPE_EOF);

	if (wpipe == NULL)
		revents |= events & (POLLOUT | POLLWRNORM);
	else {
		if (events & (POLLOUT | POLLWRNORM))
			if ((wpipe->pipe_state & PIPE_EOF) || (
#ifndef PIPE_NODIRECT
			     (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
#endif
			     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
				revents |= events & (POLLOUT | POLLWRNORM);

		eof |= (wpipe->pipe_state & PIPE_EOF);
	}

	if (wpipe == NULL || eof)
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(curlwp, &rpipe->pipe_sel);

		if (events & (POLLOUT | POLLWRNORM))
			selrecord(curlwp, &wpipe->pipe_sel);
	}
	mutex_exit(rpipe->pipe_lock);

	return (revents);
}
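
/*
 * Usage sketch (userland, illustrative): the revents computed above are
 * what a poll(2) caller sees, e.g. waiting on a pipe read end rfd:
 *
 *	struct pollfd pfd = { .fd = rfd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, INFTIM) == -1)
 *		err(EXIT_FAILURE, "poll");
 *	(POLLIN in pfd.revents means data or EOF is pending;
 *	 POLLHUP means the writer closed its end)
 */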

static int
pipe_stat(file_t *fp, struct stat *ub)
{
	struct pipe *pipe = fp->f_pipe;

	mutex_enter(pipe->pipe_lock);
	memset(ub, 0, sizeof(*ub));
	ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	ub->st_blksize = pipe->pipe_buffer.size;
	if (ub->st_blksize == 0 && pipe->pipe_peer)
		ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size) ? 1 : 0;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = ub->st_birthtimespec = pipe->pipe_btime;
	ub->st_uid = kauth_cred_geteuid(fp->f_cred);
	ub->st_gid = kauth_cred_getegid(fp->f_cred);

	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	mutex_exit(pipe->pipe_lock);
	return 0;
}

static int
pipe_close(file_t *fp)
{
	struct pipe *pipe = fp->f_pipe;

	fp->f_pipe = NULL;
	pipeclose(pipe);
	return (0);
}

static void
pipe_restart(file_t *fp)
{
	struct pipe *pipe = fp->f_pipe;

	/*
	 * Unblock blocked reads/writes in order to allow close() to complete.
	 * System calls return ERESTART so that the fd is revalidated.
	 * (Partial writes return the transfer length.)
	 */
	mutex_enter(pipe->pipe_lock);
	pipe->pipe_state |= PIPE_RESTART;
	/*
	 * Wake both cvs; maybe we only need one, but maybe there are
	 * some other paths where a wakeup is needed, and it saves
	 * deciding which!
	 */
	cv_broadcast(&pipe->pipe_rcv);
	cv_broadcast(&pipe->pipe_wcv);
	mutex_exit(pipe->pipe_lock);
}

static void
pipe_free_kmem(struct pipe *pipe)
{

	if (pipe->pipe_buffer.buffer != NULL) {
		if (pipe->pipe_buffer.size > PIPE_SIZE) {
			atomic_dec_uint(&nbigpipe);
		}
		if (pipe->pipe_buffer.buffer != (void *)pipe->pipe_kmem) {
			uvm_km_free(kernel_map,
			    (vaddr_t)pipe->pipe_buffer.buffer,
			    pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
			atomic_add_int(&amountpipekva,
			    -pipe->pipe_buffer.size);
		}
		pipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (pipe->pipe_map.kva != 0) {
		pipe_loan_free(pipe);
		pipe->pipe_map.cnt = 0;
		pipe->pipe_map.pos = 0;
		pipe->pipe_map.npages = 0;
	}
#endif /* !PIPE_NODIRECT */
}

/*
 * Shutdown the pipe.
 */
static void
pipeclose(struct pipe *pipe)
{
	kmutex_t *lock;
	struct pipe *ppipe;

	if (pipe == NULL)
		return;

	KASSERT(cv_is_valid(&pipe->pipe_rcv));
	KASSERT(cv_is_valid(&pipe->pipe_wcv));
	KASSERT(cv_is_valid(&pipe->pipe_draincv));
	KASSERT(cv_is_valid(&pipe->pipe_lkcv));

	lock = pipe->pipe_lock;
	if (lock == NULL)
		/* Must have failed during create */
		goto free_resources;

	mutex_enter(lock);
	pipeselwakeup(pipe, pipe, POLL_HUP);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	pipe->pipe_state |= PIPE_EOF;
	if (pipe->pipe_busy) {
		while (pipe->pipe_busy) {
			cv_broadcast(&pipe->pipe_wcv);
			cv_wait_sig(&pipe->pipe_draincv, lock);
		}
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = pipe->pipe_peer) != NULL) {
		pipeselwakeup(ppipe, ppipe, POLL_HUP);
		ppipe->pipe_state |= PIPE_EOF;
		cv_broadcast(&ppipe->pipe_rcv);
		ppipe->pipe_peer = NULL;
	}

	/*
	 * Any knote objects still left in the list are
	 * the ones attached by the peer.  Since no one will
	 * traverse this list, we just clear it.
	 */
	SLIST_INIT(&pipe->pipe_sel.sel_klist);

	KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
	mutex_exit(lock);
	mutex_obj_free(lock);

	/*
	 * Free resources.
	 */
    free_resources:
	pipe->pipe_pgid = 0;
	pipe->pipe_state = PIPE_SIGNALR;
	pipe_free_kmem(pipe);
	if (pipe->pipe_kmem != 0) {
		pool_cache_put(pipe_rd_cache, pipe);
	} else {
		pool_cache_put(pipe_wr_cache, pipe);
	}
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_pipe;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_WRITE:
		/* Need the peer structure, not our own. */
		pipe = pipe->pipe_peer;

		/* If reader end already closed, just return. */
		if (pipe == NULL) {
			mutex_exit(lock);
			return;
		}

		break;
	default:
		/* Nothing to do. */
		break;
	}

	KASSERT(kn->kn_hook == pipe);
	SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(lock);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;
	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data > 0);
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_pipe;
	struct pipe *wpipe;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_enter(rpipe->pipe_lock);
	}
	wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		if ((hint & NOTE_SUBMIT) == 0) {
			mutex_exit(rpipe->pipe_lock);
		}
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	if ((hint & NOTE_SUBMIT) == 0) {
		mutex_exit(rpipe->pipe_lock);
	}
	return (kn->kn_data >= PIPE_BUF);
}

static const struct filterops pipe_rfiltops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_pipedetach,
	.f_event = filt_piperead,
};

static const struct filterops pipe_wfiltops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_pipedetach,
	.f_event = filt_pipewrite,
};

static int
pipe_kqfilter(file_t *fp, struct knote *kn)
{
	struct pipe *pipe;
	kmutex_t *lock;

	pipe = ((file_t *)kn->kn_obj)->f_pipe;
	lock = pipe->pipe_lock;

	mutex_enter(lock);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		pipe = pipe->pipe_peer;
		if (pipe == NULL) {
			/* Other end of pipe has been closed. */
			mutex_exit(lock);
			return (EBADF);
		}
		break;
	default:
		mutex_exit(lock);
		return (EINVAL);
	}

	kn->kn_hook = pipe;
	SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
	mutex_exit(lock);

	return (0);
}
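
/*
 * Usage sketch (userland, illustrative): waiting for a pipe read end
 * rfd to become readable with kqueue(2), which ends up in
 * pipe_kqfilter() above:
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(EXIT_FAILURE, "kqueue");
 *	EV_SET(&ev, rfd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(EXIT_FAILURE, "kevent");
 *	(a later kevent() call returns when data or EOF is pending;
 *	 ev.data then carries the byte count from filt_piperead())
 */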

/*
 * Handle pipe sysctls.
 */
SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "pipe",
		       SYSCTL_DESCR("Pipe settings"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, KERN_PIPE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxkvasz",
		       SYSCTL_DESCR("Maximum amount of kernel memory to be "
				    "used for pipes"),
		       NULL, 0, &maxpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxloankvasz",
		       SYSCTL_DESCR("Limit for direct transfers via page loan"),
		       NULL, 0, &limitpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxbigpipes",
		       SYSCTL_DESCR("Maximum number of \"big\" pipes"),
		       NULL, 0, &maxbigpipes, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "nbigpipes",
		       SYSCTL_DESCR("Number of \"big\" pipes"),
		       NULL, 0, &nbigpipe, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "kvasize",
		       SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
				    "buffers"),
		       NULL, 0, &amountpipekva, 0,
		       CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
}
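
/*
 * Usage sketch (illustrative): the nodes created above appear under
 * kern.pipe and can be inspected or tuned with sysctl(8), e.g.:
 *
 *	$ sysctl kern.pipe.maxbigpipes
 *	kern.pipe.maxbigpipes = 32
 *	# sysctl -w kern.pipe.maxbigpipes=64
 *
 * (exact output depends on the running system's configuration)
 */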
1546