/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  These can be fairly large now because
 * pipe space is pageable.  The pipe code will try to maintain locality
 * of reference for performance reasons, so small amounts of outstanding
 * I/O will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline int
pipeseltest(struct pipe *cpipe)
{
	return ((cpipe->pipe_state & PIPE_SEL) ||
		((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) ||
		SLIST_FIRST(&cpipe->pipe_sel.si_note));
}

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}
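
/*
 * A minimal sketch (illustration only, not compiled) of how the pipe
 * fileops below pair these routines around a potentially-blocking
 * uiomove(), assuming the relevant token is already held:
 *
 *	lwkt_gettoken(&rpipe->pipe_rlock);
 *	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
 *	if (error == 0) {
 *		...copy loop, may block and temporarily lose tokens...
 *		pipe_end_uio(rpipe, &rpipe->pipe_rip);
 *	}
 *	lwkt_reltoken(&rpipe->pipe_rlock);
 */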

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}
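
/*
 * Sketch of the bracketing pattern the fileops below use: the mplock
 * is taken only when kern.pipe.mpsafe is disabled, and *save remembers
 * whether it must be dropped again.
 *
 *	int mpsave;
 *
 *	pipe_get_mplock(&mpsave);
 *	...token-protected pipe work...
 *	pipe_rel_mplock(&mpsave);
 */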


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 *
 * MPSAFE
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
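
/*
 * For reference, a minimal userland sketch (illustration only, not
 * compiled) of the syscall above; sysmsg_fds[0]/[1] become fds[0]/[1],
 * with fds[0] conventionally read and fds[1] conventionally written:
 *
 *	int fds[2];
 *	char buf[4];
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "data", 4);
 *		read(fds[0], buf, 4);		(returns 4)
 *		close(fds[0]);
 *		close(fds[1]);
 *	}
 */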

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer,
				    size, PAGE_SIZE,
				    1, VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
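
/*
 * A worked example of the index convention used throughout this file:
 * rindex and windex are free-running unsigned counters and the buffer
 * size is a power of 2, so the fill level and the buffer offset fall
 * out of plain unsigned arithmetic, even across 32 bit wraparound:
 *
 *	size   = windex - rindex;		(bytes in the FIFO)
 *	offset = rindex & (buffer.size - 1);	(position of next byte)
 *
 * e.g. with a 16384-byte buffer, windex = 0x4100 and rindex = 0x3F00
 * give size = 0x200 (512 bytes pending) at offset 0x3F00.
 */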

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock, 1);
	lwkt_token_init(&cpipe->pipe_wlock, 1);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	size_t nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note, however, that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rpipe->pipe_rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&rpipe->pipe_wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}
		if (pipeseltest(rpipe)) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			pipeselwakeup(rpipe);
			lwkt_reltoken(&rpipe->pipe_wlock);
		}
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&wpipe->pipe_rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
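		/*
		 * (POSIX requires a write of PIPE_BUF bytes or less to be
		 *  atomic, so either all of it fits or we block; data from
		 *  concurrent small writers is never interleaved.)
		 */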

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on an
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both segments in one
			 * go so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&wpipe->pipe_rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&wpipe->pipe_rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&wpipe->pipe_rlock);
			}
		}
		if (pipeseltest(wpipe)) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			pipeselwakeup(wpipe);
			lwkt_reltoken(&wpipe->pipe_rlock);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wpipe->pipe_wlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&mpipe->pipe_rlock);
	lwkt_gettoken(&mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&mpipe->pipe_wlock);
	lwkt_reltoken(&mpipe->pipe_rlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&wpipe->pipe_wlock);
	lwkt_reltoken(&wpipe->pipe_rlock);
	lwkt_reltoken(&rpipe->pipe_wlock);
	lwkt_reltoken(&rpipe->pipe_rlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe->pipe_wlock);
		lwkt_reltoken(&ppipe->pipe_rlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe->pipe_wlock);
	lwkt_reltoken(&cpipe->pipe_rlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
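
/*
 * For reference, a minimal userland sketch (illustration only, not
 * compiled) of driving the filters above via kqueue(2); fds[] is the
 * pair returned by pipe(2) as in the sketch following sys_pipe():
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register the filter)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	(blocks until
 *						 filt_piperead() fires;
 *						 kev.data is the byte count)
 */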
1362