/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe blocked for reading");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe blocked for writing");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 0;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
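 *
 * The counter pointed to by ipp acts as a small state machine:
 * 0 means no uio is in progress, 1 means a uio is in progress,
 * and (u_int)-1 means a uio is in progress with another thread
 * waiting for it to finish.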
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, u_int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, u_int *ipp)
{
	if ((int)*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}
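
/*
 * pipe_get_mplock()/pipe_rel_mplock() bracket code which is only
 * conditionally MP-safe.  When kern.pipe.mpsafe is 0 (the default)
 * the mplock is acquired and *save is set so the matching release
 * knows to drop it; otherwise the pipe code runs under its LWKT
 * tokens alone.
 */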


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
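
/*
 * Illustrative only: from userland the two descriptors stored in
 * uap->sysmsg_fds[] above come back as fds[0] and fds[1] of pipe(2).
 * Note that both files are opened FREAD|FWRITE, so these pipes are
 * full-duplex; the conventional usage is still:
 *
 *	int fds[2];
 *	char c;
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);
 *		read(fds[0], &c, 1);
 *	}
 */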

/*
 * Allocate kva for the pipe circular buffer.  The space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
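 *
 * Note that the read/write code masks the ring indices with
 * (pipe_buffer.size - 1), so any size passed in here must be a
 * power of 2 (PIPE_SIZE and BIG_PIPE_SIZE are).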
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int orig_resid;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	/*
	 * Degenerate case
	 */
	orig_resid = uio->uio_resid;
	if (orig_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;
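
	/*
	 * windex and rindex are free-running counters; they are only
	 * masked down to a buffer offset when the ring is addressed.
	 * Because pipe_buffer.size is a power of 2, (windex - rindex)
	 * yields the number of bytes in the FIFO even if the counters
	 * have wrapped.
	 */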
	while (uio->uio_resid) {
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			if (nsize > (u_int)uio->uio_resid)
				nsize = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		crit_enter();
		tsleep_interlock(rpipe);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH, "piperd", 0);
		crit_exit();
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
	}
	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in the buffer, wake up any
	 * select/poll writers.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/*
		 * Writes of size <= PIPE_BUF must be atomic: POSIX requires
		 * that such a write not be interleaved with data from any
		 * other writer, so do not write a partial chunk.
		 */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			if (space > (u_int)uio->uio_resid)
				space = (u_int)uio->uio_resid;
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both segments in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			pipeselwakeup(wpipe);
			++wpipe->pipe_wantwcnt;
			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
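
/*
 * Locking notes (summarizing the comments above): pipe_rlock
 * serializes readers, pipe_wlock serializes writers, and pipe_state
 * transitions require both tokens.  windex is only advanced by the
 * writer and rindex only by the reader, each behind a cpu_mfence()
 * that orders the uiomove() ahead of the index update, so the count
 * the other side computes may lag but never includes data that has
 * not actually been copied.
 */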

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;
		wpipe->pipe_state |= PIPE_WEOF;
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		pipeselwakeup(rpipe);
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_WEOF;
		rpipe->pipe_state |= PIPE_REOF;
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		pipeselwakeup(wpipe);
		error = 0;
		break;
	}

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
1372