/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
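/*
 * Note: the leading '1' in the two initializers above is the f_isfd
 * field (assumption: positional initialization of struct filterops in
 * this era), marking both filters as attached to file descriptors.
 */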

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
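/*
 * Illustrative arithmetic, assuming the common 16K PIPE_SIZE from
 * <sys/pipe.h>: MINPIPESIZE evaluates to ~5461 bytes and MAXPIPESIZE
 * to ~10922 bytes.
 */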

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
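/*
 * A sketch of the *ipp protocol, inferred from the code below: 0 means
 * no uio is in progress, 1 means a uio is in progress, and -1 means a
 * uio is in progress with at least one other thread sleeping on ipp
 * (pipe_end_uio() issues the wakeup in that case).
 */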
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}
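/*
 * Callers bracket the file ops with pipe_get_mplock()/pipe_rel_mplock().
 * When kern.pipe.mpsafe is enabled on SMP the ops run without the MP
 * lock and rely on the per-pipe LWKT tokens instead; otherwise the MP
 * lock is taken here and *save records that it must be released.
 */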


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
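
/*
 * Userland view, for orientation (illustrative only):
 *
 *	int fds[2];
 *	char c;
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);		fds[1] is the write side
 *		read(fds[0], &c, 1);		fds[0] is the read side
 *	}
 *
 * Since both descriptors are opened FREAD|FWRITE above, the pipe is
 * actually full-duplex at this layer.
 */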

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails it will retain the old buffer and return ENOMEM.
 */
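/*
 * Ring indexing note: pipe_buffer.size must be a power of 2 for the
 * masking below to work.  rindex and windex are free-running unsigned
 * counters; the byte count in the FIFO is (windex - rindex), which
 * stays correct across 32-bit wraparound because u_int arithmetic is
 * modulo 2^32, and a counter is converted to a buffer offset by
 * masking with (size - 1).
 */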
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int orig_resid;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	/*
	 * Degenerate case
	 */
	orig_resid = uio->uio_resid;
	if (orig_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;
	while (uio->uio_resid) {
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			if (nsize > (u_int)uio->uio_resid)
				nsize = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
	}
	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in the buffer, wake up any
	 * select/poll writers.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
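
		/*
		 * Background for the check above: POSIX requires that a
		 * write of PIPE_BUF bytes or less be all-or-nothing, so
		 * forcing space to 0 makes a small write block (or fail
		 * with EAGAIN) rather than be split into partial
		 * transfers.
		 */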

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on an
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			if (space > (u_int)uio->uio_resid)
				space = (u_int)uio->uio_resid;
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

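			/*
			 * Worked example (assuming a 16K buffer, i.e.
			 * PIPE_SIZE == 16384): with windex == 16000 and
			 * space == 1000, segsize becomes 384, so 384
			 * bytes land at the end of the buffer and the
			 * remaining 616 wrap around to offset 0 in the
			 * second uiomove() below.
			 */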
#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
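/*
 * Userland reaches this via shutdown(2) on a pipe descriptor, e.g.
 * shutdown(fds[0], SHUT_RD) (illustrative); each direction of the
 * full-duplex pipe can be torn down independently.
 */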
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		/* release the MP lock acquired above before bailing */
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}
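
/*
 * Illustrative userland registration against a pipe descriptor
 * (standard kqueue usage, shown for orientation):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_piperead()/filt_pipewrite() below then compute readability and
 * writability from the windex/rindex counters.
 */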

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}