/*	$NetBSD: puffs_msgif.c,v 1.94 2013/10/17 21:03:27 christos Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.94 2013/10/17 21:03:27 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/syncfs/syncfs.h> /* XXX: for syncer_mutex reference */

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_msgpark {
	struct puffs_req	*park_preq;	/* req followed by buf	*/

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback */

	struct puffs_req	*park_creq;	/* non-compat preq	*/
	size_t			park_creqlen;	/* non-compat preq len	*/

	parkdone_fn		park_done;	/* "biodone" a'la puffs	*/
	void			*park_donearg;

	int			park_flags;
	int			park_refcount;

	kcondvar_t		park_cv;
	kmutex_t		park_mtx;

	TAILQ_ENTRY(puffs_msgpark) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01	/* waiter gave up and left	*/
#define PARKFLAG_DONE		0x02	/* request has been completed	*/
#define PARKFLAG_ONQUEUE1	0x04	/* on the to-userspace queue	*/
#define PARKFLAG_ONQUEUE2	0x08	/* on the reply-wait queue	*/
#define PARKFLAG_CALL		0x10	/* completion via callback	*/
#define PARKFLAG_WANTREPLY	0x20	/* not fire-and-forget		*/
#define	PARKFLAG_HASERROR	0x40	/* preq_rv already holds error	*/

static pool_cache_t parkpc;
#ifdef PUFFSDEBUG
static int totalpark;
#endif

int puffs_sopreq_expire_timeout = PUFFS_SOPREQ_EXPIRE_TIMEOUT;

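/*
 * Pool cache constructor/destructor for parks: the mutex and condition
 * variable survive in the cache between allocations, so they are set up
 * only in makepark() and torn down only in nukepark().
 */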
static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_msgpark *park = obj;

	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_msgpark *park = obj;

	cv_destroy(&park->park_cv);
	mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init(void)
{

	parkpc = pool_cache_init(sizeof(struct puffs_msgpark), 0, 0, 0,
	    "puffprkl", NULL, IPL_NONE, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy(void)
{

	pool_cache_destroy(parkpc);
}

static struct puffs_msgpark *
puffs_msgpark_alloc(int waitok)
{
	struct puffs_msgpark *park;

	KASSERT(curlwp != uvm.pagedaemon_lwp || !waitok);

	park = pool_cache_get(parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park == NULL)
		return park;

	park->park_refcount = 1;
	park->park_preq = park->park_creq = NULL;
	park->park_flags = PARKFLAG_WANTREPLY;

#ifdef PUFFSDEBUG
	totalpark++;
#endif

	return park;
}

static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

	KASSERT(mutex_owned(&park->park_mtx));
	park->park_refcount++;
}

/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	struct puffs_req *creq = park->park_creq;
	int refcnt;

	KASSERT(mutex_owned(&park->park_mtx));
	refcnt = park->park_refcount -= howmany;
	mutex_exit(&park->park_mtx);

	KASSERT(refcnt >= 0);

	if (refcnt == 0) {
		if (preq)
			kmem_free(preq, park->park_maxlen);
#if 1
		if (creq)
			kmem_free(creq, park->park_creqlen);
#endif
		pool_cache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
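/* Common case: drop a single (access) reference. */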
#define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)

#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_msgpark *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_preq->preq_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_msgpark *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));
}
#endif /* PUFFSDEBUG */

/*
 * A word about locking in the park structures: the lock protects the
 * fields of the *park* structure (not preq) and acts as an interlock
 * in cv operations.  The lock is always internal to this module and
 * callers do not need to worry about it.
 */
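/*
 * A sketch of the usual calling sequence as seen from e.g. the vnode
 * ops (the real callers typically go through the PUFFS_MSG_* wrapper
 * macros; "PUFFS_VN_FOO" below stands in for a real operation type):
 *
 *	puffs_msgmem_alloc(len, &park, &mem, 1);
 *	puffs_msg_setinfo(park, PUFFSOP_VN, PUFFS_VN_FOO, cookie);
 *	puffs_msg_enqueue(pmp, park);
 *	error = puffs_msg_wait(pmp, park);
 *	puffs_msgmem_release(park);
 */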

int
puffs_msgmem_alloc(size_t len, struct puffs_msgpark **ppark, void **mem,
	int cansleep)
{
	struct puffs_msgpark *park;
	void *m;

	KASSERT(curlwp != uvm.pagedaemon_lwp || !cansleep);
	m = kmem_zalloc(len, cansleep ? KM_SLEEP : KM_NOSLEEP);
	if (m == NULL) {
		KASSERT(cansleep == 0);
		return ENOMEM;
	}

	park = puffs_msgpark_alloc(cansleep);
	if (park == NULL) {
		KASSERT(cansleep == 0);
		kmem_free(m, len);
		return ENOMEM;
	}

	park->park_preq = m;
	park->park_maxlen = park->park_copylen = len;

	*ppark = park;
	*mem = m;

	return 0;
}

void
puffs_msgmem_release(struct puffs_msgpark *park)
{

	if (park == NULL)
		return;

	mutex_enter(&park->park_mtx);
	puffs_msgpark_release(park);
}

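/* Mark the message fire-and-forget: no reply is expected. */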
void
puffs_msg_setfaf(struct puffs_msgpark *park)
{

	KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}

void
puffs_msg_setdelta(struct puffs_msgpark *park, size_t delta)
{

	KASSERT(delta < park->park_maxlen); /* "<=" wouldn't make sense */
	park->park_copylen = park->park_maxlen - delta;
}

void
puffs_msg_setinfo(struct puffs_msgpark *park, int class, int type,
	puffs_cookie_t ck)
{

	park->park_preq->preq_opclass = PUFFSOP_OPCLASS(class);
	park->park_preq->preq_optype = type;
	park->park_preq->preq_cookie = ck;
}

void
puffs_msg_setcall(struct puffs_msgpark *park, parkdone_fn donefn, void *donearg)
{

	KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags |= PARKFLAG_CALL;
}

/*
 * kernel-user-kernel waitqueues
 */

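/* Grab the next message id; serialized by pmp_lock. */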
static uint64_t
puffs_getmsgid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextmsgid++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * A word about reference counting of parks.  A reference must be taken
 * when accessing a park and additionally when it is on a queue.  So
 * when taking it off a queue and releasing the access reference, the
 * reference count is generally decremented by 2.
 */

void
puffs_msg_enqueue(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct lwp *l = curlwp;
	struct puffs_req *preq, *creq;
	ssize_t delta;

	/*
	 * Some clients reuse a park, so reset some flags.  We might
	 * want to provide a caller-side interface for this and add
	 * a few more invariant checks here, but this will do for now.
	 */
	park->park_flags &= ~(PARKFLAG_DONE | PARKFLAG_HASERROR);
	KASSERT((park->park_flags & PARKFLAG_WAITERGONE) == 0);

	preq = park->park_preq;

#if 1
	/* check if we do compat adjustments */
	if (pmp->pmp_docompat && puffs_compat_outgoing(preq, &creq, &delta)) {
		park->park_creq = park->park_preq;
		park->park_creqlen = park->park_maxlen;

		park->park_maxlen += delta;
		park->park_copylen += delta;
		park->park_preq = preq = creq;
	}
#endif

	preq->preq_buflen = park->park_maxlen;
	KASSERT(preq->preq_id == 0
	    || (preq->preq_opclass & PUFFSOPFLAG_ISRESPONSE));

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0)
		preq->preq_opclass |= PUFFSOPFLAG_FAF;
	else
		preq->preq_id = puffs_getmsgid(pmp);

	/* fill in caller information */
	preq->preq_pid = l->l_proc->p_pid;
	preq->preq_lid = l->l_lid;

	/*
	 * To support cv_sig, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE/RECLAIM.  In that case,
	 * convert it to a FAF, fire off to the file server and return
	 * an error.  Yes, this is bordering on disgusting.  Barfbags are
	 * on me.
	 */
	if (__predict_false((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))) {
		sigset_t ss;

		/*
		 * see the comment about signals in puffs_msg_wait.
		 */
		sigpending1(l, &ss);
		if (sigismember(&ss, SIGINT) ||
		    sigismember(&ss, SIGTERM) ||
		    sigismember(&ss, SIGKILL) ||
		    sigismember(&ss, SIGHUP) ||
		    sigismember(&ss, SIGQUIT)) {
			park->park_flags |= PARKFLAG_HASERROR;
			preq->preq_rv = EINTR;
			if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
			    && (preq->preq_optype == PUFFS_VN_INACTIVE
			     || preq->preq_optype == PUFFS_VN_RECLAIM)) {
				park->park_preq->preq_opclass |=
				    PUFFSOPFLAG_FAF;
				park->park_flags &= ~PARKFLAG_WANTREPLY;
				DPRINTF(("puffs_msg_enqueue: "
				    "converted to FAF %p\n", park));
			} else {
				return;
			}
		}
	}

	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = ENXIO;
		return;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_msg_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_msg_replywait, puffsdebug > 1);
#endif

	/*
	 * Note: we don't need to lock park since we have the only
	 * reference to it at this point.
	 */
	TAILQ_INSERT_TAIL(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_msg_touser_count++;
	park->park_refcount++;
	mutex_exit(&pmp->pmp_lock);

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
	putter_notify(pmp->pmp_pi);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));
}

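/*
 * Wait for a response to a previously enqueued message.  Blocks
 * interruptibly (only the "important" signals listed below get
 * through) and returns the value from preq_rv, or EINTR if the
 * wait was cut short by a signal.
 */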
int
puffs_msg_wait(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	lwp_t *l = curlwp;
	proc_t *p = l->l_proc;
	struct puffs_req *preq = park->park_preq; /* XXX: hmmm */
	sigset_t ss;
	sigset_t oss;
	int error = 0;
	int rv;

	/*
	 * block unimportant signals.
	 *
	 * The set of "important" signals here was chosen to be the same
	 * as for an NFS interruptible mount.
	 */
	sigfillset(&ss);
	sigdelset(&ss, SIGINT);
	sigdelset(&ss, SIGTERM);
	sigdelset(&ss, SIGKILL);
	sigdelset(&ss, SIGHUP);
	sigdelset(&ss, SIGQUIT);
	mutex_enter(p->p_lock);
	sigprocmask1(l, SIG_BLOCK, &ss, &oss);
	mutex_exit(p->p_lock);

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(&park->park_mtx);
	/* did the response beat us to the wait? */
	if (__predict_false((park->park_flags & PARKFLAG_DONE)
	    || (park->park_flags & PARKFLAG_HASERROR))) {
		rv = park->park_preq->preq_rv;
		mutex_exit(&park->park_mtx);
		goto skipwait;
	}

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0
	    || (park->park_flags & PARKFLAG_CALL)) {
		mutex_exit(&park->park_mtx);
		rv = 0;
		goto skipwait;
	}

	error = cv_wait_sig(&park->park_cv, &park->park_mtx);
	DPRINTF(("puffs_touser: waiter for %p woke up with %d\n",
	    park, error));
	if (error) {
		park->park_flags |= PARKFLAG_WAITERGONE;
		if (park->park_flags & PARKFLAG_DONE) {
			rv = preq->preq_rv;
			mutex_exit(&park->park_mtx);
		} else {
			/*
			 * ok, we marked it as going away, but
			 * still need to do queue ops.  take locks
			 * in correct order.
			 *
			 * We don't want to release our reference
			 * if it's on the replywait queue, to avoid
			 * signalling an error to the file server.
			 * putop() code will DTRT.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);

			/*
			 * Still on queue1?  We can safely remove it
			 * without any consequences since the file
			 * server hasn't seen it.  "else" we need to
			 * wait for the response and just ignore it
			 * to avoid signalling an incorrect error to
			 * the file server.
			 */
			if (park->park_flags & PARKFLAG_ONQUEUE1) {
				TAILQ_REMOVE(&pmp->pmp_msg_touser,
				    park, park_entries);
				puffs_msgpark_release(park);
				pmp->pmp_msg_touser_count--;
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
			} else {
				mutex_exit(&park->park_mtx);
			}
			mutex_exit(&pmp->pmp_lock);

			rv = EINTR;
		}
	} else {
		rv = preq->preq_rv;
		mutex_exit(&park->park_mtx);
	}

 skipwait:
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(p->p_lock);
	sigprocmask1(l, SIG_SETMASK, &oss, NULL);
	mutex_exit(p->p_lock);

	return rv;
}

/*
 * XXX: this suuuucks.  Hopefully I'll get rid of this lossage once
 * the whole setback-nonsense gets fixed.
 */
int
puffs_msg_wait2(struct puffs_mount *pmp, struct puffs_msgpark *park,
	struct puffs_node *pn1, struct puffs_node *pn2)
{
	struct puffs_req *preq;
	int rv;

	rv = puffs_msg_wait(pmp, park);

	preq = park->park_preq;
	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N1)
		pn1->pn_stat |= PNODE_DOINACT;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N2)
		pn2->pn_stat |= PNODE_DOINACT;

	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1)
		pn1->pn_stat |= PNODE_NOREFS;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2)
		pn2->pn_stat |= PNODE_NOREFS;

	return rv;
}

/*
 * XXX: lazy bum.  please, for the love of foie gras, fix me.
 * This should *NOT* depend on setfaf.  Also "memcpy" could
 * be done more nicely.
 */
void
puffs_msg_sendresp(struct puffs_mount *pmp, struct puffs_req *origpreq, int rv)
{
	struct puffs_msgpark *park;
	struct puffs_req *preq;

	puffs_msgmem_alloc(sizeof(struct puffs_req), &park, (void *)&preq, 1);
	puffs_msg_setfaf(park); /* XXXXXX: avoids reqid override */

	memcpy(preq, origpreq, sizeof(struct puffs_req));
	preq->preq_rv = rv;
	preq->preq_opclass |= PUFFSOPFLAG_ISRESPONSE;

	puffs_msg_enqueue(pmp, park);
	puffs_msgmem_release(park);
}

/*
 * Get the next request in the outgoing queue.  "maxsize" controls the
 * size the caller can accommodate and "nonblock" signals whether we
 * should block while waiting for input.  Handles all locking internally.
 */
int
puffs_msgif_getout(void *this, size_t maxsize, int nonblock,
	uint8_t **data, size_t *dlen, void **parkptr)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park = NULL;
	struct puffs_req *preq = NULL;
	int error;

	error = 0;
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	for (;;) {
		/* RIP? */
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			error = ENXIO;
			break;
		}

		/* need platinum yendorian express card? */
		if (TAILQ_EMPTY(&pmp->pmp_msg_touser)) {
			DPRINTF(("puffs_getout: no outgoing op, "));
			if (nonblock) {
				DPRINTF(("returning EWOULDBLOCK\n"));
				error = EWOULDBLOCK;
				break;
			}
			DPRINTF(("waiting ...\n"));

			error = cv_wait_sig(&pmp->pmp_msg_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				break;
			else
				continue;
		}

		park = TAILQ_FIRST(&pmp->pmp_msg_touser);
		if (park == NULL)
			continue;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);

		DPRINTF(("puffs_getout: found park at %p, ", park));

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			DPRINTF(("waitergone!\n"));
			puffs_msgpark_release(park);
			continue;
		}
		preq = park->park_preq;

#if 0
		/* check size */
		/*
		 * XXX: this check is not valid for now, we don't know
		 * the size of the caller's input buffer.  i.e. this
		 * will most likely go away
		 */
		if (maxsize < preq->preq_frhdr.pfr_len) {
			DPRINTF(("buffer too small\n"));
			puffs_msgpark_release(park);
			error = E2BIG;
			break;
		}
#endif

		DPRINTF(("returning\n"));

		/*
		 * Ok, we found what we came for.  Release it from the
		 * outgoing queue but do not unlock.  We will unlock
		 * only after we "releaseout" it to avoid complications:
		 * otherwise it is (theoretically) possible for userland
		 * to race us into "put" before we have a chance to put
		 * this baby on the receiving queue.
		 */
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&park->park_mtx);

		pmp->pmp_msg_touser_count--;
		KASSERT(pmp->pmp_msg_touser_count >= 0);

		break;
	}
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	if (error == 0) {
		*data = (uint8_t *)preq;
		preq->preq_pth.pth_framelen = park->park_copylen;
		*dlen = preq->preq_pth.pth_framelen;
		*parkptr = park;
	}

	return error;
}

/*
 * Release outgoing structure.  Now, depending on the success of the
 * outgoing send, it is either going onto the result waiting queue
 * or the death chamber.
 */
void
puffs_msgif_releaseout(void *this, void *parkptr, int status)
{
	struct puffs_mount *pmp = this;
	struct puffs_msgpark *park = parkptr;

	DPRINTF(("puffs_releaseout: returning park %p, errno %d: ",
	    park, status));
	mutex_enter(&pmp->pmp_lock);
	mutex_enter(&park->park_mtx);
	if (park->park_flags & PARKFLAG_WANTREPLY) {
		if (status == 0) {
			DPRINTF(("enqueue replywait\n"));
			TAILQ_INSERT_TAIL(&pmp->pmp_msg_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
		} else {
			DPRINTF(("error path!\n"));
			park->park_preq->preq_rv = status;
			park->park_flags |= PARKFLAG_DONE;
			cv_signal(&park->park_cv);
		}
		puffs_msgpark_release(park);
	} else {
		DPRINTF(("release\n"));
		puffs_msgpark_release1(park, 2);
	}
	mutex_exit(&pmp->pmp_lock);
}

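/* Return the number of messages waiting for transport to userspace. */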
size_t
puffs_msgif_waitcount(void *this)
{
	struct puffs_mount *pmp = this;
	size_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_msg_touser_count;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: locking with this one?
 */
static void
puffsop_msg(void *this, struct puffs_req *preq)
{
	struct puffs_mount *pmp = this;
	struct putter_hdr *pth = &preq->preq_pth;
	struct puffs_msgpark *park;
	int wgone;

	mutex_enter(&pmp->pmp_lock);

	/* Locate waiter */
	TAILQ_FOREACH(park, &pmp->pmp_msg_replywait, park_entries) {
		if (park->park_preq->preq_id == preq->preq_id)
			break;
	}
	if (park == NULL) {
		DPRINTF(("puffsop_msg: no request: %" PRIu64 "\n",
		    preq->preq_id));
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX send error */
	}

	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);
	if (pth->pth_framelen > park->park_maxlen) {
		DPRINTF(("puffsop_msg: invalid buffer length: "
		    "%" PRIu64 " (req %" PRIu64 ")\n", pth->pth_framelen,
		    preq->preq_id));
		park->park_preq->preq_rv = EPROTO;
		cv_signal(&park->park_cv);
		puffs_msgpark_release1(park, 2);
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX: error */
	}
	wgone = park->park_flags & PARKFLAG_WAITERGONE;

	KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
	TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE2;
	mutex_exit(&pmp->pmp_lock);

	if (wgone) {
		DPRINTF(("puffsop_msg: bad service - waiter gone for "
		    "park %p\n", park));
	} else {
#if 1
		if (park->park_creq) {
			struct puffs_req *creq;
			size_t csize;

			KASSERT(pmp->pmp_docompat);
			puffs_compat_incoming(preq, park->park_creq);
			creq = park->park_creq;
			csize = park->park_creqlen;
			park->park_creq = park->park_preq;
			park->park_creqlen = park->park_maxlen;

			park->park_preq = creq;
			park->park_maxlen = csize;

			memcpy(park->park_creq, preq, pth->pth_framelen);
		} else {
#endif
			memcpy(park->park_preq, preq, pth->pth_framelen);
		}

		if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsop_msg: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
			park->park_done(pmp, preq, park->park_donearg);
		}
	}

	if (!wgone) {
		DPRINTF(("puffsop_msg: flagging done for "
		    "park %p\n", park));
		cv_signal(&park->park_cv);
	}

	park->park_flags |= PARKFLAG_DONE;
	puffs_msgpark_release1(park, 2);
}

/*
 * Node expiry.  We come here after an inactive on an unexpired node.
 * The expiry has been queued and is done in the sop thread.
 */
static void
puffsop_expire(struct puffs_mount *pmp, puffs_cookie_t cookie)
{
	struct vnode *vp;

	KASSERT(PUFFS_USE_FS_TTL(pmp));

	/*
	 * If it still exists and has no reference,
	 * vrele should cause it to be reclaimed.
	 * Otherwise, we have nothing to do.
	 */
	if (puffs_cookie2vnode(pmp, cookie, 0, 0, &vp) == 0) {
		VPTOPP(vp)->pn_stat &= ~PNODE_SOPEXP;
		vrele(vp);
	}

	return;
}

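/*
 * Handle a flush/invalidation request from the file server: purge the
 * name cache or flush/free a range of the page cache, then send the
 * result back.
 */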
static void
puffsop_flush(struct puffs_mount *pmp, struct puffs_flush *pf)
{
	struct vnode *vp;
	voff_t offlo, offhi;
	int rv, flags = 0;

	KASSERT(pf->pf_req.preq_pth.pth_framelen == sizeof(struct puffs_flush));

	/* XXX: slurry */
	if (pf->pf_op == PUFFS_INVAL_NAMECACHE_ALL) {
		cache_purgevfs(PMPTOMP(pmp));
		rv = 0;
		goto out;
	}

	/*
	 * Get vnode, don't lock it.  Namecache is protected by its own lock
	 * and we have a reference to protect against premature harvesting.
	 *
	 * The node we want here might be locked and the op is in
	 * userspace waiting for us to complete ==> deadlock.  Another
	 * reason we need to eventually bump locking to userspace, as we
	 * will need to lock the node if we wish to do flushes.
	 */
	rv = puffs_cookie2vnode(pmp, pf->pf_cookie, 0, 0, &vp);
	if (rv) {
		if (rv == PUFFS_NOSUCHCOOKIE)
			rv = ENOENT;
		goto out;
	}

	switch (pf->pf_op) {
#if 0
	/* not quite ready, yet */
	case PUFFS_INVAL_NAMECACHE_NODE:
	struct componentname *pf_cn;
	char *name;
		/* get comfortab^Wcomponentname */
		pf_cn = kmem_alloc(componentname);
		memset(pf_cn, 0, sizeof(struct componentname));
		break;

#endif
	case PUFFS_INVAL_NAMECACHE_DIR:
		if (vp->v_type != VDIR) {
			rv = EINVAL;
			break;
		}
		cache_purge1(vp, NULL, 0, PURGE_CHILDREN);
		break;

	case PUFFS_INVAL_PAGECACHE_NODE_RANGE:
		flags = PGO_FREE;
		/*FALLTHROUGH*/
	case PUFFS_FLUSH_PAGECACHE_NODE_RANGE:
		if (flags == 0)
			flags = PGO_CLEANIT;

		if (pf->pf_end > vp->v_size || vp->v_type != VREG) {
			rv = EINVAL;
			break;
		}

		offlo = trunc_page(pf->pf_start);
		offhi = round_page(pf->pf_end);
		if (offhi != 0 && offlo >= offhi) {
			rv = EINVAL;
			break;
		}

		mutex_enter(vp->v_uobj.vmobjlock);
		rv = VOP_PUTPAGES(vp, offlo, offhi, flags);
		break;

	default:
		rv = EINVAL;
	}

	vrele(vp);

 out:
	puffs_msg_sendresp(pmp, &pf->pf_req, rv);
}

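/*
 * Entry point for frames arriving from the file server via putter.
 * Validate the frame and either process it directly (vn/vfs responses)
 * or hand it over to the sop thread.
 */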
int
puffs_msgif_dispatch(void *this, struct putter_hdr *pth)
{
	struct puffs_mount *pmp = this;
	struct puffs_req *preq = (struct puffs_req *)pth;
	struct puffs_sopreq *psopr;

	if (pth->pth_framelen < sizeof(struct puffs_req)) {
		puffs_msg_sendresp(pmp, preq, EINVAL); /* E2SMALL */
		return 0;
	}

	switch (PUFFSOP_OPCLASS(preq->preq_opclass)) {
	case PUFFSOP_VN:
	case PUFFSOP_VFS:
		DPRINTF(("dispatch: vn/vfs message 0x%x\n", preq->preq_optype));
		puffsop_msg(pmp, preq);
		break;

	case PUFFSOP_FLUSH: /* process in sop thread */
	{
		struct puffs_flush *pf;

		DPRINTF(("dispatch: flush 0x%x\n", preq->preq_optype));

		if (preq->preq_pth.pth_framelen != sizeof(struct puffs_flush)) {
			puffs_msg_sendresp(pmp, preq, EINVAL); /* E2SMALL */
			break;
		}
		pf = (struct puffs_flush *)preq;

		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		memcpy(&psopr->psopr_pf, pf, sizeof(*pf));
		psopr->psopr_sopreq = PUFFS_SOPREQ_FLUSH;

		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			puffs_msg_sendresp(pmp, preq, ENXIO);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
			mutex_exit(&pmp->pmp_sopmtx);
		}
		break;
	}

	case PUFFSOP_UNMOUNT: /* process in sop thread */
	{

		DPRINTF(("dispatch: unmount 0x%x\n", preq->preq_optype));

		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_preq = *preq;
		psopr->psopr_sopreq = PUFFS_SOPREQ_UNMOUNT;

		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			puffs_msg_sendresp(pmp, preq, ENXIO);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
			mutex_exit(&pmp->pmp_sopmtx);
		}
		break;
	}

	default:
		DPRINTF(("dispatch: invalid class 0x%x\n", preq->preq_opclass));
		puffs_msg_sendresp(pmp, preq, EOPNOTSUPP);
		break;
	}

	return 0;
}

/*
 * Work loop for the thread processing all ops from the server which
 * cannot safely be handled in caller context.  This includes
 * everything which might need a lock currently "held" by the file
 * server, i.e. a long-term kernel lock which will be released only
 * once the file server acknowledges a request.
 */
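/*
 * Compare hardclock_ticks against an expiry time using unsigned
 * subtraction so the test keeps working when the tick counter wraps.
 */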
#define TIMED_OUT(expire) \
    ((int)((unsigned int)hardclock_ticks - (unsigned int)expire) > 0)
void
puffs_sop_thread(void *arg)
{
	struct puffs_mount *pmp = arg;
	struct mount *mp = PMPTOMP(pmp);
	struct puffs_sopreq *psopr;
	bool keeprunning;
	bool unmountme = false;
	int timeo;

	timeo = PUFFS_USE_FS_TTL(pmp) ? puffs_sopreq_expire_timeout : 0;

	mutex_enter(&pmp->pmp_sopmtx);
	for (keeprunning = true; keeprunning; ) {
		/*
		 * We have a fast queue for flush and umount, and a node
		 * queue for delayed node reclaims.  Requests on the node
		 * queue are not honoured before the clock reaches
		 * psopr_at.  This code assumes that requests are ordered
		 * by psopr_at.
		 */
		do {
			psopr = TAILQ_FIRST(&pmp->pmp_sopfastreqs);
			if (psopr != NULL) {
				TAILQ_REMOVE(&pmp->pmp_sopfastreqs,
					     psopr, psopr_entries);
				break;
			}

			psopr = TAILQ_FIRST(&pmp->pmp_sopnodereqs);
			if ((psopr != NULL) && TIMED_OUT(psopr->psopr_at)) {
				TAILQ_REMOVE(&pmp->pmp_sopnodereqs,
					     psopr, psopr_entries);
				break;
			}

			cv_timedwait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx, timeo);
		} while (1 /* CONSTCOND */);

		mutex_exit(&pmp->pmp_sopmtx);

		switch (psopr->psopr_sopreq) {
		case PUFFS_SOPREQSYS_EXIT:
			keeprunning = false;
			break;
		case PUFFS_SOPREQ_FLUSH:
			puffsop_flush(pmp, &psopr->psopr_pf);
			break;
		case PUFFS_SOPREQ_EXPIRE:
			puffsop_expire(pmp, psopr->psopr_ck);
			break;
		case PUFFS_SOPREQ_UNMOUNT:
			puffs_msg_sendresp(pmp, &psopr->psopr_preq, 0);

			unmountme = true;
			keeprunning = false;

			/*
			 * We know the mountpoint is still alive because
			 * the thread that is us (poetic?) is still alive.
			 */
			atomic_inc_uint((unsigned int*)&mp->mnt_refcnt);
			break;
		}

		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	/*
	 * Purge remaining ops.
	 */
	while ((psopr = TAILQ_FIRST(&pmp->pmp_sopfastreqs)) != NULL) {
		TAILQ_REMOVE(&pmp->pmp_sopfastreqs, psopr, psopr_entries);
		mutex_exit(&pmp->pmp_sopmtx);
		puffs_msg_sendresp(pmp, &psopr->psopr_preq, ENXIO);
		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	while ((psopr = TAILQ_FIRST(&pmp->pmp_sopnodereqs)) != NULL) {
		TAILQ_REMOVE(&pmp->pmp_sopnodereqs, psopr, psopr_entries);
		mutex_exit(&pmp->pmp_sopmtx);
		KASSERT(psopr->psopr_sopreq == PUFFS_SOPREQ_EXPIRE);
		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	pmp->pmp_sopthrcount--;
	cv_broadcast(&pmp->pmp_sopcv);
	mutex_exit(&pmp->pmp_sopmtx); /* not allowed to access fs after this */

	/*
	 * If unmount was requested, we can now safely do it here, since
	 * our context is dead from the point-of-view of puffs_unmount()
	 * and we are just another thread.  dounmount() makes internally
	 * sure that VFS_UNMOUNT() isn't called reentrantly and that it
	 * is eventually completed.
	 */
	if (unmountme) {
		(void)dounmount(mp, MNT_FORCE, curlwp);
		vfs_destroy(mp);
	}

	kthread_exit(0);
}

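/*
 * Called when the file server (the putter device) goes away: declare
 * the file system dead and force-unmount it.
 */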
int
puffs_msgif_close(void *this)
{
	struct puffs_mount *pmp = this;
	struct mount *mp = PMPTOMP(pmp);

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);

	/*
	 * Free the waiting callers before proceeding any further.
	 * The syncer might be jogging around in this file system
	 * currently.  If we allow it to go to the userspace of no
	 * return while trying to get the syncer lock, well ...
	 */
	puffs_userdead(pmp);

	/*
	 * Make sure someone from puffs_unmount() isn't currently in
	 * userspace.  If we don't take this precautionary step,
	 * they might notice that the mountpoint has disappeared
	 * from under them once they return.  Especially note that we
	 * cannot simply test for an unmounter before calling
	 * dounmount(), since it might be possible that that particular
	 * invocation of unmount was called without MNT_FORCE.  Here we
	 * *must* make sure unmount succeeds.  Also, restart is necessary
	 * since pmp isn't locked.  We might end up with PUTTER_DEAD after
	 * restart and exit from there.
	 */
	if (pmp->pmp_unmounting) {
		cv_wait(&pmp->pmp_unmounting_cv, &pmp->pmp_lock);
		puffs_mp_release(pmp);
		mutex_exit(&pmp->pmp_lock);
		DPRINTF(("puffs_fop_close: unmount was in progress for pmp %p, "
		    "restart\n", pmp));
		return ERESTART;
	}

	/* Won't access pmp from here anymore */
	atomic_inc_uint((unsigned int*)&mp->mnt_refcnt);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	/* Detach from VFS. */
	(void)dounmount(mp, MNT_FORCE, curlwp);
	vfs_destroy(mp);

	return 0;
}

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_msgpark *park, *park_next;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_touser); park; park = park_next) {

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_msg_touser_count--;

		/*
		 * Even though waiters on QUEUE1 are removed in touser()
		 * in case of WAITERGONE, it is still possible for us to
		 * get raced here due to having to retake locks in said
		 * touser().  In the race case simply "ignore" the item
		 * on the queue and move on to the next one.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
			puffs_msgpark_release(park);

		} else {
			park->park_preq->preq_rv = ENXIO;

			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else if ((park->park_flags & PARKFLAG_WANTREPLY)==0) {
				puffs_msgpark_release1(park, 2);
			} else {
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_replywait); park;
	    park = park_next) {
		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

		TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			puffs_msgpark_release(park);
		} else {
			park->park_preq->preq_rv = ENXIO;
			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else {
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
}