/*	$NetBSD: puffs_msgif.c,v 1.107 2024/02/09 22:08:37 andvar Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.107 2024/02/09 22:08:37 andvar Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/compat_stub.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

/*
 * waitq data structures
 */

/*
 * While a request is going to userspace, park the caller within the
 * kernel.  This is the kernel counterpart of "struct puffs_req".
 */
struct puffs_msgpark {
	struct puffs_req	*park_preq;	/* req followed by buf	*/

	size_t			park_copylen;	/* userspace copylength	*/
	size_t			park_maxlen;	/* max size in comeback	*/

	struct puffs_req	*park_creq;	/* non-compat preq	*/
	size_t			park_creqlen;	/* non-compat preq len	*/

	parkdone_fn		park_done;	/* "biodone" a'la puffs	*/
	void			*park_donearg;

	int			park_flags;
	int			park_refcount;

	kcondvar_t		park_cv;
	kmutex_t		park_mtx;

	TAILQ_ENTRY(puffs_msgpark) park_entries;
};
#define PARKFLAG_WAITERGONE	0x01
#define PARKFLAG_DONE		0x02
#define PARKFLAG_ONQUEUE1	0x04
#define PARKFLAG_ONQUEUE2	0x08
#define PARKFLAG_CALL		0x10
#define PARKFLAG_WANTREPLY	0x20
#define PARKFLAG_HASERROR	0x40
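
/*
 * Rough flag life cycle, as derived from the code below: a message
 * waiting to be sent to userspace is ONQUEUE1, one sent and awaiting
 * a reply is ONQUEUE2, and DONE marks a completed (or failed)
 * transaction.  WAITERGONE means the kernel-side waiter gave up
 * (e.g. due to a signal) while the file server still held the message.
 */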

static pool_cache_t parkpc;
#ifdef PUFFSDEBUG
static int totalpark;
#endif

int puffs_sopreq_expire_timeout = PUFFS_SOPREQ_EXPIRE_TIMEOUT;

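/*
 * Pool cache constructor/destructor pair: the park mutex and condvar
 * are initialized once per cached object and survive across
 * pool_cache_get()/pool_cache_put() cycles.
 */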
static int
makepark(void *arg, void *obj, int flags)
{
	struct puffs_msgpark *park = obj;

	mutex_init(&park->park_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&park->park_cv, "puffsrpl");

	return 0;
}

static void
nukepark(void *arg, void *obj)
{
	struct puffs_msgpark *park = obj;

	cv_destroy(&park->park_cv);
	mutex_destroy(&park->park_mtx);
}

void
puffs_msgif_init(void)
{

	parkpc = pool_cache_init(sizeof(struct puffs_msgpark), 0, 0, 0,
	    "puffprkl", NULL, IPL_NONE, makepark, nukepark, NULL);
}

void
puffs_msgif_destroy(void)
{

	pool_cache_destroy(parkpc);
}

static struct puffs_msgpark *
puffs_msgpark_alloc(int waitok)
{
	struct puffs_msgpark *park;

	KASSERT(curlwp != uvm.pagedaemon_lwp || !waitok);

	park = pool_cache_get(parkpc, waitok ? PR_WAITOK : PR_NOWAIT);
	if (park == NULL)
		return park;

	park->park_refcount = 1;
	park->park_preq = park->park_creq = NULL;
	park->park_flags = PARKFLAG_WANTREPLY;

#ifdef PUFFSDEBUG
	totalpark++;
#endif

	return park;
}

static void
puffs_msgpark_reference(struct puffs_msgpark *park)
{

	KASSERT(mutex_owned(&park->park_mtx));
	park->park_refcount++;
}

/*
 * Release reference to park structure.
 */
static void
puffs_msgpark_release1(struct puffs_msgpark *park, int howmany)
{
	struct puffs_req *preq = park->park_preq;
	struct puffs_req *creq = park->park_creq;
	int refcnt;

	KASSERT(mutex_owned(&park->park_mtx));
	refcnt = park->park_refcount -= howmany;
	mutex_exit(&park->park_mtx);

	KASSERT(refcnt >= 0);

	if (refcnt == 0) {
		if (preq)
			kmem_free(preq, park->park_maxlen);
#if 1
		if (creq)
			kmem_free(creq, park->park_creqlen);
#endif
		pool_cache_put(parkpc, park);

#ifdef PUFFSDEBUG
		totalpark--;
#endif
	}
}
#define puffs_msgpark_release(a) puffs_msgpark_release1(a, 1)

#ifdef PUFFSDEBUG
static void
parkdump(struct puffs_msgpark *park)
{

	DPRINTF(("park %p, preq %p, id %" PRIu64 "\n"
	    "\tcopy %zu, max %zu - done: %p/%p\n"
	    "\tflags 0x%08x, refcount %d, cv/mtx: %p/%p\n",
	    park, park->park_preq, park->park_preq->preq_id,
	    park->park_copylen, park->park_maxlen,
	    park->park_done, park->park_donearg,
	    park->park_flags, park->park_refcount,
	    &park->park_cv, &park->park_mtx));
}

static void
parkqdump(struct puffs_wq *q, int dumpall)
{
	struct puffs_msgpark *park;
	int total = 0;

	TAILQ_FOREACH(park, q, park_entries) {
		if (dumpall)
			parkdump(park);
		total++;
	}
	DPRINTF(("puffs waitqueue at %p dumped, %d total\n", q, total));

}
#endif /* PUFFSDEBUG */

/*
 * A word about locking in the park structures: the lock protects the
 * fields of the *park* structure (not preq) and acts as an interlock
 * in cv operations.  The lock is always internal to this module and
 * callers do not need to worry about it.
 */

int
puffs_msgmem_alloc(size_t len, struct puffs_msgpark **ppark, void **mem,
	int cansleep)
{
	struct puffs_msgpark *park;
	void *m;

	KASSERT(curlwp != uvm.pagedaemon_lwp || !cansleep);
	m = kmem_zalloc(len, cansleep ? KM_SLEEP : KM_NOSLEEP);
	if (m == NULL) {
		KASSERT(cansleep == 0);
		return ENOMEM;
	}

	park = puffs_msgpark_alloc(cansleep);
	if (park == NULL) {
		KASSERT(cansleep == 0);
		kmem_free(m, len);
		return ENOMEM;
	}

	park->park_preq = m;
	park->park_maxlen = park->park_copylen = len;

	*ppark = park;
	*mem = m;

	return 0;
}

void
puffs_msgmem_release(struct puffs_msgpark *park)
{

	if (park == NULL)
		return;

	mutex_enter(&park->park_mtx);
	puffs_msgpark_release(park);
}

void
puffs_msg_setfaf(struct puffs_msgpark *park)
{

	KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
	park->park_flags &= ~PARKFLAG_WANTREPLY;
}

void
puffs_msg_setdelta(struct puffs_msgpark *park, size_t delta)
{

	KASSERT(delta < park->park_maxlen); /* "<=" wouldn't make sense */
	park->park_copylen = park->park_maxlen - delta;
}
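
/*
 * Example (illustrative): a caller that reserved park_maxlen bytes for
 * data coming back in the reply can use the delta to avoid copying the
 * unused buffer tail out to userspace along with the request.
 */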

void
puffs_msg_setinfo(struct puffs_msgpark *park, int opclass, int type,
	puffs_cookie_t ck)
{

	park->park_preq->preq_opclass = PUFFSOP_OPCLASS(opclass);
	park->park_preq->preq_optype = type;
	park->park_preq->preq_cookie = ck;
}

void
puffs_msg_setcall(struct puffs_msgpark *park, parkdone_fn donefn, void *donearg)
{

	KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
	park->park_done = donefn;
	park->park_donearg = donearg;
	park->park_flags |= PARKFLAG_CALL;
}
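
/*
 * Illustrative sketch of a complete transaction as seen by an
 * in-kernel caller (hand-rolled here; real callers typically go
 * through wrapper macros).  "foo_msg" and PUFFS_VN_FOO are made up
 * for the example and error handling is omitted.
 */
#if 0
	struct puffs_msgpark *park;
	struct foo_msg *msg;	/* hypothetical message type */
	int error;

	puffs_msgmem_alloc(sizeof(*msg), &park, (void *)&msg, 1);
	puffs_msg_setinfo(park, PUFFSOP_VN, PUFFS_VN_FOO, cookie);
	puffs_msg_enqueue(pmp, park);		/* ship to file server */
	error = puffs_msg_wait(pmp, park);	/* park until the reply */
	puffs_msgmem_release(park);
#endif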

/*
 * kernel-user-kernel waitqueues
 */

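/* Get a message id: unique per mount, monotonically increasing. */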
static uint64_t
puffs_getmsgid(struct puffs_mount *pmp)
{
	uint64_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_nextmsgid++;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * A word about reference counting of parks.  A reference must be taken
 * when accessing a park and additionally when it is on a queue.  So
 * when taking it off a queue and releasing the access reference, the
 * reference count is generally decremented by 2.
 */
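
/*
 * Illustrative sketch (not compiled): a consumer which holds the
 * access reference and also takes the park off a queue drops both
 * references in one go with puffs_msgpark_release1().
 */
#if 0
	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);			/* access ref */
	/* ... decide the park is ours to complete ... */
	TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE1;
	puffs_msgpark_release1(park, 2);	/* queue ref + access ref */
#endif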

void
puffs_msg_enqueue(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	struct lwp *l = curlwp;
	struct puffs_req *preq, *creq;
	ssize_t delta;
#if 1
	int ret;
#endif

	/*
	 * Some clients reuse a park, so reset some flags.  We might
	 * want to provide a caller-side interface for this and add
	 * a few more invariant checks here, but this will do for now.
	 */
	park->park_flags &= ~(PARKFLAG_DONE | PARKFLAG_HASERROR);
	KASSERT((park->park_flags & PARKFLAG_WAITERGONE) == 0);

	preq = park->park_preq;

#if 1
	/* check if we do compat adjustments */
	if (pmp->pmp_docompat) {
		MODULE_HOOK_CALL(puffs_out_50_hook, (preq, &creq, &delta),
		    enosys(), ret);
		if (ret == 0) {
			park->park_creq = park->park_preq;
			park->park_creqlen = park->park_maxlen;

			park->park_maxlen += delta;
			park->park_copylen += delta;
			park->park_preq = preq = creq;
		}
	}
#endif

	preq->preq_buflen = park->park_maxlen;
	KASSERT(preq->preq_id == 0
	    || (preq->preq_opclass & PUFFSOPFLAG_ISRESPONSE));

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0)
		preq->preq_opclass |= PUFFSOPFLAG_FAF;
	else
		preq->preq_id = puffs_getmsgid(pmp);

	/* fill in caller information */
	preq->preq_pid = l->l_proc->p_pid;
	preq->preq_lid = l->l_lid;

	/*
	 * To support cv_sig, yet another movie: check if there are signals
	 * pending and we are issuing a non-FAF.  If so, return an error
	 * directly UNLESS we are issuing INACTIVE/RECLAIM.  In that case,
	 * convert it to a FAF, fire off to the file server and return
	 * an error.  Yes, this is bordering disgusting.  Barfbags are on me.
	 */
	if (__predict_false((park->park_flags & PARKFLAG_WANTREPLY)
	   && (park->park_flags & PARKFLAG_CALL) == 0
	   && (l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))) {
		sigset_t ss;

		/*
		 * see the comment about signals in puffs_msg_wait.
		 */
		sigpending1(l, &ss);
		if (sigismember(&ss, SIGINT) ||
		    sigismember(&ss, SIGTERM) ||
		    sigismember(&ss, SIGKILL) ||
		    sigismember(&ss, SIGHUP) ||
		    sigismember(&ss, SIGQUIT)) {
			park->park_flags |= PARKFLAG_HASERROR;
			preq->preq_rv = EINTR;
			if (PUFFSOP_OPCLASS(preq->preq_opclass) == PUFFSOP_VN
			    && (preq->preq_optype == PUFFS_VN_INACTIVE
			     || preq->preq_optype == PUFFS_VN_RECLAIM)) {
				park->park_preq->preq_opclass |=
				    PUFFSOPFLAG_FAF;
				park->park_flags &= ~PARKFLAG_WANTREPLY;
				DPRINTF(("puffs_msg_enqueue: "
				    "converted to FAF %p\n", park));
			} else {
				return;
			}
		}
	}

	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_RUNNING) {
		mutex_exit(&pmp->pmp_lock);
		park->park_flags |= PARKFLAG_HASERROR;
		preq->preq_rv = ENXIO;
		return;
	}

#ifdef PUFFSDEBUG
	parkqdump(&pmp->pmp_msg_touser, puffsdebug > 1);
	parkqdump(&pmp->pmp_msg_replywait, puffsdebug > 1);
#endif

	/*
	 * Note: we don't need to lock park since we have the only
	 * reference to it at this point.
	 */
	TAILQ_INSERT_TAIL(&pmp->pmp_msg_touser, park, park_entries);
	park->park_flags |= PARKFLAG_ONQUEUE1;
	pmp->pmp_msg_touser_count++;
	park->park_refcount++;

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
	mutex_exit(&pmp->pmp_lock);
	putter_notify(pmp->pmp_pi);

	DPRINTF(("touser: req %" PRIu64 ", preq: %p, park: %p, "
	    "c/t: 0x%x/0x%x, f: 0x%x\n", preq->preq_id, preq, park,
	    preq->preq_opclass, preq->preq_optype, park->park_flags));
}

int
puffs_msg_wait(struct puffs_mount *pmp, struct puffs_msgpark *park)
{
	lwp_t *l = curlwp;
	proc_t *p = l->l_proc;
	struct puffs_req *preq = park->park_preq; /* XXX: hmmm */
	sigset_t ss;
	sigset_t oss;
	int error = 0;
	int rv;

	/*
	 * block unimportant signals.
	 *
	 * The set of "important" signals here was chosen to be the same as
	 * nfs interruptible mount.
	 */
	sigfillset(&ss);
	sigdelset(&ss, SIGINT);
	sigdelset(&ss, SIGTERM);
	sigdelset(&ss, SIGKILL);
	sigdelset(&ss, SIGHUP);
	sigdelset(&ss, SIGQUIT);
	mutex_enter(p->p_lock);
	sigprocmask1(l, SIG_BLOCK, &ss, &oss);
	mutex_exit(p->p_lock);

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(&park->park_mtx);
	/* did the response beat us to the wait? */
	if (__predict_false((park->park_flags & PARKFLAG_DONE)
	    || (park->park_flags & PARKFLAG_HASERROR))) {
		rv = park->park_preq->preq_rv;
		mutex_exit(&park->park_mtx);
		goto skipwait;
	}

	if ((park->park_flags & PARKFLAG_WANTREPLY) == 0
	    || (park->park_flags & PARKFLAG_CALL)) {
		mutex_exit(&park->park_mtx);
		rv = 0;
		goto skipwait;
	}

	error = cv_wait_sig(&park->park_cv, &park->park_mtx);
	DPRINTF(("puffs_touser: waiter for %p woke up with %d\n",
	    park, error));
	if (error) {
		park->park_flags |= PARKFLAG_WAITERGONE;
		if (park->park_flags & PARKFLAG_DONE) {
			rv = preq->preq_rv;
			mutex_exit(&park->park_mtx);
		} else {
			/*
			 * ok, we marked it as going away, but
			 * still need to do queue ops.  take locks
			 * in correct order.
			 *
			 * We don't want to release our reference
			 * if it's on replywait queue to avoid error
			 * to file server.  putop() code will DTRT.
			 */
			mutex_exit(&park->park_mtx);
			mutex_enter(&pmp->pmp_lock);
			mutex_enter(&park->park_mtx);

			/*
			 * Still on queue1?  We can safely remove it
			 * without any consequences since the file
			 * server hasn't seen it.  "else" we need to
			 * wait for the response and just ignore it
			 * to avoid signalling an incorrect error to
			 * the file server.
			 */
			if (park->park_flags & PARKFLAG_ONQUEUE1) {
				TAILQ_REMOVE(&pmp->pmp_msg_touser,
				    park, park_entries);
				puffs_msgpark_release(park);
				pmp->pmp_msg_touser_count--;
				park->park_flags &= ~PARKFLAG_ONQUEUE1;
			} else {
				mutex_exit(&park->park_mtx);
			}
			mutex_exit(&pmp->pmp_lock);

			rv = EINTR;
		}
	} else {
		rv = preq->preq_rv;
		mutex_exit(&park->park_mtx);
	}

 skipwait:
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	mutex_enter(p->p_lock);
	sigprocmask1(l, SIG_SETMASK, &oss, NULL);
	mutex_exit(p->p_lock);

	return rv;
}

/*
 * XXX: this suuuucks.  Hopefully I'll get rid of this lossage once
 * the whole setback-nonsense gets fixed.
 */
int
puffs_msg_wait2(struct puffs_mount *pmp, struct puffs_msgpark *park,
	struct puffs_node *pn1, struct puffs_node *pn2)
{
	struct puffs_req *preq;
	int rv;

	rv = puffs_msg_wait(pmp, park);

	preq = park->park_preq;
	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N1)
		pn1->pn_stat |= PNODE_DOINACT;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_INACT_N2)
		pn2->pn_stat |= PNODE_DOINACT;

	if (pn1 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N1)
		pn1->pn_stat |= PNODE_NOREFS;
	if (pn2 && preq->preq_setbacks & PUFFS_SETBACK_NOREF_N2)
		pn2->pn_stat |= PNODE_NOREFS;

	return rv;

}

/*
 * XXX: lazy bum.  please, for the love of foie gras, fix me.
 * This should *NOT* depend on setfaf.  Also "memcpy" could
 * be done more nicely.
 */
void
puffs_msg_sendresp(struct puffs_mount *pmp, struct puffs_req *origpreq, int rv)
{
	struct puffs_msgpark *park;
	struct puffs_req *preq;

	puffs_msgmem_alloc(sizeof(struct puffs_req), &park, (void *)&preq, 1);
	puffs_msg_setfaf(park);	/* XXXXXX: avoids reqid override */

	memcpy(preq, origpreq, sizeof(struct puffs_req));
	preq->preq_rv = rv;
	preq->preq_opclass |= PUFFSOPFLAG_ISRESPONSE;

	puffs_msg_enqueue(pmp, park);
	puffs_msgmem_release(park);
}

/*
 * Get the next request in the outgoing queue.  "maxsize" controls the
 * size the caller can accommodate and "nonblock" determines whether to
 * block while waiting for input.  Handles all locking internally.
 */
int
puffs_msgif_getout(void *ctx, size_t maxsize, int nonblock,
	uint8_t **data, size_t *dlen, void **parkptr)
{
	struct puffs_mount *pmp = ctx;
	struct puffs_msgpark *park = NULL;
	struct puffs_req *preq = NULL;
	int error;

	error = 0;
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	for (;;) {
		/* RIP? */
		if (pmp->pmp_status != PUFFSTAT_RUNNING) {
			error = ENXIO;
			break;
		}

		/* need platinum yendorian express card? */
		if (TAILQ_EMPTY(&pmp->pmp_msg_touser)) {
			DPRINTF(("puffs_getout: no outgoing op, "));
			if (nonblock) {
				DPRINTF(("returning EWOULDBLOCK\n"));
				error = EWOULDBLOCK;
				break;
			}
			DPRINTF(("waiting ...\n"));

			error = cv_wait_sig(&pmp->pmp_msg_waiter_cv,
			    &pmp->pmp_lock);
			if (error)
				break;
			else
				continue;
		}

		park = TAILQ_FIRST(&pmp->pmp_msg_touser);
		if (park == NULL)
			continue;

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);

		DPRINTF(("puffs_getout: found park at %p, ", park));

		/* If it's a goner, don't process any further */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			DPRINTF(("waitergone!\n"));
			puffs_msgpark_release(park);
			continue;
		}
		preq = park->park_preq;

#if 0
		/* check size */
		/*
		 * XXX: this check is not valid for now, we don't know
		 * the size of the caller's input buffer.  i.e. this
		 * will most likely go away
		 */
		if (maxsize < preq->preq_frhdr.pfr_len) {
			DPRINTF(("buffer too small\n"));
			puffs_msgpark_release(park);
			error = E2BIG;
			break;
		}
#endif

		DPRINTF(("returning\n"));

		/*
		 * Ok, we found what we came for.  Release it from the
		 * outgoing queue but do not unlock.  We will unlock
		 * only after we "releaseout" it to avoid complications:
		 * otherwise it is (theoretically) possible for userland
		 * to race us into "put" before we have a chance to put
		 * this baby on the receiving queue.
		 */
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		mutex_exit(&park->park_mtx);

		pmp->pmp_msg_touser_count--;
		KASSERT(pmp->pmp_msg_touser_count >= 0);

		break;
	}
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	if (error == 0) {
		*data = (uint8_t *)preq;
		preq->preq_pth.pth_framelen = park->park_copylen;
		*dlen = preq->preq_pth.pth_framelen;
		*parkptr = park;
	}

	return error;
}
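
/*
 * Illustrative sketch (an assumption about the driver side, heavily
 * simplified): the putter read path pulls one message out, copies it
 * to userspace and then commits or aborts it via releaseout, so the
 * park lands on the replywait queue only after a successful send.
 */
#if 0
	error = puffs_msgif_getout(pmp, maxsize, nonblock,
	    &data, &dlen, &parkptr);
	if (error == 0) {
		error = copyout_to_server(data, dlen);	/* hypothetical */
		puffs_msgif_releaseout(pmp, parkptr, error);
	}
#endif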

/*
 * Release outgoing structure.  Now, depending on the success of the
 * outgoing send, it is either going onto the result waiting queue
 * or the death chamber.
 */
void
puffs_msgif_releaseout(void *ctx, void *parkptr, int status)
{
	struct puffs_mount *pmp = ctx;
	struct puffs_msgpark *park = parkptr;

	DPRINTF(("puffs_releaseout: returning park %p, errno %d: ",
	    park, status));
	mutex_enter(&pmp->pmp_lock);
	mutex_enter(&park->park_mtx);
	if (park->park_flags & PARKFLAG_WANTREPLY) {
		if (status == 0) {
			DPRINTF(("enqueue replywait\n"));
			TAILQ_INSERT_TAIL(&pmp->pmp_msg_replywait, park,
			    park_entries);
			park->park_flags |= PARKFLAG_ONQUEUE2;
		} else {
			DPRINTF(("error path!\n"));
			park->park_preq->preq_rv = status;
			park->park_flags |= PARKFLAG_DONE;
			cv_signal(&park->park_cv);
		}
		puffs_msgpark_release(park);
	} else {
		DPRINTF(("release\n"));
		puffs_msgpark_release1(park, 2);
	}
	mutex_exit(&pmp->pmp_lock);
}

size_t
puffs_msgif_waitcount(void *ctx)
{
	struct puffs_mount *pmp = ctx;
	size_t rv;

	mutex_enter(&pmp->pmp_lock);
	rv = pmp->pmp_msg_touser_count;
	mutex_exit(&pmp->pmp_lock);

	return rv;
}

/*
 * XXX: locking with this one?
 */
static void
puffsop_msg(void *ctx, struct puffs_req *preq)
{
	struct puffs_mount *pmp = ctx;
	struct putter_hdr *pth = &preq->preq_pth;
	struct puffs_msgpark *park;
	int wgone;

	mutex_enter(&pmp->pmp_lock);

	/* Locate waiter */
	TAILQ_FOREACH(park, &pmp->pmp_msg_replywait, park_entries) {
		if (park->park_preq->preq_id == preq->preq_id)
			break;
	}
	if (park == NULL) {
		DPRINTF(("puffsop_msg: no request: %" PRIu64 "\n",
		    preq->preq_id));
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX send error */
	}

	mutex_enter(&park->park_mtx);
	puffs_msgpark_reference(park);
	if (pth->pth_framelen > park->park_maxlen) {
		DPRINTF(("puffsop_msg: invalid buffer length: "
791 "%" PRIu64 " (req %" PRIu64 ", \n", pth->pth_framelen,
		    preq->preq_id));
		park->park_preq->preq_rv = EPROTO;
		cv_signal(&park->park_cv);
		puffs_msgpark_release1(park, 2);
		mutex_exit(&pmp->pmp_lock);
		return; /* XXX: error */
	}
	wgone = park->park_flags & PARKFLAG_WAITERGONE;

	KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
	TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
	park->park_flags &= ~PARKFLAG_ONQUEUE2;
	mutex_exit(&pmp->pmp_lock);

	if (wgone) {
		DPRINTF(("puffsop_msg: bad service - waiter gone for "
		    "park %p\n", park));
	} else {
#if 1
		if (park->park_creq) {
			struct puffs_req *creq;
			size_t csize;

			KASSERT(pmp->pmp_docompat);
			MODULE_HOOK_CALL_VOID(puffs_in_50_hook,
			    (preq, park->park_creq), __nothing);
			creq = park->park_creq;
			csize = park->park_creqlen;
			park->park_creq = park->park_preq;
			park->park_creqlen = park->park_maxlen;

			park->park_preq = creq;
			park->park_maxlen = csize;

			memcpy(park->park_creq, preq, pth->pth_framelen);
		} else {
#endif
			memcpy(park->park_preq, preq, pth->pth_framelen);
		}

		if (park->park_flags & PARKFLAG_CALL) {
			DPRINTF(("puffsop_msg: call for %p, arg %p\n",
			    park->park_preq, park->park_donearg));
			park->park_done(pmp, preq, park->park_donearg);
		}
	}

	if (!wgone) {
		DPRINTF(("puffs_putop: flagging done for "
		    "park %p\n", park));
		cv_signal(&park->park_cv);
	}

	park->park_flags |= PARKFLAG_DONE;
	puffs_msgpark_release1(park, 2);
}

/*
 * Node expiry.  We come here after an inactive on an unexpired node.
 * The expiry has been queued and is done in sop thread.
 */
static void
puffsop_expire(struct puffs_mount *pmp, puffs_cookie_t cookie)
{
	struct vnode *vp;

	KASSERT(PUFFS_USE_FS_TTL(pmp));

	/*
	 * If it still exists and has no reference,
	 * vrele should cause it to be reclaimed.
	 * Otherwise, we have nothing to do.
	 */
	if (puffs_cookie2vnode(pmp, cookie, &vp) == 0) {
		VPTOPP(vp)->pn_stat &= ~PNODE_SOPEXP;
		vrele(vp);
	}

	return;
}

static void
puffsop_flush(struct puffs_mount *pmp, struct puffs_flush *pf)
{
	struct vnode *vp;
	voff_t offlo, offhi;
	int rv, flags = 0;

	KASSERT(pf->pf_req.preq_pth.pth_framelen == sizeof(struct puffs_flush));

	/* XXX: slurry */
	if (pf->pf_op == PUFFS_INVAL_NAMECACHE_ALL) {
		cache_purgevfs(PMPTOMP(pmp));
		rv = 0;
		goto out;
	}

	/*
	 * Get vnode, don't lock it.  Namecache is protected by its own lock
	 * and we have a reference to protect against premature harvesting.
	 *
	 * The node we want here might be locked and the op is in
	 * userspace waiting for us to complete ==> deadlock.  Another
	 * reason we need to eventually bump locking to userspace, as we
	 * will need to lock the node if we wish to do flushes.
	 */
	rv = puffs_cookie2vnode(pmp, pf->pf_cookie, &vp);
	if (rv) {
		if (rv == PUFFS_NOSUCHCOOKIE)
			rv = ENOENT;
		goto out;
	}

	switch (pf->pf_op) {
#if 0
	/* not quite ready, yet */
	case PUFFS_INVAL_NAMECACHE_NODE:
	struct componentname *pf_cn;
	char *name;
		/* get comfortab^Wcomponentname */
		pf_cn = kmem_alloc(sizeof(struct componentname), KM_SLEEP);
		memset(pf_cn, 0, sizeof(struct componentname));
		break;

#endif
	case PUFFS_INVAL_NAMECACHE_DIR:
		if (vp->v_type != VDIR) {
			rv = EINVAL;
			break;
		}
		cache_purge1(vp, NULL, 0, PURGE_CHILDREN);
		break;

	case PUFFS_INVAL_PAGECACHE_NODE_RANGE:
		flags = PGO_FREE;
		/*FALLTHROUGH*/
	case PUFFS_FLUSH_PAGECACHE_NODE_RANGE:
		if (flags == 0)
			flags = PGO_CLEANIT;

		if (pf->pf_end > vp->v_size || vp->v_type != VREG) {
			rv = EINVAL;
			break;
		}

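		/*
		 * Page-align the range outward; e.g. with 4k pages,
		 * pf_start == 100 and pf_end == 5000 yield [0, 8192).
		 */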
		offlo = trunc_page(pf->pf_start);
		offhi = round_page(pf->pf_end);
		if (offhi != 0 && offlo >= offhi) {
			rv = EINVAL;
			break;
		}

		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		rv = VOP_PUTPAGES(vp, offlo, offhi, flags);
		break;

	default:
		rv = EINVAL;
	}

	vrele(vp);

 out:
	puffs_msg_sendresp(pmp, &pf->pf_req, rv);
}

int
puffs_msgif_dispatch(void *ctx, struct putter_hdr *pth)
{
	struct puffs_mount *pmp = ctx;
	struct puffs_req *preq = (struct puffs_req *)pth;
	struct puffs_sopreq *psopr;

	if (pth->pth_framelen < sizeof(struct puffs_req)) {
		puffs_msg_sendresp(pmp, preq, EINVAL); /* E2SMALL */
		return 0;
	}

	switch (PUFFSOP_OPCLASS(preq->preq_opclass)) {
	case PUFFSOP_VN:
	case PUFFSOP_VFS:
		DPRINTF(("dispatch: vn/vfs message 0x%x\n", preq->preq_optype));
		puffsop_msg(pmp, preq);
		break;

	case PUFFSOP_FLUSH: /* process in sop thread */
	{
		struct puffs_flush *pf;

		DPRINTF(("dispatch: flush 0x%x\n", preq->preq_optype));

		if (preq->preq_pth.pth_framelen != sizeof(struct puffs_flush)) {
			puffs_msg_sendresp(pmp, preq, EINVAL); /* E2SMALL */
			break;
		}
		pf = (struct puffs_flush *)preq;

		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		memcpy(&psopr->psopr_pf, pf, sizeof(*pf));
		psopr->psopr_sopreq = PUFFS_SOPREQ_FLUSH;

		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			puffs_msg_sendresp(pmp, preq, ENXIO);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
			mutex_exit(&pmp->pmp_sopmtx);
		}
		break;
	}

	case PUFFSOP_UNMOUNT: /* process in sop thread */
	{

		DPRINTF(("dispatch: unmount 0x%x\n", preq->preq_optype));

		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_preq = *preq;
		psopr->psopr_sopreq = PUFFS_SOPREQ_UNMOUNT;

		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			puffs_msg_sendresp(pmp, preq, ENXIO);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
			mutex_exit(&pmp->pmp_sopmtx);
		}
		break;
	}

	default:
		DPRINTF(("dispatch: invalid opclass 0x%x\n", preq->preq_opclass));
		puffs_msg_sendresp(pmp, preq, EOPNOTSUPP);
		break;
	}

	return 0;
}

/*
 * Work loop for thread processing all ops from server which
 * cannot safely be handled in caller context.  This includes
 * everything which might need a lock currently "held" by the file
 * server, i.e. a long-term kernel lock which will be released only
 * once the file server acknowledges a request.
 */
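
/*
 * TIMED_OUT compares tick counts with wraparound-safe unsigned
 * arithmetic: e.g. with getticks() == 5 just after a 32-bit wrap and
 * expire == 0xfffffffb, the difference is (int)10 > 0, i.e. expired.
 */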
#define TIMED_OUT(expire) \
    ((int)((unsigned int)getticks() - (unsigned int)expire) > 0)
void
puffs_sop_thread(void *arg)
{
	struct puffs_mount *pmp = arg;
	struct mount *mp = PMPTOMP(pmp);
	struct puffs_sopreq *psopr;
	bool keeprunning;
	bool unmountme = false;
	int timeo;

	timeo = PUFFS_USE_FS_TTL(pmp) ? puffs_sopreq_expire_timeout : 0;

	mutex_enter(&pmp->pmp_sopmtx);
	for (keeprunning = true; keeprunning; ) {
		/*
		 * We have a fast queue for flush and umount, and a node
		 * queue for delayed node reclaims.  Requests on the node
		 * queue are not honoured before the clock reaches
		 * psopr_at.  This code assumes that requests are
		 * ordered by psopr_at.
		 */
		do {
			psopr = TAILQ_FIRST(&pmp->pmp_sopfastreqs);
			if (psopr != NULL) {
				TAILQ_REMOVE(&pmp->pmp_sopfastreqs,
				    psopr, psopr_entries);
				break;
			}

			psopr = TAILQ_FIRST(&pmp->pmp_sopnodereqs);
			if ((psopr != NULL) && TIMED_OUT(psopr->psopr_at)) {
				TAILQ_REMOVE(&pmp->pmp_sopnodereqs,
				    psopr, psopr_entries);
				break;
			}

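			/*
			 * timeo == 0 means no timeout: without TTL use
			 * there is nothing to expire and we sleep until
			 * signalled.
			 */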
			cv_timedwait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx, timeo);
		} while (1 /* CONSTCOND */);

		mutex_exit(&pmp->pmp_sopmtx);

		switch (psopr->psopr_sopreq) {
		case PUFFS_SOPREQSYS_EXIT:
			keeprunning = false;
			break;
		case PUFFS_SOPREQ_FLUSH:
			puffsop_flush(pmp, &psopr->psopr_pf);
			break;
		case PUFFS_SOPREQ_EXPIRE:
			puffsop_expire(pmp, psopr->psopr_ck);
			break;
		case PUFFS_SOPREQ_UNMOUNT:
			puffs_msg_sendresp(pmp, &psopr->psopr_preq, 0);

			unmountme = true;
			keeprunning = false;

			/*
			 * We know the mountpoint is still alive because
			 * the thread that is us (poetic?) is still alive.
			 */
			vfs_ref(mp);
			break;
		}

		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	/*
	 * Purge remaining ops.
	 */
	while ((psopr = TAILQ_FIRST(&pmp->pmp_sopfastreqs)) != NULL) {
		TAILQ_REMOVE(&pmp->pmp_sopfastreqs, psopr, psopr_entries);
		mutex_exit(&pmp->pmp_sopmtx);
		puffs_msg_sendresp(pmp, &psopr->psopr_preq, ENXIO);
		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	while ((psopr = TAILQ_FIRST(&pmp->pmp_sopnodereqs)) != NULL) {
		TAILQ_REMOVE(&pmp->pmp_sopnodereqs, psopr, psopr_entries);
		mutex_exit(&pmp->pmp_sopmtx);
		KASSERT(psopr->psopr_sopreq == PUFFS_SOPREQ_EXPIRE);
		kmem_free(psopr, sizeof(*psopr));
		mutex_enter(&pmp->pmp_sopmtx);
	}

	pmp->pmp_sopthrcount--;
	cv_broadcast(&pmp->pmp_sopcv);
	mutex_exit(&pmp->pmp_sopmtx); /* not allowed to access fs after this */

	/*
	 * If unmount was requested, we can now safely do it here, since
	 * our context is dead from the point-of-view of puffs_unmount()
	 * and we are just another thread.  dounmount() makes internally
	 * sure that VFS_UNMOUNT() isn't called reentrantly and that it
	 * is eventually completed.
	 */
	if (unmountme) {
		(void)dounmount(mp, MNT_FORCE, curlwp);
		vfs_rele(mp);
	}

	kthread_exit(0);
}

int
puffs_msgif_close(void *ctx)
{
	struct puffs_mount *pmp = ctx;
	struct mount *mp = PMPTOMP(pmp);

	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);

	/*
	 * Free the waiting callers before proceeding any further.
	 * The syncer might be jogging around in this file system
	 * currently.  If we allow it to go to the userspace of no
	 * return while trying to get the syncer lock, well ...
	 */
	puffs_userdead(pmp);

	/*
	 * Make sure someone from puffs_unmount() isn't currently in
	 * userspace.  If we don't take this precautionary step,
	 * they might notice that the mountpoint has disappeared
	 * from under them once they return.  Especially note that we
	 * cannot simply test for an unmounter before calling
	 * dounmount(), since it might be possible that that particular
	 * invocation of unmount was called without MNT_FORCE.  Here we
	 * *must* make sure unmount succeeds.  Also, restart is necessary
	 * since pmp isn't locked.  We might end up with PUTTER_DEAD after
	 * restart and exit from there.
	 */
	if (pmp->pmp_unmounting) {
		cv_wait(&pmp->pmp_unmounting_cv, &pmp->pmp_lock);
		puffs_mp_release(pmp);
		mutex_exit(&pmp->pmp_lock);
		DPRINTF(("puffs_fop_close: unmount was in progress for pmp %p, "
		    "restart\n", pmp));
		return ERESTART;
	}

	/* Won't access pmp from here anymore */
	vfs_ref(mp);
	puffs_mp_release(pmp);
	mutex_exit(&pmp->pmp_lock);

	/* Detach from VFS. */
	(void)dounmount(mp, MNT_FORCE, curlwp);
	vfs_rele(mp);

	return 0;
}

/*
 * We're dead, kaput, RIP, slightly more than merely pining for the
 * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
 * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
 *
 * Caller must hold puffs mutex.
 */
void
puffs_userdead(struct puffs_mount *pmp)
{
	struct puffs_msgpark *park, *park_next;

	/*
	 * Mark filesystem status as dying so that operations don't
	 * attempt to march to userspace any longer.
	 */
	pmp->pmp_status = PUFFSTAT_DYING;

	/* signal waiters on REQUEST TO file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_touser); park; park = park_next) {

		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE1);
		TAILQ_REMOVE(&pmp->pmp_msg_touser, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE1;
		pmp->pmp_msg_touser_count--;

		/*
		 * Even though waiters on QUEUE1 are removed in touser()
		 * in case of WAITERGONE, it is still possible for us to
		 * get raced here due to having to retake locks in said
		 * touser().  In the race case simply "ignore" the item
		 * on the queue and move on to the next one.
		 */
		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			KASSERT(park->park_flags & PARKFLAG_WANTREPLY);
			puffs_msgpark_release(park);

		} else {
			park->park_preq->preq_rv = ENXIO;

			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else if ((park->park_flags & PARKFLAG_WANTREPLY) == 0) {
				puffs_msgpark_release1(park, 2);
			} else {
				park->park_preq->preq_rv = ENXIO;
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	/* signal waiters on RESPONSE FROM file server queue */
	for (park = TAILQ_FIRST(&pmp->pmp_msg_replywait); park; park = park_next) {
		mutex_enter(&park->park_mtx);
		puffs_msgpark_reference(park);
		park_next = TAILQ_NEXT(park, park_entries);

		KASSERT(park->park_flags & PARKFLAG_ONQUEUE2);
		KASSERT(park->park_flags & PARKFLAG_WANTREPLY);

		TAILQ_REMOVE(&pmp->pmp_msg_replywait, park, park_entries);
		park->park_flags &= ~PARKFLAG_ONQUEUE2;

		if (park->park_flags & PARKFLAG_WAITERGONE) {
			KASSERT((park->park_flags & PARKFLAG_CALL) == 0);
			puffs_msgpark_release(park);
		} else {
			park->park_preq->preq_rv = ENXIO;
			if (park->park_flags & PARKFLAG_CALL) {
				park->park_done(pmp, park->park_preq,
				    park->park_donearg);
				puffs_msgpark_release1(park, 2);
			} else {
				cv_signal(&park->park_cv);
				puffs_msgpark_release(park);
			}
		}
	}

	cv_broadcast(&pmp->pmp_msg_waiter_cv);
}
1294