/*	$NetBSD: coda_psdev.c,v 1.52 2014/03/16 05:20:26 dholland Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and the Minicache in Mach 2.6.  They used to be in
 * cfs_subr.c, but I moved them to make it easier to port the Minicache
 * without porting Coda. -- DCS 10/12/94
 *
 * The following code depends on file-system CODA.
 */

/* These routines are the device entry points for Venus. */
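
/*
 * Illustrative only, not part of this driver: a minimal sketch of how a
 * Venus-like daemon in userspace might drive these entry points.  The
 * device path and buffer size are assumptions made for the sketch; the
 * message layout follows the coda_in_hdr/coda_out_hdr headers used
 * throughout this file.
 *
 *	int fd = open("/dev/cfs0", O_RDWR);	(device node name assumed)
 *	char buf[8192];		("Size is at most 5000 bytes", see struct vmsg)
 *	for (;;) {
 *		(wait for POLLIN; serviced by vc_nb_poll/vc_nb_kqfilter)
 *		ssize_t n = read(fd, buf, sizeof(buf));		(vc_nb_read)
 *		if (n <= 0)
 *			continue;
 *		struct coda_in_hdr *in = (struct coda_in_hdr *)buf;
 *		(service in->opcode, then build a reply whose first two
 *		 ints echo the opcode and in->unique, followed by the
 *		 opcode-specific results)
 *		write(fd, reply, replylen);			(vc_nb_write)
 *	}
 */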

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.52 2014/03/16 05:20:26 dholland Exp $");

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifndef _KERNEL_OPT
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>
#include <sys/atomic.h>
#include <sys/module.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#endif

int coda_kernel_version = CODA_KERNEL_VERSION;

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	.d_open = vc_nb_open,
	.d_close = vc_nb_close,
	.d_read = vc_nb_read,
	.d_write = vc_nb_write,
	.d_ioctl = vc_nb_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = vc_nb_poll,
	.d_mmap = nommap,
	.d_kqfilter = vc_nb_kqfilter,
	.d_flag = D_OTHER,
};

struct vmsg {
    TAILQ_ENTRY(vmsg) vm_chain;
    void *	 vm_data;
    u_short	 vm_flags;
    u_short      vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    int		 vm_unique;
    void *	 vm_sleep;	/* Not used by Mach. */
};

struct coda_mntinfo coda_mnttbl[NVCODA];

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4
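
/*
 * Rough lifecycle of a struct vmsg, as implemented below: coda_call()
 * allocates one, appends it to vc_requests and sleeps on vm_sleep;
 * vc_nb_read() copies the request out to Venus, sets VM_READ and moves
 * the message to vc_replies; vc_nb_write() copies the reply back in,
 * sets VM_WRITE and wakes the sleeper; coda_call() then frees it.
 * VM_INTR is defined alongside the other flags but is not set in this
 * file.
 */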

/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev_t dev, int flag, int mode,
    struct lwp *l)
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    selinit(&vcp->vc_selproc);
    TAILQ_INIT(&vcp->vc_requests);
    TAILQ_INIT(&vcp->vc_replies);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_mntinfo *mi;
    int                 err;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device.  This frees the user
     * or sysadmin from having to remember where all the mount points
     * are located.  Put this before the WAKEUPs to avoid queuing new
     * messages between the WAKEUP and the unmount (which can happen if
     * we're unlucky).
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close with no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real. */
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    coda_unmounting(mi->mi_vfsp);

    /* Wake up clients so they can return. */
    while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

	/* Free signal request messages and don't wake up, because
	   no one is waiting for them. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	    CODA_FREE(vmp, sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }
232     while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
233 	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
234 
235 	outstanding_upcalls++;
236 	wakeup(&vmp->vm_sleep);
237     }
238 
239     MARK_VC_CLOSED(vcp);
240 
241     if (outstanding_upcalls) {
242 #ifdef	CODA_VERBOSE
243 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
244     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
245 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
246 #else
247     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
248 #endif
249     }
250 
251     err = dounmount(mi->mi_vfsp, flag, l);
252     if (err)
253 	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
254 	           err, (unsigned long long)minor(dev)));
255     seldestroy(&vcp->vc_selproc);
256     return 0;
257 }
258 
259 int
260 vc_nb_read(dev_t dev, struct uio *uiop, int flag)
261 {
262     struct vcomm *	vcp;
263     struct vmsg *vmp;
264     int error = 0;
265 
266     ENTRY;
267 
268     if (minor(dev) >= NVCODA)
269 	return(ENXIO);
270 
271     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
272 
273     /* Get message at head of request queue. */
274     vmp = TAILQ_FIRST(&vcp->vc_requests);
275     if (vmp == NULL)
276 	return(0);	/* Nothing to read */
277 
278     /* Move the input args into userspace */
279     uiop->uio_rw = UIO_READ;
280     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
281     if (error) {
282 	myprintf(("vcread: error (%d) on uiomove\n", error));
283 	error = EINVAL;
284     }
285 
286     TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
287 
288     /* If request was a signal, free up the message and don't
289        enqueue it in the reply queue. */
290     if (vmp->vm_opcode == CODA_SIGNAL) {
291 	if (codadebug)
292 	    myprintf(("vcread: signal msg (%d, %d)\n",
293 		      vmp->vm_opcode, vmp->vm_unique));
294 	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
295 	CODA_FREE(vmp, sizeof(struct vmsg));
296 	return(error);
297     }
298 
299     vmp->vm_flags |= VM_READ;
300     TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);
301 
302     return(error);
303 }
304 
305 int
306 vc_nb_write(dev_t dev, struct uio *uiop, int flag)
307 {
308     struct vcomm *	vcp;
309     struct vmsg *vmp;
310     struct coda_out_hdr *out;
311     u_long seq;
312     u_long opcode;
313     int tbuf[2];
314     int error = 0;
315 
316     ENTRY;
317 
318     if (minor(dev) >= NVCODA)
319 	return(ENXIO);
320 
321     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
322 
    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(tbuf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = tbuf[0];
    seq = tbuf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
	if (vmp->vm_unique == seq) break;
    }

    if (vmp == NULL) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize	= tbuf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

int
vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
    	break;
    default:
	return(EINVAL);
	break;
    }
}
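
/*
 * Example (illustrative only, derived from the CIOC_KERNEL_VERSION cases
 * above): a userspace client holding a descriptor on the device can
 * either fetch the interface version or test for one it understands.
 * The variable names are hypothetical.
 *
 *	u_int vers = 0;
 *	ioctl(fd, CIOC_KERNEL_VERSION, &vers);	(vers now holds
 *						 coda_kernel_version)
 *	vers = 2;
 *	ioctl(fd, CIOC_KERNEL_VERSION, &vers);	(succeeds only if the
 *						 kernel version is 2,
 *						 otherwise fails with
 *						 ENOENT; only 1 and 2
 *						 are accepted as queries)
 */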

int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!TAILQ_EMPTY(&vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(l, &(vcp->vc_selproc));

    return(0);
}

static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return (0);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if the process exits abnormally
 * (e.g. kill -9).
 */

int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize = *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the tsleep return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that Emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle the
	   "signal", and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
	    	break;
	    mutex_enter(p->p_lock);
	    if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigispending(l, SIGIO)) {
		    sigaddset(&l->l_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigispending(l, SIGALRM)) {
		    sigaddset(&l->l_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigpend.sp_set;	/* array assignment */
		    sigminusset(&l->l_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    mutex_exit(p->p_lock);
		    break;
#ifdef	notyet
		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
	    }
	    mutex_exit(p->p_lock);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
 	/* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
		error = EINTR;
	    }

	    else {
		/* VM_READ is set but VM_WRITE is not: we were interrupted
		   after Venus started the upcall, so send Venus a signal
		   message. */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue */
		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
		selnotify(&(vcp->vc_selproc), 0, 0);
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
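
/*
 * Illustrative only: the calling convention coda_call() expects of its
 * users elsewhere in the Coda code, inferred from the header fields
 * referenced above.  The names below are placeholders, not a claim about
 * any particular upcall.
 *
 *	char buffer[...];	(holds the request, then the reply)
 *	int insize = ..., outsize = sizeof(buffer);
 *	((struct coda_in_hdr *)buffer)->opcode = (some CODA_* opcode);
 *	(opcode-specific input arguments follow the header)
 *	error = coda_call(mi, insize, &outsize, buffer);
 *	(on success the reply is in the same buffer, its coda_out_hdr
 *	 result has been folded into the return value, and outsize has
 *	 been updated from the reply)
 */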

MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);

static int
vcoda_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
	{
		int cmajor, dmajor;
		vcodaattach(NVCODA);

		dmajor = cmajor = -1;
		return devsw_attach("vcoda", NULL, &dmajor,
		    &vcoda_cdevsw, &cmajor);
	}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		{
			for (size_t i = 0; i < NVCODA; i++) {
				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
				if (VC_OPEN(vcp))
					return EBUSY;
			}
			return devsw_detach(NULL, &vcoda_cdevsw);
		}
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}
	return error;
}