/*	$NetBSD: coda_psdev.c,v 1.40 2008/03/01 17:26:07 plunky Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.  */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 *
 * The following code depends on the CODA file system.
 */

/* These routines are the device entry points for Venus. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.40 2008/03/01 17:26:07 plunky Exp $");

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static
int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter, D_OTHER,
};

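/*
 * A vmsg describes one outstanding upcall to Venus.  It sits on
 * vc_requests until Venus reads it (vc_nb_read), then moves to
 * vc_replys until Venus writes the reply back (vc_nb_write), at which
 * point the sleeper in coda_call() is woken via vm_sleep.
 */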
struct vmsg {
    struct queue vm_chain;
    void *	 vm_data;
    u_short	 vm_flags;
    u_short      vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    int		 vm_unique;
    void *	 vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev_t dev, int flag, int mode,
    struct lwp *l)
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    selinit(&vcp->vc_selproc);
    INIT_QUEUE(vcp->vc_requests);
    INIT_QUEUE(vcp->vc_replys);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int                 err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by
     * auto-unmounting any vfs mounted via this device.  This frees the
     * user or sysadmin from having to remember where all the mount
     * points are located.  Put this before the WAKEUPs to avoid queuing
     * new messages between the WAKEUP and the unmount (which can happen
     * if we're unlucky).
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close with no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real. */
    /*
     * XXX Freeze the syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    mutex_enter(&syncer_mutex);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, RW_WRITER, NULL)) {
	mutex_exit(&syncer_mutex);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
    	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wake up, because
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	    CODA_FREE(vmp, sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
    	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, l);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
	           err, minor(dev)));
    seldestroy(&vcp->vc_selproc);
    return 0;
}

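/*
 * vc_nb_read: Venus reads the next pending upcall.  The message at the
 * head of the request queue is copied out to Venus and moved to the
 * reply queue, where it waits for the matching vc_nb_write().  Signal
 * messages need no reply and are freed as soon as they are read.
 */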
int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
	return(0);	/* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

#ifdef OLD_DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
	panic("vc_nb_read: bad chain");
#endif

    REMQUE(vmp->vm_chain);

    /* If request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	CODA_FREE(vmp, sizeof(struct vmsg));
	return(error);
    }

    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}

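/*
 * vc_nb_write: Venus writes a message back to the kernel.  Downcalls
 * are handed to handleDownCall(); everything else is a reply, matched
 * by uniquifier against the reply queue, copied into the waiting
 * request's buffer, and the sleeper in coda_call() is woken.
 */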
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int tbuf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(tbuf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = tbuf[0];
    seq = tbuf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize	= tbuf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

int
vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
    	break;
    default :
	return(EINVAL);
	break;
    }
}

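/*
 * vc_nb_poll: the device is readable whenever an upcall is waiting on
 * the request queue; otherwise record the poller so that the
 * selnotify() in coda_call() can wake it when a request is queued.
 */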
int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!EMPTY(vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(l, &(vcp->vc_selproc));

    return(0);
}

static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	if (EMPTY(vcp->vc_requests))
		return (0);

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

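/*
 * coda_call: send an upcall to Venus and wait for the reply.  The
 * request is queued on vc_requests, Venus is poked via selnotify(),
 * and we sleep on vm_sleep until vc_nb_write() posts the reply
 * (VM_WRITE set).  If we are interrupted before Venus reads the
 * request we simply dequeue it; if Venus has already read it we queue
 * a CODA_SIGNAL message so Venus can abort the operation.  If Venus
 * closed the device while we slept, return ENODEV.
 */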
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a Venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle the
	   "signal", and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
	    	break;
	    mutex_enter(&p->p_smutex);
	    if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
    	    } else if (sigispending(l, SIGIO)) {
		    sigaddset(&l->l_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
    	    } else if (sigispending(l, SIGALRM)) {
		    sigaddset(&l->l_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigpend.sp_set;	/* array assignment */
		    sigminusset(&l->l_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    mutex_exit(&p->p_smutex);
		    break;
#ifdef	notyet
		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
	    }
	    mutex_exit(&p->p_smutex);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
 	/* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
                   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selnotify(&(vcp->vc_selproc), 0, 0);
	    }
	}

	else {	/* If Venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}