/*	$NetBSD: coda_psdev.c,v 1.20 2002/09/06 13:18:43 gehenna Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.  */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */

/* These routines are the device entry points for Venus. */
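
/*
 * Roughly, a user-space cache manager (Venus) drives this device as
 * follows; the device path and mount arguments here are illustrative
 * only:
 *
 *	fd = open("/dev/cfs0", O_RDWR);          vc_nb_open()
 *	mount the Coda file system on this minor
 *	for (;;) {
 *		poll(fd, POLLIN)                  vc_nb_poll()
 *		read(fd, ...)                     vc_nb_read(): next upcall
 *		service the request
 *		write(fd, ...)                    vc_nb_write(): the reply
 *	}
 *	close(fd);                                vc_nb_close(): forces unmount
 */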

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.20 2002/09/06 13:18:43 gehenna Exp $");

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static
int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);

const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap,
};

struct vmsg {
    struct queue vm_chain;
    caddr_t	 vm_data;
    u_short	 vm_flags;
    u_short      vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    int		 vm_unique;
    caddr_t	 vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4
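
/*
 * Lifecycle of a vmsg: coda_call() queues one on vc_requests and sleeps
 * on vm_sleep; vc_nb_read() copies it out to Venus, marks it VM_READ and
 * moves it to vc_replys; vc_nb_write() matches the reply by vm_unique,
 * copies the result back into vm_data, marks it VM_WRITE and wakes the
 * sleeper.
 */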

/* vcodaattach: do nothing */
void
vcodaattach(n)
    int n;
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;             /* NetBSD only */
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
    INIT_QUEUE(vcp->vc_requests);
    INIT_QUEUE(vcp->vc_replys);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

int
vc_nb_close (dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int                 err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device.  This frees the user
     * or sysadmin from having to remember where all mount points are
     * located.  Put this before the WAKEUPs to avoid queuing new
     * messages between the WAKEUP and the unmount (which can happen if
     * we're unlucky).
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close with no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze the syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0)) {
	lockmgr(&syncer_lock, LK_RELEASE, NULL);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wake anyone up, because
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, p);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
	           err, minor(dev)));
    return 0;
}

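/*
 * vc_nb_read: Venus reads the upcall at the head of vc_requests.  A
 * CODA_SIGNAL message is freed once read; anything else is marked
 * VM_READ and parked on vc_replys to await the matching write.
 */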
int
vc_nb_read(dev, uiop, flag)
    dev_t        dev;
    struct uio  *uiop;
    int          flag;
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
	return(0);	/* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

#ifdef OLD_DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
	panic("vc_nb_read: bad chain");
#endif

    REMQUE(vmp->vm_chain);

    /* If request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	return(error);
    }

    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}

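/*
 * vc_nb_write: Venus writes back either a downcall, which is handed to
 * handleDownCall() right away, or a reply, which is matched against
 * vc_replys by uniquifier and copied into the original caller's buffer
 * before the caller is woken up.
 */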
int
vc_nb_write(dev, uiop, flag)
    dev_t        dev;
    struct uio  *uiop;
    int          flag;
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

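/*
 * vc_nb_ioctl: control operations on the pseudo device: CODARESIZE
 * resizes the name cache, CODASTATS gathers name cache statistics,
 * CODAPRINT dumps the name cache (both only if coda_nc_use is set), and
 * CIOC_KERNEL_VERSION queries or checks the kernel/Venus interface
 * version.
 */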
int
vc_nb_ioctl(dev, cmd, addr, flag, p)
    dev_t         dev;
    u_long        cmd;
    caddr_t       addr;
    int           flag;
    struct proc  *p;
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
	break;
    default:
	return(EINVAL);
	break;
    }
}

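/*
 * vc_nb_poll: the device is readable whenever vc_requests is non-empty;
 * otherwise the caller is recorded with selrecord() and will be woken
 * by the selwakeup() in coda_call().
 */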
int
vc_nb_poll(dev, events, p)
    dev_t         dev;
    int           events;
    struct proc  *p;
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!EMPTY(vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(p, &(vcp->vc_selproc));

    return(0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU-EMACS completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */
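/*
 * The compromise used below (under CTL_C) is to sleep with PCATCH in
 * two-second slices for at most 128 iterations, masking SIGIO and
 * SIGALRM and retrying, and giving up the wait on any other signal.
 */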

int
coda_call(mntinfo, inSize, outSize, buffer)
     struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct proc *p = curproc;
	sigset_t psig_omask;
	int i;
	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selwakeup(&(vcp->vc_selproc));

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */
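	/* Concretely, the flag checks after the sleep below implement this:
	 * VM_WRITE set means the reply arrived; VM_READ clear means Venus
	 * never read the request, so it is just dequeued; VM_READ set but
	 * VM_WRITE clear means Venus is working on it, so a CODA_SIGNAL
	 * message is queued to ask Venus to abort the operation. */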

	/* Ignore the return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle the
	   "signal" and then go back to sleep.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
		break;
	    else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
		    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    break;
#ifdef	notyet
		    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
	    }
	} while (error && i++ < 128 && VC_OPEN(vcp));
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	    /* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
                   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selwakeup(&(vcp->vc_selproc));
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}