/*	$NetBSD: coda_psdev.c,v 1.17 2001/07/18 16:12:31 thorpej Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */

/* These routines are the device entry points for Venus. */

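/*
 * Rough sketch of the userspace side of the protocol, for orientation
 * only.  The device path and buffer handling below are illustrative
 * assumptions, not part of this file: the cache manager opens the
 * device, waits for a request, reads it, services it, and writes back
 * a reply (or a downcall).
 *
 *	int fd = open("/dev/cfs0", O_RDWR);
 *	for (;;) {
 *		struct pollfd pfd = { fd, POLLIN, 0 };
 *		poll(&pfd, 1, INFTIM);		   // vc_nb_poll() says a request is queued
 *		n = read(fd, inbuf, sizeof(inbuf));  // vc_nb_read() hands over one upcall
 *		...service the request...
 *		write(fd, outbuf, outlen);	   // vc_nb_write() matches the reply by uniquifier
 *	}
 */
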
extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>
#include <coda/coda_psdev.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static
int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))

void vcodaattach(int n);

struct vmsg {
    struct queue vm_chain;
    caddr_t	 vm_data;
    u_short	 vm_flags;
    u_short      vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    int		 vm_unique;
    caddr_t	 vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

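/*
 * Life cycle of a vmsg: coda_call() fills one in, queues it on
 * vc_requests and sleeps on vm_sleep.  vc_nb_read() copies it out to
 * Venus, sets VM_READ and moves it to vc_replys.  vc_nb_write() copies
 * the answer back in, sets VM_WRITE and wakes the sleeper.  (VM_INTR is
 * defined but not set in this file.)
 */
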
/* vcodaattach: do nothing */
void
vcodaattach(n)
    int n;
{
}

/*
 * These functions are written for NetBSD.
 */
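/*
 * vc_nb_open: open one unit of the pseudo-device.  Initializes the name
 * cache on first use, refuses a second open of the same minor with
 * EBUSY, and resets the unit's request/reply queues and mount info.
 */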
int
vc_nb_open(dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;             /* NetBSD only */
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
    INIT_QUEUE(vcp->vc_requests);
    INIT_QUEUE(vcp->vc_replys);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

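/*
 * vc_nb_close: Venus is going away.  Mark any filesystem mounted through
 * this unit as unmounting, wake every client sleeping on an outstanding
 * upcall so it can return, and finally unmount the vfs via dounmount().
 */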
int
vc_nb_close (dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int                 err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device. This frees the user or
     * sysadmin from having to remember where all mount points are located.
     * Put this before WAKEUPs to avoid queuing new messages between
     * the WAKEUP and the unmount (which can happen if we're unlucky)
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close with no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0)) {
	lockmgr(&syncer_lock, LK_RELEASE, NULL);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
    	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wake up, since
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
    	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, p);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
	           err, minor(dev)));
    return 0;
}

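/*
 * vc_nb_read: hand Venus the message at the head of the request queue.
 * The request is copied out, removed from vc_requests and, unless it is
 * a CODA_SIGNAL (which is simply freed), marked VM_READ and put on
 * vc_replys to wait for the answer.
 */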
int
vc_nb_read(dev, uiop, flag)
    dev_t        dev;
    struct uio  *uiop;
    int          flag;
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
	return(0);	/* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

#ifdef OLD_DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
	panic("vc_nb_read: bad chain");
#endif

    REMQUE(vmp->vm_chain);

    /* If request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	return(error);
    }

    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}

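/*
 * vc_nb_write: Venus writes either a downcall, which is handed straight
 * to handleDownCall(), or a reply to an earlier upcall.  A reply is
 * matched by its uniquifier on vc_replys, copied into the caller's
 * buffer, marked VM_WRITE, and the sleeping caller is woken.
 */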
int
vc_nb_write(dev, uiop, flag)
    dev_t        dev;
    struct uio  *uiop;
    int          flag;
{
    struct vcomm *	vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

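/*
 * vc_nb_ioctl: control operations on the device: resize the name cache
 * (CODARESIZE), gather or print its statistics (CODASTATS, CODAPRINT),
 * and negotiate the kernel/Venus interface version (CIOC_KERNEL_VERSION).
 */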
int
vc_nb_ioctl(dev, cmd, addr, flag, p)
    dev_t         dev;
    u_long        cmd;
    caddr_t       addr;
    int           flag;
    struct proc  *p;
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
    	break;
    default:
	return(EINVAL);
	break;
    }
}

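/*
 * vc_nb_poll: report the device readable when the request queue is
 * non-empty; otherwise record the poller so coda_call() can selwakeup()
 * it when a request is queued.
 */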
int
vc_nb_poll(dev, events, p)
    dev_t         dev;
    int           events;
    struct proc  *p;
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!EMPTY(vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(p, &(vcp->vc_selproc));

    return(0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU-EMACS completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

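/*
 * coda_call: perform one upcall to Venus.  The request in `buffer' is
 * wrapped in a vmsg, queued on vc_requests, and the caller sleeps until
 * Venus replies, the sleep is interrupted, or Venus dies.  On success
 * *outSize is set to the reply size and the result field of the out
 * header is returned.
 */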
int
coda_call(mntinfo, inSize, outSize, buffer)
     struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct proc *p = curproc;
	sigset_t psig_omask;
	int i;
	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selwakeup(&(vcp->vc_selproc));

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore return; we have to check anyway */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal", and then go to sleep again.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
	    	break;
	    else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
    	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
    	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
		    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    break;
#ifdef	notyet
		    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
	    }
	} while (error && i++ < 128 && VC_OPEN(vcp));
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
 	/* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
                   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selwakeup(&(vcp->vc_selproc));
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}