1 /*	$NetBSD: coda_psdev.c,v 1.19 2001/11/23 17:42:48 perry Exp $	*/
2 
3 /*
4  *
5  *             Coda: an Experimental Distributed File System
6  *                              Release 3.1
7  *
8  *           Copyright (c) 1987-1998 Carnegie Mellon University
9  *                          All Rights Reserved
10  *
11  * Permission  to  use, copy, modify and distribute this software and its
12  * documentation is hereby granted,  provided  that  both  the  copyright
13  * notice  and  this  permission  notice  appear  in  all  copies  of the
14  * software, derivative works or  modified  versions,  and  any  portions
15  * thereof, and that both notices appear in supporting documentation, and
16  * that credit is given to Carnegie Mellon University  in  all  documents
17  * and publicity pertaining to direct or indirect use of this code or its
18  * derivatives.
19  *
20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
25  * ANY DERIVATIVE WORK.
26  *
27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
29  * Mellon the rights to redistribute these changes without encumbrance.
30  *
31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32  */
33 
34 /*
35  * Mach Operating System
36  * Copyright (c) 1989 Carnegie-Mellon University
37  * All rights reserved.  The CMU software License Agreement specifies
38  * the terms and conditions for use and redistribution.
39  */
40 
41 /*
42  * This code was written for the Coda file system at Carnegie Mellon
43  * University.  Contributors include David Steere, James Kistler, and
44  * M. Satyanarayanan.  */
45 
46 /* These routines define the pseudo device for communication between
47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48  * but I moved them to make it easier to port the Minicache without
49  * porting coda. -- DCS 10/12/94
50  */
51 
52 /* These routines are the device entry points for Venus. */
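/*
 * Rough picture of the protocol implemented below.  This is a sketch, not
 * lifted from the Venus sources: the device path, the VC_MAXMSGSIZE buffer
 * size, the required user-level headers (<fcntl.h>, <poll.h>, <unistd.h>),
 * and service_request() are illustrative assumptions.  A user-level Venus
 * drives the driver with an open/poll/read/write loop:
 *
 *	int fd = open("/dev/cfs0", O_RDWR);		// vc_nb_open()
 *	for (;;) {
 *		struct pollfd pfd = { fd, POLLIN, 0 };
 *		poll(&pfd, 1, INFTIM);			// vc_nb_poll()
 *
 *		char req[VC_MAXMSGSIZE];
 *		ssize_t n = read(fd, req, sizeof(req));	// vc_nb_read(): next upcall
 *		if (n <= 0)
 *			continue;
 *
 *		char reply[VC_MAXMSGSIZE];
 *		size_t len = service_request(req, n, reply);  // Venus's own logic
 *		write(fd, reply, len);			// vc_nb_write(): wake caller
 *	}
 *
 * Kernel threads enter through coda_call(), which queues a request on
 * vc_requests and sleeps until the matching reply (or a close of the
 * device) wakes them up.
 */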
53 
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.19 2001/11/23 17:42:48 perry Exp $");
56 
57 extern int coda_nc_initialized;    /* Set if cache has been initialized */
58 
59 #ifdef	_LKM
60 #define	NVCODA 4
61 #else
62 #include <vcoda.h>
63 #endif
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/proc.h>
70 #include <sys/mount.h>
71 #include <sys/file.h>
72 #include <sys/ioctl.h>
73 #include <sys/poll.h>
74 #include <sys/select.h>
75 
76 #include <miscfs/syncfs/syncfs.h>
77 
78 #include <coda/coda.h>
79 #include <coda/cnode.h>
80 #include <coda/coda_namecache.h>
81 #include <coda/coda_io.h>
82 #include <coda/coda_psdev.h>
83 
84 #define CTL_C
85 
86 int coda_psdev_print_entry = 0;
87 static
88 int outstanding_upcalls = 0;
89 int coda_call_sleep = PZERO - 1;
90 #ifdef	CTL_C
91 int coda_pcatch = PCATCH;
92 #else
93 #endif
94 
95 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
96 
97 void vcodaattach(int n);
98 
99 struct vmsg {
100     struct queue vm_chain;
101     caddr_t	 vm_data;
102     u_short	 vm_flags;
103     u_short      vm_inSize;	/* Size is at most 5000 bytes */
104     u_short	 vm_outSize;
105     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
106     int		 vm_unique;
107     caddr_t	 vm_sleep;	/* Not used by Mach. */
108 };
109 
110 #define	VM_READ	    1
111 #define	VM_WRITE    2
112 #define	VM_INTR	    4
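/*
 * Life cycle of a vmsg: coda_call() allocates one per upcall, points
 * vm_data at the caller's buffer, and INSQUEs it on vc_requests.
 * vc_nb_read() copies the request out to Venus, sets VM_READ, and moves
 * the message to vc_replys.  vc_nb_write() matches the reply by
 * vm_unique, copies the result back into vm_data, sets VM_WRITE, and
 * wakes the sleeping caller.  VM_INTR is defined here but is not set
 * anywhere in this file.
 */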
113 
114 /* vcodaattach: do nothing */
115 void
116 vcodaattach(n)
117     int n;
118 {
119 }
120 
121 /*
122  * These functions are written for NetBSD.
123  */
124 int
125 vc_nb_open(dev, flag, mode, p)
126     dev_t        dev;
127     int          flag;
128     int          mode;
129     struct proc *p;             /* NetBSD only */
130 {
131     struct vcomm *vcp;
132 
133     ENTRY;
134 
135     if (minor(dev) >= NVCODA || minor(dev) < 0)
136 	return(ENXIO);
137 
138     if (!coda_nc_initialized)
139 	coda_nc_init();
140 
141     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
142     if (VC_OPEN(vcp))
143 	return(EBUSY);
144 
145     memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
146     INIT_QUEUE(vcp->vc_requests);
147     INIT_QUEUE(vcp->vc_replys);
148     MARK_VC_OPEN(vcp);
149 
150     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
151     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
152 
153     return(0);
154 }
155 
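/*
 * vc_nb_close() runs when Venus closes the device (or dies and the kernel
 * closes it for it): it wakes every thread blocked in coda_call(), waits
 * for the outstanding upcalls to drain, and unmounts any file system
 * mounted through this device.
 */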
156 int
157 vc_nb_close (dev, flag, mode, p)
158     dev_t        dev;
159     int          flag;
160     int          mode;
161     struct proc *p;
162 {
163     struct vcomm *vcp;
164     struct vmsg *vmp, *nvmp = NULL;
165     struct coda_mntinfo *mi;
166     int                 err;
167 
168     ENTRY;
169 
170     if (minor(dev) >= NVCODA || minor(dev) < 0)
171 	return(ENXIO);
172 
173     mi = &coda_mnttbl[minor(dev)];
174     vcp = &(mi->mi_vcomm);
175 
176     if (!VC_OPEN(vcp))
177 	panic("vcclose: not open");
178 
179     /* Prevent future operations on this vfs from succeeding by auto-
180      * unmounting any vfs mounted via this device.  This frees the user or
181      * sysadmin from having to remember where all mount points are located.
182      * Put this before the WAKEUPs to avoid queuing new messages between
183      * the WAKEUP and the unmount (which can happen if we're unlucky).
184      */
185     if (!mi->mi_rootvp) {
186 	/* just a simple open/close w no mount */
187 	MARK_VC_CLOSED(vcp);
188 	return 0;
189     }
190 
191     /* Let unmount know this is for real */
192     /*
193      * XXX Freeze syncer.  Must do this before locking the
194      * mount point.  See dounmount for details().
195      * mount point.  See dounmount() for details.
196     lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
197     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
198     if (vfs_busy(mi->mi_vfsp, 0, 0)) {
199 	lockmgr(&syncer_lock, LK_RELEASE, NULL);
200 	return (EBUSY);
201     }
202     coda_unmounting(mi->mi_vfsp);
203 
204     /* Wakeup clients so they can return. */
205     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
206 	 !EOQ(vmp, vcp->vc_requests);
207 	 vmp = nvmp)
208     {
209     	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
210 	/* Free signal request messages and don't wake up, because
211 	   no one is waiting. */
212 	if (vmp->vm_opcode == CODA_SIGNAL) {
213 	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
214 	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
215 	    continue;
216 	}
217 	outstanding_upcalls++;
218 	wakeup(&vmp->vm_sleep);
219     }
220 
221     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
222 	 !EOQ(vmp, vcp->vc_replys);
223 	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
224     {
225 	outstanding_upcalls++;
226 	wakeup(&vmp->vm_sleep);
227     }
228 
229     MARK_VC_CLOSED(vcp);
230 
231     if (outstanding_upcalls) {
232 #ifdef	CODA_VERBOSE
233 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
234     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
235 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
236 #else
237     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
238 #endif
239     }
240 
241     err = dounmount(mi->mi_vfsp, flag, p);
242     if (err)
243 	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
244 	           err, minor(dev)));
245     return 0;
246 }
247 
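/*
 * vc_nb_read(): Venus reads the message at the head of vc_requests.
 * Ordinary upcalls are moved to vc_replys to wait for the matching
 * write; CODA_SIGNAL messages are freed immediately, since no one is
 * sleeping on them.
 */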
248 int
249 vc_nb_read(dev, uiop, flag)
250     dev_t        dev;
251     struct uio  *uiop;
252     int          flag;
253 {
254     struct vcomm *	vcp;
255     struct vmsg *vmp;
256     int error = 0;
257 
258     ENTRY;
259 
260     if (minor(dev) >= NVCODA || minor(dev) < 0)
261 	return(ENXIO);
262 
263     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
264     /* Get message at head of request queue. */
265     if (EMPTY(vcp->vc_requests))
266 	return(0);	/* Nothing to read */
267 
268     vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
269 
270     /* Move the input args into userspace */
271     uiop->uio_rw = UIO_READ;
272     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
273     if (error) {
274 	myprintf(("vcread: error (%d) on uiomove\n", error));
275 	error = EINVAL;
276     }
277 
278 #ifdef OLD_DIAGNOSTIC
279     if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
280 	panic("vc_nb_read: bad chain");
281 #endif
282 
283     REMQUE(vmp->vm_chain);
284 
285     /* If request was a signal, free up the message and don't
286        enqueue it in the reply queue. */
287     if (vmp->vm_opcode == CODA_SIGNAL) {
288 	if (codadebug)
289 	    myprintf(("vcread: signal msg (%d, %d)\n",
290 		      vmp->vm_opcode, vmp->vm_unique));
291 	CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
292 	CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
293 	return(error);
294     }
295 
296     vmp->vm_flags |= VM_READ;
297     INSQUE(vmp->vm_chain, vcp->vc_replys);
298 
299     return(error);
300 }
301 
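/*
 * vc_nb_write(): Venus writes either a downcall (handled at once by
 * handleDownCall()) or the reply to an earlier upcall.  The first two
 * words of every message are the opcode and the uniquifier, so they are
 * peeked at first to find the matching vmsg on vc_replys before the
 * rest of the data is copied into the caller's buffer.
 */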
302 int
303 vc_nb_write(dev, uiop, flag)
304     dev_t        dev;
305     struct uio  *uiop;
306     int          flag;
307 {
308     struct vcomm *	vcp;
309     struct vmsg *vmp;
310     struct coda_out_hdr *out;
311     u_long seq;
312     u_long opcode;
313     int buf[2];
314     int error = 0;
315 
316     ENTRY;
317 
318     if (minor(dev) >= NVCODA || minor(dev) < 0)
319 	return(ENXIO);
320 
321     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
322 
323     /* Peek at the opcode and uniquifier without transferring the data. */
324     uiop->uio_rw = UIO_WRITE;
325     error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
326     if (error) {
327 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
328 	return(EINVAL);
329     }
330 
331     opcode = buf[0];
332     seq = buf[1];
333 
334     if (codadebug)
335 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
336 
337     if (DOWNCALL(opcode)) {
338 	union outputArgs pbuf;
339 
340 	/* get the rest of the data. */
341 	uiop->uio_rw = UIO_WRITE;
342 	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
343 	if (error) {
344 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
345 		      error, opcode, seq));
346 	    return(EINVAL);
347 	    }
348 
349 	return handleDownCall(opcode, &pbuf);
350     }
351 
352     /* Look for the message on the (waiting for) reply queue. */
353     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
354 	 !EOQ(vmp, vcp->vc_replys);
355 	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
356     {
357 	if (vmp->vm_unique == seq) break;
358     }
359 
360     if (EOQ(vmp, vcp->vc_replys)) {
361 	if (codadebug)
362 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
363 
364 	return(ESRCH);
365 	}
366 
367     /* Remove the message from the reply queue */
368     REMQUE(vmp->vm_chain);
369 
370     /* move data into response buffer. */
371     out = (struct coda_out_hdr *)vmp->vm_data;
372     /* Don't need to copy opcode and uniquifier. */
373 
374     /* get the rest of the data. */
375     if (vmp->vm_outSize < uiop->uio_resid) {
376 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
377 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
378 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
379 	return(EINVAL);
380     }
381 
382     buf[0] = uiop->uio_resid; 	/* Save this value. */
383     uiop->uio_rw = UIO_WRITE;
384     error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
385     if (error) {
386 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
387 		  error, opcode, seq));
388 	return(EINVAL);
389     }
390 
391     /* I don't think these are used, but just in case. */
392     /* XXX - aren't these two already correct? -bnoble */
393     out->opcode = opcode;
394     out->unique = seq;
395     vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
396     vmp->vm_flags |= VM_WRITE;
397     wakeup(&vmp->vm_sleep);
398 
399     return(0);
400 }
401 
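/*
 * vc_nb_ioctl(): control operations on the device.  CODARESIZE,
 * CODASTATS and CODAPRINT manipulate or report on the name cache;
 * CIOC_KERNEL_VERSION reports (or checks) the kernel/Venus interface
 * version.
 */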
402 int
403 vc_nb_ioctl(dev, cmd, addr, flag, p)
404     dev_t         dev;
405     u_long        cmd;
406     caddr_t       addr;
407     int           flag;
408     struct proc  *p;
409 {
410     ENTRY;
411 
412     switch(cmd) {
413     case CODARESIZE: {
414 	struct coda_resize *data = (struct coda_resize *)addr;
415 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
416 	break;
417     }
418     case CODASTATS:
419 	if (coda_nc_use) {
420 	    coda_nc_gather_stats();
421 	    return(0);
422 	} else {
423 	    return(ENODEV);
424 	}
425 	break;
426     case CODAPRINT:
427 	if (coda_nc_use) {
428 	    print_coda_nc();
429 	    return(0);
430 	} else {
431 	    return(ENODEV);
432 	}
433 	break;
434     case CIOC_KERNEL_VERSION:
435 	switch (*(u_int *)addr) {
436 	case 0:
437 		*(u_int *)addr = coda_kernel_version;
438 		return 0;
439 		break;
440 	case 1:
441 	case 2:
442 		if (coda_kernel_version != *(u_int *)addr)
443 		    return ENOENT;
444 		else
445 		    return 0;
446 	default:
447 		return ENOENT;
448 	}
449     	break;
450     default :
451 	return(EINVAL);
452 	break;
453     }
454 }
455 
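/*
 * vc_nb_poll(): the device is readable whenever vc_requests is
 * non-empty; otherwise record the poller so that coda_call() can
 * selwakeup() it when a new request is queued.
 */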
456 int
457 vc_nb_poll(dev, events, p)
458     dev_t         dev;
459     int           events;
460     struct proc  *p;
461 {
462     struct vcomm *vcp;
463     int event_msk = 0;
464 
465     ENTRY;
466 
467     if (minor(dev) >= NVCODA || minor(dev) < 0)
468 	return(ENXIO);
469 
470     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
471 
472     event_msk = events & (POLLIN|POLLRDNORM);
473     if (!event_msk)
474 	return(0);
475 
476     if (!EMPTY(vcp->vc_requests))
477 	return(events & (POLLIN|POLLRDNORM));
478 
479     selrecord(p, &(vcp->vc_selproc));
480 
481     return(0);
482 }
483 
484 /*
485  * Statistics
486  */
487 struct coda_clstat coda_clstat;
488 
489 /*
490  * Key question: whether to sleep interruptibly or uninterruptibly when
491  * waiting for Venus.  The former seems better (because you can ^C a
492  * job), but then GNU-EMACS completion breaks. Use tsleep with no
493  * timeout, and no longjmp happens. But, when sleeping
494  * "uninterruptibly", we don't get told if it returns abnormally
495  * (e.g. kill -9).
496  */
497 
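/*
 * Typical caller pattern (a sketch only, not lifted from coda_venus.c;
 * the real callers build their messages with the INIT_IN() macro and
 * per-operation argument structs, and "fid", "attr" and "mntinfo" below
 * are hypothetical caller-supplied variables):
 *
 *	union inputArgs *inp;
 *	union outputArgs *outp;
 *	int insize, outsize, error;
 *
 *	insize = sizeof(struct coda_getattr_in);
 *	outsize = sizeof(union outputArgs);
 *	CODA_ALLOC(inp, union inputArgs *, outsize);
 *	outp = (union outputArgs *)inp;		(the reply overwrites the request)
 *	inp->ih.opcode = CODA_GETATTR;		(coda_call() fills in ih.unique)
 *	inp->coda_getattr.VFid = *fid;
 *	error = coda_call(mntinfo, insize, &outsize, (caddr_t)inp);
 *	if (error == 0)
 *		*attr = outp->coda_getattr.attr;
 *	CODA_FREE(inp, sizeof(union outputArgs));
 */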
498 int
499 coda_call(mntinfo, inSize, outSize, buffer)
500      struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
501 {
502 	struct vcomm *vcp;
503 	struct vmsg *vmp;
504 	int error;
505 #ifdef	CTL_C
506 	struct proc *p = curproc;
507 	sigset_t psig_omask;
508 	int i;
509 	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
510 #endif
511 	if (mntinfo == NULL) {
512 	    /* Unlikely, but could be a race condition with a dying warden */
513 	    return ENODEV;
514 	}
515 
516 	vcp = &(mntinfo->mi_vcomm);
517 
518 	coda_clstat.ncalls++;
519 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
520 
521 	if (!VC_OPEN(vcp))
522 	    return(ENODEV);
523 
524 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
525 	/* Format the request message. */
526 	vmp->vm_data = buffer;
527 	vmp->vm_flags = 0;
528 	vmp->vm_inSize = inSize;
529 	vmp->vm_outSize
530 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
531 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
532 	vmp->vm_unique = ++vcp->vc_seq;
533 	if (codadebug)
534 	    myprintf(("Doing a call for %d.%d\n",
535 		      vmp->vm_opcode, vmp->vm_unique));
536 
537 	/* Fill in the common input args. */
538 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
539 
540 	/* Append msg to request queue and poke Venus. */
541 	INSQUE(vmp->vm_chain, vcp->vc_requests);
542 	selwakeup(&(vcp->vc_selproc));
543 
544 	/* We can be interrupted while we wait for Venus to process
545 	 * our request.  If the interrupt occurs before Venus has read
546 	 * the request, we dequeue and return. If it occurs after the
547 	 * read but before the reply, we dequeue, send a signal
548 	 * message, and return. If it occurs after the reply we ignore
549 	 * it. In no case do we want to restart the syscall.  If it
550 	 * was interrupted by a venus shutdown (vcclose), return
551 	 * ENODEV.  */
552 
553 	/* Ignore the return value; we have to check anyway. */
554 #ifdef	CTL_C
555 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
556 	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
557 	   as SA_RESTART.  This means that we should exit the sleep, handle
558 	   the "signal", and then go to sleep again.  Mostly this is done by
559 	   letting the syscall complete and be restarted.  We are not
560 	   idempotent and cannot do this.  A better solution is necessary.
561 	 */
562 	i = 0;
563 	do {
564 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
565 	    if (error == 0)
566 	    	break;
567 	    else if (error == EWOULDBLOCK) {
568 #ifdef	CODA_VERBOSE
569 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
570 #endif
571     	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
572 		    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
573 #ifdef	CODA_VERBOSE
574 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
575 #endif
576     	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
577 		    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
578 #ifdef	CODA_VERBOSE
579 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
580 #endif
581 	    } else {
582 		    sigset_t tmp;
583 		    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
584 		    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);
585 
586 #ifdef	CODA_VERBOSE
587 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
588 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
589 			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
590 			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
591 			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
592 			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
593 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
594 #endif
595 		    break;
596 #ifdef	notyet
597 		    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
598 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
599 			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
600 			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
601 			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
602 			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
603 #endif
604 	    }
605 	} while (error && i++ < 128 && VC_OPEN(vcp));
606 	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
607 #else
608 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
609 #endif
610 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
611  	/* Op went through, interrupt or not... */
612 	    if (vmp->vm_flags & VM_WRITE) {
613 		error = 0;
614 		*outSize = vmp->vm_outSize;
615 	    }
616 
617 	    else if (!(vmp->vm_flags & VM_READ)) {
618 		/* Interrupted before venus read it. */
619 #ifdef	CODA_VERBOSE
620 		if (1)
621 #else
622 		if (codadebug)
623 #endif
624 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
625 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
626 		REMQUE(vmp->vm_chain);
627 		error = EINTR;
628 	    }
629 
630 	    else {
631 		/* VM_READ is set but VM_WRITE is not: Venus read the request
632 		   but was interrupted before replying.  Send Venus a signal
633 		   message so it can abort the operation. */
634 		struct coda_in_hdr *dog;
635 		struct vmsg *svmp;
636 
637 #ifdef	CODA_VERBOSE
638 		if (1)
639 #else
640 		if (codadebug)
641 #endif
642 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
643 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
644 
645 		REMQUE(vmp->vm_chain);
646 		error = EINTR;
647 
648 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
649 
650 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
651 		dog = (struct coda_in_hdr *)svmp->vm_data;
652 
653 		svmp->vm_flags = 0;
654 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
655 		dog->unique = svmp->vm_unique = vmp->vm_unique;
656 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
657 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
658 
659 		if (codadebug)
660 		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
661 			   svmp->vm_opcode, svmp->vm_unique));
662 
663 		/* insert at head of queue! */
664 		INSQUE(svmp->vm_chain, vcp->vc_requests);
665 		selwakeup(&(vcp->vc_selproc));
666 	    }
667 	}
668 
669 	else {	/* If venus died (!VC_OPEN(vcp)) */
670 	    if (codadebug)
671 		myprintf(("vcclose woke op %d.%d flags %d\n",
672 		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
673 
674 		error = ENODEV;
675 	}
676 
677 	CODA_FREE(vmp, sizeof(struct vmsg));
678 
679 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
680 		wakeup(&outstanding_upcalls);
681 
682 	if (!error)
683 		error = ((struct coda_out_hdr *)buffer)->result;
684 	return(error);
685 }
686 
687