1 /*	$NetBSD: coda_psdev.c,v 1.26 2003/06/29 22:29:09 fvdl Exp $	*/
2 
3 /*
4  *
5  *             Coda: an Experimental Distributed File System
6  *                              Release 3.1
7  *
8  *           Copyright (c) 1987-1998 Carnegie Mellon University
9  *                          All Rights Reserved
10  *
11  * Permission  to  use, copy, modify and distribute this software and its
12  * documentation is hereby granted,  provided  that  both  the  copyright
13  * notice  and  this  permission  notice  appear  in  all  copies  of the
14  * software, derivative works or  modified  versions,  and  any  portions
15  * thereof, and that both notices appear in supporting documentation, and
16  * that credit is given to Carnegie Mellon University  in  all  documents
17  * and publicity pertaining to direct or indirect use of this code or its
18  * derivatives.
19  *
20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
25  * ANY DERIVATIVE WORK.
26  *
27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
29  * Mellon the rights to redistribute these changes without encumbrance.
30  *
31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32  */
33 
34 /*
35  * Mach Operating System
36  * Copyright (c) 1989 Carnegie-Mellon University
37  * All rights reserved.  The CMU software License Agreement specifies
38  * the terms and conditions for use and redistribution.
39  */
40 
41 /*
42  * This code was written for the Coda file system at Carnegie Mellon
43  * University.  Contributors include David Steere, James Kistler, and
44  * M. Satyanarayanan.  */
45 
46 /* These routines define the pseudo device for communication between
47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48  * but I moved them to make it easier to port the Minicache without
49  * porting coda. -- DCS 10/12/94
50  *
51  * Following code depends on file-system CODA.
52  */
53 
54 /* These routines are the device entry points for Venus. */
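/*
 * For orientation, a minimal user-space sketch of the exchange these entry
 * points implement (illustrative only; the device path "/dev/cfs0" and the
 * user-level include path are assumptions, not taken from this file): Venus
 * read()s a request that begins with a struct coda_in_hdr, services it, and
 * write()s back a reply that begins with a struct coda_out_hdr carrying the
 * same uniquifier, which wakes the process sleeping in coda_call() below.
 */
#if 0	/* illustrative sketch only, not compiled */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <coda/coda.h>

static void
serve_one_upcall(int fd)	/* fd: open("/dev/cfs0", O_RDWR) */
{
	char buf[5000];		/* struct vmsg says requests are at most 5000 bytes */
	struct coda_in_hdr *req = (struct coda_in_hdr *)buf;
	struct coda_out_hdr rep;

	if (read(fd, buf, sizeof(buf)) < (ssize_t)sizeof(*req))
		return;			/* nothing queued */

	rep.opcode = req->opcode;	/* echo the opcode ... */
	rep.unique = req->unique;	/* ... and the uniquifier */
	rep.result = 0;			/* e.g. report success */
	(void)write(fd, &rep, sizeof(rep));
}
#endif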
55 
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.26 2003/06/29 22:29:09 fvdl Exp $");
58 
59 extern int coda_nc_initialized;    /* Set if cache has been initialized */
60 
61 #ifdef	_LKM
62 #define	NVCODA 4
63 #else
64 #include <vcoda.h>
65 #endif
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/kernel.h>
70 #include <sys/malloc.h>
71 #include <sys/proc.h>
72 #include <sys/mount.h>
73 #include <sys/file.h>
74 #include <sys/ioctl.h>
75 #include <sys/poll.h>
76 #include <sys/select.h>
77 #include <sys/conf.h>
78 
79 #include <miscfs/syncfs/syncfs.h>
80 
81 #include <coda/coda.h>
82 #include <coda/cnode.h>
83 #include <coda/coda_namecache.h>
84 #include <coda/coda_io.h>
85 
86 #define CTL_C
87 
88 int coda_psdev_print_entry = 0;
89 static
90 int outstanding_upcalls = 0;
91 int coda_call_sleep = PZERO - 1;
92 #ifdef	CTL_C
93 int coda_pcatch = PCATCH;
94 #else
95 #endif
96 
97 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
98 
99 void vcodaattach(int n);
100 
101 dev_type_open(vc_nb_open);
102 dev_type_close(vc_nb_close);
103 dev_type_read(vc_nb_read);
104 dev_type_write(vc_nb_write);
105 dev_type_ioctl(vc_nb_ioctl);
106 dev_type_poll(vc_nb_poll);
107 dev_type_kqfilter(vc_nb_kqfilter);
108 
109 const struct cdevsw vcoda_cdevsw = {
110 	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
111 	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter,
112 };
113 
114 struct vmsg {
115     struct queue vm_chain;
116     caddr_t	 vm_data;
117     u_short	 vm_flags;
118     u_short      vm_inSize;	/* Size is at most 5000 bytes */
119     u_short	 vm_outSize;
120     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
121     int		 vm_unique;
122     caddr_t	 vm_sleep;	/* Not used by Mach. */
123 };
124 
125 #define	VM_READ	    1
126 #define	VM_WRITE    2
127 #define	VM_INTR	    4
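
/*
 * A note on vm_flags (descriptive, matching the code below): vc_nb_read
 * sets VM_READ once Venus has picked the request up, and vc_nb_write sets
 * VM_WRITE once Venus has written the reply back.  After waking up,
 * coda_call() uses these bits to distinguish a completed call (VM_WRITE),
 * an interrupt before Venus read the request (neither bit set), and an
 * interrupt after the read but before the reply (VM_READ only), in which
 * case a CODA_SIGNAL message is queued for Venus.  VM_INTR is defined but
 * not referenced elsewhere in this file.
 */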
128 
129 /* vcodaattach: do nothing */
130 void
131 vcodaattach(n)
132     int n;
133 {
134 }
135 
136 /*
137  * These functions are written for NetBSD.
138  */
139 int
140 vc_nb_open(dev, flag, mode, p)
141     dev_t        dev;
142     int          flag;
143     int          mode;
144     struct proc *p;             /* NetBSD only */
145 {
146     struct vcomm *vcp;
147 
148     ENTRY;
149 
150     if (minor(dev) >= NVCODA || minor(dev) < 0)
151 	return(ENXIO);
152 
153     if (!coda_nc_initialized)
154 	coda_nc_init();
155 
156     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
157     if (VC_OPEN(vcp))
158 	return(EBUSY);
159 
160     memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
161     INIT_QUEUE(vcp->vc_requests);
162     INIT_QUEUE(vcp->vc_replys);
163     MARK_VC_OPEN(vcp);
164 
165     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
166     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
167 
168     return(0);
169 }
170 
171 int
172 vc_nb_close (dev, flag, mode, p)
173     dev_t        dev;
174     int          flag;
175     int          mode;
176     struct proc *p;
177 {
178     struct vcomm *vcp;
179     struct vmsg *vmp, *nvmp = NULL;
180     struct coda_mntinfo *mi;
181     int                 err;
182 
183     ENTRY;
184 
185     if (minor(dev) >= NVCODA || minor(dev) < 0)
186 	return(ENXIO);
187 
188     mi = &coda_mnttbl[minor(dev)];
189     vcp = &(mi->mi_vcomm);
190 
191     if (!VC_OPEN(vcp))
192 	panic("vcclose: not open");
193 
194     /* prevent future operations on this vfs from succeeding by auto-
195      * unmounting any vfs mounted via this device. This frees the user or
196      * sysadmin from having to remember where all mount points are located.
197      * Put this before WAKEUPs to avoid queuing new messages between
198      * the WAKEUP and the unmount (which can happen if we're unlucky)
199      */
200     if (!mi->mi_rootvp) {
201 	/* just a simple open/close w no mount */
202 	MARK_VC_CLOSED(vcp);
203 	return 0;
204     }
205 
206     /* Let unmount know this is for real */
207     /*
208      * XXX Freeze syncer.  Must do this before locking the
209      * mount point.  See dounmount() for details.
210      */
211     lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
212     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
213     if (vfs_busy(mi->mi_vfsp, 0, 0)) {
214 	lockmgr(&syncer_lock, LK_RELEASE, NULL);
215 	return (EBUSY);
216     }
217     coda_unmounting(mi->mi_vfsp);
218 
219     /* Wakeup clients so they can return. */
220     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
221 	 !EOQ(vmp, vcp->vc_requests);
222 	 vmp = nvmp)
223     {
224     	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
225 	/* Free signal request messages and don't wake up, because
226 	   no one is waiting. */
227 	if (vmp->vm_opcode == CODA_SIGNAL) {
228 	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
229 	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
230 	    continue;
231 	}
232 	outstanding_upcalls++;
233 	wakeup(&vmp->vm_sleep);
234     }
235 
236     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
237 	 !EOQ(vmp, vcp->vc_replys);
238 	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
239     {
240 	outstanding_upcalls++;
241 	wakeup(&vmp->vm_sleep);
242     }
243 
244     MARK_VC_CLOSED(vcp);
245 
246     if (outstanding_upcalls) {
247 #ifdef	CODA_VERBOSE
248 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
249     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
250 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
251 #else
252     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
253 #endif
254     }
255 
256     err = dounmount(mi->mi_vfsp, flag, p);
257     if (err)
258 	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
259 	           err, minor(dev)));
260     return 0;
261 }
262 
263 int
264 vc_nb_read(dev, uiop, flag)
265     dev_t        dev;
266     struct uio  *uiop;
267     int          flag;
268 {
269     struct vcomm *	vcp;
270     struct vmsg *vmp;
271     int error = 0;
272 
273     ENTRY;
274 
275     if (minor(dev) >= NVCODA || minor(dev) < 0)
276 	return(ENXIO);
277 
278     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
279     /* Get message at head of request queue. */
280     if (EMPTY(vcp->vc_requests))
281 	return(0);	/* Nothing to read */
282 
283     vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
284 
285     /* Move the input args into userspace */
286     uiop->uio_rw = UIO_READ;
287     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
288     if (error) {
289 	myprintf(("vcread: error (%d) on uiomove\n", error));
290 	error = EINVAL;
291     }
292 
293 #ifdef OLD_DIAGNOSTIC
294     if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
295 	panic("vc_nb_read: bad chain");
296 #endif
297 
298     REMQUE(vmp->vm_chain);
299 
300     /* If request was a signal, free up the message and don't
301        enqueue it in the reply queue. */
302     if (vmp->vm_opcode == CODA_SIGNAL) {
303 	if (codadebug)
304 	    myprintf(("vcread: signal msg (%d, %d)\n",
305 		      vmp->vm_opcode, vmp->vm_unique));
306 	CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
307 	CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
308 	return(error);
309     }
310 
311     vmp->vm_flags |= VM_READ;
312     INSQUE(vmp->vm_chain, vcp->vc_replys);
313 
314     return(error);
315 }
316 
317 int
318 vc_nb_write(dev, uiop, flag)
319     dev_t        dev;
320     struct uio  *uiop;
321     int          flag;
322 {
323     struct vcomm *	vcp;
324     struct vmsg *vmp;
325     struct coda_out_hdr *out;
326     u_long seq;
327     u_long opcode;
328     int buf[2];
329     int error = 0;
330 
331     ENTRY;
332 
333     if (minor(dev) >= NVCODA || minor(dev) < 0)
334 	return(ENXIO);
335 
336     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
337 
338     /* Peek at the opcode and uniquifier without transferring the data. */
339     uiop->uio_rw = UIO_WRITE;
340     error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
341     if (error) {
342 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
343 	return(EINVAL);
344     }
345 
346     opcode = buf[0];
347     seq = buf[1];
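    /*
     * The two words peeked above are assumed to be the leading opcode and
     * uniquifier shared by struct coda_in_hdr and struct coda_out_hdr;
     * that is why the copy into the reply buffer further down starts at
     * &out->result and these two fields need not be copied again.
     */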
348 
349     if (codadebug)
350 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
351 
352     if (DOWNCALL(opcode)) {
353 	union outputArgs pbuf;
354 
355 	/* get the rest of the data. */
356 	uiop->uio_rw = UIO_WRITE;
357 	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
358 	if (error) {
359 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
360 		      error, opcode, seq));
361 	    return(EINVAL);
362 	    }
363 
364 	return handleDownCall(opcode, &pbuf);
365     }
366 
367     /* Look for the message on the (waiting for) reply queue. */
368     for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
369 	 !EOQ(vmp, vcp->vc_replys);
370 	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
371     {
372 	if (vmp->vm_unique == seq) break;
373     }
374 
375     if (EOQ(vmp, vcp->vc_replys)) {
376 	if (codadebug)
377 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
378 
379 	return(ESRCH);
380 	}
381 
382     /* Remove the message from the reply queue */
383     REMQUE(vmp->vm_chain);
384 
385     /* move data into response buffer. */
386     out = (struct coda_out_hdr *)vmp->vm_data;
387     /* Don't need to copy opcode and uniquifier. */
388 
389     /* get the rest of the data. */
390     if (vmp->vm_outSize < uiop->uio_resid) {
391 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
392 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
393 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
394 	return(EINVAL);
395     }
396 
397     buf[0] = uiop->uio_resid; 	/* Save this value. */
398     uiop->uio_rw = UIO_WRITE;
399     error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
400     if (error) {
401 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
402 		  error, opcode, seq));
403 	return(EINVAL);
404     }
405 
406     /* I don't think these are used, but just in case. */
407     /* XXX - aren't these two already correct? -bnoble */
408     out->opcode = opcode;
409     out->unique = seq;
410     vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
411     vmp->vm_flags |= VM_WRITE;
412     wakeup(&vmp->vm_sleep);
413 
414     return(0);
415 }
416 
417 int
418 vc_nb_ioctl(dev, cmd, addr, flag, p)
419     dev_t         dev;
420     u_long        cmd;
421     caddr_t       addr;
422     int           flag;
423     struct proc  *p;
424 {
425     ENTRY;
426 
427     switch(cmd) {
428     case CODARESIZE: {
429 	struct coda_resize *data = (struct coda_resize *)addr;
430 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
431 	break;
432     }
433     case CODASTATS:
434 	if (coda_nc_use) {
435 	    coda_nc_gather_stats();
436 	    return(0);
437 	} else {
438 	    return(ENODEV);
439 	}
440 	break;
441     case CODAPRINT:
442 	if (coda_nc_use) {
443 	    print_coda_nc();
444 	    return(0);
445 	} else {
446 	    return(ENODEV);
447 	}
448 	break;
449     case CIOC_KERNEL_VERSION:
450 	switch (*(u_int *)addr) {
451 	case 0:
452 		*(u_int *)addr = coda_kernel_version;
453 		return 0;
454 		break;
455 	case 1:
456 	case 2:
457 		if (coda_kernel_version != *(u_int *)addr)
458 		    return ENOENT;
459 		else
460 		    return 0;
461 	default:
462 		return ENOENT;
463 	}
464     	break;
465     default :
466 	return(EINVAL);
467 	break;
468     }
469 }
470 
471 int
472 vc_nb_poll(dev, events, p)
473     dev_t         dev;
474     int           events;
475     struct proc  *p;
476 {
477     struct vcomm *vcp;
478     int event_msk = 0;
479 
480     ENTRY;
481 
482     if (minor(dev) >= NVCODA || minor(dev) < 0)
483 	return(ENXIO);
484 
485     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
486 
487     event_msk = events & (POLLIN|POLLRDNORM);
488     if (!event_msk)
489 	return(0);
490 
491     if (!EMPTY(vcp->vc_requests))
492 	return(events & (POLLIN|POLLRDNORM));
493 
494     selrecord(p, &(vcp->vc_selproc));
495 
496     return(0);
497 }
498 
499 static void
500 filt_vc_nb_detach(struct knote *kn)
501 {
502 	struct vcomm *vcp = kn->kn_hook;
503 
504 	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
505 }
506 
507 static int
508 filt_vc_nb_read(struct knote *kn, long hint)
509 {
510 	struct vcomm *vcp = kn->kn_hook;
511 	struct vmsg *vmp;
512 
513 	if (EMPTY(vcp->vc_requests))
514 		return (0);
515 
516 	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
517 
518 	kn->kn_data = vmp->vm_inSize;
519 	return (1);
520 }
521 
522 static const struct filterops vc_nb_read_filtops =
523 	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
524 
525 int
526 vc_nb_kqfilter(dev_t dev, struct knote *kn)
527 {
528 	struct vcomm *vcp;
529 	struct klist *klist;
530 
531 	ENTRY;
532 
533 	if (minor(dev) >= NVCODA || minor(dev) < 0)
534 		return(ENXIO);
535 
536 	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
537 
538 	switch (kn->kn_filter) {
539 	case EVFILT_READ:
540 		klist = &vcp->vc_selproc.sel_klist;
541 		kn->kn_fop = &vc_nb_read_filtops;
542 		break;
543 
544 	default:
545 		return (1);
546 	}
547 
548 	kn->kn_hook = vcp;
549 
550 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
551 
552 	return (0);
553 }
554 
555 /*
556  * Statistics
557  */
558 struct coda_clstat coda_clstat;
559 
560 /*
561  * Key question: whether to sleep interruptibly or uninterruptibly when
562  * waiting for Venus.  The former seems better (because you can ^C a
563  * job), but then GNU-EMACS completion breaks. Use tsleep with no
564  * timeout, and no longjmp happens. But, when sleeping
565  * "uninterruptibly", we don't get told if it returns abnormally
566  * (e.g. kill -9).
567  */
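
/*
 * Descriptive note on the compromise coda_call() makes below when CTL_C is
 * defined: it tsleep()s with coda_call_sleep|coda_pcatch (PCATCH) in hz*2
 * slices and re-checks after each wakeup; pending SIGIO and SIGALRM are
 * masked so the wait can continue, any other signal breaks out of the wait,
 * the loop is bounded at 128 iterations or until Venus closes the device,
 * and the saved signal list is restored afterwards.
 */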
568 
569 int
570 coda_call(mntinfo, inSize, outSize, buffer)
571      struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
572 {
573 	struct vcomm *vcp;
574 	struct vmsg *vmp;
575 	int error;
576 #ifdef	CTL_C
577 	struct proc *p = curproc;
578 	sigset_t psig_omask;
579 	int i;
580 	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
581 #endif
582 	if (mntinfo == NULL) {
583 	    /* Unlikely, but could be a race condition with a dying warden */
584 	    return ENODEV;
585 	}
586 
587 	vcp = &(mntinfo->mi_vcomm);
588 
589 	coda_clstat.ncalls++;
590 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
591 
592 	if (!VC_OPEN(vcp))
593 	    return(ENODEV);
594 
595 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
596 	/* Format the request message. */
597 	vmp->vm_data = buffer;
598 	vmp->vm_flags = 0;
599 	vmp->vm_inSize = inSize;
600 	vmp->vm_outSize
601 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
602 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
603 	vmp->vm_unique = ++vcp->vc_seq;
604 	if (codadebug)
605 	    myprintf(("Doing a call for %d.%d\n",
606 		      vmp->vm_opcode, vmp->vm_unique));
607 
608 	/* Fill in the common input args. */
609 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
610 
611 	/* Append msg to request queue and poke Venus. */
612 	INSQUE(vmp->vm_chain, vcp->vc_requests);
613 	selnotify(&(vcp->vc_selproc), 0);
614 
615 	/* We can be interrupted while we wait for Venus to process
616 	 * our request.  If the interrupt occurs before Venus has read
617 	 * the request, we dequeue and return. If it occurs after the
618 	 * read but before the reply, we dequeue, send a signal
619 	 * message, and return. If it occurs after the reply we ignore
620 	 * it. In no case do we want to restart the syscall.  If it
621 	 * was interrupted by a venus shutdown (vcclose), return
622 	 * ENODEV.  */
623 
624 	/* Ignore the return value; we have to check anyway. */
625 #ifdef	CTL_C
626 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
627 	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
628 	   as SA_RESTART.  This means that we should exit the sleep, handle the
629 	   "signal", and then go to sleep again.  Mostly this is done by letting
630 	   the syscall complete and be restarted.  We are not idempotent and
631 	   cannot do this.  A better solution is necessary.
632 	 */
633 	i = 0;
634 	do {
635 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
636 	    if (error == 0)
637 	    	break;
638 	    else if (error == EWOULDBLOCK) {
639 #ifdef	CODA_VERBOSE
640 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
641 #endif
642     	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
643 		    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
644 #ifdef	CODA_VERBOSE
645 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
646 #endif
647     	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
648 		    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
649 #ifdef	CODA_VERBOSE
650 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
651 #endif
652 	    } else {
653 		    sigset_t tmp;
654 		    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
655 		    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);
656 
657 #ifdef	CODA_VERBOSE
658 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
659 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
660 			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
661 			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
662 			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
663 			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
664 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
665 #endif
666 		    break;
667 #ifdef	notyet
668 		    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
669 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
670 			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
671 			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
672 			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
673 			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
674 #endif
675 	    }
676 	} while (error && i++ < 128 && VC_OPEN(vcp));
677 	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
678 #else
679 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
680 #endif
681 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
682  	/* Op went through, interrupt or not... */
683 	    if (vmp->vm_flags & VM_WRITE) {
684 		error = 0;
685 		*outSize = vmp->vm_outSize;
686 	    }
687 
688 	    else if (!(vmp->vm_flags & VM_READ)) {
689 		/* Interrupted before venus read it. */
690 #ifdef	CODA_VERBOSE
691 		if (1)
692 #else
693 		if (codadebug)
694 #endif
695 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
696 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
697 		REMQUE(vmp->vm_chain);
698 		error = EINTR;
699 	    }
700 
701 	    else {
702 		/* !(vmp->vm_flags & VM_WRITE) here means we were interrupted
703 		   after Venus started the upcall but before it replied;
704 		   send Venus a CODA_SIGNAL message for the interrupted call. */
705 		struct coda_in_hdr *dog;
706 		struct vmsg *svmp;
707 
708 #ifdef	CODA_VERBOSE
709 		if (1)
710 #else
711 		if (codadebug)
712 #endif
713 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
714 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
715 
716 		REMQUE(vmp->vm_chain);
717 		error = EINTR;
718 
719 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
720 
721 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
722 		dog = (struct coda_in_hdr *)svmp->vm_data;
723 
724 		svmp->vm_flags = 0;
725 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
726 		dog->unique = svmp->vm_unique = vmp->vm_unique;
727 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
728 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
729 
730 		if (codadebug)
731 		    myprintf(("coda_call: enqueueing signal msg (%d, %d)\n",
732 			   svmp->vm_opcode, svmp->vm_unique));
733 
734 		/* insert at head of queue! */
735 		INSQUE(svmp->vm_chain, vcp->vc_requests);
736 		selnotify(&(vcp->vc_selproc), 0);
737 	    }
738 	}
739 
740 	else {	/* If venus died (!VC_OPEN(vcp)) */
741 	    if (codadebug)
742 		myprintf(("vcclose woke op %d.%d flags %d\n",
743 		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
744 
745 		error = ENODEV;
746 	}
747 
748 	CODA_FREE(vmp, sizeof(struct vmsg));
749 
750 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
751 		wakeup(&outstanding_upcalls);
752 
753 	if (!error)
754 		error = ((struct coda_out_hdr *)buffer)->result;
755 	return(error);
756 }
757 
758