/*	$NetBSD: coda_psdev.c,v 1.41 2008/03/21 17:59:57 plunky Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and the Minicache in Mach 2.6.  They used to be in
 * cfs_subr.c, but I moved them to make it easier to port the Minicache
 * without porting Coda. -- DCS 10/12/94
 *
 * The following code depends on the file-system CODA option.
 */

/* These routines are the device entry points for Venus. */

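/*
 * Illustrative sketch (not part of the driver): a minimal user-space loop
 * that a cache manager like Venus might run against this device.  The
 * device path and the helper functions below are assumptions made for the
 * example only; the real message formats live in <coda/coda.h>.
 *
 *	char buf[5000];				// vm_inSize is at most 5000 bytes (see struct vmsg)
 *	int fd = open("/dev/cfs0", O_RDWR);	// assumed device node name
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));	// fetch next upcall (vc_nb_read)
 *		if (n <= 0)
 *			continue;			// nothing queued; poll(2)/kqueue also work
 *		n = service_request(buf, n);		// hypothetical helper: build the reply in buf
 *		write(fd, buf, n);			// hand the reply back (vc_nb_write)
 *	}
 */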
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.41 2008/03/21 17:59:57 plunky Exp $");

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter, D_OTHER,
};

struct vmsg {
    TAILQ_ENTRY(vmsg) vm_chain;
    void	*vm_data;
    u_short	 vm_flags;
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;
    void	*vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4
/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
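/*
 * vc_nb_open: open the Coda control device.  Initializes the name cache on
 * first use, sets up the request/reply queues for this unit and marks the
 * vcomm open.  Only one opener per unit is allowed (EBUSY otherwise).
 */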
int
vc_nb_open(dev_t dev, int flag, int mode,
    struct lwp *l)
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    selinit(&vcp->vc_selproc);
    TAILQ_INIT(&vcp->vc_requests);
    TAILQ_INIT(&vcp->vc_replies);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

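/*
 * vc_nb_close: Venus is going away.  Auto-unmount any file system mounted
 * through this unit, wake up everyone still waiting on an upcall so their
 * coda_call() can return ENODEV, wait for those upcalls to drain, and mark
 * the vcomm closed.
 */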
int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_mntinfo *mi;
    int                 err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device.  This frees the user or
     * sysadmin from having to remember where all the mount points are
     * located.  Do this before the WAKEUPs to avoid queuing new messages
     * between the WAKEUP and the unmount (which can happen if we're
     * unlucky).
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close with no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze the syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    mutex_enter(&syncer_mutex);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, RW_WRITER, NULL)) {
	mutex_exit(&syncer_mutex);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wake up clients so they can return. */
    while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

	/* Free signal request messages and don't wake up, because
	   no one is waiting for them. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	    CODA_FREE(vmp, sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, l);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
	           err, minor(dev)));
    seldestroy(&vcp->vc_selproc);
    return 0;
}

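/*
 * vc_nb_read: Venus reads the next upcall.  Copies the request at the head
 * of vc_requests out to userspace and moves it to vc_replies, except for
 * CODA_SIGNAL messages, which are freed immediately since no reply is
 * expected.  Returns 0 with no data when the queue is empty.
 */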
int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Get the message at the head of the request queue. */
    vmp = TAILQ_FIRST(&vcp->vc_requests);
    if (vmp == NULL)
	return(0);	/* Nothing to read */

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

    TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

    /* If the request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	CODA_FREE(vmp, sizeof(struct vmsg));
	return(error);
    }

    vmp->vm_flags |= VM_READ;
    TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);

    return(error);
}

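/*
 * vc_nb_write: Venus writes back a message.  Downcalls (cache
 * invalidations, etc.) are handed straight to handleDownCall().  Anything
 * else must match a pending upcall on vc_replies by its uniquifier; the
 * answer is copied into that message's buffer and the sleeping caller in
 * coda_call() is woken up.
 */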
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int tbuf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the rest
       of the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(tbuf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = tbuf[0];
    seq = tbuf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting-for) reply queue. */
    TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
	if (vmp->vm_unique == seq) break;
    }

    if (vmp == NULL) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

    /* Move the data into the response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy the opcode and uniquifier. */

    /* Get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize = tbuf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

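/*
 * vc_nb_ioctl: control operations on the device: resize the name cache,
 * gather or print name-cache statistics, and check the kernel/Venus
 * protocol version via CIOC_KERNEL_VERSION.
 */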
int
vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
	break;
    default:
	return(EINVAL);
	break;
    }
}

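/*
 * vc_nb_poll: report the device readable when a request is queued for
 * Venus; otherwise selrecord() the caller so it is notified when
 * coda_call() enqueues the next upcall.
 */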
int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!TAILQ_EMPTY(&vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(l, &(vcp->vc_selproc));

    return(0);
}

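/*
 * EVFILT_READ filter for kqueue: the knote fires when a request is queued
 * for Venus, reporting the size of the pending message in kn_data.
 */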
static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return (0);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Using tsleep with no
 * timeout avoids any longjmp.  But when we sleep "uninterruptibly" we
 * are not told if the sleep ends abnormally (e.g. kill -9).
 */

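/*
 * coda_call: post an upcall to Venus and wait for the answer.  The request
 * is queued on vc_requests, Venus is poked via selnotify(), and the caller
 * sleeps on vm_sleep until vc_nb_write() delivers the reply, the sleep is
 * interrupted, or Venus closes the device.
 */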
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append the msg to the request queue and poke Venus. */
	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a Venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check the flags anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal" and then go to sleep again.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
		break;
	    mutex_enter(&p->p_smutex);
	    if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigispending(l, SIGIO)) {
		    sigaddset(&l->l_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigispending(l, SIGALRM)) {
		    sigaddset(&l->l_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigpend.sp_set;	/* array assignment */
		    sigminusset(&l->l_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    mutex_exit(&p->p_smutex);
		    break;
#ifdef	notyet
		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
	    }
	    mutex_exit(&p->p_smutex);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	    /* The op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before Venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means we were interrupted
		   after the upcall started. */
		/* Interrupted after the start of the upcall; send Venus a
		   signal message. */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* enqueue the signal message and poke Venus again */
		TAILQ_INSERT_TAIL(&vcp->vc_requests, svmp, vm_chain);
		selnotify(&(vcp->vc_selproc), 0, 0);
	    }
	}

	else {	/* If Venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}