      1 /*	$NetBSD: coda_psdev.c,v 1.53.4.1 2015/04/06 15:18:05 skrll Exp $	*/
      2 
      3 /*
      4  *
      5  *             Coda: an Experimental Distributed File System
      6  *                              Release 3.1
      7  *
      8  *           Copyright (c) 1987-1998 Carnegie Mellon University
      9  *                          All Rights Reserved
     10  *
     11  * Permission  to  use, copy, modify and distribute this software and its
     12  * documentation is hereby granted,  provided  that  both  the  copyright
     13  * notice  and  this  permission  notice  appear  in  all  copies  of the
     14  * software, derivative works or  modified  versions,  and  any  portions
     15  * thereof, and that both notices appear in supporting documentation, and
     16  * that credit is given to Carnegie Mellon University  in  all  documents
     17  * and publicity pertaining to direct or indirect use of this code or its
     18  * derivatives.
     19  *
     20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
     21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
     22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
     23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
     24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
     25  * ANY DERIVATIVE WORK.
     26  *
     27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
     28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
     29  * Mellon the rights to redistribute these changes without encumbrance.
     30  *
     31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
     32  */
     33 
     34 /*
     35  * Mach Operating System
     36  * Copyright (c) 1989 Carnegie-Mellon University
     37  * All rights reserved.  The CMU software License Agreement specifies
     38  * the terms and conditions for use and redistribution.
     39  */
     40 
     41 /*
     42  * This code was written for the Coda file system at Carnegie Mellon
     43  * University.  Contributors include David Steere, James Kistler, and
     44  * M. Satyanarayanan.  */
     45 
     46 /* These routines define the pseudo device for communication between
     47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
     48  * but I moved them to make it easier to port the Minicache without
     49  * porting coda. -- DCS 10/12/94
     50  *
     51  * The following code depends on file-system CODA.
     52  */
     53 
     54 /* These routines are the device entry points for Venus. */
     55 
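/*
 * Illustrative sketch only (never compiled; note the "#if 0"): roughly
 * what the userland cache manager (Venus) does on the other end of this
 * device.  The device path "/dev/cfs0", the helper name and the trivial
 * error handling are assumptions made for the example, not something
 * this file defines.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <coda/coda.h>

static void
venus_loop(void)
{
	char buf[VC_MAXMSGSIZE];	/* VC_MAXMSGSIZE from <coda/coda.h> */
	union inputArgs *in = (union inputArgs *)buf;
	union outputArgs *out = (union outputArgs *)buf;
	int fd = open("/dev/cfs0", O_RDWR);	/* minor 0 of this device */

	if (fd == -1)
		return;

	for (;;) {
		/* vc_nb_read() hands us the next queued upcall. */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			continue;

		u_long opcode = in->ih.opcode;
		u_long unique = in->ih.unique;

		/* ...service the opcode here..., then reply; vc_nb_write()
		 * matches the reply to the sleeping caller by "unique". */
		out->oh.opcode = opcode;
		out->oh.unique = unique;
		out->oh.result = 0;
		(void)write(fd, buf, sizeof(struct coda_out_hdr));
	}
}
#endif
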
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.53.4.1 2015/04/06 15:18:05 skrll Exp $");
     58 
     59 extern int coda_nc_initialized;    /* Set if cache has been initialized */
     60 
     61 #include <sys/param.h>
     62 #include <sys/systm.h>
     63 #include <sys/kernel.h>
     64 #include <sys/malloc.h>
     65 #include <sys/proc.h>
     66 #include <sys/mount.h>
     67 #include <sys/file.h>
     68 #include <sys/ioctl.h>
     69 #include <sys/poll.h>
     70 #include <sys/select.h>
     71 #include <sys/conf.h>
     72 #include <sys/atomic.h>
     73 #include <sys/module.h>
     74 
     75 #include <miscfs/syncfs/syncfs.h>
     76 
     77 #include <coda/coda.h>
     78 #include <coda/cnode.h>
     79 #include <coda/coda_namecache.h>
     80 #include <coda/coda_io.h>
     81 
     82 #define CTL_C
     83 
     84 int coda_psdev_print_entry = 0;
     85 static
     86 int outstanding_upcalls = 0;
     87 int coda_call_sleep = PZERO - 1;
     88 #ifdef	CTL_C
     89 int coda_pcatch = PCATCH;
     90 #else
     91 #endif
     92 
     93 int coda_kernel_version = CODA_KERNEL_VERSION;
     94 
     95 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
     96 
     97 void vcodaattach(int n);
     98 
     99 dev_type_open(vc_nb_open);
    100 dev_type_close(vc_nb_close);
    101 dev_type_read(vc_nb_read);
    102 dev_type_write(vc_nb_write);
    103 dev_type_ioctl(vc_nb_ioctl);
    104 dev_type_poll(vc_nb_poll);
    105 dev_type_kqfilter(vc_nb_kqfilter);
    106 
    107 const struct cdevsw vcoda_cdevsw = {
    108 	.d_open = vc_nb_open,
    109 	.d_close = vc_nb_close,
    110 	.d_read = vc_nb_read,
    111 	.d_write = vc_nb_write,
    112 	.d_ioctl = vc_nb_ioctl,
    113 	.d_stop = nostop,
    114 	.d_tty = notty,
    115 	.d_poll = vc_nb_poll,
    116 	.d_mmap = nommap,
    117 	.d_kqfilter = vc_nb_kqfilter,
    118 	.d_discard = nodiscard,
    119 	.d_flag = D_OTHER,
    120 };
    121 
    122 struct vmsg {
    123     TAILQ_ENTRY(vmsg) vm_chain;
    124     void *	 vm_data;
    125     u_short	 vm_flags;
    126     u_short      vm_inSize;	/* Size is at most 5000 bytes */
    127     u_short	 vm_outSize;
    128     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    129     int		 vm_unique;
    130     void *	 vm_sleep;	/* Not used by Mach. */
    131 };
    132 
    133 struct coda_mntinfo coda_mnttbl[NVCODA];
    134 
    135 #define	VM_READ	    1
    136 #define	VM_WRITE    2
    137 #define	VM_INTR	    4
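
/*
 * Life cycle of a vmsg, as implemented below: coda_call() queues it on
 * vc_requests and sleeps on vm_sleep; vc_nb_read() copies it out to
 * Venus, sets VM_READ and moves it to vc_replies; vc_nb_write() copies
 * the reply back, sets VM_WRITE and wakes the sleeper.  A message that
 * has neither flag set when the sleeper wakes was never seen by Venus.
 */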
    138 
    139 /* vcodaattach: do nothing */
    140 void
    141 vcodaattach(int n)
    142 {
    143 }
    144 
    145 /*
    146  * These functions are written for NetBSD.
    147  */
    148 int
    149 vc_nb_open(dev_t dev, int flag, int mode,
    150     struct lwp *l)
    151 {
    152     struct vcomm *vcp;
    153 
    154     ENTRY;
    155 
    156     if (minor(dev) >= NVCODA)
    157 	return(ENXIO);
    158 
    159     if (!coda_nc_initialized)
    160 	coda_nc_init();
    161 
    162     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    163     if (VC_OPEN(vcp))
    164 	return(EBUSY);
    165 
    166     selinit(&vcp->vc_selproc);
    167     TAILQ_INIT(&vcp->vc_requests);
    168     TAILQ_INIT(&vcp->vc_replies);
    169     MARK_VC_OPEN(vcp);
    170 
    171     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    172     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
    173 
    174     return(0);
    175 }
    176 
    177 int
    178 vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
    179 {
    180     struct vcomm *vcp;
    181     struct vmsg *vmp;
    182     struct coda_mntinfo *mi;
    183     int                 err;
    184 
    185     ENTRY;
    186 
    187     if (minor(dev) >= NVCODA)
    188 	return(ENXIO);
    189 
    190     mi = &coda_mnttbl[minor(dev)];
    191     vcp = &(mi->mi_vcomm);
    192 
    193     if (!VC_OPEN(vcp))
    194 	panic("vcclose: not open");
    195 
    196     /* Prevent future operations on this vfs from succeeding by auto-
    197      * unmounting any vfs mounted via this device.  This frees the
    198      * user or sysadmin from having to remember where all the mount
    199      * points are located.  Put this before the WAKEUPs to avoid
    200      * queuing new messages between the WAKEUP and the unmount
    201      * (which can happen if we're unlucky). */
    202     if (!mi->mi_rootvp) {
    203 	/* just a simple open/close with no mount */
    204 	MARK_VC_CLOSED(vcp);
    205 	return 0;
    206     }
    207 
    208     /* Let unmount know this is for real */
    209     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    210     coda_unmounting(mi->mi_vfsp);
    211 
    212     /* Wakeup clients so they can return. */
    213     while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
    214 	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    215 
    216 	/* Free signal request messages and don't wake up, because
    217 	   no one is waiting. */
    218 	if (vmp->vm_opcode == CODA_SIGNAL) {
    219 	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    220 	    CODA_FREE(vmp, sizeof(struct vmsg));
    221 	    continue;
    222 	}
    223 	outstanding_upcalls++;
    224 	wakeup(&vmp->vm_sleep);
    225     }
    226 
    227     while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
    228 	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    229 
    230 	outstanding_upcalls++;
    231 	wakeup(&vmp->vm_sleep);
    232     }
    233 
    234     MARK_VC_CLOSED(vcp);
    235 
    236     if (outstanding_upcalls) {
    237 #ifdef	CODA_VERBOSE
    238 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    239     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    240 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    241 #else
    242     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    243 #endif
    244     }
    245 
    246     err = dounmount(mi->mi_vfsp, flag, l);
    247     if (err)
    248 	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
    249 	           err, (unsigned long long)minor(dev)));
    250     seldestroy(&vcp->vc_selproc);
    251     return 0;
    252 }
    253 
    254 int
    255 vc_nb_read(dev_t dev, struct uio *uiop, int flag)
    256 {
    257     struct vcomm *	vcp;
    258     struct vmsg *vmp;
    259     int error = 0;
    260 
    261     ENTRY;
    262 
    263     if (minor(dev) >= NVCODA)
    264 	return(ENXIO);
    265 
    266     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    267 
    268     /* Get message at head of request queue. */
    269     vmp = TAILQ_FIRST(&vcp->vc_requests);
    270     if (vmp == NULL)
    271 	return(0);	/* Nothing to read */
    272 
    273     /* Move the input args into userspace */
    274     uiop->uio_rw = UIO_READ;
    275     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    276     if (error) {
    277 	myprintf(("vcread: error (%d) on uiomove\n", error));
    278 	error = EINVAL;
    279     }
    280 
    281     TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    282 
    283     /* If request was a signal, free up the message and don't
    284        enqueue it in the reply queue. */
    285     if (vmp->vm_opcode == CODA_SIGNAL) {
    286 	if (codadebug)
    287 	    myprintf(("vcread: signal msg (%d, %d)\n",
    288 		      vmp->vm_opcode, vmp->vm_unique));
    289 	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    290 	CODA_FREE(vmp, sizeof(struct vmsg));
    291 	return(error);
    292     }
    293 
    294     vmp->vm_flags |= VM_READ;
    295     TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);
    296 
    297     return(error);
    298 }
    299 
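/*
 * Wire format consumed by vc_nb_write() below: the first two ints of
 * every write are the opcode and the unique sequence number.  For a
 * downcall the remainder is the body of a union outputArgs; for a reply
 * to an upcall the remainder is copied into the waiting vmsg's buffer
 * just past the opcode/unique fields of its coda_out_hdr.
 */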
    300 int
    301 vc_nb_write(dev_t dev, struct uio *uiop, int flag)
    302 {
    303     struct vcomm *	vcp;
    304     struct vmsg *vmp;
    305     struct coda_out_hdr *out;
    306     u_long seq;
    307     u_long opcode;
    308     int tbuf[2];
    309     int error = 0;
    310 
    311     ENTRY;
    312 
    313     if (minor(dev) >= NVCODA)
    314 	return(ENXIO);
    315 
    316     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    317 
    318     /* Peek at the opcode and unique fields without transferring the data. */
    319     uiop->uio_rw = UIO_WRITE;
    320     error = uiomove(tbuf, sizeof(int) * 2, uiop);
    321     if (error) {
    322 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
    323 	return(EINVAL);
    324     }
    325 
    326     opcode = tbuf[0];
    327     seq = tbuf[1];
    328 
    329     if (codadebug)
    330 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
    331 
    332     if (DOWNCALL(opcode)) {
    333 	union outputArgs pbuf;
    334 
    335 	/* get the rest of the data. */
    336 	uiop->uio_rw = UIO_WRITE;
    337 	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
    338 	if (error) {
    339 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
    340 		      error, opcode, seq));
    341 	    return(EINVAL);
    342 	    }
    343 
    344 	return handleDownCall(opcode, &pbuf);
    345     }
    346 
    347     /* Look for the message on the (waiting for) reply queue. */
    348     TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
    349 	if (vmp->vm_unique == seq) break;
    350     }
    351 
    352     if (vmp == NULL) {
    353 	if (codadebug)
    354 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
    355 
    356 	return(ESRCH);
    357     }
    358 
    359     /* Remove the message from the reply queue */
    360     TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    361 
    362     /* move data into response buffer. */
    363     out = (struct coda_out_hdr *)vmp->vm_data;
    364     /* Don't need to copy opcode and uniquifier. */
    365 
    366     /* get the rest of the data. */
    367     if (vmp->vm_outSize < uiop->uio_resid) {
    368 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
    369 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
    370 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
    371 	return(EINVAL);
    372     }
    373 
    374     tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    375     uiop->uio_rw = UIO_WRITE;
    376     error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    377     if (error) {
    378 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
    379 		  error, opcode, seq));
    380 	return(EINVAL);
    381     }
    382 
    383     /* I don't think these are used, but just in case. */
    384     /* XXX - aren't these two already correct? -bnoble */
    385     out->opcode = opcode;
    386     out->unique = seq;
    387     vmp->vm_outSize	= tbuf[0];	/* Amount of data transferred? */
    388     vmp->vm_flags |= VM_WRITE;
    389     wakeup(&vmp->vm_sleep);
    390 
    391     return(0);
    392 }
    393 
    394 int
    395 vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    396     struct lwp *l)
    397 {
    398     ENTRY;
    399 
    400     switch(cmd) {
    401     case CODARESIZE: {
    402 	struct coda_resize *data = (struct coda_resize *)addr;
    403 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
    404 	break;
    405     }
    406     case CODASTATS:
    407 	if (coda_nc_use) {
    408 	    coda_nc_gather_stats();
    409 	    return(0);
    410 	} else {
    411 	    return(ENODEV);
    412 	}
    413 	break;
    414     case CODAPRINT:
    415 	if (coda_nc_use) {
    416 	    print_coda_nc();
    417 	    return(0);
    418 	} else {
    419 	    return(ENODEV);
    420 	}
    421 	break;
    422     case CIOC_KERNEL_VERSION:
    423 	switch (*(u_int *)addr) {
    424 	case 0:
    425 		*(u_int *)addr = coda_kernel_version;
    426 		return 0;
    427 		break;
    428 	case 1:
    429 	case 2:
    430 		if (coda_kernel_version != *(u_int *)addr)
    431 		    return ENOENT;
    432 		else
    433 		    return 0;
    434 	default:
    435 		return ENOENT;
    436 	}
    437     	break;
    438     default :
    439 	return(EINVAL);
    440 	break;
    441     }
    442 }
    443 
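/*
 * Illustrative sketch only (never compiled): how userland negotiates
 * the interface version with CIOC_KERNEL_VERSION.  Passing 0 asks the
 * kernel to overwrite the argument with its version; passing 1 or 2
 * asks it to confirm that exact version (ENOENT otherwise).  The helper
 * name and the error handling are assumptions made for the example.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <err.h>
#include <coda/coda.h>

static void
check_kernel_version(int fd)	/* fd: open descriptor for the vcoda device */
{
	u_int vers = 0;		/* 0 == "report your version" */

	if (ioctl(fd, CIOC_KERNEL_VERSION, &vers) == -1)
		err(1, "CIOC_KERNEL_VERSION");
	if (vers != CODA_KERNEL_VERSION)
		errx(1, "kernel interface version %u, expected %u",
		    vers, (u_int)CODA_KERNEL_VERSION);
}
#endif
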
    444 int
    445 vc_nb_poll(dev_t dev, int events, struct lwp *l)
    446 {
    447     struct vcomm *vcp;
    448     int event_msk = 0;
    449 
    450     ENTRY;
    451 
    452     if (minor(dev) >= NVCODA)
    453 	return(ENXIO);
    454 
    455     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    456 
    457     event_msk = events & (POLLIN|POLLRDNORM);
    458     if (!event_msk)
    459 	return(0);
    460 
    461     if (!TAILQ_EMPTY(&vcp->vc_requests))
    462 	return(events & (POLLIN|POLLRDNORM));
    463 
    464     selrecord(l, &(vcp->vc_selproc));
    465 
    466     return(0);
    467 }
    468 
    469 static void
    470 filt_vc_nb_detach(struct knote *kn)
    471 {
    472 	struct vcomm *vcp = kn->kn_hook;
    473 
    474 	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
    475 }
    476 
    477 static int
    478 filt_vc_nb_read(struct knote *kn, long hint)
    479 {
    480 	struct vcomm *vcp = kn->kn_hook;
    481 	struct vmsg *vmp;
    482 
    483 	vmp = TAILQ_FIRST(&vcp->vc_requests);
    484 	if (vmp == NULL)
    485 		return (0);
    486 
    487 	kn->kn_data = vmp->vm_inSize;
    488 	return (1);
    489 }
    490 
    491 static const struct filterops vc_nb_read_filtops =
    492 	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
    493 
    494 int
    495 vc_nb_kqfilter(dev_t dev, struct knote *kn)
    496 {
    497 	struct vcomm *vcp;
    498 	struct klist *klist;
    499 
    500 	ENTRY;
    501 
    502 	if (minor(dev) >= NVCODA)
    503 		return(ENXIO);
    504 
    505 	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    506 
    507 	switch (kn->kn_filter) {
    508 	case EVFILT_READ:
    509 		klist = &vcp->vc_selproc.sel_klist;
    510 		kn->kn_fop = &vc_nb_read_filtops;
    511 		break;
    512 
    513 	default:
    514 		return (EINVAL);
    515 	}
    516 
    517 	kn->kn_hook = vcp;
    518 
    519 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
    520 
    521 	return (0);
    522 }
    523 
    524 /*
    525  * Statistics
    526  */
    527 struct coda_clstat coda_clstat;
    528 
    529 /*
    530  * Key question: whether to sleep interruptibly or uninterruptibly when
    531  * waiting for Venus.  The former seems better (because you can ^C a
    532  * job), but then GNU-EMACS completion breaks. Use tsleep with no
    533  * timeout, and no longjmp happens. But, when sleeping
    534  * "uninterruptibly", we don't get told if it returns abnormally
    535  * (e.g. kill -9).
    536  */
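/*
 * Concretely, the wait in coda_call() below (with CTL_C defined, as it
 * is above) is tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch),
 * "coda_call", hz*2): it returns 0 on wakeup(), EWOULDBLOCK when the
 * two-second timeout expires, and, because PCATCH is set, EINTR or
 * ERESTART when a signal is pending; the retry loop has to sort out
 * which of these happened.
 */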
    537 
    538 int
    539 coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
    540 	void *buffer)
    541 {
    542 	struct vcomm *vcp;
    543 	struct vmsg *vmp;
    544 	int error;
    545 #ifdef	CTL_C
    546 	struct lwp *l = curlwp;
    547 	struct proc *p = l->l_proc;
    548 	sigset_t psig_omask;
    549 	int i;
    550 	psig_omask = l->l_sigmask;	/* XXXSA */
    551 #endif
    552 	if (mntinfo == NULL) {
    553 	    /* Unlikely, but could be a race condition with a dying warden */
    554 	    return ENODEV;
    555 	}
    556 
    557 	vcp = &(mntinfo->mi_vcomm);
    558 
    559 	coda_clstat.ncalls++;
    560 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
    561 
    562 	if (!VC_OPEN(vcp))
    563 	    return(ENODEV);
    564 
    565 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
    566 	/* Format the request message. */
    567 	vmp->vm_data = buffer;
    568 	vmp->vm_flags = 0;
    569 	vmp->vm_inSize = inSize;
    570 	vmp->vm_outSize
    571 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
    572 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    573 	vmp->vm_unique = ++vcp->vc_seq;
    574 	if (codadebug)
    575 	    myprintf(("Doing a call for %d.%d\n",
    576 		      vmp->vm_opcode, vmp->vm_unique));
    577 
    578 	/* Fill in the common input args. */
    579 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
    580 
    581 	/* Append msg to request queue and poke Venus. */
    582 	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
    583 	selnotify(&(vcp->vc_selproc), 0, 0);
    584 
    585 	/* We can be interrupted while we wait for Venus to process
    586 	 * our request.  If the interrupt occurs before Venus has read
    587 	 * the request, we dequeue and return. If it occurs after the
    588 	 * read but before the reply, we dequeue, send a signal
    589 	 * message, and return. If it occurs after the reply we ignore
    590 	 * it. In no case do we want to restart the syscall.  If it
    591 	 * was interrupted by a venus shutdown (vcclose), return
    592 	 * ENODEV.  */
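	/* In terms of the vmsg state checked after the sleep:
	 *   VM_WRITE set                 -> reply arrived, use it;
	 *   VM_READ clear                -> Venus never read the request:
	 *                                   dequeue it, return EINTR;
	 *   VM_READ set, VM_WRITE clear  -> Venus is working on it: queue a
	 *                                   CODA_SIGNAL message, return EINTR;
	 *   !VC_OPEN(vcp)                -> Venus exited, return ENODEV.  */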
    593 
    594 	/* Ignore the return value; we have to check anyway */
    595 #ifdef	CTL_C
    596 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
    597 	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
    598 	   as SA_RESTART.  This means that we should exit the sleep, handle
    599 	   the "signal" and then go to sleep again.  Mostly this is done by
    600 	   letting the syscall complete and be restarted.  We are not
    601 	   idempotent and cannot do this.  A better solution is necessary.
    602 	 */
    603 	i = 0;
    604 	do {
    605 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
    606 	    if (error == 0)
    607 	    	break;
    608 	    mutex_enter(p->p_lock);
    609 	    if (error == EWOULDBLOCK) {
    610 #ifdef	CODA_VERBOSE
    611 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
    612 #endif
    613     	    } else if (sigispending(l, SIGIO)) {
    614 		    sigaddset(&l->l_sigmask, SIGIO);
    615 #ifdef	CODA_VERBOSE
    616 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
    617 #endif
    618     	    } else if (sigispending(l, SIGALRM)) {
    619 		    sigaddset(&l->l_sigmask, SIGALRM);
    620 #ifdef	CODA_VERBOSE
    621 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
    622 #endif
    623 	    } else {
    624 		    sigset_t tmp;
    625 		    tmp = p->p_sigpend.sp_set;	/* array assignment */
    626 		    sigminusset(&l->l_sigmask, &tmp);
    627 
    628 #ifdef	CODA_VERBOSE
    629 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
    630 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
    631 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    632 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    633 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    634 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
    635 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
    636 #endif
    637 		    mutex_exit(p->p_lock);
    638 		    break;
    639 #ifdef	notyet
    640 		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
    641 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
    642 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    643 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    644 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    645 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
    646 #endif
    647 	    }
    648 	    mutex_exit(p->p_lock);
    649 	} while (error && i++ < 128 && VC_OPEN(vcp));
    650 	l->l_sigmask = psig_omask;	/* XXXSA */
    651 #else
    652 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
    653 #endif
    654 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
    655  	/* Op went through, interrupt or not... */
    656 	    if (vmp->vm_flags & VM_WRITE) {
    657 		error = 0;
    658 		*outSize = vmp->vm_outSize;
    659 	    }
    660 
    661 	    else if (!(vmp->vm_flags & VM_READ)) {
    662 		/* Interrupted before venus read it. */
    663 #ifdef	CODA_VERBOSE
    664 		if (1)
    665 #else
    666 		if (codadebug)
    667 #endif
    668 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
    669 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    670 
    671 		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    672 		error = EINTR;
    673 	    }
    674 
    675 	    else {
    676 		/* VM_READ set but VM_WRITE clear: we were interrupted after
    677 		   Venus started the upcall but before it replied, so send
    678 		   Venus a signal message. */
    679 		struct coda_in_hdr *dog;
    680 		struct vmsg *svmp;
    681 
    682 #ifdef	CODA_VERBOSE
    683 		if (1)
    684 #else
    685 		if (codadebug)
    686 #endif
    687 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
    688 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    689 
    690 		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    691 		error = EINTR;
    692 
    693 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
    694 
    695 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
    696 		dog = (struct coda_in_hdr *)svmp->vm_data;
    697 
    698 		svmp->vm_flags = 0;
    699 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
    700 		dog->unique = svmp->vm_unique = vmp->vm_unique;
    701 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
    702 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
    703 
    704 		if (codadebug)
    705 		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
    706 			   svmp->vm_opcode, svmp->vm_unique));
    707 
    708 		/* insert at head of queue */
    709 		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
    710 		selnotify(&(vcp->vc_selproc), 0, 0);
    711 	    }
    712 	}
    713 
    714 	else {	/* If venus died (!VC_OPEN(vcp)) */
    715 	    if (codadebug)
    716 		myprintf(("vcclose woke op %d.%d flags %d\n",
    717 		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    718 
    719 		error = ENODEV;
    720 	}
    721 
    722 	CODA_FREE(vmp, sizeof(struct vmsg));
    723 
    724 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
    725 		wakeup(&outstanding_upcalls);
    726 
    727 	if (!error)
    728 		error = ((struct coda_out_hdr *)buffer)->result;
    729 	return(error);
    730 }
    731 
    732 MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);
    733 
    734 static int
    735 vcoda_modcmd(modcmd_t cmd, void *arg)
    736 {
    737 	int error = 0;
    738 
    739 	switch (cmd) {
    740 	case MODULE_CMD_INIT:
    741 #ifdef _MODULE
    742 	{
    743 		int cmajor, dmajor;
    744 		vcodaattach(NVCODA);
    745 
    746 		dmajor = cmajor = -1;
    747 		return devsw_attach("vcoda", NULL, &dmajor,
    748 		    &vcoda_cdevsw, &cmajor);
    749 	}
    750 #endif
    751 		break;
    752 
    753 	case MODULE_CMD_FINI:
    754 #ifdef _MODULE
    755 		{
    756 			for  (size_t i = 0; i < NVCODA; i++) {
    757 				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
    758 				if (VC_OPEN(vcp))
    759 					return EBUSY;
    760 			}
    761 			return devsw_detach(NULL, &vcoda_cdevsw);
    762 		}
    763 #endif
    764 		break;
    765 
    766 	case MODULE_CMD_STAT:
    767 		return ENOTTY;
    768 
    769 	default:
    770 		return ENOTTY;
    771 	}
    772 	return error;
    773 }
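
/*
 * When built as a module, this is loaded with modload(8) (e.g.
 * "modload vcoda"), which runs MODULE_CMD_INIT above and attaches the
 * character device; modunload(8) refuses to detach it while any minor
 * is still open (the EBUSY check in MODULE_CMD_FINI).
 */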
    774