      1 /*	$NetBSD: coda_psdev.c,v 1.47.14.1 2012/05/23 10:07:52 yamt Exp $	*/
      2 
      3 /*
      4  *
      5  *             Coda: an Experimental Distributed File System
      6  *                              Release 3.1
      7  *
      8  *           Copyright (c) 1987-1998 Carnegie Mellon University
      9  *                          All Rights Reserved
     10  *
     11  * Permission  to  use, copy, modify and distribute this software and its
     12  * documentation is hereby granted,  provided  that  both  the  copyright
     13  * notice  and  this  permission  notice  appear  in  all  copies  of the
     14  * software, derivative works or  modified  versions,  and  any  portions
     15  * thereof, and that both notices appear in supporting documentation, and
     16  * that credit is given to Carnegie Mellon University  in  all  documents
     17  * and publicity pertaining to direct or indirect use of this code or its
     18  * derivatives.
     19  *
     20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
     21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
     22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
     23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
     24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
     25  * ANY DERIVATIVE WORK.
     26  *
     27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
     28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
     29  * Mellon the rights to redistribute these changes without encumbrance.
     30  *
     31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
     32  */
     33 
     34 /*
     35  * Mach Operating System
     36  * Copyright (c) 1989 Carnegie-Mellon University
     37  * All rights reserved.  The CMU software License Agreement specifies
     38  * the terms and conditions for use and redistribution.
     39  */
     40 
     41 /*
     42  * This code was written for the Coda file system at Carnegie Mellon
      43  * University.  Contributors include David Steere, James Kistler, and
     44  * M. Satyanarayanan.  */
     45 
     46 /* These routines define the pseudo device for communication between
     47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
     48  * but I moved them to make it easier to port the Minicache without
     49  * porting coda. -- DCS 10/12/94
     50  *
      51  * The following code depends on the CODA file system.
     52  */
     53 
     54 /* These routines are the device entry points for Venus. */
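         /*
          * Rough sketch of how a user-space Venus is expected to drive this
          * device, based on the handlers below (the device node name, e.g.
          * /dev/cfs0, depends on how the nodes were created):
          *
          *	fd = open("/dev/cfs0", O_RDWR);		 vc_nb_open
          *	loop:
          *		wait for POLLIN on fd		 vc_nb_poll / kqfilter
          *		read(fd, req, sizeof req)	 dequeue the next upcall
          *		service the request
          *		write(fd, reply, replylen)	 complete it by unique id
          */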
     55 
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.47.14.1 2012/05/23 10:07:52 yamt Exp $");
     58 
     59 extern int coda_nc_initialized;    /* Set if cache has been initialized */
     60 
     61 #ifndef _KERNEL_OPT
     62 #define	NVCODA 4
     63 #else
     64 #include <vcoda.h>
     65 #endif
     66 
     67 #include <sys/param.h>
     68 #include <sys/systm.h>
     69 #include <sys/kernel.h>
     70 #include <sys/malloc.h>
     71 #include <sys/proc.h>
     72 #include <sys/mount.h>
     73 #include <sys/file.h>
     74 #include <sys/ioctl.h>
     75 #include <sys/poll.h>
     76 #include <sys/select.h>
     77 #include <sys/conf.h>
     78 #include <sys/atomic.h>
     79 #include <sys/module.h>
     80 
     81 #include <miscfs/syncfs/syncfs.h>
     82 
     83 #include <coda/coda.h>
     84 #include <coda/cnode.h>
     85 #include <coda/coda_namecache.h>
     86 #include <coda/coda_io.h>
     87 
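         /*
          * CTL_C makes the sleep in coda_call() interruptible (PCATCH), so a
          * wedged Venus can be ^C'd; see the signal-handling loop there.
          */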
     88 #define CTL_C
     89 
     90 int coda_psdev_print_entry = 0;
     91 static
     92 int outstanding_upcalls = 0;
     93 int coda_call_sleep = PZERO - 1;
     94 #ifdef	CTL_C
     95 int coda_pcatch = PCATCH;
     96 #else
     97 #endif
     98 
     99 int coda_kernel_version = CODA_KERNEL_VERSION;
    100 
    101 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
    102 
    103 void vcodaattach(int n);
    104 
    105 dev_type_open(vc_nb_open);
    106 dev_type_close(vc_nb_close);
    107 dev_type_read(vc_nb_read);
    108 dev_type_write(vc_nb_write);
    109 dev_type_ioctl(vc_nb_ioctl);
    110 dev_type_poll(vc_nb_poll);
    111 dev_type_kqfilter(vc_nb_kqfilter);
    112 
    113 const struct cdevsw vcoda_cdevsw = {
    114 	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
    115 	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter, D_OTHER,
    116 };
    117 
    118 struct vmsg {
    119     TAILQ_ENTRY(vmsg) vm_chain;
    120     void *	 vm_data;
    121     u_short	 vm_flags;
    122     u_short      vm_inSize;	/* Size is at most 5000 bytes */
    123     u_short	 vm_outSize;
    124     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    125     int		 vm_unique;
    126     void *	 vm_sleep;	/* Not used by Mach. */
    127 };
    128 
    129 struct coda_mntinfo coda_mnttbl[NVCODA];
    130 
    131 #define	VM_READ	    1
    132 #define	VM_WRITE    2
    133 #define	VM_INTR	    4
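         /*
          * Message life cycle: coda_call() allocates a vmsg, queues it on
          * vc_requests and wakes Venus.  vc_nb_read() copies it out to Venus
          * and moves it to vc_replies, setting VM_READ; vc_nb_write() copies
          * the reply back, sets VM_WRITE and wakes the sleeper in coda_call().
          */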
    134 
    135 /* vcodaattach: do nothing */
    136 void
    137 vcodaattach(int n)
    138 {
    139 }
    140 
    141 /*
    142  * These functions are written for NetBSD.
    143  */
    144 int
    145 vc_nb_open(dev_t dev, int flag, int mode,
    146     struct lwp *l)
    147 {
    148     struct vcomm *vcp;
    149 
    150     ENTRY;
    151 
    152     if (minor(dev) >= NVCODA)
    153 	return(ENXIO);
    154 
    155     if (!coda_nc_initialized)
    156 	coda_nc_init();
    157 
    158     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    159     if (VC_OPEN(vcp))
    160 	return(EBUSY);
    161 
    162     selinit(&vcp->vc_selproc);
    163     TAILQ_INIT(&vcp->vc_requests);
    164     TAILQ_INIT(&vcp->vc_replies);
    165     MARK_VC_OPEN(vcp);
    166 
    167     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    168     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
    169 
    170     return(0);
    171 }
    172 
    173 int
    174 vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
    175 {
    176     struct vcomm *vcp;
    177     struct vmsg *vmp;
    178     struct coda_mntinfo *mi;
    179     int                 err;
    180 
    181     ENTRY;
    182 
    183     if (minor(dev) >= NVCODA)
    184 	return(ENXIO);
    185 
    186     mi = &coda_mnttbl[minor(dev)];
    187     vcp = &(mi->mi_vcomm);
    188 
    189     if (!VC_OPEN(vcp))
    190 	panic("vcclose: not open");
    191 
     192     /* Prevent future operations on this vfs from succeeding by auto-
     193      * unmounting any vfs mounted via this device.  This frees the user
     194      * or sysadmin from having to remember where all mount points are.
     195      * Do this before the wakeups below to avoid queuing new messages
     196      * between the wakeup and the unmount (which can happen if we're unlucky).
     197      */
    198     if (!mi->mi_rootvp) {
     199 	/* Just a simple open/close with no mount. */
    200 	MARK_VC_CLOSED(vcp);
    201 	return 0;
    202     }
    203 
    204     /* Let unmount know this is for real */
    205     atomic_inc_uint(&mi->mi_vfsp->mnt_refcnt);
    206     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    207     coda_unmounting(mi->mi_vfsp);
    208 
    209     /* Wakeup clients so they can return. */
    210     while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
    211 	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    212 
     213 	/* Free signal request messages and don't wake up, because
     214 	   no one is waiting. */
    215 	if (vmp->vm_opcode == CODA_SIGNAL) {
    216 	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    217 	    CODA_FREE(vmp, sizeof(struct vmsg));
    218 	    continue;
    219 	}
    220 	outstanding_upcalls++;
    221 	wakeup(&vmp->vm_sleep);
    222     }
    223 
    224     while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
    225 	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    226 
    227 	outstanding_upcalls++;
    228 	wakeup(&vmp->vm_sleep);
    229     }
    230 
    231     MARK_VC_CLOSED(vcp);
    232 
    233     if (outstanding_upcalls) {
    234 #ifdef	CODA_VERBOSE
    235 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    236     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    237 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    238 #else
    239     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    240 #endif
    241     }
    242 
    243     err = dounmount(mi->mi_vfsp, flag, l);
    244     if (err)
    245 	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
    246 	           err, (unsigned long long)minor(dev)));
    247     seldestroy(&vcp->vc_selproc);
    248     return 0;
    249 }
    250 
    251 int
    252 vc_nb_read(dev_t dev, struct uio *uiop, int flag)
    253 {
    254     struct vcomm *	vcp;
    255     struct vmsg *vmp;
    256     int error = 0;
    257 
    258     ENTRY;
    259 
    260     if (minor(dev) >= NVCODA)
    261 	return(ENXIO);
    262 
    263     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    264 
    265     /* Get message at head of request queue. */
    266     vmp = TAILQ_FIRST(&vcp->vc_requests);
    267     if (vmp == NULL)
    268 	return(0);	/* Nothing to read */
    269 
    270     /* Move the input args into userspace */
    271     uiop->uio_rw = UIO_READ;
    272     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    273     if (error) {
    274 	myprintf(("vcread: error (%d) on uiomove\n", error));
    275 	error = EINVAL;
    276     }
    277 
    278     TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    279 
    280     /* If request was a signal, free up the message and don't
    281        enqueue it in the reply queue. */
    282     if (vmp->vm_opcode == CODA_SIGNAL) {
    283 	if (codadebug)
    284 	    myprintf(("vcread: signal msg (%d, %d)\n",
    285 		      vmp->vm_opcode, vmp->vm_unique));
    286 	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    287 	CODA_FREE(vmp, sizeof(struct vmsg));
    288 	return(error);
    289     }
    290 
    291     vmp->vm_flags |= VM_READ;
    292     TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);
    293 
    294     return(error);
    295 }
    296 
    297 int
    298 vc_nb_write(dev_t dev, struct uio *uiop, int flag)
    299 {
    300     struct vcomm *	vcp;
    301     struct vmsg *vmp;
    302     struct coda_out_hdr *out;
    303     u_long seq;
    304     u_long opcode;
    305     int tbuf[2];
    306     int error = 0;
    307 
    308     ENTRY;
    309 
    310     if (minor(dev) >= NVCODA)
    311 	return(ENXIO);
    312 
    313     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    314 
     315     /* Peek at the opcode and unique id without transferring the data. */
    316     uiop->uio_rw = UIO_WRITE;
    317     error = uiomove(tbuf, sizeof(int) * 2, uiop);
    318     if (error) {
    319 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
    320 	return(EINVAL);
    321     }
    322 
    323     opcode = tbuf[0];
    324     seq = tbuf[1];
    325 
    326     if (codadebug)
    327 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
    328 
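             /*
              * Downcalls (cache invalidations and the like) have no matching
              * request on vc_replies; hand them straight to handleDownCall().
              */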
    329     if (DOWNCALL(opcode)) {
    330 	union outputArgs pbuf;
    331 
    332 	/* get the rest of the data. */
    333 	uiop->uio_rw = UIO_WRITE;
    334 	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
    335 	if (error) {
    336 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
    337 		      error, opcode, seq));
    338 	    return(EINVAL);
    339 	    }
    340 
    341 	return handleDownCall(opcode, &pbuf);
    342     }
    343 
    344     /* Look for the message on the (waiting for) reply queue. */
    345     TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
    346 	if (vmp->vm_unique == seq) break;
    347     }
    348 
    349     if (vmp == NULL) {
    350 	if (codadebug)
    351 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
    352 
    353 	return(ESRCH);
    354     }
    355 
    356     /* Remove the message from the reply queue */
    357     TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    358 
    359     /* move data into response buffer. */
    360     out = (struct coda_out_hdr *)vmp->vm_data;
    361     /* Don't need to copy opcode and uniquifier. */
    362 
    363     /* get the rest of the data. */
    364     if (vmp->vm_outSize < uiop->uio_resid) {
    365 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
    366 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
    367 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
    368 	return(EINVAL);
    369     }
    370 
    371     tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    372     uiop->uio_rw = UIO_WRITE;
    373     error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    374     if (error) {
    375 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
    376 		  error, opcode, seq));
    377 	return(EINVAL);
    378     }
    379 
    380     /* I don't think these are used, but just in case. */
    381     /* XXX - aren't these two already correct? -bnoble */
    382     out->opcode = opcode;
    383     out->unique = seq;
     384     vmp->vm_outSize	= tbuf[0];	/* Size of the reply body Venus wrote (uio_resid saved above). */
    385     vmp->vm_flags |= VM_WRITE;
    386     wakeup(&vmp->vm_sleep);
    387 
    388     return(0);
    389 }
    390 
    391 int
    392 vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    393     struct lwp *l)
    394 {
    395     ENTRY;
    396 
    397     switch(cmd) {
    398     case CODARESIZE: {
    399 	struct coda_resize *data = (struct coda_resize *)addr;
    400 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
    401 	break;
    402     }
    403     case CODASTATS:
    404 	if (coda_nc_use) {
    405 	    coda_nc_gather_stats();
    406 	    return(0);
    407 	} else {
    408 	    return(ENODEV);
    409 	}
    410 	break;
    411     case CODAPRINT:
    412 	if (coda_nc_use) {
    413 	    print_coda_nc();
    414 	    return(0);
    415 	} else {
    416 	    return(ENODEV);
    417 	}
    418 	break;
    419     case CIOC_KERNEL_VERSION:
    420 	switch (*(u_int *)addr) {
    421 	case 0:
    422 		*(u_int *)addr = coda_kernel_version;
    423 		return 0;
    424 		break;
    425 	case 1:
    426 	case 2:
    427 		if (coda_kernel_version != *(u_int *)addr)
    428 		    return ENOENT;
    429 		else
    430 		    return 0;
    431 	default:
    432 		return ENOENT;
    433 	}
    434     	break;
     435     default:
    436 	return(EINVAL);
    437 	break;
    438     }
    439 }
    440 
    441 int
    442 vc_nb_poll(dev_t dev, int events, struct lwp *l)
    443 {
    444     struct vcomm *vcp;
    445     int event_msk = 0;
    446 
    447     ENTRY;
    448 
    449     if (minor(dev) >= NVCODA)
    450 	return(ENXIO);
    451 
    452     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    453 
    454     event_msk = events & (POLLIN|POLLRDNORM);
    455     if (!event_msk)
    456 	return(0);
    457 
    458     if (!TAILQ_EMPTY(&vcp->vc_requests))
    459 	return(events & (POLLIN|POLLRDNORM));
    460 
    461     selrecord(l, &(vcp->vc_selproc));
    462 
    463     return(0);
    464 }
    465 
    466 static void
    467 filt_vc_nb_detach(struct knote *kn)
    468 {
    469 	struct vcomm *vcp = kn->kn_hook;
    470 
    471 	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
    472 }
    473 
    474 static int
    475 filt_vc_nb_read(struct knote *kn, long hint)
    476 {
    477 	struct vcomm *vcp = kn->kn_hook;
    478 	struct vmsg *vmp;
    479 
    480 	vmp = TAILQ_FIRST(&vcp->vc_requests);
    481 	if (vmp == NULL)
    482 		return (0);
    483 
    484 	kn->kn_data = vmp->vm_inSize;
    485 	return (1);
    486 }
    487 
    488 static const struct filterops vc_nb_read_filtops =
    489 	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
    490 
    491 int
    492 vc_nb_kqfilter(dev_t dev, struct knote *kn)
    493 {
    494 	struct vcomm *vcp;
    495 	struct klist *klist;
    496 
    497 	ENTRY;
    498 
    499 	if (minor(dev) >= NVCODA)
    500 		return(ENXIO);
    501 
    502 	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    503 
    504 	switch (kn->kn_filter) {
    505 	case EVFILT_READ:
    506 		klist = &vcp->vc_selproc.sel_klist;
    507 		kn->kn_fop = &vc_nb_read_filtops;
    508 		break;
    509 
    510 	default:
    511 		return (EINVAL);
    512 	}
    513 
    514 	kn->kn_hook = vcp;
    515 
    516 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
    517 
    518 	return (0);
    519 }
    520 
    521 /*
    522  * Statistics
    523  */
    524 struct coda_clstat coda_clstat;
    525 
    526 /*
     527  * Key question: whether to sleep interruptibly or uninterruptibly when
     528  * waiting for Venus.  The former seems better (because you can ^C a
     529  * job), but then GNU-EMACS completion breaks.  With tsleep and no
     530  * timeout, no longjmp happens.  But, when sleeping
    531  * "uninterruptibly", we don't get told if it returns abnormally
    532  * (e.g. kill -9).
    533  */
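         /*
          * coda_call() is the upcall path used by the rest of the Coda kernel
          * code: it queues the request, pokes Venus via selnotify(), sleeps on
          * vm_sleep, and then uses the VM_READ/VM_WRITE flags to tell a
          * completed reply from an interrupted request.
          */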
    534 
    535 int
    536 coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
    537 	void *buffer)
    538 {
    539 	struct vcomm *vcp;
    540 	struct vmsg *vmp;
    541 	int error;
    542 #ifdef	CTL_C
    543 	struct lwp *l = curlwp;
    544 	struct proc *p = l->l_proc;
    545 	sigset_t psig_omask;
    546 	int i;
    547 	psig_omask = l->l_sigmask;	/* XXXSA */
    548 #endif
    549 	if (mntinfo == NULL) {
    550 	    /* Unlikely, but could be a race condition with a dying warden */
    551 	    return ENODEV;
    552 	}
    553 
    554 	vcp = &(mntinfo->mi_vcomm);
    555 
    556 	coda_clstat.ncalls++;
    557 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
    558 
    559 	if (!VC_OPEN(vcp))
    560 	    return(ENODEV);
    561 
    562 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
    563 	/* Format the request message. */
    564 	vmp->vm_data = buffer;
    565 	vmp->vm_flags = 0;
    566 	vmp->vm_inSize = inSize;
    567 	vmp->vm_outSize
    568 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
    569 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    570 	vmp->vm_unique = ++vcp->vc_seq;
    571 	if (codadebug)
    572 	    myprintf(("Doing a call for %d.%d\n",
    573 		      vmp->vm_opcode, vmp->vm_unique));
    574 
    575 	/* Fill in the common input args. */
    576 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
    577 
    578 	/* Append msg to request queue and poke Venus. */
    579 	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
    580 	selnotify(&(vcp->vc_selproc), 0, 0);
    581 
    582 	/* We can be interrupted while we wait for Venus to process
    583 	 * our request.  If the interrupt occurs before Venus has read
    584 	 * the request, we dequeue and return. If it occurs after the
    585 	 * read but before the reply, we dequeue, send a signal
    586 	 * message, and return. If it occurs after the reply we ignore
    587 	 * it. In no case do we want to restart the syscall.  If it
    588 	 * was interrupted by a venus shutdown (vcclose), return
    589 	 * ENODEV.  */
    590 
     591 	/* Ignore the return value; we have to check the flags anyway. */
    592 #ifdef	CTL_C
     593 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
     594 	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
     595 	   as SA_RESTART.  This means that we should exit the sleep, handle the
     596 	   "signal" and then go to sleep again.  Mostly this is done by letting
     597 	   the syscall complete and be restarted.  We are not idempotent and
     598 	   cannot do this.  A better solution is necessary.
     599 	 */
    600 	i = 0;
    601 	do {
    602 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
    603 	    if (error == 0)
    604 	    	break;
    605 	    mutex_enter(p->p_lock);
    606 	    if (error == EWOULDBLOCK) {
    607 #ifdef	CODA_VERBOSE
    608 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
    609 #endif
    610     	    } else if (sigispending(l, SIGIO)) {
    611 		    sigaddset(&l->l_sigmask, SIGIO);
    612 #ifdef	CODA_VERBOSE
    613 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
    614 #endif
    615     	    } else if (sigispending(l, SIGALRM)) {
    616 		    sigaddset(&l->l_sigmask, SIGALRM);
    617 #ifdef	CODA_VERBOSE
    618 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
    619 #endif
    620 	    } else {
    621 		    sigset_t tmp;
    622 		    tmp = p->p_sigpend.sp_set;	/* array assignment */
    623 		    sigminusset(&l->l_sigmask, &tmp);
    624 
    625 #ifdef	CODA_VERBOSE
    626 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
    627 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
    628 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    629 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    630 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    631 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
    632 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
    633 #endif
    634 		    mutex_exit(p->p_lock);
    635 		    break;
    636 #ifdef	notyet
    637 		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
    638 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
    639 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    640 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    641 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    642 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
    643 #endif
    644 	    }
    645 	    mutex_exit(p->p_lock);
    646 	} while (error && i++ < 128 && VC_OPEN(vcp));
    647 	l->l_sigmask = psig_omask;	/* XXXSA */
    648 #else
    649 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
    650 #endif
    651 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
    652  	/* Op went through, interrupt or not... */
    653 	    if (vmp->vm_flags & VM_WRITE) {
    654 		error = 0;
    655 		*outSize = vmp->vm_outSize;
    656 	    }
    657 
    658 	    else if (!(vmp->vm_flags & VM_READ)) {
    659 		/* Interrupted before venus read it. */
    660 #ifdef	CODA_VERBOSE
    661 		if (1)
    662 #else
    663 		if (codadebug)
    664 #endif
    665 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
    666 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    667 
    668 		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    669 		error = EINTR;
    670 	    }
    671 
    672 	    else {
     673 		/* VM_READ is set but VM_WRITE is not: Venus read the request
     674 		   but has not yet replied, so we were interrupted after the
     675 		   upcall started.  Send Venus a signal message. */
    676 		struct coda_in_hdr *dog;
    677 		struct vmsg *svmp;
    678 
    679 #ifdef	CODA_VERBOSE
    680 		if (1)
    681 #else
    682 		if (codadebug)
    683 #endif
    684 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
    685 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    686 
    687 		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    688 		error = EINTR;
    689 
    690 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
    691 
    692 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
    693 		dog = (struct coda_in_hdr *)svmp->vm_data;
    694 
    695 		svmp->vm_flags = 0;
    696 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
    697 		dog->unique = svmp->vm_unique = vmp->vm_unique;
    698 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
    699 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
    700 
    701 		if (codadebug)
     702 		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
    703 			   svmp->vm_opcode, svmp->vm_unique));
    704 
    705 		/* insert at head of queue */
    706 		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
    707 		selnotify(&(vcp->vc_selproc), 0, 0);
    708 	    }
    709 	}
    710 
    711 	else {	/* If venus died (!VC_OPEN(vcp)) */
    712 	    if (codadebug)
    713 		myprintf(("vcclose woke op %d.%d flags %d\n",
    714 		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    715 
    716 		error = ENODEV;
    717 	}
    718 
    719 	CODA_FREE(vmp, sizeof(struct vmsg));
    720 
    721 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
    722 		wakeup(&outstanding_upcalls);
    723 
    724 	if (!error)
    725 		error = ((struct coda_out_hdr *)buffer)->result;
    726 	return(error);
    727 }
    728 
    729 MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);
    730 
    731 static int
    732 vcoda_modcmd(modcmd_t cmd, void *arg)
    733 {
    734 	int cmajor, dmajor, error = 0;
    735 
    736 	dmajor = cmajor = -1;
    737 
    738 	switch (cmd) {
    739 	case MODULE_CMD_INIT:
    740 #ifdef _MODULE
    741 		vcodaattach(NVCODA);
    742 
    743 		return devsw_attach("vcoda", NULL, &dmajor,
    744 		    &vcoda_cdevsw, &cmajor);
    745 #endif
    746 		break;
    747 
    748 	case MODULE_CMD_FINI:
    749 #ifdef _MODULE
    750 		{
    751 			for  (size_t i = 0; i < NVCODA; i++) {
    752 				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
    753 				if (VC_OPEN(vcp))
    754 					return EBUSY;
    755 			}
    756 			return devsw_detach(NULL, &vcoda_cdevsw);
    757 		}
    758 #endif
    759 		break;
    760 
    761 	case MODULE_CMD_STAT:
    762 		return ENOTTY;
    763 
    764 	default:
    765 		return ENOTTY;
    766 	}
    767 	return error;
    768 }
    769