/*	$NetBSD: coda_psdev.c,v 1.57 2016/07/07 06:55:40 msaitoh Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission  to  use, copy, modify and distribute this software and its
 * documentation is hereby granted,  provided  that  both  the  copyright
 * notice  and  this  permission  notice  appear  in  all  copies  of the
 * software, derivative works or  modified  versions,  and  any  portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University  in  all  documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
 * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
 * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie  Mellon  encourages  users  of  this  software  to return any
 * improvements or extensions that  they  make,  and  to  grant  Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo-device used for communication between
 * Coda's Venus and the Minicache in Mach 2.6.  They used to live in
 * cfs_subr.c, but I moved them to make it easier to port the Minicache
 * without porting Coda. -- DCS 10/12/94
 *
 * The following code depends on the file-system CODA option.
 */

/* These routines are the device entry points for Venus. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.57 2016/07/07 06:55:40 msaitoh Exp $");

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>
#include <sys/atomic.h>
#include <sys/module.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#include "ioconf.h"

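/*
 * Defining CTL_C enables the (work-in-progress) interruptible-sleep
 * handling in coda_call() below.
 */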
#define CTL_C

int coda_psdev_print_entry = 0;
static int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#endif

int coda_kernel_version = CODA_KERNEL_VERSION;

#define ENTRY if (coda_psdev_print_entry) myprintf(("Entered %s\n", __func__))
dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	.d_open = vc_nb_open,
	.d_close = vc_nb_close,
	.d_read = vc_nb_read,
	.d_write = vc_nb_write,
	.d_ioctl = vc_nb_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = vc_nb_poll,
	.d_mmap = nommap,
	.d_kqfilter = vc_nb_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

struct vmsg {
    TAILQ_ENTRY(vmsg) vm_chain;
    void	*vm_data;
    u_short	 vm_flags;
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;
    void	*vm_sleep;	/* Not used by Mach. */
};

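/*
 * One coda_mntinfo per minor device; each entry pairs the Venus
 * communication channel (mi_vcomm) with the file system mounted
 * through that device (mi_vfsp, mi_rootvp).
 */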
struct coda_mntinfo coda_mnttbl[NVCODA];

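/*
 * vm_flags values: VM_READ is set once Venus has read the request
 * (vc_nb_read) and VM_WRITE once it has written the reply (vc_nb_write).
 * VM_INTR is defined but not set in this file.
 */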
#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev_t dev, int flag, int mode,
    struct lwp *l)
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    if (!coda_nc_initialized)
	coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    selinit(&vcp->vc_selproc);
    TAILQ_INIT(&vcp->vc_requests);
    TAILQ_INIT(&vcp->vc_replies);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_mntinfo *mi;
    int err;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device.  This frees the user or
     * sysadmin from having to remember where all the mount points are
     * located.  Do this before the wakeups to avoid queuing new messages
     * between the wakeup and the unmount (which can happen if we're
     * unlucky).
     */
    if (!mi->mi_rootvp) {
	/* Just a simple open/close with no mount. */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    coda_unmounting(mi->mi_vfsp);

    /* Wake up clients so they can return. */
    while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

	/* Free signal request messages and don't wake anyone up, since
	   no one is waiting for them. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	    CODA_FREE(vmp, sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

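    /*
     * Threads woken above notice the closed channel in coda_call(); each
     * decrements outstanding_upcalls and the last one wakes us, so the
     * unmount below does not race with upcalls still in flight.
     */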
    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, l);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
	           err, (unsigned long long)minor(dev)));
    seldestroy(&vcp->vc_selproc);
    return 0;
}

int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Get message at head of request queue. */
    vmp = TAILQ_FIRST(&vcp->vc_requests);
    if (vmp == NULL)
	return(0);	/* Nothing to read */

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

    TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

    /* If the request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
	CODA_FREE(vmp, sizeof(struct vmsg));
	return(error);
    }

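    /* Mark the request as read and park it on the reply queue until
       Venus answers it via vc_nb_write(). */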
    vmp->vm_flags |= VM_READ;
    TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);

    return(error);
}

int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int tbuf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the rest
       of the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(tbuf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = tbuf[0];
    seq = tbuf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

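    /*
     * Downcalls are messages initiated by Venus itself; they have no
     * matching request waiting on the reply queue, so copy in the
     * remaining arguments and hand them to the VFS layer directly.
     */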
    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* Get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
	if (vmp->vm_unique == seq) break;
    }

    if (vmp == NULL) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

    /* Move the data into the response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* Get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
	return(EINVAL);
    }

    tbuf[0] = uiop->uio_resid;		/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize = tbuf[0];		/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

int
vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
    ENTRY;

    switch (cmd) {
    case CODARESIZE: {
	struct coda_resize *data = (struct coda_resize *)addr;
	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CODASTATS:
	if (coda_nc_use) {
	    coda_nc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CODAPRINT:
	if (coda_nc_use) {
	    print_coda_nc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CIOC_KERNEL_VERSION:
	switch (*(u_int *)addr) {
	case 0:
		*(u_int *)addr = coda_kernel_version;
		return 0;
		break;
	case 1:
	case 2:
		if (coda_kernel_version != *(u_int *)addr)
		    return ENOENT;
		else
		    return 0;
	default:
		return ENOENT;
	}
	break;
    default:
	return(EINVAL);
	break;
    }
}

int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!TAILQ_EMPTY(&vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(l, &(vcp->vc_selproc));

    return(0);
}

static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return (0);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

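/*
 * kqueue EVFILT_READ filter: ready when a request is pending for Venus;
 * kn_data reports the size of the first request on the queue.
 */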
static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a Venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal" and then go back to sleep.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
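	/*
	 * Retry the sleep on timeouts and on pending SIGIO/SIGALRM (which
	 * are added to the LWP's signal mask so they stop waking us); any
	 * other signal breaks out of the loop.  Give up after ~128 tries
	 * or once Venus closes the device.
	 */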
	i = 0;
	do {
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
		break;
	    mutex_enter(p->p_lock);
	    if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigispending(l, SIGIO)) {
		    sigaddset(&l->l_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigispending(l, SIGALRM)) {
		    sigaddset(&l->l_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    sigset_t tmp;
		    tmp = p->p_sigpend.sp_set;	/* array assignment */
		    sigminusset(&l->l_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    mutex_exit(p->p_lock);
		    break;
#ifdef	notyet
		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
	    }
	    mutex_exit(p->p_lock);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	    /* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before Venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
		error = EINTR;
	    }

	    else {
		/* Venus has read the request (VM_READ) but not yet written
		   a reply (!VM_WRITE): interrupted after the upcall started,
		   so send Venus a signal message. */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue */
		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
		selnotify(&(vcp->vc_selproc), 0, 0);
	    }
	}

	else {	/* If Venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}

MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);

static int
vcoda_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
	{
		int cmajor, dmajor;
		vcodaattach(NVCODA);

		dmajor = cmajor = -1;
		return devsw_attach("vcoda", NULL, &dmajor,
		    &vcoda_cdevsw, &cmajor);
	}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		{
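			/* Refuse to unload while any minor device is
			   still open (i.e. a Venus is attached). */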
			for (size_t i = 0; i < NVCODA; i++) {
				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
				if (VC_OPEN(vcp))
					return EBUSY;
			}
			return devsw_detach(NULL, &vcoda_cdevsw);
		}
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}
	return error;
}