      1 /*	$NetBSD: coda_psdev.c,v 1.65 2024/05/17 23:57:46 thorpej Exp $	*/
      2 
      3 /*
      4  *
      5  *             Coda: an Experimental Distributed File System
      6  *                              Release 3.1
      7  *
      8  *           Copyright (c) 1987-1998 Carnegie Mellon University
      9  *                          All Rights Reserved
     10  *
     11  * Permission  to  use, copy, modify and distribute this software and its
     12  * documentation is hereby granted,  provided  that  both  the  copyright
     13  * notice  and  this  permission  notice  appear  in  all  copies  of the
     14  * software, derivative works or  modified  versions,  and  any  portions
     15  * thereof, and that both notices appear in supporting documentation, and
     16  * that credit is given to Carnegie Mellon University  in  all  documents
     17  * and publicity pertaining to direct or indirect use of this code or its
     18  * derivatives.
     19  *
     20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
     21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
     22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
     23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
     24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
     25  * ANY DERIVATIVE WORK.
     26  *
     27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
     28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
     29  * Mellon the rights to redistribute these changes without encumbrance.
     30  *
     31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
     32  */
     33 
     34 /*
     35  * Mach Operating System
     36  * Copyright (c) 1989 Carnegie-Mellon University
     37  * All rights reserved.  The CMU software License Agreement specifies
     38  * the terms and conditions for use and redistribution.
     39  */
     40 
     41 /*
     42  * This code was written for the Coda file system at Carnegie Mellon
     43  * University.  Contributors include David Steere, James Kistler, and
     44  * M. Satyanarayanan.  */
     45 
     46 /* These routines define the pseudo device for communication between
     47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
     48  * but I moved them to make it easier to port the Minicache without
     49  * porting coda. -- DCS 10/12/94
     50  *
     51  * The following code depends on file-system CODA.
     52  */
     53 
     54 /* These routines are the device entry points for Venus. */
     55 
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.65 2024/05/17 23:57:46 thorpej Exp $");
     58 
     59 extern int coda_nc_initialized;    /* Set if cache has been initialized */
     60 
     61 #include <sys/param.h>
     62 #include <sys/systm.h>
     63 #include <sys/kernel.h>
     64 #include <sys/proc.h>
     65 #include <sys/mount.h>
     66 #include <sys/file.h>
     67 #include <sys/ioctl.h>
     68 #include <sys/poll.h>
     69 #include <sys/select.h>
     70 #include <sys/conf.h>
     71 #include <sys/atomic.h>
     72 #include <sys/module.h>
     73 
     74 #include <coda/coda.h>
     75 #include <coda/cnode.h>
     76 #include <coda/coda_namecache.h>
     77 #include <coda/coda_io.h>
     78 
     79 #include "ioconf.h"
     80 
     81 #define CTL_C
     82 
     83 int coda_psdev_print_entry = 0;
     84 static
     85 int outstanding_upcalls = 0;
     86 int coda_call_sleep = PZERO - 1;
     87 #ifdef	CTL_C
     88 int coda_pcatch = PCATCH;
     89 #else
     90 #endif
     91 
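        /*
         * Interface version shared with the user-space cache manager.  It can
         * be queried or checked via the CIOC_KERNEL_VERSION ioctl handled in
         * vc_nb_ioctl() below: an argument of 0 returns the version, while 1
         * or 2 asks whether it matches the caller's expectation.
         */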
     92 int coda_kernel_version = CODA_KERNEL_VERSION;
     93 
     94 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
     95 
     96 dev_type_open(vc_nb_open);
     97 dev_type_close(vc_nb_close);
     98 dev_type_read(vc_nb_read);
     99 dev_type_write(vc_nb_write);
    100 dev_type_ioctl(vc_nb_ioctl);
    101 dev_type_poll(vc_nb_poll);
    102 dev_type_kqfilter(vc_nb_kqfilter);
    103 
    104 const struct cdevsw vcoda_cdevsw = {
    105 	.d_open = vc_nb_open,
    106 	.d_close = vc_nb_close,
    107 	.d_read = vc_nb_read,
    108 	.d_write = vc_nb_write,
    109 	.d_ioctl = vc_nb_ioctl,
    110 	.d_stop = nostop,
    111 	.d_tty = notty,
    112 	.d_poll = vc_nb_poll,
    113 	.d_mmap = nommap,
    114 	.d_kqfilter = vc_nb_kqfilter,
    115 	.d_discard = nodiscard,
    116 	.d_flag = D_OTHER,
    117 };
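
        /*
         * These are the kernel-side entry points of the upcall channel used by
         * the user-space cache manager (Venus).  A rough, hypothetical sketch
         * of the daemon's side of the protocol (names and the device path are
         * assumptions, not taken from this file):
         *
         *	fd = open("/dev/cfs0", O_RDWR);
         *	for (;;) {
         *		poll(&pfd, 1, INFTIM);
         *		n = read(fd, reqbuf, sizeof(reqbuf));
         *		service the request, then build a reply carrying the
         *		    same opcode and uniquifier
         *		write(fd, replybuf, replylen);
         *	}
         *
         * vc_nb_read() hands out pending requests, and vc_nb_write() matches a
         * reply back to its request by the uniquifier.
         */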
    118 
    119 struct vmsg {
    120     TAILQ_ENTRY(vmsg) vm_chain;
    121     void *	 vm_data;
    122     u_short	 vm_flags;
    123     u_short      vm_inSize;	/* Size is at most 5000 bytes */
    124     u_short	 vm_outSize;
    125     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    126     int		 vm_unique;
    127     void *	 vm_sleep;	/* Not used by Mach. */
    128 };
    129 
    130 struct coda_mntinfo coda_mnttbl[NVCODA];
    131 
    132 #define	VM_READ	    1
    133 #define	VM_WRITE    2
    134 #define	VM_INTR	    4
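
        /*
         * Lifecycle of a vmsg, as implemented in this file: coda_call()
         * allocates one per upcall and queues it on vc_requests; vc_nb_read()
         * copies it out to the daemon, marks it VM_READ and moves it to
         * vc_replies; vc_nb_write() copies the answer back, marks it VM_WRITE
         * and wakes the caller sleeping on vm_sleep.  VM_INTR is defined but
         * not set anywhere in this file.
         */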
    135 
    136 /* vcodaattach: do nothing */
    137 void
    138 vcodaattach(int n)
    139 {
    140 }
    141 
    142 /*
    143  * These functions are written for NetBSD.
    144  */
    145 int
    146 vc_nb_open(dev_t dev, int flag, int mode,
    147     struct lwp *l)
    148 {
    149     struct vcomm *vcp;
    150 
    151     ENTRY;
    152 
    153     if (minor(dev) >= NVCODA)
    154 	return(ENXIO);
    155 
    156     if (!coda_nc_initialized)
    157 	coda_nc_init();
    158 
    159     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    160     if (VC_OPEN(vcp))
    161 	return(EBUSY);
    162 
    163     selinit(&vcp->vc_selproc);
    164     TAILQ_INIT(&vcp->vc_requests);
    165     TAILQ_INIT(&vcp->vc_replies);
    166     MARK_VC_OPEN(vcp);
    167 
    168     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    169     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
    170 
    171     return(0);
    172 }
    173 
    174 int
    175 vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
    176 {
    177     struct vcomm *vcp;
    178     struct vmsg *vmp;
    179     struct coda_mntinfo *mi;
    180     int                 err;
    181 
    182     ENTRY;
    183 
    184     if (minor(dev) >= NVCODA)
    185 	return(ENXIO);
    186 
    187     mi = &coda_mnttbl[minor(dev)];
    188     vcp = &(mi->mi_vcomm);
    189 
    190     if (!VC_OPEN(vcp))
    191 	panic("vcclose: not open");
    192 
    193     /* Prevent future operations on this vfs from succeeding by auto-
    194      * unmounting any vfs mounted via this device.  This frees the user or
    195      * sysadmin from having to remember where all the mount points are.
    196      * Do this before the WAKEUPs to avoid queuing new messages between
    197      * the WAKEUP and the unmount (which can happen if we're unlucky).
    198      */
    199     if (!mi->mi_rootvp) {
    200 	/* just a simple open/close with no mount */
    201 	MARK_VC_CLOSED(vcp);
    202 	return 0;
    203     }
    204 
    205     /* Let unmount know this is for real */
    206     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    207     coda_unmounting(mi->mi_vfsp);
    208 
    209     /* Wakeup clients so they can return. */
    210     while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
    211 	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    212 
    213 	/* Free signal request messages and don't wake up because
    214 	   no one is waiting. */
    215 	if (vmp->vm_opcode == CODA_SIGNAL) {
    216 	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    217 	    CODA_FREE(vmp, sizeof(struct vmsg));
    218 	    continue;
    219 	}
    220 	outstanding_upcalls++;
    221 	wakeup(&vmp->vm_sleep);
    222     }
    223 
    224     while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
    225 	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    226 
    227 	outstanding_upcalls++;
    228 	wakeup(&vmp->vm_sleep);
    229     }
    230 
    231     MARK_VC_CLOSED(vcp);
    232 
    233     if (outstanding_upcalls) {
    234 #ifdef	CODA_VERBOSE
    235 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    236     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    237 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    238 #else
    239     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    240 #endif
    241     }
    242 
    243     err = dounmount(mi->mi_vfsp, flag, l);
    244     if (err)
    245 	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
    246 	           err, (unsigned long long)minor(dev)));
    247     seldestroy(&vcp->vc_selproc);
    248     return 0;
    249 }
    250 
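        /*
         * Read side of the upcall channel: hand the request at the head of
         * vc_requests to the daemon.  A read with no request pending returns
         * 0 bytes instead of blocking; the daemon is expected to poll/select
         * or kevent first.
         */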
    251 int
    252 vc_nb_read(dev_t dev, struct uio *uiop, int flag)
    253 {
    254     struct vcomm *	vcp;
    255     struct vmsg *vmp;
    256     int error = 0;
    257 
    258     ENTRY;
    259 
    260     if (minor(dev) >= NVCODA)
    261 	return(ENXIO);
    262 
    263     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    264 
    265     /* Get message at head of request queue. */
    266     vmp = TAILQ_FIRST(&vcp->vc_requests);
    267     if (vmp == NULL)
    268 	return(0);	/* Nothing to read */
    269 
    270     /* Move the input args into userspace */
    271     uiop->uio_rw = UIO_READ;
    272     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    273     if (error) {
    274 	myprintf(("vcread: error (%d) on uiomove\n", error));
    275 	error = EINVAL;
    276     }
    277 
    278     TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    279 
    280     /* If request was a signal, free up the message and don't
    281        enqueue it in the reply queue. */
    282     if (vmp->vm_opcode == CODA_SIGNAL) {
    283 	if (codadebug)
    284 	    myprintf(("vcread: signal msg (%d, %d)\n",
    285 		      vmp->vm_opcode, vmp->vm_unique));
    286 	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    287 	CODA_FREE(vmp, sizeof(struct vmsg));
    288 	return(error);
    289     }
    290 
    291     vmp->vm_flags |= VM_READ;
    292     TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);
    293 
    294     return(error);
    295 }
    296 
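        /*
         * Write side of the upcall channel.  A write is either a downcall (an
         * unsolicited message from the daemon, e.g. a purge, dispatched
         * straight to handleDownCall()) or the reply to an earlier upcall,
         * which is located on vc_replies by its uniquifier and handed back to
         * the sleeping caller.
         */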
    297 int
    298 vc_nb_write(dev_t dev, struct uio *uiop, int flag)
    299 {
    300     struct vcomm *	vcp;
    301     struct vmsg *vmp;
    302     struct coda_out_hdr *out;
    303     u_long seq;
    304     u_long opcode;
    305     int tbuf[2];
    306     int error = 0;
    307 
    308     ENTRY;
    309 
    310     if (minor(dev) >= NVCODA)
    311 	return(ENXIO);
    312 
    313     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    314 
    315     /* Peek at the opcode and uniquifier without transferring the data. */
    316     uiop->uio_rw = UIO_WRITE;
    317     error = uiomove(tbuf, sizeof(int) * 2, uiop);
    318     if (error) {
    319 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
    320 	return(EINVAL);
    321     }
    322 
    323     opcode = tbuf[0];
    324     seq = tbuf[1];
    325 
    326     if (codadebug)
    327 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
    328 
    329     if (DOWNCALL(opcode)) {
    330 	union outputArgs pbuf;
    331 
    332 	/* get the rest of the data. */
    333 	uiop->uio_rw = UIO_WRITE;
    334 	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
    335 	if (error) {
    336 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
    337 		      error, opcode, seq));
    338 	    return(EINVAL);
    339 	    }
    340 
    341 	return handleDownCall(opcode, &pbuf);
    342     }
    343 
    344     /* Look for the message on the reply queue (requests awaiting replies). */
    345     TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
    346 	if (vmp->vm_unique == seq) break;
    347     }
    348 
    349     if (vmp == NULL) {
    350 	if (codadebug)
    351 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
    352 
    353 	return(ESRCH);
    354     }
    355 
    356     /* Remove the message from the reply queue */
    357     TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    358 
    359     /* move data into response buffer. */
    360     out = (struct coda_out_hdr *)vmp->vm_data;
    361     /* Don't need to copy opcode and uniquifier. */
    362 
    363     /* get the rest of the data. */
    364     if (vmp->vm_outSize < uiop->uio_resid) {
    365 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
    366 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
    367 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
    368 	return(EINVAL);
    369     }
    370 
    371     tbuf[0] = uiop->uio_resid; 	/* Save this value. */
    372     uiop->uio_rw = UIO_WRITE;
    373     error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    374     if (error) {
    375 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
    376 		  error, opcode, seq));
    377 	return(EINVAL);
    378     }
    379 
    380     /* I don't think these are used, but just in case. */
    381     /* XXX - aren't these two already correct? -bnoble */
    382     out->opcode = opcode;
    383     out->unique = seq;
    384     vmp->vm_outSize	= tbuf[0];	/* Amount of data transferred? */
    385     vmp->vm_flags |= VM_WRITE;
    386     wakeup(&vmp->vm_sleep);
    387 
    388     return(0);
    389 }
    390 
    391 int
    392 vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    393     struct lwp *l)
    394 {
    395     ENTRY;
    396 
    397     switch (cmd) {
    398     case CODARESIZE: {
    399 	struct coda_resize *data = (struct coda_resize *)addr;
    400 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
    401 	break;
    402     }
    403     case CODASTATS:
    404 	if (coda_nc_use) {
    405 	    coda_nc_gather_stats();
    406 	    return(0);
    407 	} else {
    408 	    return(ENODEV);
    409 	}
    410 	break;
    411     case CODAPRINT:
    412 	if (coda_nc_use) {
    413 	    print_coda_nc();
    414 	    return(0);
    415 	} else {
    416 	    return(ENODEV);
    417 	}
    418 	break;
    419     case CIOC_KERNEL_VERSION:
    420 	switch (*(u_int *)addr) {
    421 	case 0:
    422 		*(u_int *)addr = coda_kernel_version;
    423 		return 0;
    424 		break;
    425 	case 1:
    426 	case 2:
    427 		if (coda_kernel_version != *(u_int *)addr)
    428 		    return ENOENT;
    429 		else
    430 		    return 0;
    431 	default:
    432 		return ENOENT;
    433 	}
    434     	break;
    435     default :
    436 	return(EINVAL);
    437 	break;
    438     }
    439 }
    440 
    441 int
    442 vc_nb_poll(dev_t dev, int events, struct lwp *l)
    443 {
    444     struct vcomm *vcp;
    445     int event_msk = 0;
    446 
    447     ENTRY;
    448 
    449     if (minor(dev) >= NVCODA)
    450 	return(ENXIO);
    451 
    452     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    453 
    454     event_msk = events & (POLLIN|POLLRDNORM);
    455     if (!event_msk)
    456 	return(0);
    457 
    458     if (!TAILQ_EMPTY(&vcp->vc_requests))
    459 	return(events & (POLLIN|POLLRDNORM));
    460 
    461     selrecord(l, &(vcp->vc_selproc));
    462 
    463     return(0);
    464 }
    465 
    466 static void
    467 filt_vc_nb_detach(struct knote *kn)
    468 {
    469 	struct vcomm *vcp = kn->kn_hook;
    470 
    471 	selremove_knote(&vcp->vc_selproc, kn);
    472 }
    473 
    474 static int
    475 filt_vc_nb_read(struct knote *kn, long hint)
    476 {
    477 	struct vcomm *vcp = kn->kn_hook;
    478 	struct vmsg *vmp;
    479 
    480 	vmp = TAILQ_FIRST(&vcp->vc_requests);
    481 	if (vmp == NULL)
    482 		return (0);
    483 
    484 	kn->kn_data = vmp->vm_inSize;
    485 	return (1);
    486 }
    487 
    488 static const struct filterops vc_nb_read_filtops = {
    489 	.f_flags = FILTEROP_ISFD,
    490 	.f_attach = NULL,
    491 	.f_detach = filt_vc_nb_detach,
    492 	.f_event = filt_vc_nb_read,
    493 };
    494 
    495 int
    496 vc_nb_kqfilter(dev_t dev, struct knote *kn)
    497 {
    498 	struct vcomm *vcp;
    499 
    500 	ENTRY;
    501 
    502 	if (minor(dev) >= NVCODA)
    503 		return(ENXIO);
    504 
    505 	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    506 
    507 	switch (kn->kn_filter) {
    508 	case EVFILT_READ:
    509 		kn->kn_fop = &vc_nb_read_filtops;
    510 		break;
    511 
    512 	default:
    513 		return (EINVAL);
    514 	}
    515 
    516 	kn->kn_hook = vcp;
    517 
    518 	selrecord_knote(&vcp->vc_selproc, kn);
    519 
    520 	return (0);
    521 }
    522 
    523 /*
    524  * Statistics
    525  */
    526 struct coda_clstat coda_clstat;
    527 
    528 /*
    529  * Key question: whether to sleep interruptibly or uninterruptibly when
    530  * waiting for Venus.  The former seems better (because you can ^C a
    531  * job), but then GNU-EMACS completion breaks. Use tsleep with no
    532  * timeout, and no longjmp happens. But, when sleeping
    533  * "uninterruptibly", we don't get told if it returns abnormally
    534  * (e.g. kill -9).
    535  */
    536 
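        /*
         * coda_call() is the upcall primitive used by the rest of the Coda
         * code: it stamps the request with a fresh uniquifier, appends it to
         * vc_requests, notifies any poller, and sleeps on the vmsg until
         * vc_nb_write() delivers the reply, the sleep is interrupted, or the
         * device is closed (in which case ENODEV is returned).
         */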
    537 int
    538 coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
    539 	void *buffer)
    540 {
    541 	struct vcomm *vcp;
    542 	struct vmsg *vmp;
    543 	int error;
    544 #ifdef	CTL_C
    545 	struct lwp *l = curlwp;
    546 	struct proc *p = l->l_proc;
    547 	sigset_t psig_omask;
    548 	int i;
    549 	psig_omask = l->l_sigmask;	/* XXXSA */
    550 #endif
    551 	if (mntinfo == NULL) {
    552 	    /* Unlikely, but could be a race condition with a dying warden */
    553 	    return ENODEV;
    554 	}
    555 
    556 	vcp = &(mntinfo->mi_vcomm);
    557 
    558 	coda_clstat.ncalls++;
    559 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
    560 
    561 	if (!VC_OPEN(vcp))
    562 	    return(ENODEV);
    563 
    564 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
    565 	/* Format the request message. */
    566 	vmp->vm_data = buffer;
    567 	vmp->vm_flags = 0;
    568 	vmp->vm_inSize = inSize;
    569 	vmp->vm_outSize
    570 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
    571 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    572 	vmp->vm_unique = ++vcp->vc_seq;
    573 	if (codadebug)
    574 	    myprintf(("Doing a call for %d.%d\n",
    575 		      vmp->vm_opcode, vmp->vm_unique));
    576 
    577 	/* Fill in the common input args. */
    578 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
    579 
    580 	/* Append msg to request queue and poke Venus. */
    581 	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
    582 	selnotify(&(vcp->vc_selproc), 0, 0);
    583 
    584 	/* We can be interrupted while we wait for Venus to process
    585 	 * our request.  If the interrupt occurs before Venus has read
    586 	 * the request, we dequeue and return. If it occurs after the
    587 	 * read but before the reply, we dequeue, send a signal
    588 	 * message, and return. If it occurs after the reply we ignore
    589 	 * it. In no case do we want to restart the syscall.  If it
    590 	 * was interrupted by a venus shutdown (vcclose), return
    591 	 * ENODEV.  */
    592 
    593 	/* Ignore the return value; we have to check anyway. */
    594 #ifdef	CTL_C
    595 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
    596 	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
    597 	   as SA_RESTART.  This means that we should exit the sleep, handle the
    598 	   "signal", and then go back to sleep.  Mostly this is done by letting
    599 	   the syscall complete and be restarted.  We are not idempotent and
    600 	   cannot do this.  A better solution is necessary.
    601 	 */
    602 	i = 0;
    603 	do {
    604 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
    605 	    if (error == 0)
    606 	    	break;
    607 	    mutex_enter(p->p_lock);
    608 	    if (error == EWOULDBLOCK) {
    609 #ifdef	CODA_VERBOSE
    610 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
    611 #endif
    612     	    } else if (sigispending(l, SIGIO)) {
    613 		    sigaddset(&l->l_sigmask, SIGIO);
    614 #ifdef	CODA_VERBOSE
    615 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
    616 #endif
    617     	    } else if (sigispending(l, SIGALRM)) {
    618 		    sigaddset(&l->l_sigmask, SIGALRM);
    619 #ifdef	CODA_VERBOSE
    620 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
    621 #endif
    622 	    } else {
    623 		    sigset_t tmp;
    624 		    tmp = p->p_sigpend.sp_set;	/* array assignment */
    625 		    sigminusset(&l->l_sigmask, &tmp);
    626 
    627 #ifdef	CODA_VERBOSE
    628 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
    629 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
    630 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    631 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    632 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    633 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
    634 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
    635 #endif
    636 		    mutex_exit(p->p_lock);
    637 		    break;
    638 #ifdef	notyet
    639 		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
    640 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
    641 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    642 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    643 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    644 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
    645 #endif
    646 	    }
    647 	    mutex_exit(p->p_lock);
    648 	} while (error && i++ < 128 && VC_OPEN(vcp));
    649 	l->l_sigmask = psig_omask;	/* XXXSA */
    650 #else
    651 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
    652 #endif
    653 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
    654 	    /* Op went through, interrupt or not... */
    655 	    if (vmp->vm_flags & VM_WRITE) {
    656 		error = 0;
    657 		*outSize = vmp->vm_outSize;
    658 	    }
    659 
    660 	    else if (!(vmp->vm_flags & VM_READ)) {
    661 		/* Interrupted before venus read it. */
    662 #ifdef	CODA_VERBOSE
    663 		if (1)
    664 #else
    665 		if (codadebug)
    666 #endif
    667 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
    668 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    669 
    670 		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    671 		error = EINTR;
    672 	    }
    673 
    674 	    else {
    675 		/* (!(vmp->vm_flags & VM_WRITE)) means we were interrupted
    676 		   after the upcall started. */
    677 		/* Interrupted after the start of the upcall: send Venus a signal. */
    678 		struct coda_in_hdr *dog;
    679 		struct vmsg *svmp;
    680 
    681 #ifdef	CODA_VERBOSE
    682 		if (1)
    683 #else
    684 		if (codadebug)
    685 #endif
    686 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
    687 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    688 
    689 		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    690 		error = EINTR;
    691 
    692 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
    693 
    694 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
    695 		dog = (struct coda_in_hdr *)svmp->vm_data;
    696 
    697 		svmp->vm_flags = 0;
    698 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
    699 		dog->unique = svmp->vm_unique = vmp->vm_unique;
    700 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
    701 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
    702 
    703 		if (codadebug)
    704 		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
    705 			   svmp->vm_opcode, svmp->vm_unique));
    706 
    707 		/* insert at head of queue */
    708 		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
    709 		selnotify(&(vcp->vc_selproc), 0, 0);
    710 	    }
    711 	}
    712 
    713 	else {	/* If venus died (!VC_OPEN(vcp)) */
    714 		if (codadebug) {
    715 			myprintf(("vcclose woke op %d.%d flags %d\n",
    716 			       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    717 		}
    718 
    719 		error = ENODEV;
    720 	}
    721 
    722 	CODA_FREE(vmp, sizeof(struct vmsg));
    723 
    724 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
    725 		wakeup(&outstanding_upcalls);
    726 
    727 	if (!error)
    728 		error = ((struct coda_out_hdr *)buffer)->result;
    729 	return(error);
    730 }
    731 
    732 MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);
    733 
    734 static int
    735 vcoda_modcmd(modcmd_t cmd, void *arg)
    736 {
    737 	int error = 0;
    738 
    739 	switch (cmd) {
    740 	case MODULE_CMD_INIT:
    741 #ifdef _MODULE
    742 	{
    743 		int cmajor, dmajor;
    744 		vcodaattach(NVCODA);
    745 
    746 		dmajor = cmajor = -1;
    747 		return devsw_attach("vcoda", NULL, &dmajor,
    748 		    &vcoda_cdevsw, &cmajor);
    749 	}
    750 #endif
    751 		break;
    752 
    753 	case MODULE_CMD_FINI:
    754 #ifdef _MODULE
    755 		{
    756 			for  (size_t i = 0; i < NVCODA; i++) {
    757 				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
    758 				if (VC_OPEN(vcp))
    759 					return EBUSY;
    760 			}
    761 			devsw_detach(NULL, &vcoda_cdevsw);
    762 		}
    763 #endif
    764 		break;
    765 
    766 	case MODULE_CMD_STAT:
    767 		return ENOTTY;
    768 
    769 	default:
    770 		return ENOTTY;
    771 	}
    772 	return error;
    773 }
    774