      1 /*	$NetBSD: coda_psdev.c,v 1.63 2023/08/03 03:10:23 rin Exp $	*/
      2 
      3 /*
      4  *
      5  *             Coda: an Experimental Distributed File System
      6  *                              Release 3.1
      7  *
      8  *           Copyright (c) 1987-1998 Carnegie Mellon University
      9  *                          All Rights Reserved
     10  *
     11  * Permission  to  use, copy, modify and distribute this software and its
     12  * documentation is hereby granted,  provided  that  both  the  copyright
     13  * notice  and  this  permission  notice  appear  in  all  copies  of the
     14  * software, derivative works or  modified  versions,  and  any  portions
     15  * thereof, and that both notices appear in supporting documentation, and
     16  * that credit is given to Carnegie Mellon University  in  all  documents
     17  * and publicity pertaining to direct or indirect use of this code or its
     18  * derivatives.
     19  *
     20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
     21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
     22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
     23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
     24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
     25  * ANY DERIVATIVE WORK.
     26  *
     27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
     28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
     29  * Mellon the rights to redistribute these changes without encumbrance.
     30  *
     31  * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
     32  */
     33 
     34 /*
     35  * Mach Operating System
     36  * Copyright (c) 1989 Carnegie-Mellon University
     37  * All rights reserved.  The CMU software License Agreement specifies
     38  * the terms and conditions for use and redistribution.
     39  */
     40 
     41 /*
     42  * This code was written for the Coda file system at Carnegie Mellon
      43  * University.  Contributors include David Steere, James Kistler, and
     44  * M. Satyanarayanan.  */
     45 
     46 /* These routines define the pseudo device for communication between
     47  * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
     48  * but I moved them to make it easier to port the Minicache without
     49  * porting coda. -- DCS 10/12/94
     50  *
      51  * The following code depends on the CODA file system.
     52  */
     53 
     54 /* These routines are the device entry points for Venus. */
     55 
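         /*
          * For orientation, here is a minimal sketch of the userspace side of
          * this protocol, in the style of Venus (illustrative only: the device
          * path, the buffer size, and the service_request() helper are
          * assumptions, and the real Venus is far more elaborate).  The daemon
          * opens the coda pseudo-device, waits for it to become readable,
          * reads one upcall at a time, and writes back a reply that reuses the
          * opcode/unique pair so vc_nb_write() can match it to the sleeping
          * kernel thread.
          *
          *	int fd = open("/dev/cfs0", O_RDWR);
          *	char buf[8192];				   // >= max message size
          *
          *	for (;;) {
          *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
          *		if (poll(&pfd, 1, -1) <= 0)
          *			continue;
          *		ssize_t n = read(fd, buf, sizeof(buf));	   // one request
          *		if (n < (ssize_t)sizeof(struct coda_in_hdr))
          *			continue;
          *		size_t outlen = service_request(buf, n);   // build reply
          *		(void)write(fd, buf, outlen);		   // same unique
          *	}
          */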
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.63 2023/08/03 03:10:23 rin Exp $");
     58 
     59 extern int coda_nc_initialized;    /* Set if cache has been initialized */
     60 
     61 #include <sys/param.h>
     62 #include <sys/systm.h>
     63 #include <sys/kernel.h>
     64 #include <sys/malloc.h>
     65 #include <sys/proc.h>
     66 #include <sys/mount.h>
     67 #include <sys/file.h>
     68 #include <sys/ioctl.h>
     69 #include <sys/poll.h>
     70 #include <sys/select.h>
     71 #include <sys/conf.h>
     72 #include <sys/atomic.h>
     73 #include <sys/module.h>
     74 
     75 #include <coda/coda.h>
     76 #include <coda/cnode.h>
     77 #include <coda/coda_namecache.h>
     78 #include <coda/coda_io.h>
     79 
     80 #include "ioconf.h"
     81 
     82 #define CTL_C
     83 
     84 int coda_psdev_print_entry = 0;
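         /*
          * outstanding_upcalls counts the upcalls that vc_nb_close() has woken
          * while tearing the device down; coda_call() decrements it as those
          * sleepers drain and wakes vc_nb_close(), which sleeps on
          * &outstanding_upcalls, once the count reaches zero.
          */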
     85 static
     86 int outstanding_upcalls = 0;
     87 int coda_call_sleep = PZERO - 1;
     88 #ifdef	CTL_C
     89 int coda_pcatch = PCATCH;
      90 #endif
     92 
     93 int coda_kernel_version = CODA_KERNEL_VERSION;
     94 
     95 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
     96 
     97 dev_type_open(vc_nb_open);
     98 dev_type_close(vc_nb_close);
     99 dev_type_read(vc_nb_read);
    100 dev_type_write(vc_nb_write);
    101 dev_type_ioctl(vc_nb_ioctl);
    102 dev_type_poll(vc_nb_poll);
    103 dev_type_kqfilter(vc_nb_kqfilter);
    104 
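         /*
          * Character-device entry points for the coda pseudo-device.  Venus
          * talks to the kernel exclusively through these: read(2) fetches
          * upcalls, write(2) delivers replies and downcalls, ioctl(2) carries
          * cache-control commands and the kernel-version handshake, and
          * poll(2)/kqueue wait for work.
          */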
    105 const struct cdevsw vcoda_cdevsw = {
    106 	.d_open = vc_nb_open,
    107 	.d_close = vc_nb_close,
    108 	.d_read = vc_nb_read,
    109 	.d_write = vc_nb_write,
    110 	.d_ioctl = vc_nb_ioctl,
    111 	.d_stop = nostop,
    112 	.d_tty = notty,
    113 	.d_poll = vc_nb_poll,
    114 	.d_mmap = nommap,
    115 	.d_kqfilter = vc_nb_kqfilter,
    116 	.d_discard = nodiscard,
    117 	.d_flag = D_OTHER,
    118 };
    119 
    120 struct vmsg {
    121     TAILQ_ENTRY(vmsg) vm_chain;
    122     void *	 vm_data;
    123     u_short	 vm_flags;
    124     u_short      vm_inSize;	/* Size is at most 5000 bytes */
    125     u_short	 vm_outSize;
    126     u_short	 vm_opcode; 	/* copied from data to save ptr lookup */
    127     int		 vm_unique;
    128     void *	 vm_sleep;	/* Not used by Mach. */
    129 };
    130 
    131 struct coda_mntinfo coda_mnttbl[NVCODA];
    132 
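         /*
          * vm_flags bits: VM_READ is set by vc_nb_read() once Venus has picked
          * the request up; VM_WRITE is set by vc_nb_write() once the reply has
          * been copied in.  coda_call() uses them to distinguish "interrupted
          * before Venus read the request" from "interrupted mid-upcall".
          * VM_INTR is defined but not set anywhere in this file.
          */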
    133 #define	VM_READ	    1
    134 #define	VM_WRITE    2
    135 #define	VM_INTR	    4
    136 
    137 /* vcodaattach: do nothing */
    138 void
    139 vcodaattach(int n)
    140 {
    141 }
    142 
    143 /*
    144  * These functions are written for NetBSD.
    145  */
    146 int
    147 vc_nb_open(dev_t dev, int flag, int mode,
    148     struct lwp *l)
    149 {
    150     struct vcomm *vcp;
    151 
    152     ENTRY;
    153 
    154     if (minor(dev) >= NVCODA)
    155 	return(ENXIO);
    156 
    157     if (!coda_nc_initialized)
    158 	coda_nc_init();
    159 
    160     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    161     if (VC_OPEN(vcp))
    162 	return(EBUSY);
    163 
    164     selinit(&vcp->vc_selproc);
    165     TAILQ_INIT(&vcp->vc_requests);
    166     TAILQ_INIT(&vcp->vc_replies);
    167     MARK_VC_OPEN(vcp);
    168 
    169     coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    170     coda_mnttbl[minor(dev)].mi_rootvp = NULL;
    171 
    172     return(0);
    173 }
    174 
    175 int
    176 vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
    177 {
    178     struct vcomm *vcp;
    179     struct vmsg *vmp;
    180     struct coda_mntinfo *mi;
    181     int                 err;
    182 
    183     ENTRY;
    184 
    185     if (minor(dev) >= NVCODA)
    186 	return(ENXIO);
    187 
    188     mi = &coda_mnttbl[minor(dev)];
    189     vcp = &(mi->mi_vcomm);
    190 
    191     if (!VC_OPEN(vcp))
    192 	panic("vcclose: not open");
    193 
     194     /* Prevent future operations on this vfs from succeeding by auto-
     195      * unmounting any vfs mounted via this device.  This frees the user or
     196      * sysadmin from having to remember where all mount points are located.
     197      * Do this before the wakeups to avoid queuing new messages between
     198      * the wakeup and the unmount (which can happen if we're unlucky).
     199      */
    200     if (!mi->mi_rootvp) {
    201 	/* just a simple open/close w no mount */
    202 	MARK_VC_CLOSED(vcp);
    203 	return 0;
    204     }
    205 
    206     /* Let unmount know this is for real */
    207     VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    208     coda_unmounting(mi->mi_vfsp);
    209 
    210     /* Wakeup clients so they can return. */
    211     while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
    212 	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    213 
     214 	/* Free signal request messages and don't wake up, because
     215 	   no one is waiting. */
    216 	if (vmp->vm_opcode == CODA_SIGNAL) {
    217 	    CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    218 	    CODA_FREE(vmp, sizeof(struct vmsg));
    219 	    continue;
    220 	}
    221 	outstanding_upcalls++;
    222 	wakeup(&vmp->vm_sleep);
    223     }
    224 
    225     while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
    226 	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    227 
    228 	outstanding_upcalls++;
    229 	wakeup(&vmp->vm_sleep);
    230     }
    231 
    232     MARK_VC_CLOSED(vcp);
    233 
    234     if (outstanding_upcalls) {
    235 #ifdef	CODA_VERBOSE
    236 	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    237     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    238 	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
    239 #else
    240     	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
    241 #endif
    242     }
    243 
    244     err = dounmount(mi->mi_vfsp, flag, l);
    245     if (err)
    246 	myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
    247 	           err, (unsigned long long)minor(dev)));
    248     seldestroy(&vcp->vc_selproc);
    249     return 0;
    250 }
    251 
    252 int
    253 vc_nb_read(dev_t dev, struct uio *uiop, int flag)
    254 {
    255     struct vcomm *	vcp;
    256     struct vmsg *vmp;
    257     int error = 0;
    258 
    259     ENTRY;
    260 
    261     if (minor(dev) >= NVCODA)
    262 	return(ENXIO);
    263 
    264     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    265 
    266     /* Get message at head of request queue. */
    267     vmp = TAILQ_FIRST(&vcp->vc_requests);
    268     if (vmp == NULL)
    269 	return(0);	/* Nothing to read */
    270 
    271     /* Move the input args into userspace */
    272     uiop->uio_rw = UIO_READ;
    273     error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    274     if (error) {
    275 	myprintf(("vcread: error (%d) on uiomove\n", error));
    276 	error = EINVAL;
    277     }
    278 
    279     TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    280 
    281     /* If request was a signal, free up the message and don't
    282        enqueue it in the reply queue. */
    283     if (vmp->vm_opcode == CODA_SIGNAL) {
    284 	if (codadebug)
    285 	    myprintf(("vcread: signal msg (%d, %d)\n",
    286 		      vmp->vm_opcode, vmp->vm_unique));
    287 	CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
    288 	CODA_FREE(vmp, sizeof(struct vmsg));
    289 	return(error);
    290     }
    291 
    292     vmp->vm_flags |= VM_READ;
    293     TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);
    294 
    295     return(error);
    296 }
    297 
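         /*
          * vc_nb_write() accepts one complete message from Venus.  Every
          * message begins with the opcode and uniquifier; if the opcode is a
          * downcall (cache invalidation and the like) the whole buffer is
          * handed to handleDownCall(), otherwise the payload is copied into
          * the matching vmsg on vc_replies and the kernel thread sleeping in
          * coda_call() is woken.
          */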
    298 int
    299 vc_nb_write(dev_t dev, struct uio *uiop, int flag)
    300 {
    301     struct vcomm *	vcp;
    302     struct vmsg *vmp;
    303     struct coda_out_hdr *out;
    304     u_long seq;
    305     u_long opcode;
    306     int tbuf[2];
    307     int error = 0;
    308 
    309     ENTRY;
    310 
    311     if (minor(dev) >= NVCODA)
    312 	return(ENXIO);
    313 
    314     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    315 
     316     /* Pull off the opcode and uniquifier before transferring the rest of the data. */
    317     uiop->uio_rw = UIO_WRITE;
    318     error = uiomove(tbuf, sizeof(int) * 2, uiop);
    319     if (error) {
    320 	myprintf(("vcwrite: error (%d) on uiomove\n", error));
    321 	return(EINVAL);
    322     }
    323 
    324     opcode = tbuf[0];
    325     seq = tbuf[1];
    326 
    327     if (codadebug)
    328 	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
    329 
    330     if (DOWNCALL(opcode)) {
    331 	union outputArgs pbuf;
    332 
    333 	/* get the rest of the data. */
    334 	uiop->uio_rw = UIO_WRITE;
    335 	error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
    336 	if (error) {
    337 	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
    338 		      error, opcode, seq));
    339 	    return(EINVAL);
    340 	    }
    341 
    342 	return handleDownCall(opcode, &pbuf);
    343     }
    344 
     345     /* Look for the matching message on the reply queue. */
    346     TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
    347 	if (vmp->vm_unique == seq) break;
    348     }
    349 
    350     if (vmp == NULL) {
    351 	if (codadebug)
    352 	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
    353 
    354 	return(ESRCH);
    355     }
    356 
    357     /* Remove the message from the reply queue */
    358     TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    359 
    360     /* move data into response buffer. */
    361     out = (struct coda_out_hdr *)vmp->vm_data;
    362     /* Don't need to copy opcode and uniquifier. */
    363 
    364     /* get the rest of the data. */
    365     if (vmp->vm_outSize < uiop->uio_resid) {
    366 	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
    367 		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
    368 	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
    369 	return(EINVAL);
    370     }
    371 
     372     tbuf[0] = uiop->uio_resid; 	/* Save the size of the remaining data. */
    373     uiop->uio_rw = UIO_WRITE;
    374     error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    375     if (error) {
    376 	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
    377 		  error, opcode, seq));
    378 	return(EINVAL);
    379     }
    380 
    381     /* I don't think these are used, but just in case. */
    382     /* XXX - aren't these two already correct? -bnoble */
    383     out->opcode = opcode;
    384     out->unique = seq;
    385     vmp->vm_outSize	= tbuf[0];	/* Amount of data transferred? */
    386     vmp->vm_flags |= VM_WRITE;
    387     wakeup(&vmp->vm_sleep);
    388 
    389     return(0);
    390 }
    391 
    392 int
    393 vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    394     struct lwp *l)
    395 {
    396     ENTRY;
    397 
    398     switch (cmd) {
    399     case CODARESIZE: {
    400 	struct coda_resize *data = (struct coda_resize *)addr;
    401 	return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
    402 	break;
    403     }
    404     case CODASTATS:
    405 	if (coda_nc_use) {
    406 	    coda_nc_gather_stats();
    407 	    return(0);
    408 	} else {
    409 	    return(ENODEV);
    410 	}
    411 	break;
    412     case CODAPRINT:
    413 	if (coda_nc_use) {
    414 	    print_coda_nc();
    415 	    return(0);
    416 	} else {
    417 	    return(ENODEV);
    418 	}
    419 	break;
    420     case CIOC_KERNEL_VERSION:
    421 	switch (*(u_int *)addr) {
    422 	case 0:
    423 		*(u_int *)addr = coda_kernel_version;
    424 		return 0;
    425 		break;
    426 	case 1:
    427 	case 2:
    428 		if (coda_kernel_version != *(u_int *)addr)
    429 		    return ENOENT;
    430 		else
    431 		    return 0;
    432 	default:
    433 		return ENOENT;
    434 	}
    435     	break;
     436     default:
    437 	return(EINVAL);
    438 	break;
    439     }
    440 }
    441 
    442 int
    443 vc_nb_poll(dev_t dev, int events, struct lwp *l)
    444 {
    445     struct vcomm *vcp;
    446     int event_msk = 0;
    447 
    448     ENTRY;
    449 
    450     if (minor(dev) >= NVCODA)
    451 	return(ENXIO);
    452 
    453     vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    454 
    455     event_msk = events & (POLLIN|POLLRDNORM);
    456     if (!event_msk)
    457 	return(0);
    458 
    459     if (!TAILQ_EMPTY(&vcp->vc_requests))
    460 	return(events & (POLLIN|POLLRDNORM));
    461 
    462     selrecord(l, &(vcp->vc_selproc));
    463 
    464     return(0);
    465 }
    466 
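         /*
          * Besides poll(2)/select(2) above, Venus may wait for upcalls with
          * kqueue.  The read filter below reports readability, with the size
          * of the next pending request as kn_data, whenever vc_requests is
          * non-empty; both mechanisms are notified by the selnotify() call in
          * coda_call().
          */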
    467 static void
    468 filt_vc_nb_detach(struct knote *kn)
    469 {
    470 	struct vcomm *vcp = kn->kn_hook;
    471 
    472 	selremove_knote(&vcp->vc_selproc, kn);
    473 }
    474 
    475 static int
    476 filt_vc_nb_read(struct knote *kn, long hint)
    477 {
    478 	struct vcomm *vcp = kn->kn_hook;
    479 	struct vmsg *vmp;
    480 
    481 	vmp = TAILQ_FIRST(&vcp->vc_requests);
    482 	if (vmp == NULL)
    483 		return (0);
    484 
    485 	kn->kn_data = vmp->vm_inSize;
    486 	return (1);
    487 }
    488 
    489 static const struct filterops vc_nb_read_filtops = {
    490 	.f_flags = FILTEROP_ISFD,
    491 	.f_attach = NULL,
    492 	.f_detach = filt_vc_nb_detach,
    493 	.f_event = filt_vc_nb_read,
    494 };
    495 
    496 int
    497 vc_nb_kqfilter(dev_t dev, struct knote *kn)
    498 {
    499 	struct vcomm *vcp;
    500 
    501 	ENTRY;
    502 
    503 	if (minor(dev) >= NVCODA)
    504 		return(ENXIO);
    505 
    506 	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    507 
    508 	switch (kn->kn_filter) {
    509 	case EVFILT_READ:
    510 		kn->kn_fop = &vc_nb_read_filtops;
    511 		break;
    512 
    513 	default:
    514 		return (EINVAL);
    515 	}
    516 
    517 	kn->kn_hook = vcp;
    518 
    519 	selrecord_knote(&vcp->vc_selproc, kn);
    520 
    521 	return (0);
    522 }
    523 
    524 /*
    525  * Statistics
    526  */
    527 struct coda_clstat coda_clstat;
    528 
    529 /*
     530  * Key question: whether to sleep interruptibly or uninterruptibly when
     531  * waiting for Venus.  The former seems better (because you can ^C a
     532  * job), but then GNU Emacs completion breaks.  Using tsleep with no
     533  * timeout means no longjmp happens.  But when sleeping
     534  * "uninterruptibly", we don't get told if the process is killed
     535  * abnormally (e.g. kill -9).
    536  */
    537 
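         /*
          * coda_call() performs one upcall.  The caller passes a buffer that
          * begins with a filled-in struct coda_in_hdr (opcode already set);
          * coda_call() assigns the uniquifier, queues the message on
          * vc_requests, notifies Venus, and sleeps until vc_nb_write() copies
          * the reply back into the same buffer.  On success *outSize is set to
          * the reply size and the coda_out_hdr result is returned.  If Venus
          * has died the call fails with ENODEV; an interrupted call is either
          * dequeued (if Venus had not yet read it) or followed by a
          * CODA_SIGNAL message telling Venus to abandon the operation.
          */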
    538 int
    539 coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
    540 	void *buffer)
    541 {
    542 	struct vcomm *vcp;
    543 	struct vmsg *vmp;
    544 	int error;
    545 #ifdef	CTL_C
    546 	struct lwp *l = curlwp;
    547 	struct proc *p = l->l_proc;
    548 	sigset_t psig_omask;
    549 	int i;
    550 	psig_omask = l->l_sigmask;	/* XXXSA */
    551 #endif
    552 	if (mntinfo == NULL) {
    553 	    /* Unlikely, but could be a race condition with a dying warden */
    554 	    return ENODEV;
    555 	}
    556 
    557 	vcp = &(mntinfo->mi_vcomm);
    558 
    559 	coda_clstat.ncalls++;
    560 	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
    561 
    562 	if (!VC_OPEN(vcp))
    563 	    return(ENODEV);
    564 
    565 	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
    566 	/* Format the request message. */
    567 	vmp->vm_data = buffer;
    568 	vmp->vm_flags = 0;
    569 	vmp->vm_inSize = inSize;
    570 	vmp->vm_outSize
    571 	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
    572 	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    573 	vmp->vm_unique = ++vcp->vc_seq;
    574 	if (codadebug)
    575 	    myprintf(("Doing a call for %d.%d\n",
    576 		      vmp->vm_opcode, vmp->vm_unique));
    577 
    578 	/* Fill in the common input args. */
    579 	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
    580 
    581 	/* Append msg to request queue and poke Venus. */
    582 	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
    583 	selnotify(&(vcp->vc_selproc), 0, 0);
    584 
    585 	/* We can be interrupted while we wait for Venus to process
    586 	 * our request.  If the interrupt occurs before Venus has read
    587 	 * the request, we dequeue and return. If it occurs after the
    588 	 * read but before the reply, we dequeue, send a signal
    589 	 * message, and return. If it occurs after the reply we ignore
    590 	 * it. In no case do we want to restart the syscall.  If it
    591 	 * was interrupted by a venus shutdown (vcclose), return
    592 	 * ENODEV.  */
    593 
     594 	/* Ignore the return value; we have to check anyway. */
    595 #ifdef	CTL_C
    596 	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
     597 	   on a ^C or ^Z.  The problem is that Emacs installs certain signal
     598 	   handlers with SA_RESTART.  This means that we should exit the sleep,
     599 	   handle the "signal", and then go to sleep again.  Mostly this is done
     600 	   by letting the syscall complete and be restarted.  We are not
     601 	   idempotent and cannot do this.  A better solution is necessary.
    602 	 */
    603 	i = 0;
    604 	do {
    605 	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
    606 	    if (error == 0)
    607 	    	break;
    608 	    mutex_enter(p->p_lock);
    609 	    if (error == EWOULDBLOCK) {
    610 #ifdef	CODA_VERBOSE
    611 		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
    612 #endif
    613     	    } else if (sigispending(l, SIGIO)) {
    614 		    sigaddset(&l->l_sigmask, SIGIO);
    615 #ifdef	CODA_VERBOSE
    616 		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
    617 #endif
    618     	    } else if (sigispending(l, SIGALRM)) {
    619 		    sigaddset(&l->l_sigmask, SIGALRM);
    620 #ifdef	CODA_VERBOSE
    621 		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
    622 #endif
    623 	    } else {
    624 		    sigset_t tmp;
    625 		    tmp = p->p_sigpend.sp_set;	/* array assignment */
    626 		    sigminusset(&l->l_sigmask, &tmp);
    627 
    628 #ifdef	CODA_VERBOSE
    629 		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
    630 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
    631 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    632 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    633 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    634 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
    635 			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
    636 #endif
    637 		    mutex_exit(p->p_lock);
    638 		    break;
    639 #ifdef	notyet
    640 		    sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
    641 		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
    642 			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
    643 			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
    644 			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
    645 			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
    646 #endif
    647 	    }
    648 	    mutex_exit(p->p_lock);
    649 	} while (error && i++ < 128 && VC_OPEN(vcp));
    650 	l->l_sigmask = psig_omask;	/* XXXSA */
    651 #else
    652 	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
    653 #endif
    654 	if (VC_OPEN(vcp)) {	/* Venus is still alive */
    655  	/* Op went through, interrupt or not... */
    656 	    if (vmp->vm_flags & VM_WRITE) {
    657 		error = 0;
    658 		*outSize = vmp->vm_outSize;
    659 	    }
    660 
    661 	    else if (!(vmp->vm_flags & VM_READ)) {
    662 		/* Interrupted before venus read it. */
    663 #ifdef	CODA_VERBOSE
    664 		if (1)
    665 #else
    666 		if (codadebug)
    667 #endif
    668 		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
    669 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    670 
    671 		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
    672 		error = EINTR;
    673 	    }
    674 
    675 	    else {
    676 		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
    677                    upcall started */
    678 		/* Interrupted after start of upcall, send venus a signal */
    679 		struct coda_in_hdr *dog;
    680 		struct vmsg *svmp;
    681 
    682 #ifdef	CODA_VERBOSE
    683 		if (1)
    684 #else
    685 		if (codadebug)
    686 #endif
    687 		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
    688 			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    689 
    690 		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
    691 		error = EINTR;
    692 
    693 		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
    694 
    695 		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
    696 		dog = (struct coda_in_hdr *)svmp->vm_data;
    697 
    698 		svmp->vm_flags = 0;
    699 		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
    700 		dog->unique = svmp->vm_unique = vmp->vm_unique;
    701 		svmp->vm_inSize = sizeof (struct coda_in_hdr);
    702 /*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);
    703 
    704 		if (codadebug)
     705 		    myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
    706 			   svmp->vm_opcode, svmp->vm_unique));
    707 
    708 		/* insert at head of queue */
    709 		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
    710 		selnotify(&(vcp->vc_selproc), 0, 0);
    711 	    }
    712 	}
    713 
    714 	else {	/* If venus died (!VC_OPEN(vcp)) */
    715 		if (codadebug) {
    716 			myprintf(("vcclose woke op %d.%d flags %d\n",
    717 			       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
    718 		}
    719 
    720 		error = ENODEV;
    721 	}
    722 
    723 	CODA_FREE(vmp, sizeof(struct vmsg));
    724 
    725 	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
    726 		wakeup(&outstanding_upcalls);
    727 
    728 	if (!error)
    729 		error = ((struct coda_out_hdr *)buffer)->result;
    730 	return(error);
    731 }
    732 
    733 MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);
    734 
    735 static int
    736 vcoda_modcmd(modcmd_t cmd, void *arg)
    737 {
    738 	int error = 0;
    739 
    740 	switch (cmd) {
    741 	case MODULE_CMD_INIT:
    742 #ifdef _MODULE
    743 	{
    744 		int cmajor, dmajor;
    745 		vcodaattach(NVCODA);
    746 
    747 		dmajor = cmajor = -1;
    748 		return devsw_attach("vcoda", NULL, &dmajor,
    749 		    &vcoda_cdevsw, &cmajor);
    750 	}
    751 #endif
    752 		break;
    753 
    754 	case MODULE_CMD_FINI:
    755 #ifdef _MODULE
    756 		{
     757 			for (size_t i = 0; i < NVCODA; i++) {
    758 				struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
    759 				if (VC_OPEN(vcp))
    760 					return EBUSY;
    761 			}
    762 			devsw_detach(NULL, &vcoda_cdevsw);
    763 		}
    764 #endif
    765 		break;
    766 
    767 	case MODULE_CMD_STAT:
    768 		return ENOTTY;
    769 
    770 	default:
    771 		return ENOTTY;
    772 	}
    773 	return error;
    774 }
    775