/*	$NetBSD: coda_psdev.c,v 1.25 2003/06/28 14:21:14 darrenr Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
41 /*
42 * This code was written for the Coda file system at Carnegie Mellon
43 * University. Contributers include David Steere, James Kistler, and
44 * M. Satyanarayanan. */
45
46 /* These routines define the pseudo device for communication between
47 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48 * but I moved them to make it easier to port the Minicache without
49 * porting coda. -- DCS 10/12/94
50 *
51 * Following code depends on file-system CODA.
52 */
53
54 /* These routines are the device entry points for Venus. */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.25 2003/06/28 14:21:14 darrenr Exp $");
58
59 extern int coda_nc_initialized; /* Set if cache has been initialized */
60
61 #ifdef _LKM
62 #define NVCODA 4
63 #else
64 #include <vcoda.h>
65 #endif
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/kernel.h>
70 #include <sys/malloc.h>
71 #include <sys/proc.h>
72 #include <sys/mount.h>
73 #include <sys/file.h>
74 #include <sys/ioctl.h>
75 #include <sys/poll.h>
76 #include <sys/select.h>
77 #include <sys/conf.h>
78
79 #include <miscfs/syncfs/syncfs.h>
80
81 #include <coda/coda.h>
82 #include <coda/cnode.h>
83 #include <coda/coda_namecache.h>
84 #include <coda/coda_io.h>
85
/* CTL_C: enable the (work-in-progress) interruptible-sleep path in coda_call. */
#define CTL_C

/* Non-zero to trace entry into each device routine via ENTRY below. */
int coda_psdev_print_entry = 0;
/* Count of upcalls still in flight; vc_nb_close sleeps until it drains. */
static
int outstanding_upcalls = 0;
/* Sleep priority for coda_call/vc_nb_close (PZERO-1: not a user priority). */
int coda_call_sleep = PZERO - 1;
#ifdef CTL_C
/* OR'd into the tsleep priority so ^C/^Z can wake a blocked upcall. */
int coda_pcatch = PCATCH;
#else
#endif

/* Debug trace of routine entry, gated on coda_psdev_print_entry. */
#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
98
99 void vcodaattach(int n);
100
101 dev_type_open(vc_nb_open);
102 dev_type_close(vc_nb_close);
103 dev_type_read(vc_nb_read);
104 dev_type_write(vc_nb_write);
105 dev_type_ioctl(vc_nb_ioctl);
106 dev_type_poll(vc_nb_poll);
107 dev_type_kqfilter(vc_nb_kqfilter);
108
/* Character-device switch entry for the Coda pseudo-device (/dev/cfsN). */
const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter,
};
113
/*
 * One kernel<->Venus message.  Messages live on vc_requests until Venus
 * reads them, then (except signals) on vc_replys until Venus writes the
 * answer back; the originating thread sleeps on vm_sleep meanwhile.
 */
struct vmsg {
    struct queue vm_chain;	/* links on vc_requests / vc_replys */
    caddr_t	 vm_data;	/* request buffer; reply is written over it */
    u_short	 vm_flags;	/* VM_* progress bits below */
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;	/* expected/actual reply size */
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;	/* uniquifier matching replies to requests */
    caddr_t	 vm_sleep;	/* sleep/wakeup channel; Not used by Mach. */
};

#define VM_READ	    1		/* Venus has read the request */
#define VM_WRITE    2		/* Venus has written the reply */
#define VM_INTR	    4		/* caller was interrupted while waiting */
128
129 /* vcodaattach: do nothing */
/*
 * vcodaattach: pseudo-device attach hook.  The Coda control device
 * keeps no per-attach state, so there is nothing to initialize here.
 */
void
vcodaattach(int n)
{
	/* nothing to do */
}
135
136 /*
137 * These functions are written for NetBSD.
138 */
139 int
140 vc_nb_open(dev, flag, mode, l)
141 dev_t dev;
142 int flag;
143 int mode;
144 struct lwp *l; /* NetBSD only */
145 {
146 struct vcomm *vcp;
147
148 ENTRY;
149
150 if (minor(dev) >= NVCODA || minor(dev) < 0)
151 return(ENXIO);
152
153 if (!coda_nc_initialized)
154 coda_nc_init();
155
156 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
157 if (VC_OPEN(vcp))
158 return(EBUSY);
159
160 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
161 INIT_QUEUE(vcp->vc_requests);
162 INIT_QUEUE(vcp->vc_replys);
163 MARK_VC_OPEN(vcp);
164
165 coda_mnttbl[minor(dev)].mi_vfsp = NULL;
166 coda_mnttbl[minor(dev)].mi_rootvp = NULL;
167
168 return(0);
169 }
170
/*
 * vc_nb_close: close the Venus control device.
 *
 * If a Coda file system is mounted through this minor, it is forcibly
 * unmounted: with Venus gone no outstanding or future upcalls can ever
 * complete.  Queued signal messages are freed, every other queued
 * message's sleeper is woken (they will see !VC_OPEN and fail with
 * ENODEV), and we wait for outstanding upcalls to drain before calling
 * dounmount().  Always returns 0 (ENXIO excepted); an unmount error is
 * only logged.
 */
int
vc_nb_close (dev, flag, mode, l)
    dev_t dev;
    int flag;
    int mode;
    struct lwp *l;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device. This frees user or
     * sysadm from having to remember where all mount points are located.
     * Put this before WAKEUPs to avoid queuing new messages between
     * the WAKEUP and the unmount (which can happen if we're unlucky)
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close w no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze syncer.  Must do this before locking the
     * mount point.  See dounmount for details().
     */
    lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0)) {
	lockmgr(&syncer_lock, LK_RELEASE, NULL);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
	/* Grab the successor first: signal messages are freed below. */
	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wakeup cause
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    /* Requests Venus already read but never answered: wake those too. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    /* Wait for the woken callers in coda_call to notice and finish;
     * the last one out wakes us (see coda_call's decrement). */
    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, l);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
		  err, minor(dev)));
    return 0;
}
262
263 int
264 vc_nb_read(dev, uiop, flag)
265 dev_t dev;
266 struct uio *uiop;
267 int flag;
268 {
269 struct vcomm * vcp;
270 struct vmsg *vmp;
271 int error = 0;
272
273 ENTRY;
274
275 if (minor(dev) >= NVCODA || minor(dev) < 0)
276 return(ENXIO);
277
278 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
279 /* Get message at head of request queue. */
280 if (EMPTY(vcp->vc_requests))
281 return(0); /* Nothing to read */
282
283 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
284
285 /* Move the input args into userspace */
286 uiop->uio_rw = UIO_READ;
287 error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
288 if (error) {
289 myprintf(("vcread: error (%d) on uiomove\n", error));
290 error = EINVAL;
291 }
292
293 #ifdef OLD_DIAGNOSTIC
294 if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
295 panic("vc_nb_read: bad chain");
296 #endif
297
298 REMQUE(vmp->vm_chain);
299
300 /* If request was a signal, free up the message and don't
301 enqueue it in the reply queue. */
302 if (vmp->vm_opcode == CODA_SIGNAL) {
303 if (codadebug)
304 myprintf(("vcread: signal msg (%d, %d)\n",
305 vmp->vm_opcode, vmp->vm_unique));
306 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
307 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
308 return(error);
309 }
310
311 vmp->vm_flags |= VM_READ;
312 INSQUE(vmp->vm_chain, vcp->vc_replys);
313
314 return(error);
315 }
316
/*
 * vc_nb_write: Venus writes either a downcall or an upcall reply.
 *
 * The first two ints (opcode, uniquifier) are consumed first without
 * the payload.  Downcall opcodes are dispatched to handleDownCall();
 * otherwise the uniquifier is matched against the reply queue, the
 * payload is copied into the original caller's buffer, and the sleeping
 * caller is woken.  Returns ESRCH for an unmatched uniquifier, EINVAL
 * on copy errors or oversized replies.
 */
int
vc_nb_write(dev, uiop, flag)
    dev_t dev;
    struct uio *uiop;
    int flag;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode, unique without transfering the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	/* NOTE(review): copies up to sizeof(pbuf)-8 bytes; uiomove is
	   bounded by uio_resid, so a short write yields a short copy. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid;	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}
416
417 int
418 vc_nb_ioctl(dev, cmd, addr, flag, l)
419 dev_t dev;
420 u_long cmd;
421 caddr_t addr;
422 int flag;
423 struct lwp *l;
424 {
425 ENTRY;
426
427 switch(cmd) {
428 case CODARESIZE: {
429 struct coda_resize *data = (struct coda_resize *)addr;
430 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
431 break;
432 }
433 case CODASTATS:
434 if (coda_nc_use) {
435 coda_nc_gather_stats();
436 return(0);
437 } else {
438 return(ENODEV);
439 }
440 break;
441 case CODAPRINT:
442 if (coda_nc_use) {
443 print_coda_nc();
444 return(0);
445 } else {
446 return(ENODEV);
447 }
448 break;
449 case CIOC_KERNEL_VERSION:
450 switch (*(u_int *)addr) {
451 case 0:
452 *(u_int *)addr = coda_kernel_version;
453 return 0;
454 break;
455 case 1:
456 case 2:
457 if (coda_kernel_version != *(u_int *)addr)
458 return ENOENT;
459 else
460 return 0;
461 default:
462 return ENOENT;
463 }
464 break;
465 default :
466 return(EINVAL);
467 break;
468 }
469 }
470
471 int
472 vc_nb_poll(dev, events, l)
473 dev_t dev;
474 int events;
475 struct lwp *l;
476 {
477 struct vcomm *vcp;
478 int event_msk = 0;
479
480 ENTRY;
481
482 if (minor(dev) >= NVCODA || minor(dev) < 0)
483 return(ENXIO);
484
485 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
486
487 event_msk = events & (POLLIN|POLLRDNORM);
488 if (!event_msk)
489 return(0);
490
491 if (!EMPTY(vcp->vc_requests))
492 return(events & (POLLIN|POLLRDNORM));
493
494 selrecord(l, &(vcp->vc_selproc));
495
496 return(0);
497 }
498
/*
 * filt_vc_nb_detach: kqueue detach hook; unhook the note from the
 * device's selinfo klist (attached in vc_nb_kqfilter).
 */
static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}
506
507 static int
508 filt_vc_nb_read(struct knote *kn, long hint)
509 {
510 struct vcomm *vcp = kn->kn_hook;
511 struct vmsg *vmp;
512
513 if (EMPTY(vcp->vc_requests))
514 return (0);
515
516 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
517
518 kn->kn_data = vmp->vm_inSize;
519 return (1);
520 }
521
/* Read-filter ops for kqueue; the leading 1 presumably marks the filter
 * as file-descriptor attached (f_isfd) -- confirm against struct filterops. */
static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
524
525 int
526 vc_nb_kqfilter(dev_t dev, struct knote *kn)
527 {
528 struct vcomm *vcp;
529 struct klist *klist;
530
531 ENTRY;
532
533 if (minor(dev) >= NVCODA || minor(dev) < 0)
534 return(ENXIO);
535
536 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
537
538 switch (kn->kn_filter) {
539 case EVFILT_READ:
540 klist = &vcp->vc_selproc.sel_klist;
541 kn->kn_fop = &vc_nb_read_filtops;
542 break;
543
544 default:
545 return (1);
546 }
547
548 kn->kn_hook = vcp;
549
550 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
551
552 return (0);
553 }
554
/*
 * Statistics
 */
/* Per-opcode upcall counters, updated in coda_call. */
struct coda_clstat coda_clstat;
559
560 /*
561 * Key question: whether to sleep interruptably or uninterruptably when
562 * waiting for Venus. The former seems better (cause you can ^C a
563 * job), but then GNU-EMACS completion breaks. Use tsleep with no
564 * timeout, and no longjmp happens. But, when sleeping
565 * "uninterruptibly", we don't get told if it returns abnormally
566 * (e.g. kill -9).
567 */
568
/*
 * coda_call: send an upcall to Venus and sleep until it answers.
 *
 * The caller's buffer holds the request (inSize bytes) and receives the
 * reply in place; *outSize is updated to the reply size on success.
 * The message is queued on vc_requests, Venus is poked via selnotify,
 * and we tsleep on the message until vc_nb_write (reply), an interrupt,
 * or vc_nb_close (Venus death) wakes us.  If interrupted after Venus
 * read the request, a CODA_SIGNAL message is queued so Venus can abort
 * the operation.  Returns EINTR on interrupt, ENODEV if Venus is gone,
 * otherwise the result code Venus placed in the reply header.
 */
int
coda_call(mntinfo, inSize, outSize, buffer)
    struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	/* Save pending-signal state; restored after the sleep loop. */
	psig_omask = l->l_proc->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore return, We have to check anyway */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit sleep handle the
	   "signal" and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   can not do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    /* 2-second timeout per iteration so a wedged Venus is noticed. */
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
	    	break;
	    else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
		    /* Mask SIGIO/SIGALRM and keep waiting: these arrive
		       routinely and must not abort the upcall. */
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
		    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		    /* Any other signal really interrupts the upcall. */
		    sigset_t tmp;
		    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
		    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		    break;
#ifdef	notyet
		    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
		    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
	    }
	} while (error && i++ < 128 && VC_OPEN(vcp));
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
 	/* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		/* vc_nb_write filled in the reply and set VM_WRITE. */
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
                   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selnotify(&(vcp->vc_selproc), 0);
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	/* Last upcall out during an unmount wakes vc_nb_close. */
	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
758
759