/*	$NetBSD: coda_psdev.c,v 1.16.2.3 2002/01/10 19:50:53 thorpej Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.  */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */

/* These routines are the device entry points for Venus. */

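/*
 * Message flow (illustrative sketch only): Venus opens the character
 * device, then loops reading upcall requests with read(2) and writing
 * replies back with write(2).  On the kernel side, coda_call() queues a
 * request on vc_requests, notifies any selecting/polling Venus, and
 * sleeps until the reply carrying its "unique" sequence number is
 * written back (or the call is interrupted).
 */
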
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.16.2.3 2002/01/10 19:50:53 thorpej Exp $");

extern int coda_nc_initialized;		/* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>
#include <coda/coda_psdev.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static
int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

struct vmsg {
	struct queue	vm_chain;
	caddr_t		vm_data;
	u_short		vm_flags;
	u_short		vm_inSize;	/* Size is at most 5000 bytes */
	u_short		vm_outSize;
	u_short		vm_opcode;	/* copied from data to save ptr lookup */
	int		vm_unique;
	caddr_t		vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

/* vcodaattach: do nothing */
void
vcodaattach(n)
	int n;
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev, flag, mode, p)
	dev_t		dev;
	int		flag;
	int		mode;
	struct proc	*p;		/* NetBSD only */
{
	struct vcomm *vcp;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	if (!coda_nc_initialized)
		coda_nc_init();

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	if (VC_OPEN(vcp))
		return(EBUSY);

	memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
	INIT_QUEUE(vcp->vc_requests);
	INIT_QUEUE(vcp->vc_replys);
	MARK_VC_OPEN(vcp);

	coda_mnttbl[minor(dev)].mi_vfsp = NULL;
	coda_mnttbl[minor(dev)].mi_rootvp = NULL;

	return(0);
}

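/*
 * vc_nb_close: Venus has closed the device.  Wake up any processes
 * still waiting on outstanding upcalls and unmount the file system
 * that was mounted through this device, if any.
 */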
int
vc_nb_close (dev, flag, mode, p)
	dev_t		dev;
	int		flag;
	int		mode;
	struct proc	*p;
{
	struct vcomm *vcp;
	struct vmsg *vmp, *nvmp = NULL;
	struct coda_mntinfo *mi;
	int err;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	mi = &coda_mnttbl[minor(dev)];
	vcp = &(mi->mi_vcomm);

	if (!VC_OPEN(vcp))
		panic("vcclose: not open");

	/* Prevent future operations on this vfs from succeeding by auto-
	 * unmounting any vfs mounted via this device.  This frees the user
	 * or sysadmin from having to remember where all the mount points
	 * are located.  Put this before the WAKEUPs to avoid queuing new
	 * messages between the WAKEUP and the unmount (which can happen
	 * if we're unlucky).
	 */
	if (!mi->mi_rootvp) {
		/* just a simple open/close with no mount */
		MARK_VC_CLOSED(vcp);
		return 0;
	}

	/* Let unmount know this is for real */
	/*
	 * XXX Freeze the syncer.  Must do this before locking the
	 * mount point.  See dounmount() for details.
	 */
	lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	if (vfs_busy(mi->mi_vfsp, 0, 0)) {
		lockmgr(&syncer_lock, LK_RELEASE, NULL);
		return (EBUSY);
	}
	coda_unmounting(mi->mi_vfsp);

	/* Wake up clients so they can return. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	     !EOQ(vmp, vcp->vc_requests);
	     vmp = nvmp)
	{
		nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
		/* Free signal request messages and don't wake up, since
		   no one is waiting. */
		if (vmp->vm_opcode == CODA_SIGNAL) {
			CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
			CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
			continue;
		}
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	MARK_VC_CLOSED(vcp);

	if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
		printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
		printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
	}

	err = dounmount(mi->mi_vfsp, flag, p);
	if (err)
		myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
			  err, minor(dev)));
	return 0;
}

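/*
 * vc_nb_read: Venus reads the next pending upcall.  The message at the
 * head of the request queue is copied out and then moved to the reply
 * queue, where it waits for Venus to write the answer back.
 */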
int
vc_nb_read(dev, uiop, flag)
	dev_t		dev;
	struct uio	*uiop;
	int		flag;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	/* Get message at head of request queue. */
	if (EMPTY(vcp->vc_requests))
		return(0);	/* Nothing to read */

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

#ifdef OLD_DIAGNOSTIC
	if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
		panic("vc_nb_read: bad chain");
#endif

	REMQUE(vmp->vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
				  vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
		CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
		return(error);
	}

	vmp->vm_flags |= VM_READ;
	INSQUE(vmp->vm_chain, vcp->vc_replys);

	return(error);
}

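/*
 * vc_nb_write: Venus writes back either a downcall (handled right away
 * by handleDownCall()) or the reply to an earlier upcall, matched to
 * the sleeping caller by its uniquifier.
 */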
int
vc_nb_write(dev, uiop, flag)
	dev_t		dev;
	struct uio	*uiop;
	int		flag;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int buf[2];
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Peek at the opcode and uniquifier without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = buf[0];
	seq = buf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
				  error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		if (vmp->vm_unique == seq) break;
	}

	if (EOQ(vmp, vcp->vc_replys)) {
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);
	}

	/* Remove the message from the reply queue */
	REMQUE(vmp->vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
			  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
		return(EINVAL);
	}

	buf[0] = uiop->uio_resid;	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
			  error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	vmp->vm_outSize = buf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	wakeup(&vmp->vm_sleep);

	return(0);
}

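/*
 * vc_nb_ioctl: control operations on the device: resize the name cache,
 * gather or print cache statistics, and negotiate the kernel/Venus
 * interface version.
 */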
int
vc_nb_ioctl(dev, cmd, addr, flag, p)
	dev_t		dev;
	u_long		cmd;
	caddr_t		addr;
	int		flag;
	struct proc	*p;
{
	ENTRY;

	switch(cmd) {
	case CODARESIZE: {
		struct coda_resize *data = (struct coda_resize *)addr;
		return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
		break;
	}
	case CODASTATS:
		if (coda_nc_use) {
			coda_nc_gather_stats();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CODAPRINT:
		if (coda_nc_use) {
			print_coda_nc();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CIOC_KERNEL_VERSION:
		switch (*(u_int *)addr) {
		case 0:
			*(u_int *)addr = coda_kernel_version;
			return 0;
			break;
		case 1:
		case 2:
			if (coda_kernel_version != *(u_int *)addr)
				return ENOENT;
			else
				return 0;
		default:
			return ENOENT;
		}
		break;
	default:
		return(EINVAL);
		break;
	}
}

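/*
 * vc_nb_poll: report the device readable when there is at least one
 * upcall waiting on the request queue; otherwise record the poller so
 * coda_call() can wake it via selnotify().
 */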
int
vc_nb_poll(dev, events, p)
	dev_t		dev;
	int		events;
	struct proc	*p;
{
	struct vcomm *vcp;
	int event_msk = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	event_msk = events & (POLLIN|POLLRDNORM);
	if (!event_msk)
		return(0);

	if (!EMPTY(vcp->vc_requests))
		return(events & (POLLIN|POLLRDNORM));

	selrecord(p, &(vcp->vc_selproc));

	return(0);
}

static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = (void *) kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.si_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = (void *) kn->kn_hook;
	struct vmsg *vmp;

	if (EMPTY(vcp->vc_requests))
		return (0);

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

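/*
 * vc_nb_kqfilter: register a kqueue EVFILT_READ filter on the device;
 * the vcomm pointer is stashed in kn_hook for the filter routines above.
 */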
int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.si_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = (void *) vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU-EMACS completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

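/*
 * coda_call: make an upcall to Venus.  Queue the request, poke Venus
 * via selnotify(), and sleep until Venus writes the reply back (or the
 * sleep is interrupted, in which case a CODA_SIGNAL message may be
 * queued so Venus can abandon the operation).
 */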
int
coda_call(mntinfo, inSize, outSize, buffer)
	struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct proc *p = curproc;
	sigset_t psig_omask;
	int i;
	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
		/* Unlikely, but could be a race condition with a dying warden */
		return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
		return(ENODEV);

	CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
		= *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
		myprintf(("Doing a call for %d.%d\n",
			  vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal", and then go to sleep again.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigctx.ps_siglist;	/* array assignment */
			sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
				p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
				p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
				p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
				p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
				tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			break;
#ifdef	notyet
			sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
				p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
				p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
				p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
				p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
		}
	} while (error && i++ < 128 && VC_OPEN(vcp));
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
		/* Op went through, interrupt or not... */
		if (vmp->vm_flags & VM_WRITE) {
			error = 0;
			*outSize = vmp->vm_outSize;
		}

		else if (!(vmp->vm_flags & VM_READ)) {
			/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
					  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
			REMQUE(vmp->vm_chain);
			error = EINTR;
		}

		else {
			/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
			   upcall started */
			/* Interrupted after start of upcall, send venus a signal */
			struct coda_in_hdr *dog;
			struct vmsg *svmp;

#ifdef	CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
					  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

			REMQUE(vmp->vm_chain);
			error = EINTR;

			CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

			CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
			dog = (struct coda_in_hdr *)svmp->vm_data;

			svmp->vm_flags = 0;
			dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
			dog->unique = svmp->vm_unique = vmp->vm_unique;
			svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */		svmp->vm_outSize = sizeof (struct coda_in_hdr);

			if (codadebug)
				myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
					  svmp->vm_opcode, svmp->vm_unique));

			/* insert at head of queue! */
			INSQUE(svmp->vm_chain, vcp->vc_requests);
			selnotify(&(vcp->vc_selproc), 0);
		}
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
		if (codadebug)
			myprintf(("vcclose woke op %d.%d flags %d\n",
				  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}