coda_psdev.c revision 1.16.2.2 1 /* $NetBSD: coda_psdev.c,v 1.16.2.2 2001/09/08 19:01:26 thorpej Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University. Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan. */
45
/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */
51
52 /* These routines are the device entry points for Venus. */
53
54 extern int coda_nc_initialized; /* Set if cache has been initialized */
55
56 #ifdef _LKM
57 #define NVCODA 4
58 #else
59 #include <vcoda.h>
60 #endif
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/proc.h>
67 #include <sys/mount.h>
68 #include <sys/file.h>
69 #include <sys/ioctl.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72
73 #include <miscfs/syncfs/syncfs.h>
74
75 #include <coda/coda.h>
76 #include <coda/cnode.h>
77 #include <coda/coda_namecache.h>
78 #include <coda/coda_io.h>
79 #include <coda/coda_psdev.h>
80
#define CTL_C			/* enable interruptible (^C) waits in coda_call() */

int coda_psdev_print_entry = 0;		/* debug: trace entry to each routine via ENTRY */
static
int outstanding_upcalls = 0;		/* upcalls vc_nb_close() must wait to drain */
int coda_call_sleep = PZERO - 1;	/* sleep priority used for all coda sleeps */
#ifdef	CTL_C
int coda_pcatch = PCATCH;		/* OR'ed into tsleep priority so signals wake us */
#else
#endif

/* Trace-entry helper; no-op unless coda_psdev_print_entry is set. */
#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))
93
94 void vcodaattach(int n);
95
/*
 * A message in transit between the kernel and Venus (the user-level
 * cache manager).  A message sits on vc_requests until Venus reads it
 * (vc_nb_read), then on vc_replys until Venus writes the answer back
 * (vc_nb_write), which wakes the sleeping caller in coda_call().
 */
struct vmsg {
    struct queue vm_chain;	/* links on vc_requests / vc_replys */
    caddr_t	 vm_data;	/* in/out argument buffer (owned by the caller) */
    u_short	 vm_flags;	/* VM_READ / VM_WRITE / VM_INTR */
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;	/* expected, later actual, reply size */
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;	/* sequence number matching request to reply */
    caddr_t	 vm_sleep;	/* Not used by Mach; address slept/woken on here */
};

#define	VM_READ	 1		/* Venus has read the request */
#define	VM_WRITE 2		/* Venus has written the reply */
#define	VM_INTR	 4		/* NOTE(review): not set anywhere in this file */
110
/* vcodaattach: do nothing */
void
vcodaattach(n)
    int n;
{
	/* Autoconf attach hook; all real setup happens in vc_nb_open(). */
}
117
118 /*
119 * These functions are written for NetBSD.
120 */
121 int
122 vc_nb_open(dev, flag, mode, p)
123 dev_t dev;
124 int flag;
125 int mode;
126 struct proc *p; /* NetBSD only */
127 {
128 struct vcomm *vcp;
129
130 ENTRY;
131
132 if (minor(dev) >= NVCODA || minor(dev) < 0)
133 return(ENXIO);
134
135 if (!coda_nc_initialized)
136 coda_nc_init();
137
138 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
139 if (VC_OPEN(vcp))
140 return(EBUSY);
141
142 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
143 INIT_QUEUE(vcp->vc_requests);
144 INIT_QUEUE(vcp->vc_replys);
145 MARK_VC_OPEN(vcp);
146
147 coda_mnttbl[minor(dev)].mi_vfsp = NULL;
148 coda_mnttbl[minor(dev)].mi_rootvp = NULL;
149
150 return(0);
151 }
152
/*
 * Close the Coda pseudo-device (Venus exiting or dying).  Any file
 * system mounted through this channel is forcibly unmounted, and every
 * process still waiting on an upcall is woken so it can return ENODEV
 * from coda_call().
 */
int
vc_nb_close (dev, flag, mode, p)
    dev_t dev;
    int flag;
    int mode;
    struct proc *p;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device. This frees user or
     * sysadm from having to remember where all mount points are located.
     * Put this before WAKEUPs to avoid queuing new messages between
     * the WAKEUP and the unmount (which can happen if we're unlucky)
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close w no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze the syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0)) {
	lockmgr(&syncer_lock, LK_RELEASE, NULL);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
	/* Grab the next link first: signal messages are freed below. */
	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wakeup cause
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    /* Same for messages already read by Venus but not yet answered. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    /* Wait for the woken callers to drain; the last caller out of
     * coda_call() wakes us on &outstanding_upcalls. */
    if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, p);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
	           err, minor(dev)));
    return 0;
}
244
/*
 * Venus reads the upcall at the head of the request queue.  The
 * message is then moved to the reply queue so the answer can be
 * matched up later — except CODA_SIGNAL messages, which expect no
 * reply and are freed here.  An empty queue yields a 0-byte read.
 */
int
vc_nb_read(dev, uiop, flag)
    dev_t dev;
    struct uio *uiop;
    int flag;
{
    struct vcomm * vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
	return(0);	/* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

#ifdef	OLD_DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
	panic("vc_nb_read: bad chain");
#endif

    /* Off the request queue either way — even on a copyout error. */
    REMQUE(vmp->vm_chain);

    /* If request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
	if (codadebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	return(error);
    }

    /* Mark it read and park it on the reply queue until Venus answers. */
    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}
298
/*
 * Venus writes either a downcall (an unsolicited cache-control message
 * pushed at the kernel) or the reply to a pending upcall.  The first
 * two ints of the write are peeked at to get the opcode and the
 * sequence number; the latter locates the matching message on the
 * reply queue, whose sleeping originator is then woken.
 */
int
vc_nb_write(dev, uiop, flag)
    dev_t dev;
    struct uio *uiop;
    int flag;
{
    struct vcomm * vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
	return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode, unique without transfering the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	/* No waiter with this uniquifier (it may have been interrupted). */
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	/* Reply is bigger than the caller's buffer; reject it but still
	 * wake the caller so it isn't stranded. */
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize = buf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);	/* reply is in place; resume the caller */

    return(0);
}
398
399 int
400 vc_nb_ioctl(dev, cmd, addr, flag, p)
401 dev_t dev;
402 u_long cmd;
403 caddr_t addr;
404 int flag;
405 struct proc *p;
406 {
407 ENTRY;
408
409 switch(cmd) {
410 case CODARESIZE: {
411 struct coda_resize *data = (struct coda_resize *)addr;
412 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
413 break;
414 }
415 case CODASTATS:
416 if (coda_nc_use) {
417 coda_nc_gather_stats();
418 return(0);
419 } else {
420 return(ENODEV);
421 }
422 break;
423 case CODAPRINT:
424 if (coda_nc_use) {
425 print_coda_nc();
426 return(0);
427 } else {
428 return(ENODEV);
429 }
430 break;
431 case CIOC_KERNEL_VERSION:
432 switch (*(u_int *)addr) {
433 case 0:
434 *(u_int *)addr = coda_kernel_version;
435 return 0;
436 break;
437 case 1:
438 case 2:
439 if (coda_kernel_version != *(u_int *)addr)
440 return ENOENT;
441 else
442 return 0;
443 default:
444 return ENOENT;
445 }
446 break;
447 default :
448 return(EINVAL);
449 break;
450 }
451 }
452
453 int
454 vc_nb_poll(dev, events, p)
455 dev_t dev;
456 int events;
457 struct proc *p;
458 {
459 struct vcomm *vcp;
460 int event_msk = 0;
461
462 ENTRY;
463
464 if (minor(dev) >= NVCODA || minor(dev) < 0)
465 return(ENXIO);
466
467 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
468
469 event_msk = events & (POLLIN|POLLRDNORM);
470 if (!event_msk)
471 return(0);
472
473 if (!EMPTY(vcp->vc_requests))
474 return(events & (POLLIN|POLLRDNORM));
475
476 selrecord(p, &(vcp->vc_selproc));
477
478 return(0);
479 }
480
481 static void
482 filt_vc_nb_detach(struct knote *kn)
483 {
484 struct vcomm *vcp = (void *) kn->kn_data;
485
486 SLIST_REMOVE(&vcp->vc_selproc.si_klist, kn, knote, kn_selnext);
487 }
488
489 static int
490 filt_vc_nb_read(struct knote *kn, long hint)
491 {
492 struct vcomm *vcp = (void *) kn->kn_data;
493 struct vmsg *vmp;
494
495 if (EMPTY(vcp->vc_requests))
496 return (0);
497
498 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
499
500 kn->kn_data = vmp->vm_inSize;
501 return (1);
502 }
503
/* Filter ops for EVFILT_READ on the Coda device (1 => fd-attached filter). */
static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
506
/*
 * kqueue attach for the Coda device.  Only EVFILT_READ is supported;
 * the vcomm pointer is stashed in kn_hook for the filter routines.
 * Returns 1 (reject) for any other filter type.
 */
int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.si_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (1);
	}

	/* Filter routines retrieve the vcomm through kn_hook. */
	kn->kn_hook = (void *) vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}
536
537 /*
538 * Statistics
539 */
540 struct coda_clstat coda_clstat;
541
/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU-EMACS completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */
550
/*
 * coda_call: pass the request in "buffer" (inSize bytes) up to Venus
 * and sleep until the reply has been written back over it.  On entry
 * *outSize is the expected reply size (0 means "same as inSize"); on
 * success it is updated to the actual reply size.  Returns ENODEV if
 * Venus is gone, EINTR if interrupted before completion, otherwise
 * the result code from the reply header.
 */
int
coda_call(mntinfo, inSize, outSize, buffer)
    struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct proc *p = curproc;
	sigset_t psig_omask;
	int i;
	/* Save pending-signal state so the mask fiddling below can be undone. */
	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore return, We have to check anyway */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit sleep handle the
	   "signal" and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   can not do this.  A better solution is necessary.
	 */
	i = 0;
	do {
	    /* Sleep at most 2 seconds at a time; SIGIO/SIGALRM wakeups are
	     * masked and retried below so they don't abort the upcall. */
	    error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	    if (error == 0)
		break;
	    else if (error == EWOULDBLOCK) {
		/* timed out with no reply yet; just keep waiting */
#ifdef	CODA_VERBOSE
		printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
		sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
		printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	    } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
		sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
		printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	    } else {
		/* Some other signal: stop waiting and fall through to the
		 * interrupt handling below. */
		sigset_t tmp;
		tmp = p->p_sigctx.ps_siglist;	/* array assignment */
		sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
		printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
		printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
		break;
#ifdef	notyet
		sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
		printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
	    }
	} while (error && i++ < 128 && VC_OPEN(vcp));
	/* Restore the pending-signal state saved on entry. */
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	    /* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		/* Venus wrote the reply into our buffer; report its size. */
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		/* Venus never saw it; safe to just drop it from the queue. */
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
		   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			    vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		/* Build a CODA_SIGNAL message carrying the same uniquifier
		 * so Venus can abandon the in-progress operation. */
		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selnotify(&(vcp->vc_selproc), 0);
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	/* If vc_nb_close() is waiting for upcalls to drain, the last one
	 * out wakes it. */
	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
739
740