/*	$NetBSD: coda_psdev.c,v 1.31 2005/12/11 12:19:50 christos Exp $	*/

/*
 *
 * Coda: an Experimental Distributed File System
 * Release 3.1
 *
 * Copyright (c) 1987-1998 Carnegie Mellon University
 * All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 *
 * Following code depends on file-system CODA.
 */

/* These routines are the device entry points for Venus. */

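/*
 * Road map of the upcall path implemented below; see coda_call(),
 * vc_nb_read() and vc_nb_write() for the authoritative details.
 *
 *   1. coda_call() allocates a struct vmsg, queues it on vc_requests,
 *      pokes Venus via selnotify() and sleeps on vm_sleep.
 *   2. Venus reads the request through vc_nb_read(), which copies the
 *      input arguments out to userspace, marks the message VM_READ and
 *      moves it to vc_replys.
 *   3. Venus answers through vc_nb_write(), which finds the message on
 *      vc_replys by its uniquifier, copies the result back, marks it
 *      VM_WRITE and wakes the sleeping caller.
 *
 * Writes whose opcode satisfies DOWNCALL() bypass the queues and are
 * handed straight to handleDownCall().
 */
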
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.31 2005/12/11 12:19:50 christos Exp $");

extern int coda_nc_initialized;		/* Set if cache has been initialized */

#ifdef	_LKM
#define	NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>

#include <miscfs/syncfs/syncfs.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#define CTL_C

int coda_psdev_print_entry = 0;		/* debug: trace entry via ENTRY */
static int outstanding_upcalls = 0;	/* upcalls pending at device close */
int coda_call_sleep = PZERO - 1;	/* tsleep() priority for coda_call() */
#ifdef	CTL_C
int coda_pcatch = PCATCH;		/* make the sleep signal-interruptible */
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

void vcodaattach(int n);

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter,
};

struct vmsg {
	struct queue	vm_chain;	/* links on vc_requests/vc_replys */
	caddr_t		vm_data;	/* caller's message buffer */
	u_short		vm_flags;
	u_short		vm_inSize;	/* Size is at most 5000 bytes */
	u_short		vm_outSize;
	u_short		vm_opcode;	/* copied from data to save ptr lookup */
	int		vm_unique;
	caddr_t		vm_sleep;	/* Not used by Mach; sleep/wakeup channel here. */
};

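/*
 * Values for vm_flags.  VM_READ is set by vc_nb_read() once Venus has
 * read the request; VM_WRITE is set by vc_nb_write() once Venus has
 * written its reply.  VM_INTR is defined for interrupted requests but
 * is not set anywhere in this file.
 */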
#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev_t dev, int flag, int mode, struct lwp *l /* NetBSD only */)
{
	struct vcomm *vcp;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	if (!coda_nc_initialized)
		coda_nc_init();

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	if (VC_OPEN(vcp))
		return(EBUSY);

	memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
	INIT_QUEUE(vcp->vc_requests);
	INIT_QUEUE(vcp->vc_replys);
	MARK_VC_OPEN(vcp);

	coda_mnttbl[minor(dev)].mi_vfsp = NULL;
	coda_mnttbl[minor(dev)].mi_rootvp = NULL;

	return(0);
}

int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vcomm *vcp;
	struct vmsg *vmp, *nvmp = NULL;
	struct coda_mntinfo *mi;
	int err;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	mi = &coda_mnttbl[minor(dev)];
	vcp = &(mi->mi_vcomm);

	if (!VC_OPEN(vcp))
		panic("vcclose: not open");

	/* Prevent future operations on this vfs from succeeding by auto-
	 * unmounting any vfs mounted via this device.  This frees the user
	 * or sysadmin from having to remember where all the mount points
	 * are located.  Put this before the WAKEUPs to avoid queuing new
	 * messages between the WAKEUP and the unmount (which can happen if
	 * we're unlucky).
	 */
	if (!mi->mi_rootvp) {
		/* just a simple open/close with no mount */
		MARK_VC_CLOSED(vcp);
		return 0;
	}

	/* Let unmount know this is for real */
	/*
	 * XXX Freeze the syncer.  Must do this before locking the
	 * mount point.  See dounmount() for details.
	 */
	lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	if (vfs_busy(mi->mi_vfsp, 0, 0)) {
		lockmgr(&syncer_lock, LK_RELEASE, NULL);
		return (EBUSY);
	}
	coda_unmounting(mi->mi_vfsp);

	/* Wake up clients so they can return. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	     !EOQ(vmp, vcp->vc_requests);
	     vmp = nvmp)
	{
		nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
		/* Free signal request messages and don't wake up, because
		   no one is waiting. */
		if (vmp->vm_opcode == CODA_SIGNAL) {
			CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
			CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
			continue;
		}
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	MARK_VC_CLOSED(vcp);

	if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
		printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
		printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
	}

	err = dounmount(mi->mi_vfsp, flag, l);
	if (err)
		myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
			  err, minor(dev)));
	return 0;
}

int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	/* Get message at head of request queue. */
	if (EMPTY(vcp->vc_requests))
		return(0);	/* Nothing to read */

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

#ifdef OLD_DIAGNOSTIC
	if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
		panic("vc_nb_read: bad chain");
#endif

	REMQUE(vmp->vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
				  vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
		CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
		return(error);
	}

	vmp->vm_flags |= VM_READ;
	INSQUE(vmp->vm_chain, vcp->vc_replys);

	return(error);
}

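/*
 * vc_nb_write() handles two kinds of writes from Venus.  The first two
 * ints of every message are peeked at to get the opcode and uniquifier:
 * downcall opcodes are copied into a union outputArgs and passed to
 * handleDownCall(); everything else is treated as the reply to a pending
 * upcall, matched against vc_replys by uniquifier, copied back into the
 * caller's buffer and signalled via wakeup().
 */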
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int tbuf[2];
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Peek at the opcode and uniquifier without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)tbuf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = tbuf[0];
	seq = tbuf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
				  error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		if (vmp->vm_unique == seq) break;
	}

	if (EOQ(vmp, vcp->vc_replys)) {
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);
	}

	/* Remove the message from the reply queue */
	REMQUE(vmp->vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
			  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
		return(EINVAL);
	}

	tbuf[0] = uiop->uio_resid;	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
			  error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	vmp->vm_outSize = tbuf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	wakeup(&vmp->vm_sleep);

	return(0);
}

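/*
 * Ioctl interface.  CODARESIZE resizes the name cache, CODASTATS gathers
 * name cache statistics, CODAPRINT prints the name cache, and
 * CIOC_KERNEL_VERSION either reports the kernel's Coda interface version
 * (argument 0) or checks it against the version Venus expects.
 */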
int
vc_nb_ioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct lwp *l)
{
	ENTRY;

	switch(cmd) {
	case CODARESIZE: {
		struct coda_resize *data = (struct coda_resize *)addr;
		return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
		break;
	}
	case CODASTATS:
		if (coda_nc_use) {
			coda_nc_gather_stats();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CODAPRINT:
		if (coda_nc_use) {
			print_coda_nc();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CIOC_KERNEL_VERSION:
		switch (*(u_int *)addr) {
		case 0:
			*(u_int *)addr = coda_kernel_version;
			return 0;
			break;
		case 1:
		case 2:
			if (coda_kernel_version != *(u_int *)addr)
				return ENOENT;
			else
				return 0;
		default:
			return ENOENT;
		}
		break;
	default:
		return(EINVAL);
		break;
	}
}

int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
	struct vcomm *vcp;
	int event_msk = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	event_msk = events & (POLLIN|POLLRDNORM);
	if (!event_msk)
		return(0);

	if (!EMPTY(vcp->vc_requests))
		return(events & (POLLIN|POLLRDNORM));

	selrecord(l, &(vcp->vc_selproc));

	return(0);
}

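/*
 * kqueue/kevent support.  Only EVFILT_READ is implemented: the filter
 * reports the size of the request at the head of vc_requests (if any),
 * and detach simply unhooks the knote from the device's selinfo klist.
 */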
static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	if (EMPTY(vcp->vc_requests))
		return (0);

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };

int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

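/*
 * coda_call() is the kernel side of an upcall: it queues the request on
 * vc_requests, notifies Venus through the device's selinfo, and sleeps
 * until Venus writes a reply (VM_WRITE), the request is interrupted, or
 * Venus dies (the device is closed, giving ENODEV).  On success,
 * *outSize is set to the size of the reply copied back into buffer.
 */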
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	caddr_t buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_proc->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
		/* Unlikely, but could be a race condition with a dying warden */
		return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
		return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
		myprintf(("Doing a call for %d.%d\n",
			  vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a Venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check anyway. */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal" and then go back to sleep.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigctx.ps_siglist;	/* array assignment */
			sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			break;
#ifdef	notyet
			sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
		}
	} while (error && i++ < 128 && VC_OPEN(vcp));
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
		/* Op went through, interrupt or not... */
		if (vmp->vm_flags & VM_WRITE) {
			error = 0;
			*outSize = vmp->vm_outSize;
		}

		else if (!(vmp->vm_flags & VM_READ)) {
			/* Interrupted before Venus read it. */
#ifdef	CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
					  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
			REMQUE(vmp->vm_chain);
			error = EINTR;
		}

		else {
			/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
			   the upcall started */
			/* Interrupted after start of upcall, send Venus a signal */
			struct coda_in_hdr *dog;
			struct vmsg *svmp;

#ifdef	CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
					  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

			REMQUE(vmp->vm_chain);
			error = EINTR;

			CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

			CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
			dog = (struct coda_in_hdr *)svmp->vm_data;

			svmp->vm_flags = 0;
			dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
			dog->unique = svmp->vm_unique = vmp->vm_unique;
			svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */		svmp->vm_outSize = sizeof (struct coda_in_hdr);

			if (codadebug)
				myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
					  svmp->vm_opcode, svmp->vm_unique));

			/* insert at head of queue! */
			INSQUE(svmp->vm_chain, vcp->vc_requests);
			selnotify(&(vcp->vc_selproc), 0);
		}
	}

	else {	/* If Venus died (!VC_OPEN(vcp)) */
		if (codadebug)
			myprintf(("vcclose woke op %d.%d flags %d\n",
				  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}