coda_psdev.c revision 1.17.2.2 1 /* $NetBSD: coda_psdev.c,v 1.17.2.2 2001/09/26 15:28:07 fvdl Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University. Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan. */
45
/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */
51
52 /* These routines are the device entry points for Venus. */
53
54 extern int coda_nc_initialized; /* Set if cache has been initialized */
55
56 #ifdef _LKM
57 #define NVCODA 4
58 #else
59 #include <vcoda.h>
60 #endif
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/proc.h>
67 #include <sys/mount.h>
68 #include <sys/file.h>
69 #include <sys/ioctl.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72 #include <sys/vnode.h>
73
74 #include <miscfs/specfs/specdev.h>
75
76 #include <miscfs/syncfs/syncfs.h>
77
78 #include <coda/coda.h>
79 #include <coda/cnode.h>
80 #include <coda/coda_namecache.h>
81 #include <coda/coda_io.h>
82 #include <coda/coda_psdev.h>
83
/* Compile in the experimental interruptible-sleep (^C) support below. */
#define CTL_C

int coda_psdev_print_entry = 0;		/* debug: nonzero logs entry to each routine */
static
int outstanding_upcalls = 0;		/* upcalls not yet answered; drained in vc_nb_close() */
int coda_call_sleep = PZERO - 1;	/* tsleep priority used while waiting for Venus */
#ifdef CTL_C
int coda_pcatch = PCATCH;		/* OR'd into the sleep priority so signals wake coda_call() */
#else
#endif

/* Trace helper: announces entry to a routine when coda_psdev_print_entry is set. */
#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))
96
97 void vcodaattach(int n);
98
/*
 * A message in transit between the kernel and Venus.  A message sits on
 * vc_requests until Venus reads it (vc_nb_read) and then on vc_replys
 * until Venus writes the answer back (vc_nb_write); the originating
 * thread sleeps on vm_sleep in the meantime (see coda_call()).
 */
struct vmsg {
    struct queue vm_chain;	/* links on vc_requests / vc_replys */
    caddr_t	 vm_data;	/* request/reply buffer, owned by the caller */
    u_short	 vm_flags;	/* VM_READ / VM_WRITE / VM_INTR progress bits */
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;	/* reply capacity in, actual reply size out */
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;	/* sequence number used to match replies */
    caddr_t	 vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	 1		/* Venus has read the request */
#define	VM_WRITE 2		/* Venus has written the reply */
#define	VM_INTR	 4
113
/*
 * vcodaattach: pseudo-device attach hook.
 * There is no per-unit state to set up at attach time, so this is
 * deliberately a no-op.
 */
void
vcodaattach(int n)
{
	/* Nothing to initialize. */
}
120
121 /*
122 * These functions are written for NetBSD.
123 */
124 int
125 vc_nb_open(devvp, flag, mode, p)
126 struct vnode *devvp;
127 int flag;
128 int mode;
129 struct proc *p; /* NetBSD only */
130 {
131 struct vcomm *vcp;
132 dev_t rdev;
133 int unit;
134
135 ENTRY;
136
137 unit = minor(vdev_rdev(devvp));
138
139 if (unit >= NVCODA || unit < 0)
140 return(ENXIO);
141
142 if (!coda_nc_initialized)
143 coda_nc_init();
144
145 vcp = &coda_mnttbl[unit].mi_vcomm;
146
147 vdev_setprivdata(devvp, &coda_mnttbl[unit]);
148
149 if (VC_OPEN(vcp))
150 return(EBUSY);
151
152 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
153 INIT_QUEUE(vcp->vc_requests);
154 INIT_QUEUE(vcp->vc_replys);
155 MARK_VC_OPEN(vcp);
156
157 coda_mnttbl[unit].mi_vfsp = NULL;
158 coda_mnttbl[unit].mi_rootvp = NULL;
159
160 return(0);
161 }
162
163 int
164 vc_nb_close(devvp, flag, mode, p)
165 struct vnode *devvp;
166 int flag;
167 int mode;
168 struct proc *p;
169 {
170 struct vcomm *vcp;
171 struct vmsg *vmp, *nvmp = NULL;
172 struct coda_mntinfo *mi;
173 int err;
174 dev_t rdev;
175
176 ENTRY;
177
178 rdev = vdev_rdev(devvp);
179 mi = vdev_privdata(devvp);
180 if (err != 0)
181 return err;
182 vcp = &(mi->mi_vcomm);
183
184 if (!VC_OPEN(vcp))
185 panic("vcclose: not open");
186
187 /* prevent future operations on this vfs from succeeding by auto-
188 * unmounting any vfs mounted via this device. This frees user or
189 * sysadm from having to remember where all mount points are located.
190 * Put this before WAKEUPs to avoid queuing new messages between
191 * the WAKEUP and the unmount (which can happen if we're unlucky)
192 */
193 if (!mi->mi_rootvp) {
194 /* just a simple open/close w no mount */
195 MARK_VC_CLOSED(vcp);
196 return 0;
197 }
198
199 /* Let unmount know this is for real */
200 /*
201 * XXX Freeze syncer. Must do this before locking the
202 * mount point. See dounmount for details().
203 */
204 lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
205 VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
206 if (vfs_busy(mi->mi_vfsp, 0, 0)) {
207 lockmgr(&syncer_lock, LK_RELEASE, NULL);
208 return (EBUSY);
209 }
210 coda_unmounting(mi->mi_vfsp);
211
212 /* Wakeup clients so they can return. */
213 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
214 !EOQ(vmp, vcp->vc_requests);
215 vmp = nvmp)
216 {
217 nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
218 /* Free signal request messages and don't wakeup cause
219 no one is waiting. */
220 if (vmp->vm_opcode == CODA_SIGNAL) {
221 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
222 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
223 continue;
224 }
225 outstanding_upcalls++;
226 wakeup(&vmp->vm_sleep);
227 }
228
229 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
230 !EOQ(vmp, vcp->vc_replys);
231 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
232 {
233 outstanding_upcalls++;
234 wakeup(&vmp->vm_sleep);
235 }
236
237 MARK_VC_CLOSED(vcp);
238
239 if (outstanding_upcalls) {
240 #ifdef CODA_VERBOSE
241 printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
242 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
243 printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
244 #else
245 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
246 #endif
247 }
248
249 err = dounmount(mi->mi_vfsp, flag, p);
250 if (err)
251 myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
252 err, minor(rdev)));
253 return 0;
254 }
255
/*
 * vc_nb_read: Venus reads the next pending upcall request.
 *
 * Copies the message at the head of the request queue to userspace,
 * then moves it to the reply queue so the eventual answer can be
 * matched up by its uniquifier.  CODA_SIGNAL messages expect no reply
 * and are freed instead.  Returns 0 with no data when the queue is
 * empty, EINVAL on a copyout failure.
 */
int
vc_nb_read(devvp, uiop, flag)
	struct vnode *devvp;
	struct uio *uiop;
	int flag;
{
	struct vcomm * vcp;
	struct vmsg *vmp;
	struct coda_mntinfo *mi;
	int error;

	ENTRY;

	mi = vdev_privdata(devvp);
	vcp = &mi->mi_vcomm;
	/* Get message at head of request queue. */
	if (EMPTY(vcp->vc_requests))
		return(0);	/* Nothing to read */

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

#ifdef OLD_DIAGNOSTIC
	if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
		panic("vc_nb_read: bad chain");
#endif

	/* Dequeue regardless of the copy outcome. */
	REMQUE(vmp->vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
			          vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
		CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
		return(error);
	}

	/* Park the message on the reply queue until Venus answers. */
	vmp->vm_flags |= VM_READ;
	INSQUE(vmp->vm_chain, vcp->vc_replys);

	return(error);
}
308
/*
 * vc_nb_write: Venus writes a reply (or an unsolicited downcall) to
 * the kernel.
 *
 * The first two ints of the write are peeked at to obtain the opcode
 * and uniquifier.  Downcall opcodes are dispatched immediately via
 * handleDownCall(); otherwise the matching message is located on the
 * reply queue by uniquifier, the reply data is copied into its buffer,
 * and the sleeping caller in coda_call() is woken.
 */
int
vc_nb_write(devvp, uiop, flag)
	struct vnode *devvp;
	struct uio *uiop;
	int flag;
{
	struct vcomm * vcp;
	struct vmsg *vmp;
	struct coda_mntinfo *mi;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int buf[2];
	int error;

	ENTRY;

	mi = vdev_privdata(devvp);
	vcp = &mi->mi_vcomm;

	/* Peek at the opcode, unique without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = buf[0];
	seq = buf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	/* Unsolicited message from Venus: handle it here rather than
	   matching it against a sleeping caller. */
	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
			          error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		if (vmp->vm_unique == seq) break;
	}

	if (EOQ(vmp, vcp->vc_replys)) {
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);	/* no caller is waiting for this reply */
	}

	/* Remove the message from the reply queue */
	REMQUE(vmp->vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		          vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
		return(EINVAL);
	}

	buf[0] = uiop->uio_resid;	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		          error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	vmp->vm_outSize = buf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	/* Wake the caller sleeping in coda_call(). */
	wakeup(&vmp->vm_sleep);

	return(0);
}
407
408 int
409 vc_nb_ioctl(devvp, cmd, addr, flag, p)
410 struct vnode *devvp;
411 u_long cmd;
412 caddr_t addr;
413 int flag;
414 struct proc *p;
415 {
416 ENTRY;
417
418 switch(cmd) {
419 case CODARESIZE: {
420 struct coda_resize *data = (struct coda_resize *)addr;
421 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
422 break;
423 }
424 case CODASTATS:
425 if (coda_nc_use) {
426 coda_nc_gather_stats();
427 return(0);
428 } else {
429 return(ENODEV);
430 }
431 break;
432 case CODAPRINT:
433 if (coda_nc_use) {
434 print_coda_nc();
435 return(0);
436 } else {
437 return(ENODEV);
438 }
439 break;
440 case CIOC_KERNEL_VERSION:
441 switch (*(u_int *)addr) {
442 case 0:
443 *(u_int *)addr = coda_kernel_version;
444 return 0;
445 break;
446 case 1:
447 case 2:
448 if (coda_kernel_version != *(u_int *)addr)
449 return ENOENT;
450 else
451 return 0;
452 default:
453 return ENOENT;
454 }
455 break;
456 default :
457 return(EINVAL);
458 break;
459 }
460 }
461
462 int
463 vc_nb_poll(devvp, events, p)
464 struct vnode *devvp;
465 int events;
466 struct proc *p;
467 {
468 struct coda_mntinfo *mi;
469 struct vcomm *vcp;
470 int event_msk;
471
472 ENTRY;
473
474 mi = vdev_privdata(devvp);
475 vcp = &mi->mi_vcomm;
476
477 event_msk = events & (POLLIN|POLLRDNORM);
478 if (!event_msk)
479 return(0);
480
481 if (!EMPTY(vcp->vc_requests))
482 return(events & (POLLIN|POLLRDNORM));
483
484 selrecord(p, &(vcp->vc_selproc));
485
486 return(0);
487 }
488
/*
 * Statistics: per-opcode upcall counters, bumped in coda_call().
 */
struct coda_clstat coda_clstat;
493
/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */
502
/*
 * coda_call: send an upcall to Venus and wait for the reply.
 *
 * Wraps 'buffer' (which already holds a struct coda_in_hdr plus the
 * opcode-specific arguments) in a vmsg, queues it on vc_requests,
 * pokes Venus via selwakeup(), and sleeps on vm_sleep until one of:
 *   - Venus wrote the reply (VM_WRITE set)  -> return its result code;
 *   - the sleep was interrupted             -> EINTR, and if Venus had
 *     already read the request, queue a CODA_SIGNAL message so it can
 *     abort the operation;
 *   - the channel closed (Venus died)       -> ENODEV.
 *
 * mntinfo - channel to the serving Venus (NULL => ENODEV)
 * inSize  - bytes of request data in 'buffer'
 * outSize - in: reply capacity; out: actual reply size
 * buffer  - caller-owned request/reply buffer
 */
int
coda_call(mntinfo, inSize, outSize, buffer)
	struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef CTL_C
	struct proc *p = curproc;
	sigset_t psig_omask;
	int i;
	/* Save the pending-signal set; restored after the sleep loop. */
	psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
	if (mntinfo == NULL) {
		/* Unlikely, but could be a race condition with a dying warden */
		return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
		return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
		= *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;		/* per-channel sequence number */
	if (codadebug)
		myprintf(("Doing a call for %d.%d\n",
		          vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selwakeup(&(vcp->vc_selproc));

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore return, We have to check anyway */
#ifdef CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit sleep handle the
	   "signal" and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   can not do this.  A better solution is necessary.
	 */
	i = 0;
	do {
		/* 2-second catchable naps; SIGIO/SIGALRM are masked and the
		   nap retried, anything else breaks out with EINTR below. */
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		else if (error == EWOULDBLOCK) {
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigctx.ps_siglist;	/* array assignment */
			sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			       p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			       p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			       p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			       p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
			       tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			break;
#ifdef notyet
			sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			       p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
			       p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
			       p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
			       p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
		}
	} while (error && i++ < 128 && VC_OPEN(vcp));
	/* Restore the pending-signal set saved on entry. */
	p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
		/* Op went through, interrupt or not... */
		if (vmp->vm_flags & VM_WRITE) {
			error = 0;
			*outSize = vmp->vm_outSize;
		}

		else if (!(vmp->vm_flags & VM_READ)) {
			/* Interrupted before venus read it. */
#ifdef CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
				          vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
			REMQUE(vmp->vm_chain);
			error = EINTR;
		}

		else {
			/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
			   upcall started */
			/* Interrupted after start of upcall, send venus a signal */
			struct coda_in_hdr *dog;
			struct vmsg *svmp;

#ifdef CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
				          vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

			REMQUE(vmp->vm_chain);
			error = EINTR;

			/* Build a CODA_SIGNAL message carrying the same
			   uniquifier so Venus can abort the operation. */
			CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

			CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
			dog = (struct coda_in_hdr *)svmp->vm_data;

			svmp->vm_flags = 0;
			dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
			dog->unique = svmp->vm_unique = vmp->vm_unique;
			svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */		svmp->vm_outSize = sizeof (struct coda_in_hdr);

			if (codadebug)
				myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
				          svmp->vm_opcode, svmp->vm_unique));

			/* insert at head of queue! */
			INSQUE(svmp->vm_chain, vcp->vc_requests);
			selwakeup(&(vcp->vc_selproc));
		}
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
		if (codadebug)
			myprintf(("vcclose woke op %d.%d flags %d\n",
			          vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	/* Let a pending vc_nb_close() proceed once the last caller drains. */
	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
691
692