coda_psdev.c revision 1.17.2.1 1 /* $NetBSD: coda_psdev.c,v 1.17.2.1 2001/09/07 04:45:20 thorpej Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
41 /*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan. */
45
/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */
51
52 /* These routines are the device entry points for Venus. */
53
extern int coda_nc_initialized;    /* Set if cache has been initialized */

/*
 * NVCODA: number of Coda pseudo-device units.  When built as a loadable
 * kernel module the config(8)-generated <vcoda.h> is not available, so
 * a fixed count of 4 is used instead.
 */
#ifdef _LKM
#define NVCODA 4
#else
#include <vcoda.h>
#endif
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/proc.h>
67 #include <sys/mount.h>
68 #include <sys/file.h>
69 #include <sys/ioctl.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72 #include <sys/vnode.h>
73
74 #include <miscfs/specfs/specdev.h>
75
76 #include <miscfs/syncfs/syncfs.h>
77
78 #include <coda/coda.h>
79 #include <coda/cnode.h>
80 #include <coda/coda_namecache.h>
81 #include <coda/coda_io.h>
82 #include <coda/coda_psdev.h>
83
/*
 * CTL_C: enable the experimental interruptible-sleep support in
 * coda_call(), letting a ^C (or other signal) break out of an upcall.
 */
#define CTL_C

int coda_psdev_print_entry = 0;  /* debug: log entry to each routine below */
static
int outstanding_upcalls = 0;     /* upcalls still in flight at unmount time */
int coda_call_sleep = PZERO - 1; /* tsleep() priority used for upcall waits */
#ifdef CTL_C
int coda_pcatch = PCATCH;        /* OR-ed into the sleep priority so signals
                                  * wake the sleeper instead of restarting */
#else
#endif

/* Trace routine entry when coda_psdev_print_entry is set. */
#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))

void vcodaattach(int n);
98
/*
 * A vmsg tracks one upcall message exchanged between the kernel and the
 * Venus user-level server through the pseudo-device.  It sits on
 * vc_requests until Venus reads it (vc_nb_read), then on vc_replys
 * until Venus writes the answer back (vc_nb_write), at which point the
 * sleeping caller in coda_call() is woken.
 */
struct vmsg {
    struct queue vm_chain;       /* linkage on the request/reply queue */
    caddr_t	 vm_data;        /* in/out argument buffer (caller-owned) */
    u_short	 vm_flags;       /* VM_* progress bits, see below */
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;     /* expected/actual reply size in bytes */
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;      /* sequence number matching reply to request */
    caddr_t	 vm_sleep;	/* sleep/wakeup channel (Not used by Mach.) */
};

#define	VM_READ	    1            /* Venus has read the request */
#define	VM_WRITE    2            /* Venus has written the reply */
#define	VM_INTR	    4            /* request was interrupted */
113
/*
 * vcodaattach: pseudo-device attach hook called at autoconfiguration
 * time with the unit count.  No per-unit setup is required, so this is
 * deliberately empty.
 */
void
vcodaattach(int n)
{
}
120
121 /*
122 * These functions are written for NetBSD.
123 */
124 int
125 vc_nb_open(devvp, flag, mode, p)
126 struct vnode *devvp;
127 int flag;
128 int mode;
129 struct proc *p; /* NetBSD only */
130 {
131 struct vcomm *vcp;
132
133 ENTRY;
134
135 if (minor(devvp->v_rdev) >= NVCODA || minor(devvp->v_rdev) < 0)
136 return(ENXIO);
137
138 if (!coda_nc_initialized)
139 coda_nc_init();
140
141 vcp = &coda_mnttbl[minor(devvp->v_rdev)].mi_vcomm;
142
143 devvp->v_devcookie = &coda_mnttbl[minor(devvp->v_rdev)];
144
145 if (VC_OPEN(vcp))
146 return(EBUSY);
147
148 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
149 INIT_QUEUE(vcp->vc_requests);
150 INIT_QUEUE(vcp->vc_replys);
151 MARK_VC_OPEN(vcp);
152
153 coda_mnttbl[minor(devvp->v_rdev)].mi_vfsp = NULL;
154 coda_mnttbl[minor(devvp->v_rdev)].mi_rootvp = NULL;
155
156 return(0);
157 }
158
/*
 * vc_nb_close: close the Coda pseudo-device; Venus has exited (or died).
 *
 * Wakes every client still waiting on an upcall so they can return,
 * waits for those outstanding upcalls to drain, then forcibly unmounts
 * any filesystem mounted through this device.
 *
 * NOTE(review): a dounmount() failure is only logged; close still
 * reports success (returns 0).
 */
int 
vc_nb_close (devvp, flag, mode, p)
    struct vnode *devvp;
    int flag;
    int mode;
    struct proc *p;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int err;

    ENTRY;

    mi = devvp->v_devcookie;
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device. This frees user or
     * sysadm from having to remember where all mount points are located.
     * Put this before WAKEUPs to avoid queuing new messages between
     * the WAKEUP and the unmount (which can happen if we're unlucky)
     */
    if (!mi->mi_rootvp) {
	/* just a simple open/close w no mount */
	MARK_VC_CLOSED(vcp);
	return 0;
    }

    /* Let unmount know this is for real */
    /*
     * XXX Freeze syncer.  Must do this before locking the
     * mount point.  See dounmount() for details.
     */
    lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0)) {
	/* NOTE(review): C_UNMOUNTING stays set on this early-out --
	 * presumably harmless since the device is being torn down,
	 * but worth confirming. */
	lockmgr(&syncer_lock, LK_RELEASE, NULL);
	return (EBUSY);
    }
    coda_unmounting(mi->mi_vfsp);

    /* Wakeup clients sleeping in coda_call() so they can return.
     * nvmp is fetched before any wakeup/free so the walk survives the
     * current entry being removed from the queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = nvmp)
    {
	nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
	/* Free signal request messages and don't wakeup cause
	   no one is waiting. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
	    CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    /* Same for requests Venus had already read but not yet answered. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	outstanding_upcalls++;
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    /* Wait for the woken callers to notice (coda_call() wakes us via
     * &outstanding_upcalls once the count drops to zero). */
    if (outstanding_upcalls) {
#ifdef CODA_VERBOSE
	printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
	printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
	(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, p);
    if (err)
	myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
		  err, minor(devvp->v_rdev)));
    return 0;
}
247
248 int
249 vc_nb_read(devvp, uiop, flag)
250 struct vnode *devvp;
251 struct uio *uiop;
252 int flag;
253 {
254 struct vcomm * vcp;
255 struct vmsg *vmp;
256 struct coda_mntinfo *mi;
257 int error = 0;
258
259 ENTRY;
260
261 mi = devvp->v_devcookie;
262 vcp = &mi->mi_vcomm;
263 /* Get message at head of request queue. */
264 if (EMPTY(vcp->vc_requests))
265 return(0); /* Nothing to read */
266
267 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
268
269 /* Move the input args into userspace */
270 uiop->uio_rw = UIO_READ;
271 error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
272 if (error) {
273 myprintf(("vcread: error (%d) on uiomove\n", error));
274 error = EINVAL;
275 }
276
277 #ifdef OLD_DIAGNOSTIC
278 if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
279 panic("vc_nb_read: bad chain");
280 #endif
281
282 REMQUE(vmp->vm_chain);
283
284 /* If request was a signal, free up the message and don't
285 enqueue it in the reply queue. */
286 if (vmp->vm_opcode == CODA_SIGNAL) {
287 if (codadebug)
288 myprintf(("vcread: signal msg (%d, %d)\n",
289 vmp->vm_opcode, vmp->vm_unique));
290 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
291 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
292 return(error);
293 }
294
295 vmp->vm_flags |= VM_READ;
296 INSQUE(vmp->vm_chain, vcp->vc_replys);
297
298 return(error);
299 }
300
/*
 * vc_nb_write: Venus writes either a downcall or the reply to an
 * earlier upcall.
 *
 * The first two ints of the write (opcode, unique) are peeked first.
 * Downcall opcodes are dispatched straight to handleDownCall();
 * otherwise the unique value is matched against the reply queue, the
 * rest of the data is copied into that message's buffer, and the
 * sleeping caller in coda_call() is woken.
 */
int 
vc_nb_write(devvp, uiop, flag)
    struct vnode *devvp;
    struct uio *uiop;
    int flag;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_mntinfo *mi;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    mi = devvp->v_devcookie;
    vcp = &mi->mi_vcomm;

    /* Peek at the opcode, unique without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data (the uio already consumed the two
	 * header ints, so copy in starting at the result field). */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	/* Stale reply -- the caller was probably interrupted and has
	 * already dequeued its message. */
	if (codadebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data.  uio_resid here is what remains after
     * the two header ints were peeked above. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
	wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid; 	/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    /* NOTE(review): this stores the pre-uiomove residual, not the bytes
     * actually moved -- the original author questioned it too. */
    vmp->vm_outSize	= buf[0];	/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}
399
400 int
401 vc_nb_ioctl(devvp, cmd, addr, flag, p)
402 struct vnode *devvp;
403 u_long cmd;
404 caddr_t addr;
405 int flag;
406 struct proc *p;
407 {
408 ENTRY;
409
410 switch(cmd) {
411 case CODARESIZE: {
412 struct coda_resize *data = (struct coda_resize *)addr;
413 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
414 break;
415 }
416 case CODASTATS:
417 if (coda_nc_use) {
418 coda_nc_gather_stats();
419 return(0);
420 } else {
421 return(ENODEV);
422 }
423 break;
424 case CODAPRINT:
425 if (coda_nc_use) {
426 print_coda_nc();
427 return(0);
428 } else {
429 return(ENODEV);
430 }
431 break;
432 case CIOC_KERNEL_VERSION:
433 switch (*(u_int *)addr) {
434 case 0:
435 *(u_int *)addr = coda_kernel_version;
436 return 0;
437 break;
438 case 1:
439 case 2:
440 if (coda_kernel_version != *(u_int *)addr)
441 return ENOENT;
442 else
443 return 0;
444 default:
445 return ENOENT;
446 }
447 break;
448 default :
449 return(EINVAL);
450 break;
451 }
452 }
453
454 int
455 vc_nb_poll(devvp, events, p)
456 struct vnode *devvp;
457 int events;
458 struct proc *p;
459 {
460 struct coda_mntinfo *mi;
461 struct vcomm *vcp;
462 int event_msk = 0;
463
464 ENTRY;
465
466 mi = devvp->v_devcookie;
467 vcp = &mi->mi_vcomm;
468
469 event_msk = events & (POLLIN|POLLRDNORM);
470 if (!event_msk)
471 return(0);
472
473 if (!EMPTY(vcp->vc_requests))
474 return(events & (POLLIN|POLLRDNORM));
475
476 selrecord(p, &(vcp->vc_selproc));
477
478 return(0);
479 }
480
/*
 * Statistics
 */
struct coda_clstat coda_clstat;	/* upcall counters, bumped in coda_call() */
485
/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU-EMACS completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */
494
/*
 * coda_call: issue one upcall to Venus and sleep until the reply (or an
 * interruption) arrives.
 *
 * mntinfo  identifies the Venus instance (pseudo-device) to talk to
 * inSize   bytes of request data already formatted in buffer
 * outSize  in: expected reply size (0 means "use inSize");
 *          out: actual reply size on success
 * buffer   shared in/out buffer starting with a struct coda_in_hdr;
 *          overwritten in place with the struct coda_out_hdr reply
 *
 * Returns 0 with the reply in buffer, EINTR if interrupted before the
 * reply arrived, or ENODEV if Venus is not running (or died mid-call).
 */
int
coda_call(mntinfo, inSize, outSize, buffer)
    struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error;
#ifdef CTL_C
    struct proc *p = curproc;
    sigset_t psig_omask;
    int i;
    /* Save the pending-signal set; restored after the sleep loop below. */
    psig_omask = p->p_sigctx.ps_siglist;	/* array assignment */
#endif
    if (mntinfo == NULL) {
	/* Unlikely, but could be a race condition with a dying warden */
	return ENODEV;
    }

    vcp = &(mntinfo->mi_vcomm);

    coda_clstat.ncalls++;
    coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

    if (!VC_OPEN(vcp))
	return(ENODEV);

    CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
    /* Format the request message. */
    vmp->vm_data = buffer;
    vmp->vm_flags = 0;
    vmp->vm_inSize = inSize;
    vmp->vm_outSize
	= *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
    vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    vmp->vm_unique = ++vcp->vc_seq;
    if (codadebug)
	myprintf(("Doing a call for %d.%d\n",
		  vmp->vm_opcode, vmp->vm_unique));

    /* Fill in the common input args. */
    ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

    /* Append msg to request queue and poke Venus. */
    INSQUE(vmp->vm_chain, vcp->vc_requests);
    selwakeup(&(vcp->vc_selproc));

    /* We can be interrupted while we wait for Venus to process
     * our request.  If the interrupt occurs before Venus has read
     * the request, we dequeue and return.  If it occurs after the
     * read but before the reply, we dequeue, send a signal
     * message, and return.  If it occurs after the reply we ignore
     * it.  In no case do we want to restart the syscall.  If it
     * was interrupted by a venus shutdown (vcclose), return
     * ENODEV.  */

    /* Ignore return, We have to check anyway */
#ifdef CTL_C
    /* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
       on a ^c or ^z.  The problem is that emacs sets certain interrupts
       as SA_RESTART.  This means that we should exit sleep, handle the
       "signal" and then go to sleep again.  Mostly this is done by letting
       the syscall complete and be restarted.  We are not idempotent and
       can not do this.  A better solution is necessary.
     */
    i = 0;
    do {
	/* 2-second timeout so a wedged Venus does not hang us forever;
	 * the loop retries up to 128 times while Venus stays open. */
	error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
	if (error == 0)
	    break;
	else if (error == EWOULDBLOCK) {
	    /* Plain timeout: just go around again. */
#ifdef CODA_VERBOSE
	    printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
	} else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
	    /* SIGIO woke us: temporarily block it and keep waiting. */
	    sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef CODA_VERBOSE
	    printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
	} else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
	    /* Same treatment for SIGALRM. */
	    sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef CODA_VERBOSE
	    printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
	} else {
	    /* Any other signal: give up waiting and fall through to the
	     * interrupted-call handling below. */
	    sigset_t tmp;
	    tmp = p->p_sigctx.ps_siglist;	/* array assignment */
	    sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef CODA_VERBOSE
	    printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
	    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
		    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
		    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
		    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
		    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
		    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
	    break;
#ifdef notyet
	    sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
	    printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
		    p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
		    p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
		    p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
		    p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
	}
    } while (error && i++ < 128 && VC_OPEN(vcp));
    /* Restore the saved pending-signal set. */
    p->p_sigctx.ps_siglist = psig_omask;	/* array assignment */
#else
    (void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
    if (VC_OPEN(vcp)) {	/* Venus is still alive */
	/* Op went through, interrupt or not... */
	if (vmp->vm_flags & VM_WRITE) {
	    error = 0;
	    *outSize = vmp->vm_outSize;
	}

	else if (!(vmp->vm_flags & VM_READ)) {
	    /* Interrupted before venus read it: just dequeue and fail. */
#ifdef CODA_VERBOSE
	    if (1)
#else
	    if (codadebug)
#endif
		myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
	    REMQUE(vmp->vm_chain);
	    error = EINTR;
	}

	else {
	    /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
	       upcall started */
	    /* Interrupted after start of upcall, send venus a signal
	       message so it can abandon the operation. */
	    struct coda_in_hdr *dog;
	    struct vmsg *svmp;

#ifdef CODA_VERBOSE
	    if (1)
#else
	    if (codadebug)
#endif
		myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    REMQUE(vmp->vm_chain);
	    error = EINTR;

	    /* Build a CODA_SIGNAL message carrying the same unique id;
	     * vc_nb_read() frees it after copying it out. */
	    CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

	    CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
	    dog = (struct coda_in_hdr *)svmp->vm_data;

	    svmp->vm_flags = 0;
	    dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
	    dog->unique = svmp->vm_unique = vmp->vm_unique;
	    svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	    svmp->vm_outSize = sizeof (struct coda_in_hdr);

	    if (codadebug)
		myprintf(("coda_call: enqueueing signal msg (%d, %d)\n",
			  svmp->vm_opcode, svmp->vm_unique));

	    /* insert at head of queue! */
	    INSQUE(svmp->vm_chain, vcp->vc_requests);
	    selwakeup(&(vcp->vc_selproc));
	}
    }

    else {	/* If venus died (!VC_OPEN(vcp)) */
	if (codadebug)
	    myprintf(("vcclose woke op %d.%d flags %d\n",
		      vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	error = ENODEV;
    }

    CODA_FREE(vmp, sizeof(struct vmsg));

    /* Tell a pending vc_nb_close() when the last upcall drains. */
    if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
	wakeup(&outstanding_upcalls);

    if (!error)
	error = ((struct coda_out_hdr *)buffer)->result;
    return(error);
}
683
684