coda_psdev.c revision 1.17.2.3 1 /* $NetBSD: coda_psdev.c,v 1.17.2.3 2001/10/01 12:43:20 fvdl Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
41 /*
42 * This code was written for the Coda file system at Carnegie Mellon
43 * University. Contributors include David Steere, James Kistler, and
44 * M. Satyanarayanan. */
45
46 /* These routines define the pseudo device for communication between
47 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48 * but I moved them to make it easier to port the Minicache without
49 * porting coda. -- DCS 10/12/94
50 */
51
52 /* These routines are the device entry points for Venus. */
53
54 extern int coda_nc_initialized; /* Set if cache has been initialized */
55
56 #ifdef _LKM
57 #define NVCODA 4
58 #else
59 #include <vcoda.h>
60 #endif
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/proc.h>
67 #include <sys/mount.h>
68 #include <sys/file.h>
69 #include <sys/ioctl.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72 #include <sys/vnode.h>
73
74 #include <miscfs/specfs/specdev.h>
75
76 #include <miscfs/syncfs/syncfs.h>
77
78 #include <coda/coda.h>
79 #include <coda/cnode.h>
80 #include <coda/coda_namecache.h>
81 #include <coda/coda_io.h>
82 #include <coda/coda_psdev.h>
83
84 #define CTL_C
85
86 int coda_psdev_print_entry = 0;
87 static
88 int outstanding_upcalls = 0;
89 int coda_call_sleep = PZERO - 1;
90 #ifdef CTL_C
91 int coda_pcatch = PCATCH;
92 #else
93 #endif
94
95 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))
96
97 void vcodaattach(int n);
98
99 struct vmsg {
100 struct queue vm_chain;
101 caddr_t vm_data;
102 u_short vm_flags;
103 u_short vm_inSize; /* Size is at most 5000 bytes */
104 u_short vm_outSize;
105 u_short vm_opcode; /* copied from data to save ptr lookup */
106 int vm_unique;
107 caddr_t vm_sleep; /* Not used by Mach. */
108 };
109
110 #define VM_READ 1
111 #define VM_WRITE 2
112 #define VM_INTR 4
113
114 /* vcodaattach: do nothing */
115 void
116 vcodaattach(n)
117 int n;
118 {
119 }
120
121 /*
122 * These functions are written for NetBSD.
123 */
124 int
125 vc_nb_open(devvp, flag, mode, p)
126 struct vnode *devvp;
127 int flag;
128 int mode;
129 struct proc *p; /* NetBSD only */
130 {
131 struct vcomm *vcp;
132 int unit;
133
134 ENTRY;
135
136 unit = minor(vdev_rdev(devvp));
137
138 if (unit >= NVCODA || unit < 0)
139 return(ENXIO);
140
141 if (!coda_nc_initialized)
142 coda_nc_init();
143
144 vcp = &coda_mnttbl[unit].mi_vcomm;
145
146 vdev_setprivdata(devvp, &coda_mnttbl[unit]);
147
148 if (VC_OPEN(vcp))
149 return(EBUSY);
150
151 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
152 INIT_QUEUE(vcp->vc_requests);
153 INIT_QUEUE(vcp->vc_replys);
154 MARK_VC_OPEN(vcp);
155
156 coda_mnttbl[unit].mi_vfsp = NULL;
157 coda_mnttbl[unit].mi_rootvp = NULL;
158
159 return(0);
160 }
161
162 int
163 vc_nb_close(devvp, flag, mode, p)
164 struct vnode *devvp;
165 int flag;
166 int mode;
167 struct proc *p;
168 {
169 struct vcomm *vcp;
170 struct vmsg *vmp, *nvmp = NULL;
171 struct coda_mntinfo *mi;
172 int err;
173 dev_t rdev;
174
175 ENTRY;
176
177 rdev = vdev_rdev(devvp);
178 mi = vdev_privdata(devvp);
179 if (err != 0)
180 return err;
181 vcp = &(mi->mi_vcomm);
182
183 if (!VC_OPEN(vcp))
184 panic("vcclose: not open");
185
186 /* prevent future operations on this vfs from succeeding by auto-
187 * unmounting any vfs mounted via this device. This frees user or
188 * sysadm from having to remember where all mount points are located.
189 * Put this before WAKEUPs to avoid queuing new messages between
190 * the WAKEUP and the unmount (which can happen if we're unlucky)
191 */
192 if (!mi->mi_rootvp) {
193 /* just a simple open/close w no mount */
194 MARK_VC_CLOSED(vcp);
195 return 0;
196 }
197
198 /* Let unmount know this is for real */
199 /*
200 * XXX Freeze syncer. Must do this before locking the
201 * mount point. See dounmount for details().
202 */
203 lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
204 VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
205 if (vfs_busy(mi->mi_vfsp, 0, 0)) {
206 lockmgr(&syncer_lock, LK_RELEASE, NULL);
207 return (EBUSY);
208 }
209 coda_unmounting(mi->mi_vfsp);
210
211 /* Wakeup clients so they can return. */
212 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
213 !EOQ(vmp, vcp->vc_requests);
214 vmp = nvmp)
215 {
216 nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
217 /* Free signal request messages and don't wakeup cause
218 no one is waiting. */
219 if (vmp->vm_opcode == CODA_SIGNAL) {
220 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
221 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
222 continue;
223 }
224 outstanding_upcalls++;
225 wakeup(&vmp->vm_sleep);
226 }
227
228 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
229 !EOQ(vmp, vcp->vc_replys);
230 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
231 {
232 outstanding_upcalls++;
233 wakeup(&vmp->vm_sleep);
234 }
235
236 MARK_VC_CLOSED(vcp);
237
238 if (outstanding_upcalls) {
239 #ifdef CODA_VERBOSE
240 printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
241 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
242 printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
243 #else
244 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
245 #endif
246 }
247
248 err = dounmount(mi->mi_vfsp, flag, p);
249 if (err)
250 myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
251 err, minor(rdev)));
252 return 0;
253 }
254
255 int
256 vc_nb_read(devvp, uiop, flag)
257 struct vnode *devvp;
258 struct uio *uiop;
259 int flag;
260 {
261 struct vcomm * vcp;
262 struct vmsg *vmp;
263 struct coda_mntinfo *mi;
264 int error;
265
266 ENTRY;
267
268 mi = vdev_privdata(devvp);
269 vcp = &mi->mi_vcomm;
270 /* Get message at head of request queue. */
271 if (EMPTY(vcp->vc_requests))
272 return(0); /* Nothing to read */
273
274 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
275
276 /* Move the input args into userspace */
277 uiop->uio_rw = UIO_READ;
278 error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
279 if (error) {
280 myprintf(("vcread: error (%d) on uiomove\n", error));
281 error = EINVAL;
282 }
283
284 #ifdef OLD_DIAGNOSTIC
285 if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
286 panic("vc_nb_read: bad chain");
287 #endif
288
289 REMQUE(vmp->vm_chain);
290
291 /* If request was a signal, free up the message and don't
292 enqueue it in the reply queue. */
293 if (vmp->vm_opcode == CODA_SIGNAL) {
294 if (codadebug)
295 myprintf(("vcread: signal msg (%d, %d)\n",
296 vmp->vm_opcode, vmp->vm_unique));
297 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
298 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
299 return(error);
300 }
301
302 vmp->vm_flags |= VM_READ;
303 INSQUE(vmp->vm_chain, vcp->vc_replys);
304
305 return(error);
306 }
307
308 int
309 vc_nb_write(devvp, uiop, flag)
310 struct vnode *devvp;
311 struct uio *uiop;
312 int flag;
313 {
314 struct vcomm * vcp;
315 struct vmsg *vmp;
316 struct coda_mntinfo *mi;
317 struct coda_out_hdr *out;
318 u_long seq;
319 u_long opcode;
320 int buf[2];
321 int error;
322
323 ENTRY;
324
325 mi = vdev_privdata(devvp);
326 vcp = &mi->mi_vcomm;
327
328 /* Peek at the opcode, unique without transfering the data. */
329 uiop->uio_rw = UIO_WRITE;
330 error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
331 if (error) {
332 myprintf(("vcwrite: error (%d) on uiomove\n", error));
333 return(EINVAL);
334 }
335
336 opcode = buf[0];
337 seq = buf[1];
338
339 if (codadebug)
340 myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
341
342 if (DOWNCALL(opcode)) {
343 union outputArgs pbuf;
344
345 /* get the rest of the data. */
346 uiop->uio_rw = UIO_WRITE;
347 error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
348 if (error) {
349 myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
350 error, opcode, seq));
351 return(EINVAL);
352 }
353
354 return handleDownCall(opcode, &pbuf);
355 }
356
357 /* Look for the message on the (waiting for) reply queue. */
358 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
359 !EOQ(vmp, vcp->vc_replys);
360 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
361 {
362 if (vmp->vm_unique == seq) break;
363 }
364
365 if (EOQ(vmp, vcp->vc_replys)) {
366 if (codadebug)
367 myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
368
369 return(ESRCH);
370 }
371
372 /* Remove the message from the reply queue */
373 REMQUE(vmp->vm_chain);
374
375 /* move data into response buffer. */
376 out = (struct coda_out_hdr *)vmp->vm_data;
377 /* Don't need to copy opcode and uniquifier. */
378
379 /* get the rest of the data. */
380 if (vmp->vm_outSize < uiop->uio_resid) {
381 myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
382 vmp->vm_outSize, (unsigned long) uiop->uio_resid));
383 wakeup(&vmp->vm_sleep); /* Notify caller of the error. */
384 return(EINVAL);
385 }
386
387 buf[0] = uiop->uio_resid; /* Save this value. */
388 uiop->uio_rw = UIO_WRITE;
389 error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
390 if (error) {
391 myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
392 error, opcode, seq));
393 return(EINVAL);
394 }
395
396 /* I don't think these are used, but just in case. */
397 /* XXX - aren't these two already correct? -bnoble */
398 out->opcode = opcode;
399 out->unique = seq;
400 vmp->vm_outSize = buf[0]; /* Amount of data transferred? */
401 vmp->vm_flags |= VM_WRITE;
402 wakeup(&vmp->vm_sleep);
403
404 return(0);
405 }
406
407 int
408 vc_nb_ioctl(devvp, cmd, addr, flag, p)
409 struct vnode *devvp;
410 u_long cmd;
411 caddr_t addr;
412 int flag;
413 struct proc *p;
414 {
415 ENTRY;
416
417 switch(cmd) {
418 case CODARESIZE: {
419 struct coda_resize *data = (struct coda_resize *)addr;
420 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
421 break;
422 }
423 case CODASTATS:
424 if (coda_nc_use) {
425 coda_nc_gather_stats();
426 return(0);
427 } else {
428 return(ENODEV);
429 }
430 break;
431 case CODAPRINT:
432 if (coda_nc_use) {
433 print_coda_nc();
434 return(0);
435 } else {
436 return(ENODEV);
437 }
438 break;
439 case CIOC_KERNEL_VERSION:
440 switch (*(u_int *)addr) {
441 case 0:
442 *(u_int *)addr = coda_kernel_version;
443 return 0;
444 break;
445 case 1:
446 case 2:
447 if (coda_kernel_version != *(u_int *)addr)
448 return ENOENT;
449 else
450 return 0;
451 default:
452 return ENOENT;
453 }
454 break;
455 default :
456 return(EINVAL);
457 break;
458 }
459 }
460
461 int
462 vc_nb_poll(devvp, events, p)
463 struct vnode *devvp;
464 int events;
465 struct proc *p;
466 {
467 struct coda_mntinfo *mi;
468 struct vcomm *vcp;
469 int event_msk;
470
471 ENTRY;
472
473 mi = vdev_privdata(devvp);
474 vcp = &mi->mi_vcomm;
475
476 event_msk = events & (POLLIN|POLLRDNORM);
477 if (!event_msk)
478 return(0);
479
480 if (!EMPTY(vcp->vc_requests))
481 return(events & (POLLIN|POLLRDNORM));
482
483 selrecord(p, &(vcp->vc_selproc));
484
485 return(0);
486 }
487
488 /*
489 * Statistics
490 */
491 struct coda_clstat coda_clstat;
492
493 /*
494 * Key question: whether to sleep interruptibly or uninterruptibly when
495 * waiting for Venus. The former seems better (cause you can ^C a
496 * job), but then GNU-EMACS completion breaks. Use tsleep with no
497 * timeout, and no longjmp happens. But, when sleeping
498 * "uninterruptibly", we don't get told if it returns abnormally
499 * (e.g. kill -9).
500 */
501
502 int
503 coda_call(mntinfo, inSize, outSize, buffer)
504 struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
505 {
506 struct vcomm *vcp;
507 struct vmsg *vmp;
508 int error;
509 #ifdef CTL_C
510 struct proc *p = curproc;
511 sigset_t psig_omask;
512 int i;
513 psig_omask = p->p_sigctx.ps_siglist; /* array assignment */
514 #endif
515 if (mntinfo == NULL) {
516 /* Unlikely, but could be a race condition with a dying warden */
517 return ENODEV;
518 }
519
520 vcp = &(mntinfo->mi_vcomm);
521
522 coda_clstat.ncalls++;
523 coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
524
525 if (!VC_OPEN(vcp))
526 return(ENODEV);
527
528 CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
529 /* Format the request message. */
530 vmp->vm_data = buffer;
531 vmp->vm_flags = 0;
532 vmp->vm_inSize = inSize;
533 vmp->vm_outSize
534 = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
535 vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
536 vmp->vm_unique = ++vcp->vc_seq;
537 if (codadebug)
538 myprintf(("Doing a call for %d.%d\n",
539 vmp->vm_opcode, vmp->vm_unique));
540
541 /* Fill in the common input args. */
542 ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
543
544 /* Append msg to request queue and poke Venus. */
545 INSQUE(vmp->vm_chain, vcp->vc_requests);
546 selwakeup(&(vcp->vc_selproc));
547
548 /* We can be interrupted while we wait for Venus to process
549 * our request. If the interrupt occurs before Venus has read
550 * the request, we dequeue and return. If it occurs after the
551 * read but before the reply, we dequeue, send a signal
552 * message, and return. If it occurs after the reply we ignore
553 * it. In no case do we want to restart the syscall. If it
554 * was interrupted by a venus shutdown (vcclose), return
555 * ENODEV. */
556
557 /* Ignore return, We have to check anyway */
558 #ifdef CTL_C
559 /* This is work in progress. Setting coda_pcatch lets tsleep reawaken
560 on a ^c or ^z. The problem is that emacs sets certain interrupts
561 as SA_RESTART. This means that we should exit sleep handle the
562 "signal" and then go to sleep again. Mostly this is done by letting
563 the syscall complete and be restarted. We are not idempotent and
564 can not do this. A better solution is necessary.
565 */
566 i = 0;
567 do {
568 error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
569 if (error == 0)
570 break;
571 else if (error == EWOULDBLOCK) {
572 #ifdef CODA_VERBOSE
573 printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
574 #endif
575 } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
576 sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
577 #ifdef CODA_VERBOSE
578 printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
579 #endif
580 } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
581 sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
582 #ifdef CODA_VERBOSE
583 printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
584 #endif
585 } else {
586 sigset_t tmp;
587 tmp = p->p_sigctx.ps_siglist; /* array assignment */
588 sigminusset(&p->p_sigctx.ps_sigmask, &tmp);
589
590 #ifdef CODA_VERBOSE
591 printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
592 printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
593 p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
594 p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
595 p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
596 p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
597 tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
598 #endif
599 break;
600 #ifdef notyet
601 sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
602 printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
603 p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
604 p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
605 p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
606 p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
607 #endif
608 }
609 } while (error && i++ < 128 && VC_OPEN(vcp));
610 p->p_sigctx.ps_siglist = psig_omask; /* array assignment */
611 #else
612 (void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
613 #endif
614 if (VC_OPEN(vcp)) { /* Venus is still alive */
615 /* Op went through, interrupt or not... */
616 if (vmp->vm_flags & VM_WRITE) {
617 error = 0;
618 *outSize = vmp->vm_outSize;
619 }
620
621 else if (!(vmp->vm_flags & VM_READ)) {
622 /* Interrupted before venus read it. */
623 #ifdef CODA_VERBOSE
624 if (1)
625 #else
626 if (codadebug)
627 #endif
628 myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
629 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
630 REMQUE(vmp->vm_chain);
631 error = EINTR;
632 }
633
634 else {
635 /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
636 upcall started */
637 /* Interrupted after start of upcall, send venus a signal */
638 struct coda_in_hdr *dog;
639 struct vmsg *svmp;
640
641 #ifdef CODA_VERBOSE
642 if (1)
643 #else
644 if (codadebug)
645 #endif
646 myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
647 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
648
649 REMQUE(vmp->vm_chain);
650 error = EINTR;
651
652 CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
653
654 CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
655 dog = (struct coda_in_hdr *)svmp->vm_data;
656
657 svmp->vm_flags = 0;
658 dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
659 dog->unique = svmp->vm_unique = vmp->vm_unique;
660 svmp->vm_inSize = sizeof (struct coda_in_hdr);
661 /*??? rvb */ svmp->vm_outSize = sizeof (struct coda_in_hdr);
662
663 if (codadebug)
664 myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
665 svmp->vm_opcode, svmp->vm_unique));
666
667 /* insert at head of queue! */
668 INSQUE(svmp->vm_chain, vcp->vc_requests);
669 selwakeup(&(vcp->vc_selproc));
670 }
671 }
672
673 else { /* If venus died (!VC_OPEN(vcp)) */
674 if (codadebug)
675 myprintf(("vcclose woke op %d.%d flags %d\n",
676 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
677
678 error = ENODEV;
679 }
680
681 CODA_FREE(vmp, sizeof(struct vmsg));
682
683 if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
684 wakeup(&outstanding_upcalls);
685
686 if (!error)
687 error = ((struct coda_out_hdr *)buffer)->result;
688 return(error);
689 }
690
691