/* $NetBSD: coda_psdev.c,v 1.14 2000/12/22 22:58:57 jdolecek Exp $ */

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 *      @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 */

/* These routines are the device entry points for Venus. */
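/*
 * Message flow, as implemented below: coda_call() builds a vmsg, queues
 * it on vc_requests and sleeps; Venus collects it through vc_nb_read(),
 * which moves it to vc_replys; Venus answers through vc_nb_write(),
 * which matches the reply by its unique sequence number, copies the
 * result back into the original buffer, and wakes the sleeping caller.
 */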

extern int coda_nc_initialized;    /* Set if cache has been initialized */

#ifdef _LKM
#define NVCODA 4
#else
#include <vcoda.h>
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>
#include <coda/coda_psdev.h>

#define CTL_C

int coda_psdev_print_entry = 0;
static int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef CTL_C
int coda_pcatch = PCATCH;
#else
#endif

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__))

void vcodaattach(int n);

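/*
 * A vmsg describes one upcall in flight: vm_data points at the caller's
 * message buffer (a struct coda_in_hdr followed by opcode-specific
 * arguments), vm_chain links it onto vc_requests or vc_replys, and
 * vm_sleep is the address the requesting thread sleeps on until Venus
 * answers or the device is closed.
 */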
struct vmsg {
    struct queue vm_chain;
    caddr_t      vm_data;
    u_short      vm_flags;
    u_short      vm_inSize;     /* Size is at most 5000 bytes */
    u_short      vm_outSize;
    u_short      vm_opcode;     /* copied from data to save ptr lookup */
    int          vm_unique;
    caddr_t      vm_sleep;      /* Not used by Mach. */
};

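/*
 * Values for vm_flags: VM_READ means Venus has picked the message up via
 * vc_nb_read(), VM_WRITE means Venus has answered it via vc_nb_write().
 * VM_INTR is presumably meant to mark interrupted requests; it is not
 * set anywhere in this file.
 */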
#define VM_READ     1
#define VM_WRITE    2
#define VM_INTR     4

/* vcodaattach: do nothing */
void
vcodaattach(n)
    int n;
{
}

/*
 * These functions are written for NetBSD.
 */
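/*
 * vc_nb_open: Venus opens the coda pseudo device.  Initializes the name
 * cache on first use, refuses a second opener with EBUSY, and resets the
 * communication area (request/reply queues, select info) for this unit.
 */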
int
vc_nb_open(dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;             /* NetBSD only */
{
    struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
        return(ENXIO);

    if (!coda_nc_initialized)
        coda_nc_init();

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
        return(EBUSY);

    bzero(&(vcp->vc_selproc), sizeof (struct selinfo));
    INIT_QUEUE(vcp->vc_requests);
    INIT_QUEUE(vcp->vc_replys);
    MARK_VC_OPEN(vcp);

    coda_mnttbl[minor(dev)].mi_vfsp = NULL;
    coda_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

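/*
 * vc_nb_close: Venus exits or closes the device.  Any file system
 * mounted through this unit is forcibly unmounted, queued signal
 * messages are freed, and every thread still waiting in coda_call() is
 * woken so it can return ENODEV.
 */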
int
vc_nb_close (dev, flag, mode, p)
    dev_t        dev;
    int          flag;
    int          mode;
    struct proc *p;
{
    struct vcomm *vcp;
    struct vmsg *vmp, *nvmp = NULL;
    struct coda_mntinfo *mi;
    int err;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
        return(ENXIO);

    mi = &coda_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
        panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by auto-
     * unmounting any vfs mounted via this device.  This frees the user
     * or sysadmin from having to remember where all the mount points are
     * located.  Put this before the WAKEUPs to avoid queuing new messages
     * between the WAKEUP and the unmount (which can happen if we're
     * unlucky).
     */
    if (!mi->mi_rootvp) {
        /* just a simple open/close with no mount */
        MARK_VC_CLOSED(vcp);
        return 0;
    }

    /* Let unmount know this is for real */
    VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
    if (vfs_busy(mi->mi_vfsp, 0, 0))
        return (EBUSY);
    coda_unmounting(mi->mi_vfsp);

    /* Wake up clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
         !EOQ(vmp, vcp->vc_requests);
         vmp = nvmp)
    {
        nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
        /* Free signal request messages and don't wake anyone up,
           because no one is waiting for a signal reply. */
        if (vmp->vm_opcode == CODA_SIGNAL) {
            CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
            CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
            continue;
        }
        outstanding_upcalls++;
        wakeup(&vmp->vm_sleep);
    }

    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
         !EOQ(vmp, vcp->vc_replys);
         vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
        outstanding_upcalls++;
        wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);

    if (outstanding_upcalls) {
#ifdef CODA_VERBOSE
        printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
        (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
        printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
        (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
    }

    err = dounmount(mi->mi_vfsp, flag, p);
    if (err)
        myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
                  err, minor(dev)));
    return 0;
}

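/*
 * vc_nb_read: Venus reads the next pending upcall.  The message at the
 * head of the request queue is copied out, then moved to the reply queue
 * to wait for Venus's answer; CODA_SIGNAL messages expect no answer and
 * are freed instead.
 */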
int
vc_nb_read(dev, uiop, flag)
    dev_t       dev;
    struct uio *uiop;
    int         flag;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
        return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
        return(0);      /* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
        myprintf(("vcread: error (%d) on uiomove\n", error));
        error = EINVAL;
    }

#ifdef OLD_DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
        panic("vc_nb_read: bad chain");
#endif

    REMQUE(vmp->vm_chain);

    /* If request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CODA_SIGNAL) {
        if (codadebug)
            myprintf(("vcread: signal msg (%d, %d)\n",
                      vmp->vm_opcode, vmp->vm_unique));
        CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
        CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
        return(error);
    }

    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}

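/*
 * vc_nb_write: Venus writes back a reply (or an unsolicited downcall).
 * The opcode and sequence number are peeked at first; downcalls are
 * passed to handleDownCall(), while replies are matched against the
 * reply queue by sequence number, copied into the waiting caller's
 * buffer, and the caller is woken up.
 */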
int
vc_nb_write(dev, uiop, flag)
    dev_t       dev;
    struct uio *uiop;
    int         flag;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    struct coda_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
        return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
        myprintf(("vcwrite: error (%d) on uiomove\n", error));
        return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (codadebug)
        myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
        union outputArgs pbuf;

        /* get the rest of the data. */
        uiop->uio_rw = UIO_WRITE;
        error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result,
                        sizeof(pbuf) - (sizeof(int) * 2), uiop);
        if (error) {
            myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
                      error, opcode, seq));
            return(EINVAL);
        }

        return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
         !EOQ(vmp, vcp->vc_replys);
         vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
        if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
        if (codadebug)
            myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

        return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct coda_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
        myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
                  vmp->vm_outSize, (unsigned long) uiop->uio_resid));
        wakeup(&vmp->vm_sleep);         /* Notify caller of the error. */
        return(EINVAL);
    }

    buf[0] = uiop->uio_resid;   /* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t) &out->result,
                    vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
        myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
                  error, opcode, seq));
        return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize = buf[0];   /* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

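/*
 * vc_nb_ioctl: control operations on the pseudo device: resizing the
 * name cache, dumping its statistics or contents, and negotiating the
 * kernel/Venus interface version (CIOC_KERNEL_VERSION).
 */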
int
vc_nb_ioctl(dev, cmd, addr, flag, p)
    dev_t        dev;
    u_long       cmd;
    caddr_t      addr;
    int          flag;
    struct proc *p;
{
    ENTRY;

    switch(cmd) {
    case CODARESIZE: {
        struct coda_resize *data = (struct coda_resize *)addr;
        return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
        break;
    }
    case CODASTATS:
        if (coda_nc_use) {
            coda_nc_gather_stats();
            return(0);
        } else {
            return(ENODEV);
        }
        break;
    case CODAPRINT:
        if (coda_nc_use) {
            print_coda_nc();
            return(0);
        } else {
            return(ENODEV);
        }
        break;
    case CIOC_KERNEL_VERSION:
        switch (*(u_int *)addr) {
        case 0:
            *(u_int *)addr = coda_kernel_version;
            return 0;
            break;
        case 1:
        case 2:
            if (coda_kernel_version != *(u_int *)addr)
                return ENOENT;
            else
                return 0;
        default:
            return ENOENT;
        }
        break;
    default:
        return(EINVAL);
        break;
    }
}

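/*
 * vc_nb_poll: Venus polls for readability.  Reports POLLIN/POLLRDNORM
 * when an upcall is waiting on the request queue, otherwise records the
 * poller so coda_call() can wake it with selwakeup().
 */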
int
vc_nb_poll(dev, events, p)
    dev_t        dev;
    int          events;
    struct proc *p;
{
    struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCODA || minor(dev) < 0)
        return(ENXIO);

    vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
        return(0);

    if (!EMPTY(vcp->vc_requests))
        return(events & (POLLIN|POLLRDNORM));

    selrecord(p, &(vcp->vc_selproc));

    return(0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it exits abnormally
 * (e.g. kill -9).
 */

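/*
 * coda_call: send an upcall to Venus and wait for the answer.  The
 * caller's buffer is queued on vc_requests and the caller sleeps on the
 * vmsg until Venus replies, the sleep is interrupted, or Venus dies.
 * An interrupt before Venus has read the message simply dequeues it; an
 * interrupt after the read sends Venus a CODA_SIGNAL message so it can
 * abort the operation.
 */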
int
coda_call(mntinfo, inSize, outSize, buffer)
    struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error;
#ifdef CTL_C
    struct proc *p = curproc;
    sigset_t psig_omask;
    int i;
    psig_omask = p->p_sigctx.ps_siglist;        /* array assignment */
#endif
    if (mntinfo == NULL) {
        /* Unlikely, but could be a race condition with a dying warden */
        return ENODEV;
    }

    vcp = &(mntinfo->mi_vcomm);

    coda_clstat.ncalls++;
    coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

    if (!VC_OPEN(vcp))
        return(ENODEV);

    CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
    /* Format the request message. */
    vmp->vm_data = buffer;
    vmp->vm_flags = 0;
    vmp->vm_inSize = inSize;
    vmp->vm_outSize
        = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
    vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
    vmp->vm_unique = ++vcp->vc_seq;
    if (codadebug)
        myprintf(("Doing a call for %d.%d\n",
                  vmp->vm_opcode, vmp->vm_unique));

    /* Fill in the common input args. */
    ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

    /* Append msg to request queue and poke Venus. */
    INSQUE(vmp->vm_chain, vcp->vc_requests);
    selwakeup(&(vcp->vc_selproc));

    /* We can be interrupted while we wait for Venus to process
     * our request.  If the interrupt occurs before Venus has read
     * the request, we dequeue and return.  If it occurs after the
     * read but before the reply, we dequeue, send a signal
     * message, and return.  If it occurs after the reply we ignore
     * it.  In no case do we want to restart the syscall.  If it
     * was interrupted by a Venus shutdown (vcclose), return
     * ENODEV.  */

    /* Ignore the return value; we have to check anyway. */
#ifdef CTL_C
    /* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
       on a ^C or ^Z.  The problem is that emacs sets certain interrupts
       as SA_RESTART.  This means that we should exit the sleep, handle the
       "signal" and then go to sleep again.  Mostly this is done by letting
       the syscall complete and be restarted.  We are not idempotent and
       cannot do this.  A better solution is necessary.
     */
    i = 0;
    do {
        error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
        if (error == 0)
            break;
        else if (error == EWOULDBLOCK) {
#ifdef CODA_VERBOSE
            printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
        } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
            sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef CODA_VERBOSE
            printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
        } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
            sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef CODA_VERBOSE
            printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
        } else {
            sigset_t tmp;
            tmp = p->p_sigctx.ps_siglist;       /* array assignment */
            sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef CODA_VERBOSE
            printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
            printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
                   p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
                   p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
                   p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
                   p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
                   tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
            break;
#ifdef notyet
            sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
            printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
                   p->p_siglist.__bits[0], p->p_siglist.__bits[1],
                   p->p_siglist.__bits[2], p->p_siglist.__bits[3],
                   p->p_sigmask.__bits[0], p->p_sigmask.__bits[1],
                   p->p_sigmask.__bits[2], p->p_sigmask.__bits[3]);
#endif
        }
    } while (error && i++ < 128 && VC_OPEN(vcp));
    p->p_sigctx.ps_siglist = psig_omask;        /* array assignment */
#else
    (void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
    if (VC_OPEN(vcp)) {         /* Venus is still alive */
        /* Op went through, interrupt or not... */
        if (vmp->vm_flags & VM_WRITE) {
            error = 0;
            *outSize = vmp->vm_outSize;
        }

        else if (!(vmp->vm_flags & VM_READ)) {
            /* Interrupted before Venus read it. */
#ifdef CODA_VERBOSE
            if (1)
#else
            if (codadebug)
#endif
                myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
                          vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
            REMQUE(vmp->vm_chain);
            error = EINTR;
        }

        else {
            /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
               the upcall started */
            /* Interrupted after start of upcall, send Venus a signal */
            struct coda_in_hdr *dog;
            struct vmsg *svmp;

#ifdef CODA_VERBOSE
            if (1)
#else
            if (codadebug)
#endif
                myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
                          vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

            REMQUE(vmp->vm_chain);
            error = EINTR;

            CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

            CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
            dog = (struct coda_in_hdr *)svmp->vm_data;

            svmp->vm_flags = 0;
            dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
            dog->unique = svmp->vm_unique = vmp->vm_unique;
            svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */ svmp->vm_outSize = sizeof (struct coda_in_hdr);

            if (codadebug)
                myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
                          svmp->vm_opcode, svmp->vm_unique));

            /* insert at head of queue! */
            INSQUE(svmp->vm_chain, vcp->vc_requests);
            selwakeup(&(vcp->vc_selproc));
        }
    }

    else {      /* If Venus died (!VC_OPEN(vcp)) */
        if (codadebug)
            myprintf(("vcclose woke op %d.%d flags %d\n",
                      vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

        error = ENODEV;
    }

    CODA_FREE(vmp, sizeof(struct vmsg));

    if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
        wakeup(&outstanding_upcalls);

    if (!error)
        error = ((struct coda_out_hdr *)buffer)->result;
    return(error);
}