/*	$NetBSD: coda_psdev.c,v 1.2 1998/09/08 17:12:47 rvb Exp $	*/

/*
 *
 * Coda: an Experimental Distributed File System
 * Release 3.1
 *
 * Copyright (c) 1987-1998 Carnegie Mellon University
 * All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) cfs/cfs_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting Coda. -- DCS 10/12/94
 */

/*
 * HISTORY
 * $Log: coda_psdev.c,v $
 * Revision 1.2  1998/09/08 17:12:47  rvb
 * Pass2 complete
 *
 * Revision 1.1.1.1  1998/08/29 21:26:45  rvb
 * Very Preliminary Coda
 *
 * Revision 1.9  1998/08/28 18:12:17  rvb
 * Now it also works on FreeBSD -current.  This code will be
 * committed to the FreeBSD -current and NetBSD -current
 * trees.  It will then be tailored to the particular platform
 * by flushing conditional code.
 *
 * Revision 1.8  1998/08/18 17:05:15  rvb
 * Don't use __RCSID now
 *
 * Revision 1.7  1998/08/18 16:31:41  rvb
 * Sync the code for NetBSD -current; test on 1.3 later
 *
 * Revision 1.8  1998/06/09 23:30:42  rvb
 * Try to allow ^C -- take 1
 *
 * Revision 1.5.2.8  98/01/23 11:21:04  rvb
 * Sync with 2.2.5
 *
 * Revision 1.5.2.7  98/01/22 22:22:21  rvb
 * sync 1.2 and 1.3
 *
 * Revision 1.5.2.6  98/01/22 13:11:24  rvb
 * Move makecfsnode ctlfid later so vfsp is known; work on ^c and ^z
 *
 * Revision 1.5.2.5  97/12/16 22:01:27  rvb
 * Oops add cfs_subr.h cfs_venus.h; sync with peter
 *
 * Revision 1.5.2.4  97/12/16 12:40:05  rvb
 * Sync with 1.3
 *
 * Revision 1.5.2.3  97/12/10 14:08:24  rvb
 * Fix O_ flags; check result in cfscall
 *
 * Revision 1.5.2.2  97/12/10 11:40:24  rvb
 * No more ody
 *
 * Revision 1.5.2.1  97/12/06 17:41:20  rvb
 * Sync with peters coda.h
 *
 * Revision 1.5  97/12/05 10:39:16  rvb
 * Read CHANGES
 *
 * Revision 1.4.18.9  97/12/05 08:58:07  rvb
 * peter found this one
 *
 * Revision 1.4.18.8  97/11/26 15:28:57  rvb
 * Cant make downcall pbuf == union cfs_downcalls yet
 *
 * Revision 1.4.18.7  97/11/25 09:40:49  rvb
 * Final cfs_venus.c w/o macros, but one locking bug
 *
 * Revision 1.4.18.6  97/11/20 11:46:41  rvb
 * Capture current cfs_venus
 *
 * Revision 1.4.18.5  97/11/18 10:27:15  rvb
 * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c
 * cfs_nb_foo and cfs_foo are joined
 *
 * Revision 1.4.18.4  97/11/13 22:02:59  rvb
 * pass2 cfs_NetBSD.h mt
 *
 * Revision 1.4.18.3  97/11/12 12:09:38  rvb
 * reorg pass1
 *
 * Revision 1.4.18.2  97/10/29 16:06:09  rvb
 * Kill DYING
 *
 * Revision 1.4.18.1  1997/10/28 23:10:15  rvb
 * >64Meg; venus can be killed!
 *
 * Revision 1.4  1996/12/12 22:10:58  bnoble
 * Fixed the "downcall invokes venus operation" deadlock in all known cases.
 * There may be more
 *
 * Revision 1.3  1996/11/13 04:14:20  bnoble
 * Merging BNOBLE_WORK_6_20_96 into main line
 *
 * Revision 1.2.8.1  1996/08/22 14:25:04  bnoble
 * Added a return code from vc_nb_close
 *
 * Revision 1.2  1996/01/02 16:56:58  bnoble
 * Added support for Coda MiniCache and raw inode calls (final commit)
 *
 * Revision 1.1.2.1  1995/12/20 01:57:24  bnoble
 * Added CFS-specific files
 *
 * Revision 1.1  1995/03/14 20:52:15  bnoble
 * Initial revision
 *
 */

/* These routines are the device entry points for Venus. */

extern int cfsnc_initialized;	/* Set if cache has been initialized */

#include <vcfs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <cfs/coda.h>
#include <cfs/cnode.h>
#include <cfs/cfsnc.h>
#include <cfs/cfsio.h>

#define CTL_C

int cfs_psdev_print_entry = 0;
#define ENTRY if (cfs_psdev_print_entry) myprintf(("Entered %s\n", __FUNCTION__))

void vcfsattach(int n);
int vc_nb_open(dev_t dev, int flag, int mode, struct proc *p);
int vc_nb_close(dev_t dev, int flag, int mode, struct proc *p);
int vc_nb_read(dev_t dev, struct uio *uiop, int flag);
int vc_nb_write(dev_t dev, struct uio *uiop, int flag);
int vc_nb_ioctl(dev_t dev, int cmd, caddr_t addr, int flag, struct proc *p);
int vc_nb_poll(dev_t dev, int events, struct proc *p);

struct vmsg {
    struct queue vm_chain;
    caddr_t vm_data;
    u_short vm_flags;
    u_short vm_inSize;		/* Size is at most 5000 bytes */
    u_short vm_outSize;
    u_short vm_opcode;		/* copied from data to save ptr lookup */
    int vm_unique;
    caddr_t vm_sleep;		/* Not used by Mach. */
};

#define VM_READ  1
#define VM_WRITE 2
#define VM_INTR  4
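
/*
 * Message life cycle, as implemented below: cfscall() allocates a vmsg,
 * queues it on vc_requests and sleeps; vc_nb_read() hands the request to
 * Venus, sets VM_READ and moves it to vc_replys; vc_nb_write() copies the
 * reply back into vm_data, sets VM_WRITE and wakes the sleeper.  VM_INTR
 * is reserved for interrupted requests and is not set in this file.
 */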

/* vcfsattach: do nothing */
void
vcfsattach(n)
    int n;
{
}

/*
 * These functions are written for NetBSD.
 */
int
vc_nb_open(dev, flag, mode, p)
    dev_t dev;
    int flag;
    int mode;
    struct proc *p;		/* NetBSD only */
{
    register struct vcomm *vcp;

    ENTRY;

    if (minor(dev) >= NVCFS || minor(dev) < 0)
	return(ENXIO);

    if (!cfsnc_initialized)
	cfsnc_init();

    vcp = &cfs_mnttbl[minor(dev)].mi_vcomm;
    if (VC_OPEN(vcp))
	return(EBUSY);

    bzero(&(vcp->vc_selproc), sizeof (struct selinfo));
    INIT_QUEUE(vcp->vc_requests);
    INIT_QUEUE(vcp->vc_replys);
    MARK_VC_OPEN(vcp);

    cfs_mnttbl[minor(dev)].mi_vfsp = NULL;
    cfs_mnttbl[minor(dev)].mi_rootvp = NULL;

    return(0);
}

int
vc_nb_close(dev, flag, mode, p)
    dev_t dev;
    int flag;
    int mode;
    struct proc *p;
{
    register struct vcomm *vcp;
    register struct vmsg *vmp;
    struct cfs_mntinfo *mi;
    int err;

    ENTRY;

    if (minor(dev) >= NVCFS || minor(dev) < 0)
	return(ENXIO);

    mi = &cfs_mnttbl[minor(dev)];
    vcp = &(mi->mi_vcomm);

    if (!VC_OPEN(vcp))
	panic("vcclose: not open");

    /* Prevent future operations on this vfs from succeeding by
     * auto-unmounting any vfs mounted via this device.  This frees the user
     * or sysadmin from having to remember where all the mount points are
     * located.  Put this before the WAKEUPs to avoid queuing new messages
     * between the WAKEUP and the unmount (which can happen if we're unlucky).
     */
    if (mi->mi_rootvp) {
	/* Let unmount know this is for real */
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	if (vfs_busy(mi->mi_vfsp, 0, 0))
	    return (EBUSY);
	cfs_unmounting(mi->mi_vfsp);
	err = dounmount(mi->mi_vfsp, flag, p);
	if (err)
	    myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
		      err, minor(dev)));
    }

    /* Wake up clients so they can return. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	 !EOQ(vmp, vcp->vc_requests);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	/* Free signal request messages and don't wake up, because
	   no one waits for a reply to a signal. */
	if (vmp->vm_opcode == CFS_SIGNAL) {
	    CFS_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	    CFS_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	    continue;
	}

	wakeup(&vmp->vm_sleep);
    }

    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	wakeup(&vmp->vm_sleep);
    }

    MARK_VC_CLOSED(vcp);
    return 0;
}

int
vc_nb_read(dev, uiop, flag)
    dev_t dev;
    struct uio *uiop;
    int flag;
{
    register struct vcomm *vcp;
    register struct vmsg *vmp;
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCFS || minor(dev) < 0)
	return(ENXIO);

    vcp = &cfs_mnttbl[minor(dev)].mi_vcomm;
    /* Get message at head of request queue. */
    if (EMPTY(vcp->vc_requests))
	return(0);		/* Nothing to read */

    vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

    /* Move the input args into userspace */
    uiop->uio_rw = UIO_READ;
    error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
    if (error) {
	myprintf(("vcread: error (%d) on uiomove\n", error));
	error = EINVAL;
    }

#ifdef DIAGNOSTIC
    if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
	panic("vc_nb_read: bad chain");
#endif

    REMQUE(vmp->vm_chain);

    /* If the request was a signal, free up the message and don't
       enqueue it in the reply queue. */
    if (vmp->vm_opcode == CFS_SIGNAL) {
	if (cfsdebug)
	    myprintf(("vcread: signal msg (%d, %d)\n",
		      vmp->vm_opcode, vmp->vm_unique));
	CFS_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
	CFS_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
	return(error);
    }

    vmp->vm_flags |= VM_READ;
    INSQUE(vmp->vm_chain, vcp->vc_replys);

    return(error);
}

int
vc_nb_write(dev, uiop, flag)
    dev_t dev;
    struct uio *uiop;
    int flag;
{
    register struct vcomm *vcp;
    register struct vmsg *vmp;
    struct cfs_out_hdr *out;
    u_long seq;
    u_long opcode;
    int buf[2];
    int error = 0;

    ENTRY;

    if (minor(dev) >= NVCFS || minor(dev) < 0)
	return(ENXIO);

    vcp = &cfs_mnttbl[minor(dev)].mi_vcomm;

    /* Peek at the opcode and uniquifier without transferring the data. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove\n", error));
	return(EINVAL);
    }

    opcode = buf[0];
    seq = buf[1];

    if (cfsdebug)
	myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

    if (DOWNCALL(opcode)) {
	union outputArgs pbuf;

	/* get the rest of the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)&pbuf.cfs_purgeuser.oh.result,
			sizeof(pbuf) - (sizeof(int) * 2), uiop);
	if (error) {
	    myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
		      error, opcode, seq));
	    return(EINVAL);
	}

	return handleDownCall(opcode, &pbuf);
    }

    /* Look for the message on the (waiting for) reply queue. */
    for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	 !EOQ(vmp, vcp->vc_replys);
	 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
    {
	if (vmp->vm_unique == seq) break;
    }

    if (EOQ(vmp, vcp->vc_replys)) {
	if (cfsdebug)
	    myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

	return(ESRCH);
    }

    /* Remove the message from the reply queue */
    REMQUE(vmp->vm_chain);

    /* move data into response buffer. */
    out = (struct cfs_out_hdr *)vmp->vm_data;
    /* Don't need to copy opcode and uniquifier. */

    /* get the rest of the data. */
    if (vmp->vm_outSize < uiop->uio_resid) {
	myprintf(("vcwrite: more data than asked for (%d < %d)\n",
		  vmp->vm_outSize, uiop->uio_resid));
	wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
	return(EINVAL);
    }

    buf[0] = uiop->uio_resid;		/* Save this value. */
    uiop->uio_rw = UIO_WRITE;
    error = uiomove((caddr_t)&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
    if (error) {
	myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		  error, opcode, seq));
	return(EINVAL);
    }

    /* I don't think these are used, but just in case. */
    /* XXX - aren't these two already correct? -bnoble */
    out->opcode = opcode;
    out->unique = seq;
    vmp->vm_outSize = buf[0];		/* Amount of data transferred? */
    vmp->vm_flags |= VM_WRITE;
    wakeup(&vmp->vm_sleep);

    return(0);
}

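/*
 * For reference, the user-space half of this protocol (what Venus does
 * against vc_nb_read/vc_nb_write above) looks roughly like the sketch
 * below.  The device name, buffer size and reply_size are illustrative
 * assumptions; only the opcode/unique/result header fields are taken from
 * this file.
 *
 *	char buf[8192];
 *	int fd = open("/dev/cfs0", O_RDWR);
 *	for (;;) {
 *		read(fd, buf, sizeof(buf));		// pick up one upcall
 *		struct cfs_in_hdr *in = (struct cfs_in_hdr *)buf;
 *		// ... service in->opcode ...
 *		struct cfs_out_hdr *out = (struct cfs_out_hdr *)buf;
 *		out->opcode = in->opcode;		// echoed back unchanged
 *		out->unique = in->unique;		// must match vm_unique
 *		out->result = 0;			// or an errno
 *		write(fd, buf, reply_size);		// complete the upcall
 *	}
 */
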
int
vc_nb_ioctl(dev, cmd, addr, flag, p)
    dev_t dev;
    int cmd;
    caddr_t addr;
    int flag;
    struct proc *p;
{
    ENTRY;

    switch (cmd) {
    case CFSRESIZE: {
	struct cfs_resize *data = (struct cfs_resize *)addr;
	return(cfsnc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
	break;
    }
    case CFSSTATS:
	if (cfsnc_use) {
	    cfsnc_gather_stats();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    case CFSPRINT:
	if (cfsnc_use) {
	    print_cfsnc();
	    return(0);
	} else {
	    return(ENODEV);
	}
	break;
    default:
	return(EINVAL);
	break;
    }
}

int
vc_nb_poll(dev, events, p)
    dev_t dev;
    int events;
    struct proc *p;
{
    register struct vcomm *vcp;
    int event_msk = 0;

    ENTRY;

    if (minor(dev) >= NVCFS || minor(dev) < 0)
	return(ENXIO);

    vcp = &cfs_mnttbl[minor(dev)].mi_vcomm;

    event_msk = events & (POLLIN|POLLRDNORM);
    if (!event_msk)
	return(0);

    if (!EMPTY(vcp->vc_requests))
	return(events & (POLLIN|POLLRDNORM));

    selrecord(p, &(vcp->vc_selproc));

    return(0);
}

/*
 * Statistics
 */
struct cfs_clstat cfs_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  Interruptible sleep seems better (because you can
 * ^C a job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping "uninterruptibly",
 * we don't get told if the sleep ends abnormally (e.g. kill -9).
 */

/* If you want this to be interruptible, set it to a value > PZERO. */
int cfscall_sleep = PZERO - 1;
#ifdef CTL_C
int cfs_pcatch = PCATCH;
#endif

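/*
 * cfscall() is the upcall primitive used by the rest of the module.  A
 * typical use (a sketch only; the real callers live elsewhere, and the
 * union and member names here are illustrative rather than taken from
 * this file) looks like:
 *
 *	union outputArgs *buf;		// request and reply share one buffer
 *	int insize, outsize, error;
 *	// fill in the leading struct cfs_in_hdr (opcode etc.) and the
 *	// request-specific fields, set insize to the request length and
 *	// outsize to the size of the buffer, then ...
 *	error = cfscall(mi, insize, &outsize, (caddr_t)buf);
 *	// on success the reply, including its cfs_out_hdr result field,
 *	// is back in the same buffer and outsize holds its length.
 */
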
int
cfscall(mntinfo, inSize, outSize, buffer)
    struct cfs_mntinfo *mntinfo;
    int inSize;
    int *outSize;
    caddr_t buffer;
{
    struct vcomm *vcp;
    struct vmsg *vmp;
    int error;
#ifdef CTL_C
    struct proc *p = curproc;
    unsigned int psig_omask = p->p_sigmask;
    int i;
#endif
    if (mntinfo == NULL) {
	/* Unlikely, but could be a race condition with a dying warden */
	return ENODEV;
    }

    vcp = &(mntinfo->mi_vcomm);

    cfs_clstat.ncalls++;
    cfs_clstat.reqs[((struct cfs_in_hdr *)buffer)->opcode]++;

    if (!VC_OPEN(vcp))
	return(ENODEV);

    CFS_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
    /* Format the request message. */
    vmp->vm_data = buffer;
    vmp->vm_flags = 0;
    vmp->vm_inSize = inSize;
    vmp->vm_outSize
	= *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
    vmp->vm_opcode = ((struct cfs_in_hdr *)buffer)->opcode;
    vmp->vm_unique = ++vcp->vc_seq;
    if (cfsdebug)
	myprintf(("Doing a call for %d.%d\n",
		  vmp->vm_opcode, vmp->vm_unique));

    /* Fill in the common input args. */
    ((struct cfs_in_hdr *)buffer)->unique = vmp->vm_unique;

    /* Append msg to request queue and poke Venus. */
    INSQUE(vmp->vm_chain, vcp->vc_requests);
    selwakeup(&(vcp->vc_selproc));

    /* We can be interrupted while we wait for Venus to process
     * our request.  If the interrupt occurs before Venus has read
     * the request, we dequeue and return.  If it occurs after the
     * read but before the reply, we dequeue, send a signal
     * message, and return.  If it occurs after the reply we ignore
     * it.  In no case do we want to restart the syscall.  If it
     * was interrupted by a venus shutdown (vcclose), return
     * ENODEV. */

    /* Ignore the return value; we have to check anyway. */
#ifdef CTL_C
    /* This is work in progress.  Setting cfs_pcatch lets tsleep reawaken
       on a ^C or ^Z.  The problem is that emacs sets certain interrupts
       as SA_RESTART.  This means that we should exit the sleep, handle the
       "signal", and then go to sleep again.  Mostly this is done by letting
       the syscall complete and be restarted.  We are not idempotent and
       cannot do this.  A better solution is necessary.
     */
    i = 0;
    do {
	error = tsleep(&vmp->vm_sleep, (cfscall_sleep|cfs_pcatch), "cfscall", hz*2);
	if (error == 0)
	    break;
	else if (error == EWOULDBLOCK) {
	    printf("cfscall: tsleep TIMEOUT %d sec\n", 2+2*i);
	} else if (p->p_siglist == sigmask(SIGIO)) {
	    p->p_sigmask |= p->p_siglist;
	    printf("cfscall: tsleep returns %d SIGIO, cnt %d\n", error, i);
	} else {
	    printf("cfscall: tsleep returns %d, cnt %d\n", error, i);
	    printf("cfscall: siglist = %x, sigmask = %x, mask %x\n",
		   p->p_siglist, p->p_sigmask,
		   p->p_siglist & ~p->p_sigmask);
	    break;
	    /* NOTREACHED -- the statements below are dead code left in
	       place after the break above. */
	    p->p_sigmask |= p->p_siglist;
	    printf("cfscall: new mask, siglist = %x, sigmask = %x, mask %x\n",
		   p->p_siglist, p->p_sigmask,
		   p->p_siglist & ~p->p_sigmask);
	}
    } while (error && i++ < 128);
    p->p_sigmask = psig_omask;
#else
    (void) tsleep(&vmp->vm_sleep, cfscall_sleep, "cfscall", 0);
#endif
    if (VC_OPEN(vcp)) {		/* Venus is still alive */
	/* Op went through, interrupt or not... */
	if (vmp->vm_flags & VM_WRITE) {
	    error = 0;
	    *outSize = vmp->vm_outSize;
	}

	else if (!(vmp->vm_flags & VM_READ)) {
	    /* Interrupted before venus read it. */
	    if (cfsdebug||1)
		myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
	    REMQUE(vmp->vm_chain);
	    error = EINTR;
	}

	else {
	    /* Interrupted after Venus read the request but before it
	       replied (!(vmp->vm_flags & VM_WRITE)), so send Venus a
	       signal message. */
	    struct cfs_in_hdr *dog;
	    struct vmsg *svmp;

	    if (cfsdebug||1)
		myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			  vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    REMQUE(vmp->vm_chain);
	    error = EINTR;

	    CFS_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

	    CFS_ALLOC((svmp->vm_data), char *, sizeof (struct cfs_in_hdr));
	    dog = (struct cfs_in_hdr *)svmp->vm_data;

	    svmp->vm_flags = 0;
	    dog->opcode = svmp->vm_opcode = CFS_SIGNAL;
	    dog->unique = svmp->vm_unique = vmp->vm_unique;
	    svmp->vm_inSize = sizeof (struct cfs_in_hdr);
/*??? rvb */ svmp->vm_outSize = sizeof (struct cfs_in_hdr);

	    if (cfsdebug)
		myprintf(("cfscall: enqueuing signal msg (%d, %d)\n",
			  svmp->vm_opcode, svmp->vm_unique));

	    /* insert at head of queue! */
	    INSQUE(svmp->vm_chain, vcp->vc_requests);
	    selwakeup(&(vcp->vc_selproc));
	}
    }

    else {	/* If venus died (!VC_OPEN(vcp)) */
	if (cfsdebug)
	    myprintf(("vcclose woke op %d.%d flags %d\n",
		      vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	error = ENODEV;
    }

    CFS_FREE(vmp, sizeof(struct vmsg));

    if (!error)
	error = ((struct cfs_out_hdr *)buffer)->result;
    return(error);
}