/*	$NetBSD: coda_psdev.c,v 1.58 2017/10/25 08:12:38 maya Exp $	*/

/*
 *
 *             Coda: an Experimental Distributed File System
 *                              Release 3.1
 *
 *           Copyright (c) 1987-1998 Carnegie Mellon University
 *                          All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 * 	@(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan. */

/* These routines define the pseudo device for communication between
 * Coda's Venus and Minicache in Mach 2.6.  They used to be in cfs_subr.c,
 * but I moved them to make it easier to port the Minicache without
 * porting coda. -- DCS 10/12/94
 *
 * Following code depends on file-system CODA.
 */

/* These routines are the device entry points for Venus. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.58 2017/10/25 08:12:38 maya Exp $");

extern int coda_nc_initialized;		/* Set if cache has been initialized */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/conf.h>
#include <sys/atomic.h>
#include <sys/module.h>

#include <coda/coda.h>
#include <coda/cnode.h>
#include <coda/coda_namecache.h>
#include <coda/coda_io.h>

#include "ioconf.h"

#define CTL_C

int coda_psdev_print_entry = 0;
static
int outstanding_upcalls = 0;
int coda_call_sleep = PZERO - 1;
#ifdef CTL_C
int coda_pcatch = PCATCH;
#else
#endif

int coda_kernel_version = CODA_KERNEL_VERSION;

#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))

dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

const struct cdevsw vcoda_cdevsw = {
	.d_open = vc_nb_open,
	.d_close = vc_nb_close,
	.d_read = vc_nb_read,
	.d_write = vc_nb_write,
	.d_ioctl = vc_nb_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = vc_nb_poll,
	.d_mmap = nommap,
	.d_kqfilter = vc_nb_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

struct vmsg {
	TAILQ_ENTRY(vmsg) vm_chain;
	void *	 vm_data;
	u_short	 vm_flags;
	u_short	 vm_inSize;	/* Size is at most 5000 bytes */
	u_short	 vm_outSize;
	u_short	 vm_opcode;	/* copied from data to save ptr lookup */
	int	 vm_unique;
	void *	 vm_sleep;	/* Not used by Mach. */
};

struct coda_mntinfo coda_mnttbl[NVCODA];

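/*
 * vm_flags values.  VM_READ is set once Venus has read the request
 * (vc_nb_read); VM_WRITE is set once Venus has written back a reply
 * (vc_nb_write).  VM_INTR is defined for interrupted requests but is
 * not set anywhere in this file.
 */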
#define	VM_READ	    1
#define	VM_WRITE    2
#define	VM_INTR	    4

/* vcodaattach: do nothing */
void
vcodaattach(int n)
{
}

/*
 * These functions are written for NetBSD.
 */
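/*
 * vc_nb_open: called when Venus opens the pseudo-device.  Initialize
 * the request/reply queues for this minor device and mark the
 * communication channel open; fail with EBUSY if it already is.
 */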
int
vc_nb_open(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct vcomm *vcp;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	if (!coda_nc_initialized)
		coda_nc_init();

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	if (VC_OPEN(vcp))
		return(EBUSY);

	selinit(&vcp->vc_selproc);
	TAILQ_INIT(&vcp->vc_requests);
	TAILQ_INIT(&vcp->vc_replies);
	MARK_VC_OPEN(vcp);

	coda_mnttbl[minor(dev)].mi_vfsp = NULL;
	coda_mnttbl[minor(dev)].mi_rootvp = NULL;

	return(0);
}

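/*
 * vc_nb_close: called when Venus closes the pseudo-device (or dies).
 * Wake every thread still waiting on an upcall, wait for those
 * upcalls to drain, and forcibly unmount any file system mounted
 * through this channel.
 */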
int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	struct coda_mntinfo *mi;
	int err;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	mi = &coda_mnttbl[minor(dev)];
	vcp = &(mi->mi_vcomm);

	if (!VC_OPEN(vcp))
		panic("vcclose: not open");

	/* Prevent future operations on this vfs from succeeding by
	 * auto-unmounting any vfs mounted via this device.  This frees the
	 * user or sysadmin from having to remember where all the mount
	 * points are located.  Put this before the WAKEUPs to avoid queuing
	 * new messages between the WAKEUP and the unmount (which can happen
	 * if we're unlucky).
	 */
	if (!mi->mi_rootvp) {
		/* just a simple open/close w no mount */
		MARK_VC_CLOSED(vcp);
		return 0;
	}

	/* Let unmount know this is for real */
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	coda_unmounting(mi->mi_vfsp);

	/* Wakeup clients so they can return. */
	while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

		/* Free signal request messages and don't wake up, because
		   no one is waiting. */
		if (vmp->vm_opcode == CODA_SIGNAL) {
			CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
			CODA_FREE(vmp, sizeof(struct vmsg));
			continue;
		}
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	MARK_VC_CLOSED(vcp);

	if (outstanding_upcalls) {
#ifdef CODA_VERBOSE
		printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
		printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
	}

	err = dounmount(mi->mi_vfsp, flag, l);
	if (err)
		myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
		    err, (unsigned long long)minor(dev)));
	seldestroy(&vcp->vc_selproc);
	return 0;
}

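/*
 * vc_nb_read: Venus reads the next pending upcall request.  Copy the
 * message at the head of the request queue out to Venus and move it
 * to the reply queue (unless it is a CODA_SIGNAL, which needs no
 * reply and is freed immediately).
 */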
int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Get message at head of request queue. */
	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return(0);	/* Nothing to read */

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
			    vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
		CODA_FREE(vmp, sizeof(struct vmsg));
		return(error);
	}

	vmp->vm_flags |= VM_READ;
	TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);

	return(error);
}

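/*
 * vc_nb_write: Venus writes back either a downcall (handled
 * immediately) or the reply to an earlier upcall.  For a reply, find
 * the matching message on the reply queue by its uniquifier, copy the
 * result into the caller's buffer and wake the sleeping caller.
 */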
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int tbuf[2];
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Peek at the opcode and uniquifier without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(tbuf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = tbuf[0];
	seq = tbuf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
			    error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
		if (vmp->vm_unique == seq) break;
	}

	if (vmp == NULL) {
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);
	}

	/* Remove the message from the reply queue */
	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		    vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep);		/* Notify caller of the error. */
		return(EINVAL);
	}

	tbuf[0] = uiop->uio_resid;	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		    error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	vmp->vm_outSize = tbuf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	wakeup(&vmp->vm_sleep);

	return(0);
}

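/*
 * vc_nb_ioctl: control operations on the pseudo-device: resize the
 * name cache, gather or print its statistics, or negotiate the
 * kernel/Venus interface version.
 */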
int
vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
	ENTRY;

	switch (cmd) {
	case CODARESIZE: {
		struct coda_resize *data = (struct coda_resize *)addr;
		return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
		break;
	}
	case CODASTATS:
		if (coda_nc_use) {
			coda_nc_gather_stats();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CODAPRINT:
		if (coda_nc_use) {
			print_coda_nc();
			return(0);
		} else {
			return(ENODEV);
		}
		break;
	case CIOC_KERNEL_VERSION:
		switch (*(u_int *)addr) {
		case 0:
			*(u_int *)addr = coda_kernel_version;
			return 0;
			break;
		case 1:
		case 2:
			if (coda_kernel_version != *(u_int *)addr)
				return ENOENT;
			else
				return 0;
		default:
			return ENOENT;
		}
		break;
	default:
		return(EINVAL);
		break;
	}
}

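/*
 * vc_nb_poll: Venus polls for readability; report POLLIN/POLLRDNORM
 * when a request is queued, otherwise record the selector so
 * coda_call can wake it with selnotify.
 */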
int
vc_nb_poll(dev_t dev, int events, struct lwp *l)
{
	struct vcomm *vcp;
	int event_msk = 0;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	event_msk = events & (POLLIN|POLLRDNORM);
	if (!event_msk)
		return(0);

	if (!TAILQ_EMPTY(&vcp->vc_requests))
		return(events & (POLLIN|POLLRDNORM));

	selrecord(l, &(vcp->vc_selproc));

	return(0);
}

static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}

static int
filt_vc_nb_read(struct knote *kn, long hint)
{
	struct vcomm *vcp = kn->kn_hook;
	struct vmsg *vmp;

	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return (0);

	kn->kn_data = vmp->vm_inSize;
	return (1);
}

static const struct filterops vc_nb_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_vc_nb_detach,
	.f_event = filt_vc_nb_read,
};

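/*
 * vc_nb_kqfilter: kqueue interface; EVFILT_READ fires when a request
 * is waiting on the queue, with kn_data set to its size.
 */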
int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}

/*
 * Statistics
 */
struct coda_clstat coda_clstat;

/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */

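/*
 * coda_call: perform an upcall to Venus.  Queue the request, notify
 * Venus via select/kqueue, and sleep until Venus replies, the sleep
 * is interrupted, or Venus closes the device.  On an interrupt after
 * Venus has read the request, a CODA_SIGNAL message is queued so
 * Venus can abandon the operation.
 */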
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
    void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
		/* Unlikely, but could be a race condition with a dying warden */
		return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
		return(ENODEV);

	CODA_ALLOC(vmp, struct vmsg *, sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize;	/* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
		myprintf(("Doing a call for %d.%d\n",
		    vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return.  If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return.  If it occurs after the reply we ignore
	 * it.  In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore the return value; we have to check anyway */
#ifdef CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^C or ^Z.  The problem is that Emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit the sleep, handle
	   the "signal" and then go to sleep again.  Mostly this is done by
	   letting the syscall complete and be restarted.  We are not
	   idempotent and cannot do this.  A better solution is necessary.
	 */
	i = 0;
	do {
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		mutex_enter(p->p_lock);
		if (error == EWOULDBLOCK) {
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigispending(l, SIGIO)) {
			sigaddset(&l->l_sigmask, SIGIO);
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigispending(l, SIGALRM)) {
			sigaddset(&l->l_sigmask, SIGALRM);
#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigpend.sp_set;	/* array assignment */
			sigminusset(&l->l_sigmask, &tmp);

#ifdef CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
			    tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			mutex_exit(p->p_lock);
			break;
#ifdef notyet
			sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
			    p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
			    p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
			    l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
			    l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
		}
		mutex_exit(p->p_lock);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
		/* Op went through, interrupt or not... */
		if (vmp->vm_flags & VM_WRITE) {
			error = 0;
			*outSize = vmp->vm_outSize;
		}

		else if (!(vmp->vm_flags & VM_READ)) {
			/* Interrupted before venus read it. */
#ifdef CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
				    vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

			TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
			error = EINTR;
		}

		else {
			/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
			   upcall started */
			/* Interrupted after start of upcall, send venus a signal */
			struct coda_in_hdr *dog;
			struct vmsg *svmp;

#ifdef CODA_VERBOSE
			if (1)
#else
			if (codadebug)
#endif
				myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
				    vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

			TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
			error = EINTR;

			CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

			CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
			dog = (struct coda_in_hdr *)svmp->vm_data;

			svmp->vm_flags = 0;
			dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
			dog->unique = svmp->vm_unique = vmp->vm_unique;
			svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */		svmp->vm_outSize = sizeof (struct coda_in_hdr);

			if (codadebug)
				myprintf(("coda_call: enqueuing signal msg (%d, %d)\n",
				    svmp->vm_opcode, svmp->vm_unique));

			/* insert at head of queue */
			TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
			selnotify(&(vcp->vc_selproc), 0, 0);
		}
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
		if (codadebug)
			myprintf(("vcclose woke op %d.%d flags %d\n",
			    vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}

MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);

static int
vcoda_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
	{
		int cmajor, dmajor;
		vcodaattach(NVCODA);

		dmajor = cmajor = -1;
		return devsw_attach("vcoda", NULL, &dmajor,
		    &vcoda_cdevsw, &cmajor);
	}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
	{
		for (size_t i = 0; i < NVCODA; i++) {
			struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
			if (VC_OPEN(vcp))
				return EBUSY;
		}
		return devsw_detach(NULL, &vcoda_cdevsw);
	}
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}
	return error;
}
