/*	$NetBSD: coda_psdev.c,v 1.52.2.1 2014/08/10 06:54:29 tls Exp $	*/
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University.  Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */
45
46 /* These routines define the pseudo device for communication between
47 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48 * but I moved them to make it easier to port the Minicache without
49 * porting coda. -- DCS 10/12/94
50 *
51 * Following code depends on file-system CODA.
52 */
53
54 /* These routines are the device entry points for Venus. */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.52.2.1 2014/08/10 06:54:29 tls Exp $");
58
59 extern int coda_nc_initialized; /* Set if cache has been initialized */
60
61 #ifndef _KERNEL_OPT
62 #define NVCODA 4
63 #else
64 #include <vcoda.h>
65 #endif
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/kernel.h>
70 #include <sys/malloc.h>
71 #include <sys/proc.h>
72 #include <sys/mount.h>
73 #include <sys/file.h>
74 #include <sys/ioctl.h>
75 #include <sys/poll.h>
76 #include <sys/select.h>
77 #include <sys/conf.h>
78 #include <sys/atomic.h>
79 #include <sys/module.h>
80
81 #include <miscfs/syncfs/syncfs.h>
82
83 #include <coda/coda.h>
84 #include <coda/cnode.h>
85 #include <coda/coda_namecache.h>
86 #include <coda/coda_io.h>
87
/* CTL_C: compile in the interruptible-sleep (^C) handling in coda_call(). */
#define CTL_C

/* When nonzero, the ENTRY macro logs entry into each device routine. */
int coda_psdev_print_entry = 0;
/* Count of upcalls still in flight at close time; vc_nb_close() sleeps
 * on this and coda_call() wakes it when the count drains to zero. */
static
int outstanding_upcalls = 0;
/* Sleep priority for coda_call()/vc_nb_close() tsleep calls. */
int coda_call_sleep = PZERO - 1;
#ifdef	CTL_C
/* OR-ed into the tsleep priority so signals can interrupt the sleep. */
int coda_pcatch = PCATCH;
#else
#endif

/* Version handshake value returned/checked by CIOC_KERNEL_VERSION. */
int coda_kernel_version = CODA_KERNEL_VERSION;

/* Debug trace of routine entry, gated on coda_psdev_print_entry. */
#define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
102
void vcodaattach(int n);

/*
 * Prototypes for the character-device entry points; the dev_type_*
 * macros expand to declarations matching the struct cdevsw slots.
 */
dev_type_open(vc_nb_open);
dev_type_close(vc_nb_close);
dev_type_read(vc_nb_read);
dev_type_write(vc_nb_write);
dev_type_ioctl(vc_nb_ioctl);
dev_type_poll(vc_nb_poll);
dev_type_kqfilter(vc_nb_kqfilter);

/* Character-device switch for the Venus communication pseudo-device. */
const struct cdevsw vcoda_cdevsw = {
	.d_open = vc_nb_open,
	.d_close = vc_nb_close,
	.d_read = vc_nb_read,
	.d_write = vc_nb_write,
	.d_ioctl = vc_nb_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = vc_nb_poll,
	.d_mmap = nommap,
	.d_kqfilter = vc_nb_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};
127
/*
 * An upcall message in transit between the kernel and the Venus server.
 * Requests wait on vc_requests until Venus reads them via vc_nb_read();
 * messages expecting an answer then move to vc_replies until Venus
 * writes the reply back via vc_nb_write().
 */
struct vmsg {
    TAILQ_ENTRY(vmsg) vm_chain;	/* linkage on vc_requests/vc_replies */
    void *	 vm_data;	/* in/out argument buffer (header first) */
    u_short	 vm_flags;	/* VM_* progress flags, see below */
    u_short	 vm_inSize;	/* Size is at most 5000 bytes */
    u_short	 vm_outSize;	/* reply buffer size / actual reply size */
    u_short	 vm_opcode;	/* copied from data to save ptr lookup */
    int		 vm_unique;	/* sequence no. matching reply to request */
    void *	 vm_sleep;	/* Not used by Mach. (tsleep/wakeup channel) */
};

/* One slot per minor device; each slot is one Venus instance/mount. */
struct coda_mntinfo coda_mnttbl[NVCODA];

#define	VM_READ	    1		/* Venus has read the request */
#define	VM_WRITE    2		/* Venus has written the reply */
#define	VM_INTR	    4		/* caller was interrupted */
144
/* vcodaattach: do nothing */
/*
 * Pseudo-device attach routine, called with the configured unit count.
 * All per-unit state lives in the statically sized coda_mnttbl, so
 * there is nothing to initialize here.
 */
void
vcodaattach(int n)
{
}
150
151 /*
152 * These functions are written for NetBSD.
153 */
154 int
155 vc_nb_open(dev_t dev, int flag, int mode,
156 struct lwp *l)
157 {
158 struct vcomm *vcp;
159
160 ENTRY;
161
162 if (minor(dev) >= NVCODA)
163 return(ENXIO);
164
165 if (!coda_nc_initialized)
166 coda_nc_init();
167
168 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
169 if (VC_OPEN(vcp))
170 return(EBUSY);
171
172 selinit(&vcp->vc_selproc);
173 TAILQ_INIT(&vcp->vc_requests);
174 TAILQ_INIT(&vcp->vc_replies);
175 MARK_VC_OPEN(vcp);
176
177 coda_mnttbl[minor(dev)].mi_vfsp = NULL;
178 coda_mnttbl[minor(dev)].mi_rootvp = NULL;
179
180 return(0);
181 }
182
/*
 * Close the Venus communication device.  Any filesystem mounted
 * through this device is forcibly unmounted, and every thread still
 * waiting on an upcall is woken so it can return an error.
 */
int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	struct coda_mntinfo *mi;
	int err;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	mi = &coda_mnttbl[minor(dev)];
	vcp = &(mi->mi_vcomm);

	if (!VC_OPEN(vcp))
		panic("vcclose: not open");

	/* prevent future operations on this vfs from succeeding by auto-
	 * unmounting any vfs mounted via this device. This frees user or
	 * sysadm from having to remember where all mount points are located.
	 * Put this before WAKEUPs to avoid queuing new messages between
	 * the WAKEUP and the unmount (which can happen if we're unlucky)
	 */
	if (!mi->mi_rootvp) {
		/* just a simple open/close w no mount */
		MARK_VC_CLOSED(vcp);
		return 0;
	}

	/* Let unmount know this is for real */
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	coda_unmounting(mi->mi_vfsp);

	/* Wakeup clients so they can return. */
	while ((vmp = TAILQ_FIRST(&vcp->vc_requests)) != NULL) {
		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

		/* Free signal request messages and don't wakeup cause
		   no one is waiting. */
		if (vmp->vm_opcode == CODA_SIGNAL) {
			CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
			CODA_FREE(vmp, sizeof(struct vmsg));
			continue;
		}
		/* coda_call() decrements this and wakes us at zero (below). */
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	while ((vmp = TAILQ_FIRST(&vcp->vc_replies)) != NULL) {
		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	/* Mark closed first so woken callers see Venus as gone. */
	MARK_VC_CLOSED(vcp);

	if (outstanding_upcalls) {
		/* Wait until every woken caller has noticed and returned. */
#ifdef	CODA_VERBOSE
		printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
		printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
	}

	err = dounmount(mi->mi_vfsp, flag, l);
	if (err)
		myprintf(("Error %d unmounting vfs in vcclose(%llu)\n",
		    err, (unsigned long long)minor(dev)));
	seldestroy(&vcp->vc_selproc);
	return 0;
}
259
/*
 * Read from the pseudo-device: copy the upcall at the head of the
 * request queue out to Venus.  CODA_SIGNAL messages are one-way and
 * are freed here; all other messages move to the reply queue to wait
 * for Venus's answer.  An empty queue reads as 0 bytes.
 */
int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *	vcp;
	struct vmsg *vmp;
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Get message at head of request queue. */
	vmp = TAILQ_FIRST(&vcp->vc_requests);
	if (vmp == NULL)
		return(0);	/* Nothing to read */

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

	/* NOTE(review): the message is dequeued (and re-queued on the reply
	   list below) even when uiomove failed; the waiting caller then
	   relies on a later reply, signal, or device close to make
	   progress -- confirm this is intended. */
	TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
			    vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE(vmp->vm_data, VC_IN_NO_DATA);
		CODA_FREE(vmp, sizeof(struct vmsg));
		return(error);
	}

	vmp->vm_flags |= VM_READ;
	TAILQ_INSERT_TAIL(&vcp->vc_replies, vmp, vm_chain);

	return(error);
}
305
/*
 * Write to the pseudo-device: Venus delivers either a downcall (an
 * unsolicited cache-control message, dispatched to handleDownCall())
 * or the reply to a pending upcall, matched against the reply queue
 * by the "unique" sequence number in the message header.
 */
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm *	vcp;
	struct vmsg *vmp;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int tbuf[2];
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Peek at the opcode, unique without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(tbuf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = tbuf[0];
	seq = tbuf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove(&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
			    error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	TAILQ_FOREACH(vmp, &vcp->vc_replies, vm_chain) {
		if (vmp->vm_unique == seq) break;
	}

	if (vmp == NULL) {
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);
	}

	/* Remove the message from the reply queue */
	TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
		    vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
		return(EINVAL);
	}

	tbuf[0] = uiop->uio_resid; 	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove(&out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
		    error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	vmp->vm_outSize = tbuf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	/* Wake the thread sleeping in coda_call() on this message. */
	wakeup(&vmp->vm_sleep);

	return(0);
}
399
400 int
401 vc_nb_ioctl(dev_t dev, u_long cmd, void *addr, int flag,
402 struct lwp *l)
403 {
404 ENTRY;
405
406 switch(cmd) {
407 case CODARESIZE: {
408 struct coda_resize *data = (struct coda_resize *)addr;
409 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
410 break;
411 }
412 case CODASTATS:
413 if (coda_nc_use) {
414 coda_nc_gather_stats();
415 return(0);
416 } else {
417 return(ENODEV);
418 }
419 break;
420 case CODAPRINT:
421 if (coda_nc_use) {
422 print_coda_nc();
423 return(0);
424 } else {
425 return(ENODEV);
426 }
427 break;
428 case CIOC_KERNEL_VERSION:
429 switch (*(u_int *)addr) {
430 case 0:
431 *(u_int *)addr = coda_kernel_version;
432 return 0;
433 break;
434 case 1:
435 case 2:
436 if (coda_kernel_version != *(u_int *)addr)
437 return ENOENT;
438 else
439 return 0;
440 default:
441 return ENOENT;
442 }
443 break;
444 default :
445 return(EINVAL);
446 break;
447 }
448 }
449
450 int
451 vc_nb_poll(dev_t dev, int events, struct lwp *l)
452 {
453 struct vcomm *vcp;
454 int event_msk = 0;
455
456 ENTRY;
457
458 if (minor(dev) >= NVCODA)
459 return(ENXIO);
460
461 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
462
463 event_msk = events & (POLLIN|POLLRDNORM);
464 if (!event_msk)
465 return(0);
466
467 if (!TAILQ_EMPTY(&vcp->vc_requests))
468 return(events & (POLLIN|POLLRDNORM));
469
470 selrecord(l, &(vcp->vc_selproc));
471
472 return(0);
473 }
474
/*
 * Knote detach: unhook this note from the device's selinfo klist.
 * kn_hook was set to the vcomm by vc_nb_kqfilter().
 */
static void
filt_vc_nb_detach(struct knote *kn)
{
	struct vcomm *vcp = kn->kn_hook;

	SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
}
482
483 static int
484 filt_vc_nb_read(struct knote *kn, long hint)
485 {
486 struct vcomm *vcp = kn->kn_hook;
487 struct vmsg *vmp;
488
489 vmp = TAILQ_FIRST(&vcp->vc_requests);
490 if (vmp == NULL)
491 return (0);
492
493 kn->kn_data = vmp->vm_inSize;
494 return (1);
495 }
496
497 static const struct filterops vc_nb_read_filtops =
498 { 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
499
500 int
501 vc_nb_kqfilter(dev_t dev, struct knote *kn)
502 {
503 struct vcomm *vcp;
504 struct klist *klist;
505
506 ENTRY;
507
508 if (minor(dev) >= NVCODA)
509 return(ENXIO);
510
511 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
512
513 switch (kn->kn_filter) {
514 case EVFILT_READ:
515 klist = &vcp->vc_selproc.sel_klist;
516 kn->kn_fop = &vc_nb_read_filtops;
517 break;
518
519 default:
520 return (EINVAL);
521 }
522
523 kn->kn_hook = vcp;
524
525 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
526
527 return (0);
528 }
529
/*
 * Statistics
 */
/* Upcall counters: total calls plus a per-opcode count (see coda_call). */
struct coda_clstat coda_clstat;
534
/*
 * Key question: whether to sleep interruptibly or uninterruptibly when
 * waiting for Venus.  The former seems better (because you can ^C a
 * job), but then GNU Emacs completion breaks.  Use tsleep with no
 * timeout, and no longjmp happens.  But, when sleeping
 * "uninterruptibly", we don't get told if it returns abnormally
 * (e.g. kill -9).
 */
543
/*
 * Send an upcall to Venus and wait for the reply.
 *
 * mntinfo  identifies the Venus instance (may be NULL in a shutdown race).
 * inSize   size of the request already formatted in buffer.
 * outSize  in: caller's reply-buffer size; out: actual reply size.
 * buffer   holds the request (struct coda_in_hdr first) and receives
 *          the reply in place.
 *
 * Returns 0 (with Venus's result folded in from the reply header),
 * EINTR if the caller was interrupted, or ENODEV if Venus is not
 * running or dies while we wait.
 */
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	void *buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;
	int i;
	/* Saved signal mask; restored after the sleep loop below. */
	psig_omask = l->l_sigmask;	/* XXXSA */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	TAILQ_INSERT_TAIL(&vcp->vc_requests, vmp, vm_chain);
	selnotify(&(vcp->vc_selproc), 0, 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request.  If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall.  If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV.  */

	/* Ignore return, We have to check anyway */
#ifdef	CTL_C
	/* This is work in progress.  Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z.  The problem is that emacs sets certain interrupts
	   as SA_RESTART.  This means that we should exit sleep handle the
	   "signal" and then go to sleep again.  Mostly this is done by letting
	   the syscall complete and be restarted.  We are not idempotent and
	   can not do this.  A better solution is necessary.
	 */
	i = 0;
	do {
		/* Interruptible 2-second sleeps, retried up to 128 times.
		 * Pending SIGIO/SIGALRM are masked and the sleep retried so
		 * they don't abort the upcall; any other signal breaks out. */
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		mutex_enter(p->p_lock);
		if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigispending(l, SIGIO)) {
			sigaddset(&l->l_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigispending(l, SIGALRM)) {
			sigaddset(&l->l_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigpend.sp_set;	/* array assignment */
			sigminusset(&l->l_sigmask, &tmp);

#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
				p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
				p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
				l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
				l->l_sigmask.__bits[2], l->l_sigmask.__bits[3],
				tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			mutex_exit(p->p_lock);
			break;
#ifdef	notyet
			sigminusset(&l->l_sigmask, &p->p_sigpend.sp_set);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
				p->p_sigpend.sp_set.__bits[0], p->p_sigpend.sp_set.__bits[1],
				p->p_sigpend.sp_set.__bits[2], p->p_sigpend.sp_set.__bits[3],
				l->l_sigmask.__bits[0], l->l_sigmask.__bits[1],
				l->l_sigmask.__bits[2], l->l_sigmask.__bits[3]);
#endif
		}
		mutex_exit(p->p_lock);
	} while (error && i++ < 128 && VC_OPEN(vcp));
	l->l_sigmask = psig_omask;	/* XXXSA */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	    /* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_requests, vmp, vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
		   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		TAILQ_REMOVE(&vcp->vc_replies, vmp, vm_chain);
		error = EINTR;

		/* Build a CODA_SIGNAL message reusing our unique id so
		 * Venus can cancel the in-progress operation. */
		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue */
		TAILQ_INSERT_HEAD(&vcp->vc_requests, svmp, vm_chain);
		selnotify(&(vcp->vc_selproc), 0, 0);
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

	    error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	/* vc_nb_close() sleeps on this counter while draining upcalls. */
	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
737
/* Register vcoda as a driver-class kernel module with no dependencies. */
MODULE(MODULE_CLASS_DRIVER, vcoda, NULL);

/*
 * Module control: attach the vcoda character device on load and detach
 * it on unload; unload is refused (EBUSY) while any unit is open.
 * When built into the kernel (!_MODULE) init/fini fall through and
 * return 0.
 */
static int
vcoda_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
	{
		int cmajor, dmajor;
		vcodaattach(NVCODA);

		/* -1 majors ask devsw_attach to allocate them dynamically. */
		dmajor = cmajor = -1;
		return devsw_attach("vcoda", NULL, &dmajor,
		    &vcoda_cdevsw, &cmajor);
	}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
	{
		/* Refuse to unload while any minor device is open. */
		for (size_t i = 0; i < NVCODA; i++) {
			struct vcomm *vcp = &coda_mnttbl[i].mi_vcomm;
			if (VC_OPEN(vcp))
				return EBUSY;
		}
		return devsw_detach(NULL, &vcoda_cdevsw);
	}
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}
	return error;
}
780