1 /* $NetBSD: coda_psdev.c,v 1.23 2003/01/06 13:05:09 wiz Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
41 /*
42 * This code was written for the Coda file system at Carnegie Mellon
43 * University. Contributors include David Steere, James Kistler, and
44 * M. Satyanarayanan. */
45
46 /* These routines define the pseudo device for communication between
47 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48 * but I moved them to make it easier to port the Minicache without
49 * porting coda. -- DCS 10/12/94
50 */
51
52 /* These routines are the device entry points for Venus. */
53
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.23 2003/01/06 13:05:09 wiz Exp $");
56
57 extern int coda_nc_initialized; /* Set if cache has been initialized */
58
59 #ifdef _LKM
60 #define NVCODA 4
61 #else
62 #include <vcoda.h>
63 #endif
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/proc.h>
70 #include <sys/mount.h>
71 #include <sys/file.h>
72 #include <sys/ioctl.h>
73 #include <sys/poll.h>
74 #include <sys/select.h>
75 #include <sys/conf.h>
76
77 #include <miscfs/syncfs/syncfs.h>
78
79 #include <coda/coda.h>
80 #include <coda/cnode.h>
81 #include <coda/coda_namecache.h>
82 #include <coda/coda_io.h>
83
84 #define CTL_C
85
/* Debug switch: when nonzero, the ENTRY macro logs every entry point. */
86 int coda_psdev_print_entry = 0;
/* Number of upcalls still sleeping in coda_call(); vc_nb_close() waits
 * on this address until the count drains to zero. */
87 static
88 int outstanding_upcalls = 0;
/* Base sleep priority for all Coda tsleep() calls.  PZERO - 1 is below
 * PZERO, so sleeps are not signal-interruptible unless coda_pcatch is
 * or'ed in -- confirm against tsleep(9). */
89 int coda_call_sleep = PZERO - 1;
90 #ifdef CTL_C
/* Extra tsleep flag so ^C/^Z can wake a sleeping upcall (see coda_call). */
91 int coda_pcatch = PCATCH;
92 #else
93 #endif
94
/* Trace helper: logs the current function name when tracing is enabled. */
95 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
96
97 void vcodaattach(int n);
98
99 dev_type_open(vc_nb_open);
100 dev_type_close(vc_nb_close);
101 dev_type_read(vc_nb_read);
102 dev_type_write(vc_nb_write);
103 dev_type_ioctl(vc_nb_ioctl);
104 dev_type_poll(vc_nb_poll);
105 dev_type_kqfilter(vc_nb_kqfilter);
106
/*
 * Character-device switch for the Coda pseudo-device.  Entries not
 * implemented here use the standard stubs (nostop, notty, nommap).
 */
107 const struct cdevsw vcoda_cdevsw = {
108 vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
109 nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter,
110 };
111
/*
 * One upcall message exchanged with the Venus daemon.  A message sits
 * on the per-unit vc_requests queue until Venus reads it and on
 * vc_replys until Venus writes the answer back; the issuing thread
 * sleeps on vm_sleep in the meantime (see coda_call).
 */
112 struct vmsg {
113 struct queue vm_chain; /* queue linkage: vc_requests, then vc_replys */
114 caddr_t vm_data; /* upcall request/reply buffer */
115 u_short vm_flags; /* VM_* progress bits below */
116 u_short vm_inSize; /* Size is at most 5000 bytes */
117 u_short vm_outSize;
118 u_short vm_opcode; /* copied from data to save ptr lookup */
119 int vm_unique;
120 caddr_t vm_sleep; /* Not used by Mach. */
121 };
122
123 #define VM_READ 1 /* Venus has read the request (set in vc_nb_read) */
124 #define VM_WRITE 2 /* Venus has written the reply (set in vc_nb_write) */
125 #define VM_INTR 4 /* interruption flag -- not set anywhere in this file */
126
127 /* vcodaattach: do nothing */
128 void
129 vcodaattach(n)
130 int n;
131 {
132 }
133
134 /*
135 * These functions are written for NetBSD.
136 */
137 int
138 vc_nb_open(dev, flag, mode, p)
139 dev_t dev;
140 int flag;
141 int mode;
142 struct proc *p; /* NetBSD only */
143 {
144 struct vcomm *vcp;
145
146 ENTRY;
147
148 if (minor(dev) >= NVCODA || minor(dev) < 0)
149 return(ENXIO);
150
151 if (!coda_nc_initialized)
152 coda_nc_init();
153
154 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
155 if (VC_OPEN(vcp))
156 return(EBUSY);
157
158 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
159 INIT_QUEUE(vcp->vc_requests);
160 INIT_QUEUE(vcp->vc_replys);
161 MARK_VC_OPEN(vcp);
162
163 coda_mnttbl[minor(dev)].mi_vfsp = NULL;
164 coda_mnttbl[minor(dev)].mi_rootvp = NULL;
165
166 return(0);
167 }
168
/*
 * vc_nb_close: close the Coda communication device.
 *
 * If a filesystem was mounted through this unit, wake every thread
 * still waiting on an upcall, mark the channel closed, wait for those
 * sleepers to drain, and then force-unmount the filesystem so that a
 * dead Venus cannot leave an unusable mount point behind.  Unmount
 * errors are only logged; the close itself still returns 0.
 * Returns ENXIO for a bad unit; panics if the unit was not open.
 */
169 int
170 vc_nb_close (dev, flag, mode, p)
171 dev_t dev;
172 int flag;
173 int mode;
174 struct proc *p;
175 {
176 struct vcomm *vcp;
177 struct vmsg *vmp, *nvmp = NULL;
178 struct coda_mntinfo *mi;
179 int err;
180
181 ENTRY;
182
183 if (minor(dev) >= NVCODA || minor(dev) < 0)
184 return(ENXIO);
185
186 mi = &coda_mnttbl[minor(dev)];
187 vcp = &(mi->mi_vcomm);
188
189 if (!VC_OPEN(vcp))
190 panic("vcclose: not open");
191
192 /* prevent future operations on this vfs from succeeding by auto-
193 * unmounting any vfs mounted via this device. This frees user or
194 * sysadm from having to remember where all mount points are located.
195 * Put this before WAKEUPs to avoid queuing new messages between
196 * the WAKEUP and the unmount (which can happen if we're unlucky)
197 */
198 if (!mi->mi_rootvp) {
199 /* just a simple open/close w no mount */
200 MARK_VC_CLOSED(vcp);
201 return 0;
202 }
203
204 /* Let unmount know this is for real */
205 /*
206 * XXX Freeze syncer. Must do this before locking the
207 * mount point. See dounmount() for details.
208 */
209 lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
210 VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
211 if (vfs_busy(mi->mi_vfsp, 0, 0)) {
212 lockmgr(&syncer_lock, LK_RELEASE, NULL);
213 return (EBUSY);
214 }
215 coda_unmounting(mi->mi_vfsp);
216
217 /* Wakeup clients so they can return. */
218 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
219 !EOQ(vmp, vcp->vc_requests);
220 vmp = nvmp)
221 {
/* Grab the successor first: the CODA_SIGNAL branch frees vmp. */
222 nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
223 /* Free signal request messages and don't wakeup cause
224 no one is waiting. */
225 if (vmp->vm_opcode == CODA_SIGNAL) {
226 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
227 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
228 continue;
229 }
/* The woken coda_call() decrements outstanding_upcalls on exit. */
230 outstanding_upcalls++;
231 wakeup(&vmp->vm_sleep);
232 }
233
/* Also wake callers whose requests Venus already read but not answered. */
234 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
235 !EOQ(vmp, vcp->vc_replys);
236 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
237 {
238 outstanding_upcalls++;
239 wakeup(&vmp->vm_sleep);
240 }
241
242 MARK_VC_CLOSED(vcp);
243
/* Wait for the last outstanding upcall to return (coda_call wakes us). */
244 if (outstanding_upcalls) {
245 #ifdef CODA_VERBOSE
246 printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
247 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
248 printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
249 #else
250 (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
251 #endif
252 }
253
254 err = dounmount(mi->mi_vfsp, flag, p);
255 if (err)
256 myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
257 err, minor(dev)));
/* Intentionally report success even if the unmount failed. */
258 return 0;
259 }
260
/*
 * vc_nb_read: Venus reads the next pending upcall request.
 *
 * Copies the message at the head of vc_requests to userspace and
 * dequeues it.  CODA_SIGNAL messages expect no reply and are freed
 * immediately; everything else is marked VM_READ and moved to
 * vc_replys to await Venus's answer via vc_nb_write.
 * Returns 0 with no data when the queue is empty; EINVAL on a copy
 * failure; ENXIO for a bad unit.
 */
261 int
262 vc_nb_read(dev, uiop, flag)
263 dev_t dev;
264 struct uio *uiop;
265 int flag;
266 {
267 struct vcomm * vcp;
268 struct vmsg *vmp;
269 int error = 0;
270
271 ENTRY;
272
273 if (minor(dev) >= NVCODA || minor(dev) < 0)
274 return(ENXIO);
275
276 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
277 /* Get message at head of request queue. */
278 if (EMPTY(vcp->vc_requests))
279 return(0); /* Nothing to read */
280
281 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
282
283 /* Move the input args into userspace */
284 uiop->uio_rw = UIO_READ;
285 error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
286 if (error) {
287 myprintf(("vcread: error (%d) on uiomove\n", error));
288 error = EINVAL;
289 }
290
291 #ifdef OLD_DIAGNOSTIC
292 if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
293 panic("vc_nb_read: bad chain");
294 #endif
295
/* Dequeue even after a copy error; the message still moves on below. */
296 REMQUE(vmp->vm_chain);
297
298 /* If request was a signal, free up the message and don't
299 enqueue it in the reply queue. */
300 if (vmp->vm_opcode == CODA_SIGNAL) {
301 if (codadebug)
302 myprintf(("vcread: signal msg (%d, %d)\n",
303 vmp->vm_opcode, vmp->vm_unique));
304 CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
305 CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
306 return(error);
307 }
308
/* Park the message until Venus writes the matching reply. */
309 vmp->vm_flags |= VM_READ;
310 INSQUE(vmp->vm_chain, vcp->vc_replys);
311
312 return(error);
313 }
314
/*
 * vc_nb_write: Venus writes back a reply or issues a downcall.
 *
 * Peeks at the leading (opcode, sequence) pair.  Downcall opcodes are
 * copied in full and handed to handleDownCall(); reply opcodes are
 * matched by sequence number against vc_replys, copied into the
 * sleeping caller's buffer, marked VM_WRITE, and the caller is woken.
 * Returns ESRCH when no matching request is waiting, EINVAL on copy
 * or size errors, ENXIO for a bad unit.
 */
315 int
316 vc_nb_write(dev, uiop, flag)
317 dev_t dev;
318 struct uio *uiop;
319 int flag;
320 {
321 struct vcomm * vcp;
322 struct vmsg *vmp;
323 struct coda_out_hdr *out;
324 u_long seq;
325 u_long opcode;
326 int buf[2];
327 int error = 0;
328
329 ENTRY;
330
331 if (minor(dev) >= NVCODA || minor(dev) < 0)
332 return(ENXIO);
333
334 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
335
336 /* Peek at the opcode and uniquifier without transferring the data. */
337 uiop->uio_rw = UIO_WRITE;
338 error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop);
339 if (error) {
340 myprintf(("vcwrite: error (%d) on uiomove\n", error));
341 return(EINVAL);
342 }
343
344 opcode = buf[0];
345 seq = buf[1];
346
347 if (codadebug)
348 myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));
349
350 if (DOWNCALL(opcode)) {
351 union outputArgs pbuf;
352
/* Resume the copy just past the header words already consumed above. */
353 /* get the rest of the data. */
354 uiop->uio_rw = UIO_WRITE;
355 error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
356 if (error) {
357 myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
358 error, opcode, seq));
359 return(EINVAL);
360 }
361
362 return handleDownCall(opcode, &pbuf);
363 }
364
365 /* Look for the message on the (waiting for) reply queue. */
366 for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
367 !EOQ(vmp, vcp->vc_replys);
368 vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
369 {
370 if (vmp->vm_unique == seq) break;
371 }
372
373 if (EOQ(vmp, vcp->vc_replys)) {
374 if (codadebug)
375 myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));
376
377 return(ESRCH);
378 }
379
380 /* Remove the message from the reply queue */
381 REMQUE(vmp->vm_chain);
382
383 /* move data into response buffer. */
384 out = (struct coda_out_hdr *)vmp->vm_data;
385 /* Don't need to copy opcode and uniquifier. */
386
387 /* get the rest of the data. */
/* Venus must not send more than the caller's buffer can hold. */
388 if (vmp->vm_outSize < uiop->uio_resid) {
389 myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
390 vmp->vm_outSize, (unsigned long) uiop->uio_resid));
391 wakeup(&vmp->vm_sleep); /* Notify caller of the error. */
392 return(EINVAL);
393 }
394
395 buf[0] = uiop->uio_resid; /* Save this value. */
396 uiop->uio_rw = UIO_WRITE;
397 error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
398 if (error) {
399 myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
400 error, opcode, seq));
401 return(EINVAL);
402 }
403
404 /* I don't think these are used, but just in case. */
405 /* XXX - aren't these two already correct? -bnoble */
406 out->opcode = opcode;
407 out->unique = seq;
408 vmp->vm_outSize = buf[0]; /* Amount of data transferred? */
/* Mark the reply complete and wake the sleeper in coda_call(). */
409 vmp->vm_flags |= VM_WRITE;
410 wakeup(&vmp->vm_sleep);
411
412 return(0);
413 }
414
415 int
416 vc_nb_ioctl(dev, cmd, addr, flag, p)
417 dev_t dev;
418 u_long cmd;
419 caddr_t addr;
420 int flag;
421 struct proc *p;
422 {
423 ENTRY;
424
425 switch(cmd) {
426 case CODARESIZE: {
427 struct coda_resize *data = (struct coda_resize *)addr;
428 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
429 break;
430 }
431 case CODASTATS:
432 if (coda_nc_use) {
433 coda_nc_gather_stats();
434 return(0);
435 } else {
436 return(ENODEV);
437 }
438 break;
439 case CODAPRINT:
440 if (coda_nc_use) {
441 print_coda_nc();
442 return(0);
443 } else {
444 return(ENODEV);
445 }
446 break;
447 case CIOC_KERNEL_VERSION:
448 switch (*(u_int *)addr) {
449 case 0:
450 *(u_int *)addr = coda_kernel_version;
451 return 0;
452 break;
453 case 1:
454 case 2:
455 if (coda_kernel_version != *(u_int *)addr)
456 return ENOENT;
457 else
458 return 0;
459 default:
460 return ENOENT;
461 }
462 break;
463 default :
464 return(EINVAL);
465 break;
466 }
467 }
468
469 int
470 vc_nb_poll(dev, events, p)
471 dev_t dev;
472 int events;
473 struct proc *p;
474 {
475 struct vcomm *vcp;
476 int event_msk = 0;
477
478 ENTRY;
479
480 if (minor(dev) >= NVCODA || minor(dev) < 0)
481 return(ENXIO);
482
483 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
484
485 event_msk = events & (POLLIN|POLLRDNORM);
486 if (!event_msk)
487 return(0);
488
489 if (!EMPTY(vcp->vc_requests))
490 return(events & (POLLIN|POLLRDNORM));
491
492 selrecord(p, &(vcp->vc_selproc));
493
494 return(0);
495 }
496
497 static void
498 filt_vc_nb_detach(struct knote *kn)
499 {
500 struct vcomm *vcp = kn->kn_hook;
501
502 SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
503 }
504
505 static int
506 filt_vc_nb_read(struct knote *kn, long hint)
507 {
508 struct vcomm *vcp = kn->kn_hook;
509 struct vmsg *vmp;
510
511 if (EMPTY(vcp->vc_requests))
512 return (0);
513
514 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
515
516 kn->kn_data = vmp->vm_inSize;
517 return (1);
518 }
519
/* Read-filter operations: detach/event handlers defined above. */
520 static const struct filterops vc_nb_read_filtops =
521 { 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
522
523 int
524 vc_nb_kqfilter(dev_t dev, struct knote *kn)
525 {
526 struct vcomm *vcp;
527 struct klist *klist;
528
529 ENTRY;
530
531 if (minor(dev) >= NVCODA || minor(dev) < 0)
532 return(ENXIO);
533
534 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
535
536 switch (kn->kn_filter) {
537 case EVFILT_READ:
538 klist = &vcp->vc_selproc.sel_klist;
539 kn->kn_fop = &vc_nb_read_filtops;
540 break;
541
542 default:
543 return (1);
544 }
545
546 kn->kn_hook = vcp;
547
548 SLIST_INSERT_HEAD(klist, kn, kn_selnext);
549
550 return (0);
551 }
552
553 /*
554 * Statistics
555 */
556 struct coda_clstat coda_clstat; /* per-opcode upcall counters, bumped in coda_call() */
557
558 /*
559 * Key question: whether to sleep interruptibly or uninterruptibly when
560 * waiting for Venus. The former seems better (cause you can ^C a
561 * job), but then GNU-EMACS completion breaks. Use tsleep with no
562 * timeout, and no longjmp happens. But, when sleeping
563 * "uninterruptibly", we don't get told if it returns abnormally
564 * (e.g. kill -9).
565 */
566
/*
 * coda_call: issue one upcall to Venus and wait for the reply.
 *
 * Builds a vmsg around the caller's buffer, queues it on vc_requests,
 * pokes Venus via selnotify(), and sleeps on the message.  The CTL_C
 * variant sleeps interruptibly in 2-second slices so ^C/^Z can be
 * noticed; SIGIO/SIGALRM are temporarily masked so emacs-style
 * SA_RESTART handlers do not spin us, and the original pending-signal
 * set is restored afterwards.
 *
 * On wakeup:
 *   - VM_WRITE set:  Venus answered; return its result, set *outSize.
 *   - VM_READ clear: interrupted before Venus read it; dequeue, EINTR.
 *   - VM_READ only:  interrupted mid-upcall; queue a CODA_SIGNAL
 *                    message at the head of vc_requests, EINTR.
 *   - channel closed (Venus died): ENODEV.
 */
567 int
568 coda_call(mntinfo, inSize, outSize, buffer)
569 struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer;
570 {
571 struct vcomm *vcp;
572 struct vmsg *vmp;
573 int error;
574 #ifdef CTL_C
575 struct proc *p = curproc;
576 sigset_t psig_omask;
577 int i;
/* Snapshot pending signals; restored after the sleep loop below. */
578 psig_omask = p->p_sigctx.ps_siglist; /* array assignment */
579 #endif
580 if (mntinfo == NULL) {
581 /* Unlikely, but could be a race condition with a dying warden */
582 return ENODEV;
583 }
584
585 vcp = &(mntinfo->mi_vcomm);
586
587 coda_clstat.ncalls++;
588 coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;
589
590 if (!VC_OPEN(vcp))
591 return(ENODEV);
592
593 CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
594 /* Format the request message. */
595 vmp->vm_data = buffer;
596 vmp->vm_flags = 0;
597 vmp->vm_inSize = inSize;
598 vmp->vm_outSize
599 = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
600 vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
601 vmp->vm_unique = ++vcp->vc_seq;
602 if (codadebug)
603 myprintf(("Doing a call for %d.%d\n",
604 vmp->vm_opcode, vmp->vm_unique));
605
606 /* Fill in the common input args. */
607 ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;
608
609 /* Append msg to request queue and poke Venus. */
610 INSQUE(vmp->vm_chain, vcp->vc_requests);
611 selnotify(&(vcp->vc_selproc), 0);
612
613 /* We can be interrupted while we wait for Venus to process
614 * our request. If the interrupt occurs before Venus has read
615 * the request, we dequeue and return. If it occurs after the
616 * read but before the reply, we dequeue, send a signal
617 * message, and return. If it occurs after the reply we ignore
618 * it. In no case do we want to restart the syscall. If it
619 * was interrupted by a venus shutdown (vcclose), return
620 * ENODEV. */
621
622 /* Ignore return, We have to check anyway */
623 #ifdef CTL_C
624 /* This is work in progress. Setting coda_pcatch lets tsleep reawaken
625 on a ^c or ^z. The problem is that emacs sets certain interrupts
626 as SA_RESTART. This means that we should exit sleep handle the
627 "signal" and then go to sleep again. Mostly this is done by letting
628 the syscall complete and be restarted. We are not idempotent and
629 can not do this. A better solution is necessary.
630 */
631 i = 0;
632 do {
633 error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
634 if (error == 0)
635 break;
636 else if (error == EWOULDBLOCK) {
/* Timeout slice expired: just loop and sleep again. */
637 #ifdef CODA_VERBOSE
638 printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
639 #endif
640 } else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
/* Mask SIGIO and retry the sleep rather than aborting the upcall. */
641 sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
642 #ifdef CODA_VERBOSE
643 printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
644 #endif
645 } else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
/* Same treatment for SIGALRM. */
646 sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
647 #ifdef CODA_VERBOSE
648 printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
649 #endif
650 } else {
/* Any other signal genuinely interrupts the upcall. */
651 sigset_t tmp;
652 tmp = p->p_sigctx.ps_siglist; /* array assignment */
653 sigminusset(&p->p_sigctx.ps_sigmask, &tmp);
654
655 #ifdef CODA_VERBOSE
656 printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
657 printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
658 p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
659 p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
660 p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
661 p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
662 tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
663 #endif
664 break;
665 #ifdef notyet
666 sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
667 printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
668 p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
669 p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
670 p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
671 p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
672 #endif
673 }
674 } while (error && i++ < 128 && VC_OPEN(vcp));
/* Put back the pending-signal snapshot taken on entry. */
675 p->p_sigctx.ps_siglist = psig_omask; /* array assignment */
676 #else
677 (void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
678 #endif
679 if (VC_OPEN(vcp)) { /* Venus is still alive */
680 /* Op went through, interrupt or not... */
681 if (vmp->vm_flags & VM_WRITE) {
682 error = 0;
683 *outSize = vmp->vm_outSize;
684 }
685
686 else if (!(vmp->vm_flags & VM_READ)) {
687 /* Interrupted before venus read it. */
688 #ifdef CODA_VERBOSE
689 if (1)
690 #else
691 if (codadebug)
692 #endif
693 myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
694 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
695 REMQUE(vmp->vm_chain);
696 error = EINTR;
697 }
698
699 else {
700 /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
701 upcall started */
702 /* Interrupted after start of upcall, send venus a signal */
703 struct coda_in_hdr *dog;
704 struct vmsg *svmp;
705
706 #ifdef CODA_VERBOSE
707 if (1)
708 #else
709 if (codadebug)
710 #endif
711 myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
712 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
713
714 REMQUE(vmp->vm_chain);
715 error = EINTR;
716
/* Build a CODA_SIGNAL message carrying the same unique id. */
717 CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));
718
719 CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
720 dog = (struct coda_in_hdr *)svmp->vm_data;
721
722 svmp->vm_flags = 0;
723 dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
724 dog->unique = svmp->vm_unique = vmp->vm_unique;
725 svmp->vm_inSize = sizeof (struct coda_in_hdr);
726 /*??? rvb */ svmp->vm_outSize = sizeof (struct coda_in_hdr);
727
728 if (codadebug)
729 myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
730 svmp->vm_opcode, svmp->vm_unique));
731
732 /* insert at head of queue! */
733 INSQUE(svmp->vm_chain, vcp->vc_requests);
734 selnotify(&(vcp->vc_selproc), 0);
735 }
736 }
737
738 else { /* If venus died (!VC_OPEN(vcp)) */
739 if (codadebug)
740 myprintf(("vcclose woke op %d.%d flags %d\n",
741 vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
742
743 error = ENODEV;
744 }
745
746 CODA_FREE(vmp, sizeof(struct vmsg));
747
/* vc_nb_close() sleeps on this counter while draining upcalls. */
748 if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
749 wakeup(&outstanding_upcalls);
750
751 if (!error)
752 error = ((struct coda_out_hdr *)buffer)->result;
753 return(error);
754 }
755
756