/*	$NetBSD: vnd.c,v 1.34 1997/05/19 22:08:56 pk Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vn.c 1.13 94/04/02$
 *
 *	@(#)vn.c	8.6 (Berkeley) 4/1/94
 */

/*
 * Vnode disk driver.
 *
 * Block/character interface to a vnode.  Allows one to treat a file
 * as a disk (e.g. build a filesystem in it, mount it, etc.).
 *
 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
 * instead of a simple VOP_RDWR.  We do this to avoid distorting the
 * local buffer cache.
 *
 * NOTE 2: There is a security issue involved with this driver.
 * Once mounted, all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file; the protection of the mapped file itself is ignored (effectively,
 * by using root credentials in all transactions).
 *
 * NOTE 3: Doesn't interact with leases; should it?
 */
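
/*
 * Typical use, sketched from userland (the exact vnconfig(8) syntax and
 * device names may differ between versions; this is an illustrative
 * outline only):
 *
 *	vnconfig vnd0 /var/tmp/diskimage	# map the file to vnd0
 *	newfs /dev/rvnd0a			# build a filesystem in it
 *	mount /dev/vnd0a /mnt			# and mount it
 */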

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/disklabel.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <dev/vndioctl.h>

#ifdef DEBUG
int dovndcluster = 1;
int vnddebug = 0x00;
#define	VDB_FOLLOW	0x01
#define	VDB_INIT	0x02
#define	VDB_IO		0x04
#endif

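/*
 * vnd has no real disk geometry; b_resid is overloaded as the sort key
 * handed to disksort() (vndstrategy() stores the block number there).
 */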
#define	b_cylin	b_resid

#define	vndunit(x)	DISKUNIT(x)

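/*
 * Each chunk of a split transfer is carried by a vndbuf: vb_buf is the
 * buffer handed to the underlying vnode's strategy routine, and vb_obp
 * points back at the original buffer so vndiodone() can account the
 * chunk against it and complete it once everything is in.
 */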
struct vndbuf {
	struct buf	vb_buf;
	struct buf	*vb_obp;
};

#define	getvndbuf()	\
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define	putvndbuf(vbp)	\
	free((caddr_t)(vbp), M_DEVBUF)

struct vnd_softc {
	int		 sc_flags;	/* flags */
	size_t		 sc_size;	/* size of vnd */
	struct vnode	*sc_vp;		/* vnode */
	struct ucred	*sc_cred;	/* credentials */
	int		 sc_maxactive;	/* max # of active requests */
	struct buf	 sc_tab;	/* transfer queue */
	char		 sc_xname[8];	/* XXX external name */
	struct disk	 sc_dkdev;	/* generic disk device info */
};

/* sc_flags */
#define	VNF_ALIVE	0x01
#define	VNF_INITED	0x02
#define	VNF_WANTED	0x40
#define	VNF_LOCKED	0x80

struct vnd_softc *vnd_softc;
int numvnd = 0;

/* called by main() at boot time */
void	vndattach __P((int));

void	vndclear __P((struct vnd_softc *));
void	vndstart __P((struct vnd_softc *));
int	vndsetcred __P((struct vnd_softc *, struct ucred *));
void	vndthrottle __P((struct vnd_softc *, struct vnode *));
void	vndiodone __P((struct buf *));
void	vndshutdown __P((void));

static	int	vndlock __P((struct vnd_softc *));
static	void	vndunlock __P((struct vnd_softc *));

void
vndattach(num)
	int num;
{
	char *mem;
	register u_long size;

	if (num <= 0)
		return;
	size = num * sizeof(struct vnd_softc);
	mem = malloc(size, M_DEVBUF, M_NOWAIT);
	if (mem == NULL) {
		printf("WARNING: no memory for vnode disks\n");
		return;
	}
	bzero(mem, size);
	vnd_softc = (struct vnd_softc *)mem;
	numvnd = num;
}

int
vndopen(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;

	/*
	 * XXX Should support disklabels.
	 */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif
	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);
	pmask = (1 << part);

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

int
vndclose(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];
	register struct vndbuf *nbp;
	register int bn, bsize, resid;
	register caddr_t addr;
	int sz, flags, error;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	if (bn < 0 || bn + sz > vnd->sc_size) {
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	bn = dbtob(bn);
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

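		/*
		 * Size this chunk: if bn is not aligned to an fs block,
		 * stop at the end of that block; otherwise take (1 + nra)
		 * contiguous blocks, using the read-ahead hint returned by
		 * VOP_BMAP to cluster.  Never exceed what is left of the
		 * original request.
		 */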
		if ((off = bn % bsize) != 0)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
			    vnd->sc_vp, vp, bn, nbn, sz);
#endif

		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
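		/*
		 * Clip the parent buffer's dirty and valid regions to this
		 * chunk.  (b_bcount - resid) is how far into the original
		 * transfer we are, so shift the parent's offsets back by
		 * that much and clamp to [0, sz]; if the parent records no
		 * region at all, cover the whole chunk.
		 */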
		if (bp->b_dirtyend == 0) {
			nbp->vb_buf.b_dirtyoff = 0;
			nbp->vb_buf.b_dirtyend = sz;
		} else {
			nbp->vb_buf.b_dirtyoff =
			    max(0, bp->b_dirtyoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_dirtyend =
			    min(sz,
				max(0, bp->b_dirtyend - (bp->b_bcount-resid)));
		}
		if (bp->b_validend == 0) {
			nbp->vb_buf.b_validoff = 0;
			nbp->vb_buf.b_validend = sz;
		} else {
			nbp->vb_buf.b_validoff =
			    max(0, bp->b_validoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_validend =
			    min(sz,
				max(0, bp->b_validend - (bp->b_bcount-resid)));
		}

		/* save a reference to the old buffer */
		nbp->vb_obp = bp;

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we deal with this after the nbp allocation.
		 * This ensures that we properly clean up any operations
		 * that we have already fired off.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			nbp->vb_buf.b_error = error;
			nbp->vb_buf.b_flags |= B_ERROR;
			bp->b_resid -= (resid - sz);
			biodone(&nbp->vb_buf);
			return;
		}
		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}

/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file.  This places the burden on the client rather than the
 * server.
 */
void
vndstart(vnd)
	register struct vnd_softc *vnd;
{
	register struct buf *bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */
	bp = vnd->sc_tab.b_actf;
	vnd->sc_tab.b_actf = bp->b_actf;
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
		    bp->b_data, bp->b_bcount);
#endif

	/* Instrumentation. */
	disk_busy(&vnd->sc_dkdev);

	if ((bp->b_flags & B_READ) == 0)
		bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);
}

void
vndiodone(bp)
	struct buf *bp;
{
	register struct vndbuf *vbp = (struct vndbuf *) bp;
	register struct buf *pbp = vbp->vb_obp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		    vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		    vbp->vb_buf.b_bcount);
#endif

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			    vbp->vb_buf.b_error);
#endif
		pbp->b_flags |= B_ERROR;
		pbp->b_error = biowait(&vbp->vb_buf);
	}
	pbp->b_resid -= vbp->vb_buf.b_bcount;
	putvndbuf(vbp);
	disk_unbusy(&vnd->sc_dkdev, (pbp->b_bcount - pbp->b_resid));
	if (pbp->b_resid == 0) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %p iodone\n", pbp);
#endif
		biodone(pbp);
	}
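	/*
	 * Kick off the next queued chunk, or give back the active slot
	 * that vndstrategy() claimed when it started this one.
	 */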
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}

/* ARGSUSED */
int
vndread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndread(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
}

/* ARGSUSED */
int
vndwrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndwrite(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
}

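/*
 * Example (sketch only): configuring a unit from a userland program.
 * The field names follow the vnd_ioctl structure used below; the raw
 * device path and error handling are illustrative, not prescriptive.
 *
 *	struct vnd_ioctl vio;
 *	int fd = open("/dev/rvnd0c", O_RDWR);
 *
 *	memset(&vio, 0, sizeof(vio));
 *	vio.vnd_file = "/var/tmp/diskimage";
 *	if (ioctl(fd, VNDIOCSET, &vio) == 0)
 *		printf("configured, %ld bytes\n", (long)vio.vnd_size);
 */
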
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
		    dev, cmd, data, flag, p, unit);
#endif
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			vndunlock(vnd);
			return (error);
		}
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return (error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return (error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size %lx\n",
			    vnd->sc_vp, (unsigned long) vnd->sc_size);
#endif

		/* Attach the disk. */
		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Detach the disk. */
		disk_detach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return (ENOTTY);
	}

	return (0);
}

/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
int
vndsetcred(vnd, cred)
	register struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	VOP_LOCK(vnd->sc_vp);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Set maxactive based on FS type.
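 * NFS-backed files get a small queue depth so we avoid flooding the
 * server (see the comment above vndstart()); local filesystems get more.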
 */
void
vndthrottle(vnd, vp)
	register struct vnd_softc *vnd;
	struct vnode *vp;
{
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));

	if (vp->v_op == nfsv2_vnodeop_p)
		vnd->sc_maxactive = 2;
	else
#endif
		vnd->sc_maxactive = 8;

	if (vnd->sc_maxactive < 1)
		vnd->sc_maxactive = 1;
}

void
vndshutdown()
{
	register struct vnd_softc *vnd;

	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
		if (vnd->sc_flags & VNF_INITED)
			vndclear(vnd);
}

void
vndclear(vnd)
	register struct vnd_softc *vnd;
{
	register struct vnode *vp = vnd->sc_vp;
	struct proc *p = curproc;		/* XXX */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclear(%p): vp %p\n", vnd, vp);
#endif
	vnd->sc_flags &= ~VNF_INITED;
	if (vp == (struct vnode *)0)
		panic("vndioctl: null vp");
	(void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
	crfree(vnd->sc_cred);
	vnd->sc_vp = (struct vnode *)0;
	vnd->sc_cred = (struct ucred *)0;
	vnd->sc_size = 0;
}

int
vndsize(dev)
	dev_t dev;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];

	if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
		return (-1);
	return (vnd->sc_size);
}

int
vnddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{

	/* Not implemented. */
	return ENXIO;
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
vndlock(sc)
	struct vnd_softc *sc;
{
	int error;

	while ((sc->sc_flags & VNF_LOCKED) != 0) {
		sc->sc_flags |= VNF_WANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= VNF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
vndunlock(sc)
	struct vnd_softc *sc;
{

	sc->sc_flags &= ~VNF_LOCKED;
	if ((sc->sc_flags & VNF_WANTED) != 0) {
		sc->sc_flags &= ~VNF_WANTED;
		wakeup(sc);
	}
}