/*	$NetBSD: vnd.c,v 1.33 1997/05/19 14:41:54 pk Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vn.c 1.13 94/04/02$
 *
 *	@(#)vn.c	8.6 (Berkeley) 4/1/94
 */

/*
 * Vnode disk driver.
 *
 * Block/character interface to a vnode.  Allows one to treat a file
 * as a disk (e.g. build a filesystem in it, mount it, etc.).
 *
 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
 * instead of a simple VOP_RDWR.  We do this to avoid distorting the
 * local buffer cache.
 *
 * NOTE 2: There is a security issue involved with this driver.
 * Once mounted all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file, the protection of the mapped file is ignored (effectively,
 * by using root credentials in all transactions).
 *
 * NOTE 3: Doesn't interact with leases, should it?
 */
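
/*
 * Typical use (illustrative only): on NetBSD the vnconfig(8) utility
 * issues the VNDIOCSET/VNDIOCCLR ioctls handled below, e.g.
 *
 *	vnconfig vnd0 /var/tmp/diskimage	# associate the file with vnd0
 *	...build a filesystem on vnd0, mount it, etc...
 *	vnconfig -u vnd0			# unconfigure it again
 */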

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/disklabel.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/uio.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <dev/vndioctl.h>

#ifdef DEBUG
int dovndcluster = 1;
int vnddebug = 0x00;
#define VDB_FOLLOW	0x01
#define VDB_INIT	0x02
#define VDB_IO		0x04
#endif

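/*
 * The component buffers built in vndstrategy() are sorted by block number;
 * their b_resid field is reused as the "cylinder" sort key that disksort()
 * expects.
 */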
#define	b_cylin	b_resid

#define	vndunit(x)	DISKUNIT(x)

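/*
 * One vndbuf is allocated (M_WAITOK) for each component transfer built by
 * vndstrategy(); vb_obp points back at the originating buffer so that
 * vndiodone() can account the completed piece against it before the
 * vndbuf is freed again.
 */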
struct vndbuf {
	struct buf	vb_buf;
	struct buf	*vb_obp;
};

#define	getvndbuf()	\
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define putvndbuf(vbp)	\
	free((caddr_t)(vbp), M_DEVBUF)

struct vnd_softc {
	int		 sc_flags;	/* flags */
	size_t		 sc_size;	/* size of vnd */
	struct vnode	*sc_vp;		/* vnode */
	struct ucred	*sc_cred;	/* credentials */
	int		 sc_maxactive;	/* max # of active requests */
	struct buf	 sc_tab;	/* transfer queue */
	char		 sc_xname[8];	/* XXX external name */
	struct disk	 sc_dkdev;	/* generic disk device info */
};

/* sc_flags */
#define	VNF_ALIVE	0x01
#define	VNF_INITED	0x02
#define	VNF_WANTED	0x40
#define	VNF_LOCKED	0x80

struct vnd_softc *vnd_softc;
int numvnd = 0;

/* called by main() at boot time */
void	vndattach __P((int));

void	vndclear __P((struct vnd_softc *));
void	vndstart __P((struct vnd_softc *));
int	vndsetcred __P((struct vnd_softc *, struct ucred *));
void	vndthrottle __P((struct vnd_softc *, struct vnode *));
void	vndiodone __P((struct buf *));
void	vndshutdown __P((void));

static	int vndlock __P((struct vnd_softc *));
static	void vndunlock __P((struct vnd_softc *));

void
vndattach(num)
	int num;
{
	char *mem;
	register u_long size;

	if (num <= 0)
		return;
	size = num * sizeof(struct vnd_softc);
	mem = malloc(size, M_DEVBUF, M_NOWAIT);
	if (mem == NULL) {
		printf("WARNING: no memory for vnode disks\n");
		return;
	}
	bzero(mem, size);
	vnd_softc = (struct vnd_softc *)mem;
	numvnd = num;
}
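
/*
 * The unit count passed to vndattach() comes from the kernel configuration;
 * on NetBSD that is the count on the "pseudo-device" line, e.g. (illustrative):
 *
 *	pseudo-device	vnd	4	# vnode disk driver, 4 units
 */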

int
vndopen(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;

	/*
	 * XXX Should support disklabels.
	 */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif
	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);
	pmask = (1 << part);

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

int
vndclose(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
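/*
 * For example, a 64 KB transfer against a file on a filesystem with an
 * 8 KB block size is broken into at most eight component buffers; when
 * VOP_BMAP reports contiguous blocks (nra > 0) several filesystem blocks
 * are covered by a single component, and an initial piece that does not
 * start on a block boundary is trimmed to the remainder of its block.
 */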
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];
	register struct vndbuf *nbp;
	register int bn, bsize, resid;
	register caddr_t addr;
	int sz, flags, error;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	if (bn < 0 || bn + sz > vnd->sc_size) {
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	bn = dbtob(bn);
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		if ((off = bn % bsize) != 0)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
			    vnd->sc_vp, vp, bn, nbn, sz);
#endif

		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
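		/*
		 * (bp->b_bcount - resid) is this component's byte offset
		 * within the original transfer; the parent's dirty and
		 * valid windows are clipped to the range [0, sz] relative
		 * to that offset.
		 */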
		if (bp->b_dirtyend == 0) {
			nbp->vb_buf.b_dirtyoff = 0;
			nbp->vb_buf.b_dirtyend = sz;
		} else {
			nbp->vb_buf.b_dirtyoff =
			    max(0, bp->b_dirtyoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_dirtyend =
			    min(sz, bp->b_dirtyend - (bp->b_bcount - resid));
		}
		if (bp->b_validend == 0) {
			nbp->vb_buf.b_validoff = 0;
			nbp->vb_buf.b_validend = sz;
		} else {
			nbp->vb_buf.b_validoff =
			    max(0, bp->b_validoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_validend =
			    min(sz, bp->b_validend - (bp->b_bcount - resid));
		}

		/* save a reference to the old buffer */
		nbp->vb_obp = bp;

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we deal with this after the nbp allocation.
		 * This ensures that we properly clean up any operations
		 * that we have already fired off.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			nbp->vb_buf.b_error = error;
			nbp->vb_buf.b_flags |= B_ERROR;
			bp->b_resid -= (resid - sz);
			biodone(&nbp->vb_buf);
			return;
		}
		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}

/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file.  This places the burden on the client rather than the
 * server.
 */
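/*
 * sc_tab.b_active counts component buffers currently handed to the
 * underlying strategy routine; vndstrategy() starts new ones only while
 * it is below sc_maxactive (2 for NFS, 8 otherwise, see vndthrottle()),
 * and vndiodone() feeds the next queued buffer as each one completes.
 */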
void
vndstart(vnd)
	register struct vnd_softc *vnd;
{
	register struct buf *bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */
	bp = vnd->sc_tab.b_actf;
	vnd->sc_tab.b_actf = bp->b_actf;
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
		    bp->b_data, bp->b_bcount);
#endif

	/* Instrumentation. */
	disk_busy(&vnd->sc_dkdev);

	if ((bp->b_flags & B_READ) == 0)
		bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);
}

void
vndiodone(bp)
	struct buf *bp;
{
	register struct vndbuf *vbp = (struct vndbuf *) bp;
	register struct buf *pbp = vbp->vb_obp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		    vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		    vbp->vb_buf.b_bcount);
#endif

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			    vbp->vb_buf.b_error);
#endif
		pbp->b_flags |= B_ERROR;
		pbp->b_error = biowait(&vbp->vb_buf);
	}
	pbp->b_resid -= vbp->vb_buf.b_bcount;
	putvndbuf(vbp);
	disk_unbusy(&vnd->sc_dkdev, (pbp->b_bcount - pbp->b_resid));
	if (pbp->b_resid == 0) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %p iodone\n", pbp);
#endif
		biodone(pbp);
	}
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}

/* ARGSUSED */
int
vndread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndread(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
}

/* ARGSUSED */
int
vndwrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndwrite(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
}

/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
		    dev, cmd, data, flag, p, unit);
#endif
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			vndunlock(vnd);
			return(error);
		}
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size %lx\n",
			    vnd->sc_vp, (unsigned long) vnd->sc_size);
#endif

		/* Attach the disk. */
		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Detach the disk. */
		disk_detach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return(ENOTTY);
	}

	return (0);
}
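
/*
 * For illustration only (a hedged sketch, not part of the driver): a
 * userland configuration request, roughly what vnconfig(8) performs,
 * could look like the following.  Only the vnd_file and vnd_size members
 * of struct vnd_ioctl are relied on here; the device path is an example.
 *
 *	struct vnd_ioctl vio;
 *	int fd;
 *
 *	memset(&vio, 0, sizeof(vio));
 *	vio.vnd_file = "/var/tmp/diskimage";
 *	fd = open("/dev/rvnd0c", O_RDWR, 0);
 *	if (fd == -1 || ioctl(fd, VNDIOCSET, &vio) == -1)
 *		err(1, "VNDIOCSET");
 *	printf("vnd0: %lu bytes\n", (unsigned long)vio.vnd_size);
 */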

/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
int
vndsetcred(vnd, cred)
	register struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	VOP_LOCK(vnd->sc_vp);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Set maxactive based on FS type
 */
void
vndthrottle(vnd, vp)
	register struct vnd_softc *vnd;
	struct vnode *vp;
{
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));

	if (vp->v_op == nfsv2_vnodeop_p)
		vnd->sc_maxactive = 2;
	else
#endif
		vnd->sc_maxactive = 8;

	if (vnd->sc_maxactive < 1)
		vnd->sc_maxactive = 1;
}

void
vndshutdown()
{
	register struct vnd_softc *vnd;

	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
		if (vnd->sc_flags & VNF_INITED)
			vndclear(vnd);
}

void
vndclear(vnd)
	register struct vnd_softc *vnd;
{
	register struct vnode *vp = vnd->sc_vp;
	struct proc *p = curproc;		/* XXX */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclear(%p): vp %p\n", vnd, vp);
#endif
	vnd->sc_flags &= ~VNF_INITED;
	if (vp == (struct vnode *)0)
		panic("vndioctl: null vp");
	(void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
	crfree(vnd->sc_cred);
	vnd->sc_vp = (struct vnode *)0;
	vnd->sc_cred = (struct ucred *)0;
	vnd->sc_size = 0;
}

int
vndsize(dev)
	dev_t dev;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];

	if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
		return(-1);
	return(vnd->sc_size);
}

int
vnddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{

	/* Not implemented. */
	return ENXIO;
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
vndlock(sc)
	struct vnd_softc *sc;
{
	int error;

	while ((sc->sc_flags & VNF_LOCKED) != 0) {
		sc->sc_flags |= VNF_WANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= VNF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
vndunlock(sc)
	struct vnd_softc *sc;
{

	sc->sc_flags &= ~VNF_LOCKED;
	if ((sc->sc_flags & VNF_WANTED) != 0) {
		sc->sc_flags &= ~VNF_WANTED;
		wakeup(sc);
	}
}