vnd.c revision 1.22 1 /* $NetBSD: vnd.c,v 1.22 1995/11/06 20:28:09 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: vn.c 1.13 94/04/02$
41 *
42 * @(#)vn.c 8.6 (Berkeley) 4/1/94
43 */
44
45 /*
46 * Vnode disk driver.
47 *
48 * Block/character interface to a vnode. Allows one to treat a file
49 * as a disk (e.g. build a filesystem in it, mount it, etc.).
50 *
51 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
52 * instead of a simple VOP_RDWR. We do this to avoid distorting the
53 * local buffer cache.
54 *
55 * NOTE 2: There is a security issue involved with this driver.
56 * Once mounted all access to the contents of the "mapped" file via
57 * the special file is controlled by the permissions on the special
58 * file, the protection of the mapped file is ignored (effectively,
59 * by using root credentials in all transactions).
60 *
61 * NOTE 3: Doesn't interact with leases, should it?
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/namei.h>
67 #include <sys/proc.h>
68 #include <sys/errno.h>
69 #include <sys/dkstat.h>
70 #include <sys/buf.h>
71 #include <sys/malloc.h>
72 #include <sys/ioctl.h>
73 #include <sys/disklabel.h>
74 #include <sys/device.h>
75 #include <sys/disk.h>
76 #include <sys/stat.h>
77 #include <sys/conf.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/file.h>
81 #include <sys/uio.h>
82
83 #include <miscfs/specfs/specdev.h>
84
85 #include <dev/vndioctl.h>
86
87 #ifdef DEBUG
88 int dovndcluster = 1;
89 int vnddebug = 0x00;
90 #define VDB_FOLLOW 0x01
91 #define VDB_INIT 0x02
92 #define VDB_IO 0x04
93 #endif
94
/*
 * The transfer queue is kept sorted by block number.  struct buf has no
 * dedicated sort field here, so overlay the disksort() key on b_resid
 * (vndstrategy stores b_blkno there before queueing).
 */
#define b_cylin b_resid

#define vndunit(x) DISKUNIT(x)	/* unit number from a dev_t */

/*
 * One piece of a split transfer: the buf handed down to the underlying
 * vnode's strategy routine, plus a back pointer to the originating buf
 * so completion can be accounted against it (see vndiodone).
 */
struct vndbuf {
	struct buf vb_buf;	/* component I/O request */
	struct buf *vb_obp;	/* original request this piece belongs to */
};

/* Allocate/free a vndbuf; M_WAITOK means getvndbuf() cannot fail. */
#define getvndbuf() \
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define putvndbuf(vbp) \
	free((caddr_t)(vbp), M_DEVBUF)

/* Per-unit state for one vnode disk. */
struct vnd_softc {
	int sc_flags;			/* flags */
	size_t sc_size;			/* size of vnd */
	struct vnode *sc_vp;		/* vnode */
	struct ucred *sc_cred;		/* credentials */
	int sc_maxactive;		/* max # of active requests */
	struct buf sc_tab;		/* transfer queue */
	struct dkdevice sc_dkdev;	/* generic disk device info */
};

/* sc_flags */
#define VNF_ALIVE	0x01	/* unit exists */
#define VNF_INITED	0x02	/* unit is configured (VNDIOCSET done) */
#define VNF_WANTED	0x40	/* someone sleeps on the unit lock */
#define VNF_LOCKED	0x80	/* exclusive unit lock held (vndlock) */

struct vnd_softc *vnd_softc;	/* array of numvnd units; set by vndattach */
int numvnd = 0;
127
128 /* {b,c}devsw[] function prototypes */
129 dev_type_open(vndopen);
130 dev_type_close(vndclose);
131 dev_type_strategy(vndstrategy);
132 dev_type_ioctl(vndioctl);
133 dev_type_read(vndread);
134 dev_type_write(vndwrite);
135
136 /* called by main() at boot time */
137 void vndattach __P((int));
138
139 void vndclear __P((struct vnd_softc *));
140 void vndstart __P((struct vnd_softc *));
141 int vndsetcred __P((struct vnd_softc *, struct ucred *));
142 void vndthrottle __P((struct vnd_softc *, struct vnode *));
143
144 static int vndlock __P((struct vnd_softc *));
145 static void vndunlock __P((struct vnd_softc *));
146
147 void
148 vndattach(num)
149 int num;
150 {
151 char *mem;
152 register u_long size;
153
154 if (num <= 0)
155 return;
156 size = num * sizeof(struct vnd_softc);
157 mem = malloc(size, M_DEVBUF, M_NOWAIT);
158 if (mem == NULL) {
159 printf("WARNING: no memory for vnode disks\n");
160 return;
161 }
162 bzero(mem, size);
163 vnd_softc = (struct vnd_softc *)mem;
164 numvnd = num;
165 }
166
/*
 * Open a vnd device.  Records the partition in the per-unit open masks
 * so VNDIOCCLR can refuse to unconfigure a busy unit.  Opening does not
 * require the unit to be configured.
 */
int
vndopen(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;

	/*
	 * XXX Should support disklabels.
	 */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(%x, %x, %x, %x)\n", dev, flags, mode, p);
#endif
	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	/* Exclusive unit lock; fails only if the sleep is interrupted. */
	if (error = vndlock(sc))
		return (error);

	part = DISKPART(dev);
	pmask = (1 << part);

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	/* Combined mask is what VNDIOCCLR checks. */
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}
211
/*
 * Close a vnd device: clear this partition's bit from the appropriate
 * open mask, bringing the unit closer to being unconfigurable.
 * Mirror image of vndopen.
 */
int
vndclose(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclose(%x, %x, %x, %x)\n", dev, flags, mode, p);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	/* Exclusive unit lock; fails only if the sleep is interrupted. */
	if (error = vndlock(sc))
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}
252
/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];
	register struct vndbuf *nbp;
	register int bn, bsize, resid;
	register caddr_t addr;
	int sz, flags, error;
	extern void vndiodone();

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%x): unit %d\n", bp, unit);
#endif
	/* Reject I/O to an unconfigured unit. */
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	/* Bounds-check the transfer against the backing file's size. */
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	if (bn < 0 || bn + sz > vnd->sc_size) {
		/* A read starting exactly at EOF is not an error. */
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	bn = dbtob(bn);		/* bn is now a byte offset into the file */
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;	/* B_CALL: run vndiodone at biodone */
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		/*
		 * Map this file offset to a block on the underlying device
		 * vnode; nra reports how many blocks follow contiguously.
		 */
		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);
		/* nbn == -1 marks a hole in the file; treated as EIO below. */
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		/*
		 * Size this piece: to the end of the current filesystem
		 * block if we start mid-block, else through the contiguous
		 * run; never more than what remains of the request.
		 */
		if (off = bn % bsize)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %x/%x bn %x/%x sz %x\n",
			    vnd->sc_vp, vp, bn, nbn, sz);
#endif

		/* Build the component buf describing this piece. */
		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_dirtyoff = bp->b_dirtyoff;
		nbp->vb_buf.b_dirtyend = bp->b_dirtyend;
		nbp->vb_buf.b_validoff = bp->b_validoff;
		nbp->vb_buf.b_validend = bp->b_validend;

		/* save a reference to the old buffer */
		nbp->vb_obp = bp;

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we deal with this after the nbp allocation.
		 * This ensures that we properly clean up any operations
		 * that we have already fired off.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			/*
			 * Pre-deduct the untransferred remainder so the
			 * vndiodone for this (failed) piece sees b_resid
			 * reach zero once earlier pieces complete.
			 */
			nbp->vb_buf.b_error = error;
			nbp->vb_buf.b_flags |= B_ERROR;
			bp->b_resid -= (resid - sz);
			biodone(&nbp->vb_buf);
			return;
		}
		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		/* Start another transfer if below the per-unit limit. */
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}
378
/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file. This places the burden on the client rather than the
 * server.
 *
 * NOTE(review): callers (vndstrategy, vndiodone) invoke this at splbio
 * with sc_tab.b_actf known non-empty and b_active already accounted.
 */
void
vndstart(vnd)
	register struct vnd_softc *vnd;
{
	register struct buf *bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */
	bp = vnd->sc_tab.b_actf;
	vnd->sc_tab.b_actf = bp->b_actf;
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndstart(%d): bp %x vp %x blkno %x addr %x cnt %x\n",
		    vnd-vnd_softc, bp, bp->b_vp, bp->b_blkno, bp->b_data,
		    bp->b_bcount);
#endif
	/* Writes must be counted in the target vnode's output tally. */
	if ((bp->b_flags & B_READ) == 0)
		bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);
}
407
/*
 * Completion routine for one component transfer (set as b_iodone via
 * B_CALL in vndstrategy).  Propagates errors and residual accounting
 * to the original buf, finishing it when all pieces are done, and keeps
 * the transfer queue moving.
 */
void
vndiodone(vbp)
	register struct vndbuf *vbp;
{
	register struct buf *pbp = vbp->vb_obp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%d): vbp %x vp %x blkno %x addr %x cnt %x\n",
		    vnd-vnd_softc, vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno,
		    vbp->vb_buf.b_data, vbp->vb_buf.b_bcount);
#endif
	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %x error %d\n", vbp,
			    vbp->vb_buf.b_error);
#endif
		pbp->b_flags |= B_ERROR;
		/*
		 * NOTE(review): biowait() is used here only to extract the
		 * errno from the already-completed component buf.
		 */
		pbp->b_error = biowait(&vbp->vb_buf);
	}
	/* Account this piece against the original request and free it. */
	pbp->b_resid -= vbp->vb_buf.b_bcount;
	putvndbuf(vbp);
	if (pbp->b_resid == 0) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %x iodone\n", pbp);
#endif
		biodone(pbp);
	}
	/* Start the next queued piece, or give back the active slot. */
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}
447
448 /* ARGSUSED */
449 int
450 vndread(dev, uio, flags)
451 dev_t dev;
452 struct uio *uio;
453 int flags;
454 {
455 int unit = vndunit(dev);
456 struct vnd_softc *sc;
457
458 #ifdef DEBUG
459 if (vnddebug & VDB_FOLLOW)
460 printf("vndread(%x, %x)\n", dev, uio);
461 #endif
462
463 if (unit >= numvnd)
464 return (ENXIO);
465 sc = &vnd_softc[unit];
466
467 if ((sc->sc_flags & VNF_INITED) == 0)
468 return (ENXIO);
469
470 return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
471 }
472
473 /* ARGSUSED */
474 int
475 vndwrite(dev, uio, flags)
476 dev_t dev;
477 struct uio *uio;
478 int flags;
479 {
480 int unit = vndunit(dev);
481 struct vnd_softc *sc;
482
483 #ifdef DEBUG
484 if (vnddebug & VDB_FOLLOW)
485 printf("vndwrite(%x, %x)\n", dev, uio);
486 #endif
487
488 if (unit >= numvnd)
489 return (ENXIO);
490 sc = &vnd_softc[unit];
491
492 if ((sc->sc_flags & VNF_INITED) == 0)
493 return (ENXIO);
494
495 return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
496 }
497
/*
 * Configure (VNDIOCSET) or unconfigure (VNDIOCCLR) a vnode disk.
 * Both operations are root-only and serialized by the per-unit lock.
 */
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask, s;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %x, %x, %x): unit %d\n",
		    dev, cmd, data, flag, p, unit);
#endif
	/* All vnd ioctls are privileged. */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if (error = vndlock(vnd))
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			vndunlock(vnd);
			return(error);
		}
		/* Need the file's size to compute the disk's size. */
		if (error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p)) {
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		/* Stash root credentials for all future file access. */
		if (error = vndsetcred(vnd, p->p_ucred)) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);	/* report back in bytes */
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %x size %x\n",
			    vnd->sc_vp, vnd->sc_size);
#endif

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if (error = vndlock(vnd))
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/*
		 * This must be atomic: the unlock hands off the unit and
		 * the bzero resets all state (including the lock flags).
		 */
		s = splhigh();
		vndunlock(vnd);
		bzero(vnd, sizeof(struct vnd_softc));
		splx(s);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return(ENOTTY);
	}

	return (0);
}
619
/*
 * Duplicate the current processes' credentials. Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root. Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 *
 * Returns 0 on success or the error from the probe read below.
 */
int
vndsetcred(vnd, cred)
	register struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	/* Read (up to) the first block under the saved credential. */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	VOP_LOCK(vnd->sc_vp);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return (error);
}
655
656 /*
657 * Set maxactive based on FS type
658 */
659 void
660 vndthrottle(vnd, vp)
661 register struct vnd_softc *vnd;
662 struct vnode *vp;
663 {
664 #ifdef NFSCLIENT
665 extern int (**nfsv2_vnodeop_p)();
666
667 if (vp->v_op == nfsv2_vnodeop_p)
668 vnd->sc_maxactive = 2;
669 else
670 #endif
671 vnd->sc_maxactive = 8;
672
673 if (vnd->sc_maxactive < 1)
674 vnd->sc_maxactive = 1;
675 }
676
677 void
678 vndshutdown()
679 {
680 register struct vnd_softc *vnd;
681
682 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
683 if (vnd->sc_flags & VNF_INITED)
684 vndclear(vnd);
685 }
686
687 void
688 vndclear(vnd)
689 register struct vnd_softc *vnd;
690 {
691 register struct vnode *vp = vnd->sc_vp;
692 struct proc *p = curproc; /* XXX */
693
694 #ifdef DEBUG
695 if (vnddebug & VDB_FOLLOW)
696 printf("vndclear(%x): vp %x\n", vp);
697 #endif
698 vnd->sc_flags &= ~VNF_INITED;
699 if (vp == (struct vnode *)0)
700 panic("vndioctl: null vp");
701 (void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
702 crfree(vnd->sc_cred);
703 vnd->sc_vp = (struct vnode *)0;
704 vnd->sc_cred = (struct ucred *)0;
705 vnd->sc_size = 0;
706 }
707
708 int
709 vndsize(dev)
710 dev_t dev;
711 {
712 int unit = vndunit(dev);
713 register struct vnd_softc *vnd = &vnd_softc[unit];
714
715 if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
716 return(-1);
717 return(vnd->sc_size);
718 }
719
720 int
721 vnddump(dev, blkno, va, size)
722 dev_t dev;
723 daddr_t blkno;
724 caddr_t va;
725 size_t size;
726 {
727
728 /* Not implemented. */
729 return ENXIO;
730 }
731
732 /*
733 * Wait interruptibly for an exclusive lock.
734 *
735 * XXX
736 * Several drivers do this; it should be abstracted and made MP-safe.
737 */
738 static int
739 vndlock(sc)
740 struct vnd_softc *sc;
741 {
742 int error;
743
744 while ((sc->sc_flags & VNF_LOCKED) != 0) {
745 sc->sc_flags |= VNF_WANTED;
746 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
747 return (error);
748 }
749 sc->sc_flags |= VNF_LOCKED;
750 return (0);
751 }
752
753 /*
754 * Unlock and wake up any waiters.
755 */
756 static void
757 vndunlock(sc)
758 struct vnd_softc *sc;
759 {
760
761 sc->sc_flags &= ~VNF_LOCKED;
762 if ((sc->sc_flags & VNF_WANTED) != 0) {
763 sc->sc_flags &= ~VNF_WANTED;
764 wakeup(sc);
765 }
766 }
767