vnd.c revision 1.39 1 /* $NetBSD: vnd.c,v 1.39 1997/06/08 15:55:34 pk Exp $ */
2
3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: vn.c 1.13 94/04/02$
41 *
42 * @(#)vn.c 8.6 (Berkeley) 4/1/94
43 */
44
45 /*
46 * Vnode disk driver.
47 *
48 * Block/character interface to a vnode. Allows one to treat a file
49 * as a disk (e.g. build a filesystem in it, mount it, etc.).
50 *
51 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
52 * instead of a simple VOP_RDWR. We do this to avoid distorting the
53 * local buffer cache.
54 *
55 * NOTE 2: There is a security issue involved with this driver.
56 * Once mounted all access to the contents of the "mapped" file via
57 * the special file is controlled by the permissions on the special
58 * file, the protection of the mapped file is ignored (effectively,
59 * by using root credentials in all transactions).
60 *
61 * NOTE 3: Doesn't interact with leases, should it?
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/namei.h>
67 #include <sys/proc.h>
68 #include <sys/errno.h>
69 #include <sys/buf.h>
70 #include <sys/malloc.h>
71 #include <sys/ioctl.h>
72 #include <sys/disklabel.h>
73 #include <sys/device.h>
74 #include <sys/disk.h>
75 #include <sys/stat.h>
76 #include <sys/mount.h>
77 #include <sys/vnode.h>
78 #include <sys/file.h>
79 #include <sys/uio.h>
80 #include <sys/conf.h>
81
82 #include <miscfs/specfs/specdev.h>
83
84 #include <dev/vndioctl.h>
85
#ifdef DEBUG
int dovndcluster = 1;		/* allow clustered transfers (see vndstrategy) */
int vnddebug = 0x00;		/* bitmask of VDB_* debug classes below */
#define VDB_FOLLOW	0x01	/* trace entry to each driver routine */
#define VDB_INIT	0x02	/* trace configure/unconfigure */
#define VDB_IO		0x04	/* trace individual transfers */
#endif

/*
 * The per-unit transfer queue head (sc_tab) and its component buffers
 * are plain struct bufs; alias b_cylin to b_resid so disksort() has a
 * sort-key field to use.
 */
#define b_cylin	b_resid

/* Map a vnd device number to its unit number. */
#define vndunit(x)	DISKUNIT(x)
97
/*
 * One vndxfer describes an entire transfer handed to vndstrategy().
 * It is freed by whichever side completes the request: vndstrategy()
 * on an early error with nothing outstanding, otherwise vndiodone()
 * when the last component buffer returns.
 */
struct vndxfer {
	struct buf *vx_bp;	/* Pointer to parent buffer */
	int vx_error;		/* error reported by a component I/O, if any */
	int vx_pending;		/* # of pending aux buffers */
};

/*
 * One component buffer of a transfer.  vb_buf MUST remain the first
 * member: vndiodone() casts the struct buf pointer it receives back
 * to a struct vndbuf.
 */
struct vndbuf {
	struct buf vb_buf;		/* the component I/O itself */
	struct vndxfer *vb_xfer;	/* back-pointer to owning transfer */
};

/*
 * Headers and component buffers come from M_DEVBUF with M_WAITOK,
 * so the get* macros may sleep but never return NULL.
 */
#define getvndxfer() \
	((struct vndxfer *)malloc(sizeof(struct vndxfer), M_DEVBUF, M_WAITOK))
#define putvndxfer(vnx) \
	free((caddr_t)(vnx), M_DEVBUF)
#define getvndbuf() \
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define putvndbuf(vbp) \
	free((caddr_t)(vbp), M_DEVBUF)
117
/* Per-unit driver state; one array entry per vnd unit. */
struct vnd_softc {
	int sc_flags;			/* flags (VNF_* below) */
	size_t sc_size;			/* size of vnd, in DEV_BSIZE blocks */
	struct vnode *sc_vp;		/* vnode of the backing file */
	struct ucred *sc_cred;		/* credentials (crdup'd at config time) */
	int sc_maxactive;		/* max # of active requests */
	struct buf sc_tab;		/* transfer queue */
	char sc_xname[8];		/* XXX external name */
	struct disk sc_dkdev;		/* generic disk device info */
};

/* sc_flags */
#define VNF_ALIVE	0x01
#define VNF_INITED	0x02	/* backing file configured (VNDIOCSET done) */
#define VNF_WANTED	0x40	/* someone is sleeping in vndlock() */
#define VNF_LOCKED	0x80	/* unit exclusively locked */

/* Softc array; allocated and sized by vndattach() at boot. */
struct vnd_softc *vnd_softc;
int numvnd = 0;			/* number of units in vnd_softc[] */
137
/* called by main() at boot time */
void vndattach __P((int));

/* Internal helpers. */
void vndclear __P((struct vnd_softc *));
void vndstart __P((struct vnd_softc *));
int vndsetcred __P((struct vnd_softc *, struct ucred *));
void vndthrottle __P((struct vnd_softc *, struct vnode *));
void vndiodone __P((struct buf *));
void vndshutdown __P((void));

/* Exclusive per-unit lock, taken around open/close/configure. */
static int vndlock __P((struct vnd_softc *));
static void vndunlock __P((struct vnd_softc *));
150
151 void
152 vndattach(num)
153 int num;
154 {
155 char *mem;
156 register u_long size;
157
158 if (num <= 0)
159 return;
160 size = num * sizeof(struct vnd_softc);
161 mem = malloc(size, M_DEVBUF, M_NOWAIT);
162 if (mem == NULL) {
163 printf("WARNING: no memory for vnode disks\n");
164 return;
165 }
166 bzero(mem, size);
167 vnd_softc = (struct vnd_softc *)mem;
168 numvnd = num;
169 }
170
171 int
172 vndopen(dev, flags, mode, p)
173 dev_t dev;
174 int flags, mode;
175 struct proc *p;
176 {
177 int unit = vndunit(dev);
178 struct vnd_softc *sc;
179 int error = 0, part, pmask;
180
181 /*
182 * XXX Should support disklabels.
183 */
184
185 #ifdef DEBUG
186 if (vnddebug & VDB_FOLLOW)
187 printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
188 #endif
189 if (unit >= numvnd)
190 return (ENXIO);
191 sc = &vnd_softc[unit];
192
193 if ((error = vndlock(sc)) != 0)
194 return (error);
195
196 part = DISKPART(dev);
197 pmask = (1 << part);
198
199 /* Prevent our unit from being unconfigured while open. */
200 switch (mode) {
201 case S_IFCHR:
202 sc->sc_dkdev.dk_copenmask |= pmask;
203 break;
204
205 case S_IFBLK:
206 sc->sc_dkdev.dk_bopenmask |= pmask;
207 break;
208 }
209 sc->sc_dkdev.dk_openmask =
210 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
211
212 vndunlock(sc);
213 return (0);
214 }
215
216 int
217 vndclose(dev, flags, mode, p)
218 dev_t dev;
219 int flags, mode;
220 struct proc *p;
221 {
222 int unit = vndunit(dev);
223 struct vnd_softc *sc;
224 int error = 0, part;
225
226 #ifdef DEBUG
227 if (vnddebug & VDB_FOLLOW)
228 printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
229 #endif
230
231 if (unit >= numvnd)
232 return (ENXIO);
233 sc = &vnd_softc[unit];
234
235 if ((error = vndlock(sc)) != 0)
236 return (error);
237
238 part = DISKPART(dev);
239
240 /* ...that much closer to allowing unconfiguration... */
241 switch (mode) {
242 case S_IFCHR:
243 sc->sc_dkdev.dk_copenmask &= ~(1 << part);
244 break;
245
246 case S_IFBLK:
247 sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
248 break;
249 }
250 sc->sc_dkdev.dk_openmask =
251 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
252
253 vndunlock(sc);
254 return (0);
255 }
256
/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd = &vnd_softc[unit];
	struct vndbuf *nbp;
	struct vndxfer *vnx;
	int bn, bsize, resid;
	caddr_t addr;
	int sz, flags, error;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	/* Reject I/O on an unconfigured unit. */
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	/*
	 * Bounds check.  A transfer starting exactly at sc_size is the
	 * conventional "read at EOF": complete it without error but
	 * with b_resid left at the full count.  Anything else out of
	 * range is EINVAL.
	 */
	if (bn < 0 || bn + sz > vnd->sc_size) {
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	bn = dbtob(bn);		/* DEV_BSIZE blocks -> byte offset in file */
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;	/* B_CALL: run vndiodone at completion */

	/* Allocate a header for this transfer and link it to the buffer */
	vnx = getvndxfer();
	vnx->vx_error = 0;
	vnx->vx_pending = 0;
	vnx->vx_bp = bp;

	/*
	 * Walk the request in filesystem-block-sized chunks, mapping each
	 * logical block to an underlying block with VOP_BMAP and firing
	 * off one component buffer per chunk.
	 */
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);

#define VND_FILLHOLES
#ifdef VND_FILLHOLES
		/*
		 * Hole in the file (nbn == -1): do the entire remainder
		 * of the request synchronously via vn_rdwr(), which (on
		 * write) allocates backing store.  Control then falls
		 * into the unwind block below -- nbn is still -1 -- which
		 * finishes the request.
		 */
		if (error == 0 && (long)nbn == -1) {
			int rw = (flags & B_READ) ? UIO_READ : UIO_WRITE;
			sz = resid;
			error = vn_rdwr(rw, vnd->sc_vp, addr, sz,
					bn, UIO_SYSSPACE,
					IO_SYNC | IO_NODELOCKED,
					vnd->sc_cred, &resid, bp->b_proc);
			/* Credit what vn_rdwr actually moved. */
			s = splbio();
			bp->b_resid -= (sz - resid);
			splx(s);
		}
#else
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#endif

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we may have to wait for any operations
		 * that we have already fired off before releasing
		 * the buffer.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error || (long)nbn == -1) {
			vnx->vx_error = error;
			s = splbio();
			/*
			 * Nothing outstanding: complete the request here.
			 * Otherwise vndiodone() wraps up once the last
			 * component returns (it sees vx_error).
			 */
			if (vnx->vx_pending == 0) {
				if (error) {
					bp->b_error = error;
					bp->b_flags |= B_ERROR;
				}
				putvndxfer(vnx);
				biodone(bp);
			}
			splx(s);
			return;
		}

#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		/*
		 * Chunk size: up to the next block boundary if we start
		 * mid-block, otherwise this block plus the `nra'
		 * contiguous blocks BMAP reported; clipped to what is
		 * left of the request.
		 */
		if ((off = bn % bsize) != 0)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
			       vnd->sc_vp, vp, bn, nbn, sz);
#endif

		/* Build the component buffer for this chunk. */
		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
		/*
		 * Project the parent's dirty/valid windows onto this
		 * chunk's offset range, clipping to [0, sz].
		 */
		if (bp->b_dirtyend == 0) {
			nbp->vb_buf.b_dirtyoff = 0;
			nbp->vb_buf.b_dirtyend = sz;
		} else {
			nbp->vb_buf.b_dirtyoff =
			    max(0, bp->b_dirtyoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_dirtyend =
			    min(sz,
				max(0, bp->b_dirtyend - (bp->b_bcount-resid)));
		}
		if (bp->b_validend == 0) {
			nbp->vb_buf.b_validoff = 0;
			nbp->vb_buf.b_validend = sz;
		} else {
			nbp->vb_buf.b_validoff =
			    max(0, bp->b_validoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_validend =
			    min(sz,
				max(0, bp->b_validend - (bp->b_bcount-resid)));
		}

		nbp->vb_xfer = vnx;

		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		vnx->vx_pending++;
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		/* Kick the queue if we are under the concurrency limit. */
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}
430
/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file. This places the burden on the client rather than the
 * server.
 *
 * Called at splbio() (from vndstrategy/vndiodone) with at least one
 * buffer on the queue: hands the head of sc_tab to the underlying
 * vnode's strategy routine.
 */
void
vndstart(vnd)
	register struct vnd_softc *vnd;
{
	register struct buf *bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */
	bp = vnd->sc_tab.b_actf;
	vnd->sc_tab.b_actf = bp->b_actf;
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
		       (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
		       bp->b_data, bp->b_bcount);
#endif

	/* Instrumentation. */
	disk_busy(&vnd->sc_dkdev);

	/* Writes are counted against the vnode's output tally. */
	if ((bp->b_flags & B_READ) == 0)
		bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);
}
463
/*
 * Completion routine for a component buffer (installed as b_iodone
 * via B_CALL).  Accounts the component against its parent transfer,
 * completes the parent when the request has run to completion (or
 * failed and fully drained), and keeps the unit's queue moving.
 */
void
vndiodone(bp)
	struct buf *bp;
{
	/* vb_buf is the first member of vndbuf, so this cast is valid. */
	register struct vndbuf *vbp = (struct vndbuf *) bp;
	register struct vndxfer *vnx = (struct vndxfer *)vbp->vb_xfer;
	register struct buf *pbp = vnx->vx_bp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s, resid;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
		       (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		       vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		       vbp->vb_buf.b_bcount);
#endif

	/* Credit the bytes actually transferred to the parent buffer. */
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	disk_unbusy(&vnd->sc_dkdev, resid);
	vnx->vx_pending--;

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			       vbp->vb_buf.b_error);
#endif
		/* Record the component error for the parent to report. */
		vnx->vx_error = vbp->vb_buf.b_error;
	}
	putvndbuf(vbp);

	/*
	 * Wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (pbp->b_resid == 0 || (vnx->vx_error && vnx->vx_pending == 0)) {

		if (vnx->vx_error != 0) {
			pbp->b_flags |= B_ERROR;
			pbp->b_error = vnx->vx_error;
		}
		putvndxfer(vnx);
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %p iodone\n", pbp);
#endif
		biodone(pbp);
	}

	/* Feed the next queued component, or retire this active slot. */
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}
522
523 /* ARGSUSED */
524 int
525 vndread(dev, uio, flags)
526 dev_t dev;
527 struct uio *uio;
528 int flags;
529 {
530 int unit = vndunit(dev);
531 struct vnd_softc *sc;
532
533 #ifdef DEBUG
534 if (vnddebug & VDB_FOLLOW)
535 printf("vndread(%x, %p)\n", dev, uio);
536 #endif
537
538 if (unit >= numvnd)
539 return (ENXIO);
540 sc = &vnd_softc[unit];
541
542 if ((sc->sc_flags & VNF_INITED) == 0)
543 return (ENXIO);
544
545 return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
546 }
547
548 /* ARGSUSED */
549 int
550 vndwrite(dev, uio, flags)
551 dev_t dev;
552 struct uio *uio;
553 int flags;
554 {
555 int unit = vndunit(dev);
556 struct vnd_softc *sc;
557
558 #ifdef DEBUG
559 if (vnddebug & VDB_FOLLOW)
560 printf("vndwrite(%x, %p)\n", dev, uio);
561 #endif
562
563 if (unit >= numvnd)
564 return (ENXIO);
565 sc = &vnd_softc[unit];
566
567 if ((sc->sc_flags & VNF_INITED) == 0)
568 return (ENXIO);
569
570 return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
571 }
572
/*
 * Configuration ioctls (root-only): VNDIOCSET attaches a backing
 * file to the unit, VNDIOCCLR detaches it.
 */
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
		       dev, cmd, data, flag, p, unit);
#endif
	/* Both ioctls reconfigure the unit; require superuser. */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			vndunlock(vnd);
			return(error);
		}
		/* Need the file's size to derive the disk size. */
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		/* All further file access uses a copy of these creds. */
		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);	/* report bytes back */
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size %lx\n",
			       vnd->sc_vp, (unsigned long) vnd->sc_size);
#endif

		/* Attach the disk. */
		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Detatch the disk. */
		disk_detach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return(ENOTTY);
	}

	return (0);
}
700
701 /*
702 * Duplicate the current processes' credentials. Since we are called only
703 * as the result of a SET ioctl and only root can do that, any future access
704 * to this "disk" is essentially as root. Note that credentials may change
705 * if some other uid can write directly to the mapped file (NFS).
706 */
707 int
708 vndsetcred(vnd, cred)
709 register struct vnd_softc *vnd;
710 struct ucred *cred;
711 {
712 struct uio auio;
713 struct iovec aiov;
714 char *tmpbuf;
715 int error;
716
717 vnd->sc_cred = crdup(cred);
718 tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
719
720 /* XXX: Horrible kludge to establish credentials for NFS */
721 aiov.iov_base = tmpbuf;
722 aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
723 auio.uio_iov = &aiov;
724 auio.uio_iovcnt = 1;
725 auio.uio_offset = 0;
726 auio.uio_rw = UIO_READ;
727 auio.uio_segflg = UIO_SYSSPACE;
728 auio.uio_resid = aiov.iov_len;
729 VOP_LOCK(vnd->sc_vp);
730 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
731 VOP_UNLOCK(vnd->sc_vp);
732
733 free(tmpbuf, M_TEMP);
734 return (error);
735 }
736
/*
 * Set maxactive based on FS type
 *
 * NFS-backed files are limited to 2 concurrent requests so we do not
 * flood the server; everything else gets 8.
 */
void
vndthrottle(vnd, vp)
	register struct vnd_softc *vnd;
	struct vnode *vp;
{
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));

	if (vp->v_op == nfsv2_vnodeop_p)
		vnd->sc_maxactive = 2;
	else
#endif
		vnd->sc_maxactive = 8;

	/* Defensive clamp; unreachable with the constants above. */
	if (vnd->sc_maxactive < 1)
		vnd->sc_maxactive = 1;
}
757
758 void
759 vndshutdown()
760 {
761 register struct vnd_softc *vnd;
762
763 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
764 if (vnd->sc_flags & VNF_INITED)
765 vndclear(vnd);
766 }
767
768 void
769 vndclear(vnd)
770 register struct vnd_softc *vnd;
771 {
772 register struct vnode *vp = vnd->sc_vp;
773 struct proc *p = curproc; /* XXX */
774
775 #ifdef DEBUG
776 if (vnddebug & VDB_FOLLOW)
777 printf("vndclear(%p): vp %p\n", vnd, vp);
778 #endif
779 vnd->sc_flags &= ~VNF_INITED;
780 if (vp == (struct vnode *)0)
781 panic("vndioctl: null vp");
782 (void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
783 crfree(vnd->sc_cred);
784 vnd->sc_vp = (struct vnode *)0;
785 vnd->sc_cred = (struct ucred *)0;
786 vnd->sc_size = 0;
787 }
788
789 int
790 vndsize(dev)
791 dev_t dev;
792 {
793 int unit = vndunit(dev);
794 register struct vnd_softc *vnd = &vnd_softc[unit];
795
796 if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
797 return(-1);
798 return(vnd->sc_size);
799 }
800
801 int
802 vnddump(dev, blkno, va, size)
803 dev_t dev;
804 daddr_t blkno;
805 caddr_t va;
806 size_t size;
807 {
808
809 /* Not implemented. */
810 return ENXIO;
811 }
812
813 /*
814 * Wait interruptibly for an exclusive lock.
815 *
816 * XXX
817 * Several drivers do this; it should be abstracted and made MP-safe.
818 */
819 static int
820 vndlock(sc)
821 struct vnd_softc *sc;
822 {
823 int error;
824
825 while ((sc->sc_flags & VNF_LOCKED) != 0) {
826 sc->sc_flags |= VNF_WANTED;
827 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
828 return (error);
829 }
830 sc->sc_flags |= VNF_LOCKED;
831 return (0);
832 }
833
834 /*
835 * Unlock and wake up any waiters.
836 */
837 static void
838 vndunlock(sc)
839 struct vnd_softc *sc;
840 {
841
842 sc->sc_flags &= ~VNF_LOCKED;
843 if ((sc->sc_flags & VNF_WANTED) != 0) {
844 sc->sc_flags &= ~VNF_WANTED;
845 wakeup(sc);
846 }
847 }
848