/*	$NetBSD: vnd.c,v 1.38 1997/05/26 20:28:38 pk Exp $	*/

3 /*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: vn.c 1.13 94/04/02$
41 *
42 * @(#)vn.c 8.6 (Berkeley) 4/1/94
43 */
44
45 /*
46 * Vnode disk driver.
47 *
48 * Block/character interface to a vnode. Allows one to treat a file
49 * as a disk (e.g. build a filesystem in it, mount it, etc.).
50 *
51 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
52 * instead of a simple VOP_RDWR. We do this to avoid distorting the
53 * local buffer cache.
54 *
55 * NOTE 2: There is a security issue involved with this driver.
56 * Once mounted all access to the contents of the "mapped" file via
57 * the special file is controlled by the permissions on the special
58 * file, the protection of the mapped file is ignored (effectively,
59 * by using root credentials in all transactions).
60 *
61 * NOTE 3: Doesn't interact with leases, should it?
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/namei.h>
67 #include <sys/proc.h>
68 #include <sys/errno.h>
69 #include <sys/buf.h>
70 #include <sys/malloc.h>
71 #include <sys/ioctl.h>
72 #include <sys/disklabel.h>
73 #include <sys/device.h>
74 #include <sys/disk.h>
75 #include <sys/stat.h>
76 #include <sys/mount.h>
77 #include <sys/vnode.h>
78 #include <sys/file.h>
79 #include <sys/uio.h>
80 #include <sys/conf.h>
81
82 #include <miscfs/specfs/specdev.h>
83
84 #include <dev/vndioctl.h>
85
#ifdef DEBUG
int dovndcluster = 1;		/* allow multi-block (read-ahead) pieces in vndstrategy() */
int vnddebug = 0x00;		/* bitmask of the VDB_* trace flags below */
#define VDB_FOLLOW	0x01	/* trace entry into each driver routine */
#define VDB_INIT	0x02	/* trace unit configure/unconfigure */
#define VDB_IO		0x04	/* trace individual I/O transfers */
#endif

/*
 * XXX b_resid is overloaded as the disksort() sort key on queued
 * buffers; vndstrategy() stores the block number there via b_cylin.
 */
#define b_cylin	b_resid

#define	vndunit(x)	DISKUNIT(x)

/*
 * Per-request transfer header: one per buffer handed to vndstrategy().
 * Links the original ("parent") buffer to the auxiliary buffers the
 * request is split into, and accumulates their completion state.
 */
struct vndxfer {
	struct buf	*vx_bp;		/* Pointer to parent buffer */
	int		vx_error;	/* last error seen on any aux buffer */
	int		vx_pending;	/* # of pending aux buffers */
};

/*
 * Auxiliary buffer: carries one piece of a transfer down to the
 * underlying vnode, with a link back to its vndxfer header.
 */
struct vndbuf {
	struct buf	vb_buf;
	struct vndxfer	*vb_xfer;
};

/* Allocators for transfer headers and aux buffers; M_WAITOK => never NULL. */
#define	getvndxfer()	\
	((struct vndxfer *)malloc(sizeof(struct vndxfer), M_DEVBUF, M_WAITOK))
#define	putvndxfer(vnx)	\
	free((caddr_t)(vnx), M_DEVBUF)
#define	getvndbuf()	\
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define	putvndbuf(vbp)	\
	free((caddr_t)(vbp), M_DEVBUF)

/* Per-unit state; the vnd_softc[] array is allocated at boot by vndattach(). */
struct vnd_softc {
	int		 sc_flags;	/* flags (VNF_*, below) */
	size_t		 sc_size;	/* size of vnd (in DEV_BSIZE blocks) */
	struct vnode	*sc_vp;		/* vnode backing this unit */
	struct ucred	*sc_cred;	/* credentials (crdup'd at VNDIOCSET) */
	int		 sc_maxactive;	/* max # of active requests */
	struct buf	 sc_tab;	/* transfer queue */
	char		 sc_xname[8];	/* XXX external name */
	struct disk	 sc_dkdev;	/* generic disk device info */
};

/* sc_flags */
#define	VNF_ALIVE	0x01	/* (not referenced in this file) */
#define	VNF_INITED	0x02	/* unit has a backing vnode configured */
#define	VNF_WANTED	0x40	/* a sleeper is waiting in vndlock() */
#define	VNF_LOCKED	0x80	/* unit locked against concurrent (un)config */

struct vnd_softc *vnd_softc;	/* array of numvnd units, see vndattach() */
int numvnd = 0;

/* called by main() at boot time */
void	vndattach __P((int));

void	vndclear __P((struct vnd_softc *));
void	vndstart __P((struct vnd_softc *));
int	vndsetcred __P((struct vnd_softc *, struct ucred *));
void	vndthrottle __P((struct vnd_softc *, struct vnode *));
void	vndiodone __P((struct buf *));
void	vndshutdown __P((void));

static	int	vndlock __P((struct vnd_softc *));
static	void	vndunlock __P((struct vnd_softc *));
150
151 void
152 vndattach(num)
153 int num;
154 {
155 char *mem;
156 register u_long size;
157
158 if (num <= 0)
159 return;
160 size = num * sizeof(struct vnd_softc);
161 mem = malloc(size, M_DEVBUF, M_NOWAIT);
162 if (mem == NULL) {
163 printf("WARNING: no memory for vnode disks\n");
164 return;
165 }
166 bzero(mem, size);
167 vnd_softc = (struct vnd_softc *)mem;
168 numvnd = num;
169 }
170
171 int
172 vndopen(dev, flags, mode, p)
173 dev_t dev;
174 int flags, mode;
175 struct proc *p;
176 {
177 int unit = vndunit(dev);
178 struct vnd_softc *sc;
179 int error = 0, part, pmask;
180
181 /*
182 * XXX Should support disklabels.
183 */
184
185 #ifdef DEBUG
186 if (vnddebug & VDB_FOLLOW)
187 printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
188 #endif
189 if (unit >= numvnd)
190 return (ENXIO);
191 sc = &vnd_softc[unit];
192
193 if ((error = vndlock(sc)) != 0)
194 return (error);
195
196 part = DISKPART(dev);
197 pmask = (1 << part);
198
199 /* Prevent our unit from being unconfigured while open. */
200 switch (mode) {
201 case S_IFCHR:
202 sc->sc_dkdev.dk_copenmask |= pmask;
203 break;
204
205 case S_IFBLK:
206 sc->sc_dkdev.dk_bopenmask |= pmask;
207 break;
208 }
209 sc->sc_dkdev.dk_openmask =
210 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
211
212 vndunlock(sc);
213 return (0);
214 }
215
216 int
217 vndclose(dev, flags, mode, p)
218 dev_t dev;
219 int flags, mode;
220 struct proc *p;
221 {
222 int unit = vndunit(dev);
223 struct vnd_softc *sc;
224 int error = 0, part;
225
226 #ifdef DEBUG
227 if (vnddebug & VDB_FOLLOW)
228 printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
229 #endif
230
231 if (unit >= numvnd)
232 return (ENXIO);
233 sc = &vnd_softc[unit];
234
235 if ((error = vndlock(sc)) != 0)
236 return (error);
237
238 part = DISKPART(dev);
239
240 /* ...that much closer to allowing unconfiguration... */
241 switch (mode) {
242 case S_IFCHR:
243 sc->sc_dkdev.dk_copenmask &= ~(1 << part);
244 break;
245
246 case S_IFBLK:
247 sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
248 break;
249 }
250 sc->sc_dkdev.dk_openmask =
251 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
252
253 vndunlock(sc);
254 return (0);
255 }
256
/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd = &vnd_softc[unit];
	struct vndbuf *nbp;
	struct vndxfer *vnx;
	int bn, bsize, resid;
	caddr_t addr;
	int sz, flags, error;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	/* Reject I/O to a unit with no backing vnode configured. */
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	/*
	 * Range check.  A transfer starting exactly at the end of the
	 * disk completes with resid intact and no error (conventional
	 * end-of-device signalling); anything else out of range is
	 * EINVAL.
	 */
	if (bn < 0 || bn + sz > vnd->sc_size) {
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	/*
	 * NOTE(review): bn becomes a byte offset here but is only an
	 * int; a sufficiently large backing file would presumably
	 * overflow it -- confirm against dbtob()'s result type.
	 */
	bn = dbtob(bn);
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;

	/* Allocate a header for this transfer and link it to the buffer */
	vnx = getvndxfer();
	vnx->vx_error = 0;
	vnx->vx_pending = 0;
	vnx->vx_bp = bp;

	/*
	 * Carve the request into pieces of at most one filesystem block
	 * (plus read-ahead when aligned), map each to the underlying
	 * device via VOP_BMAP, and queue an auxiliary buffer per piece.
	 */
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);

#ifdef VND_FILLHOLES
		/*
		 * A hole (nbn == -1): do this piece through vn_rdwr()
		 * so the filesystem zero-fills/allocates the block.
		 * NOTE(review): vn_rdwr() reuses the loop variable
		 * `resid' as its residual-count output -- verify the
		 * accounting interaction with the surrounding loop.
		 */
		if (error == 0 && (long)nbn == -1) {
			int rw = (flags & B_READ) ? UIO_READ : UIO_WRITE;
			sz = resid;
			error = vn_rdwr(rw, vnd->sc_vp, addr, sz,
					bn, UIO_SYSSPACE,
					IO_SYNC | IO_NODELOCKED,
					vnd->sc_cred, &resid, bp->b_proc);
			bp->b_resid -= (sz - resid);
		}
#else
		/* Without VND_FILLHOLES, holes in the file are fatal. */
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#endif

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we may have to wait for any operations
		 * that we have already fired off before releasing
		 * the buffer.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error || (long)nbn == -1) {
			/*
			 * Record the error; if no aux buffers are in
			 * flight, finish the parent here.  Otherwise
			 * vndiodone() wraps up when the last one lands.
			 */
			vnx->vx_error = error;
			if (vnx->vx_pending == 0) {
				if (error) {
					bp->b_error = error;
					bp->b_flags |= B_ERROR;
				}
				putvndxfer(vnx);
				biodone(bp);
			}
			return;
		}

#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		/*
		 * Piece size: up to the end of the current filesystem
		 * block when misaligned, or (1 + read-ahead) whole
		 * blocks when aligned; clipped to what remains.
		 */
		if ((off = bn % bsize) != 0)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
			       vnd->sc_vp, vp, bn, nbn, sz);
#endif

		/* Build and initialize the auxiliary buffer for this piece. */
		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
		/* Project the parent's dirty region onto this piece. */
		if (bp->b_dirtyend == 0) {
			nbp->vb_buf.b_dirtyoff = 0;
			nbp->vb_buf.b_dirtyend = sz;
		} else {
			nbp->vb_buf.b_dirtyoff =
			    max(0, bp->b_dirtyoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_dirtyend =
			    min(sz,
				max(0, bp->b_dirtyend - (bp->b_bcount-resid)));
		}
		/* Likewise for the valid region. */
		if (bp->b_validend == 0) {
			nbp->vb_buf.b_validoff = 0;
			nbp->vb_buf.b_validend = sz;
		} else {
			nbp->vb_buf.b_validoff =
			    max(0, bp->b_validoff - (bp->b_bcount - resid));
			nbp->vb_buf.b_validend =
			    min(sz,
				max(0, bp->b_validend - (bp->b_bcount-resid)));
		}

		nbp->vb_xfer = vnx;
		vnx->vx_pending++;

		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}
425
426 /*
427 * Feed requests sequentially.
428 * We do it this way to keep from flooding NFS servers if we are connected
429 * to an NFS file. This places the burden on the client rather than the
430 * server.
431 */
432 void
433 vndstart(vnd)
434 register struct vnd_softc *vnd;
435 {
436 register struct buf *bp;
437
438 /*
439 * Dequeue now since lower level strategy routine might
440 * queue using same links
441 */
442 bp = vnd->sc_tab.b_actf;
443 vnd->sc_tab.b_actf = bp->b_actf;
444 #ifdef DEBUG
445 if (vnddebug & VDB_IO)
446 printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
447 (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
448 bp->b_data, bp->b_bcount);
449 #endif
450
451 /* Instrumentation. */
452 disk_busy(&vnd->sc_dkdev);
453
454 if ((bp->b_flags & B_READ) == 0)
455 bp->b_vp->v_numoutput++;
456 VOP_STRATEGY(bp);
457 }
458
/*
 * Completion handler for one auxiliary buffer (invoked via biodone()
 * because the buffer was queued with B_CALL).  Folds the piece's
 * residual and error into the parent transfer, finishes the parent
 * when the whole request is accounted for, and keeps the per-unit
 * queue draining.
 */
void
vndiodone(bp)
	struct buf *bp;
{
	register struct vndbuf *vbp = (struct vndbuf *) bp;
	register struct vndxfer *vnx = (struct vndxfer *)vbp->vb_xfer;
	register struct buf *pbp = vnx->vx_bp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s, resid;

	s = splbio();	/* protect queue and counters from block interrupts */
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
		       (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		       vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		       vbp->vb_buf.b_bcount);
#endif

	/* Bytes actually moved by this piece. */
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	disk_unbusy(&vnd->sc_dkdev, resid);
	vnx->vx_pending--;

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			       vbp->vb_buf.b_error);
#endif
		/* Latch the error (later errors overwrite earlier ones). */
		vnx->vx_error = vbp->vb_buf.b_error;
	}
	putvndbuf(vbp);

	/*
	 * Wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (pbp->b_resid == 0 || (vnx->vx_error && vnx->vx_pending == 0)) {

		if (vnx->vx_error != 0) {
			pbp->b_flags |= B_ERROR;
			pbp->b_error = vnx->vx_error;
		}
		putvndxfer(vnx);
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %p iodone\n", pbp);
#endif
		biodone(pbp);
	}

	/* Start the next queued piece, or retire this active slot. */
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}
517
518 /* ARGSUSED */
519 int
520 vndread(dev, uio, flags)
521 dev_t dev;
522 struct uio *uio;
523 int flags;
524 {
525 int unit = vndunit(dev);
526 struct vnd_softc *sc;
527
528 #ifdef DEBUG
529 if (vnddebug & VDB_FOLLOW)
530 printf("vndread(%x, %p)\n", dev, uio);
531 #endif
532
533 if (unit >= numvnd)
534 return (ENXIO);
535 sc = &vnd_softc[unit];
536
537 if ((sc->sc_flags & VNF_INITED) == 0)
538 return (ENXIO);
539
540 return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
541 }
542
543 /* ARGSUSED */
544 int
545 vndwrite(dev, uio, flags)
546 dev_t dev;
547 struct uio *uio;
548 int flags;
549 {
550 int unit = vndunit(dev);
551 struct vnd_softc *sc;
552
553 #ifdef DEBUG
554 if (vnddebug & VDB_FOLLOW)
555 printf("vndwrite(%x, %p)\n", dev, uio);
556 #endif
557
558 if (unit >= numvnd)
559 return (ENXIO);
560 sc = &vnd_softc[unit];
561
562 if ((sc->sc_flags & VNF_INITED) == 0)
563 return (ENXIO);
564
565 return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
566 }
567
/*
 * Configuration ioctls (superuser only):
 *
 *	VNDIOCSET  - open the named file, record its size and the
 *		     caller's credentials, and attach the unit as a disk;
 *	VNDIOCCLR  - tear a configured unit back down, provided no other
 *		     partition (or both flavors of this one) is open.
 */
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
		       dev, cmd, data, flag, p, unit);
#endif
	/* All vnd ioctls are restricted to the superuser. */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			vndunlock(vnd);
			return(error);
		}
		/* Size the unit from the backing file's attributes. */
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		if (error) {
			/* vn_open() returned the vnode locked; undo all. */
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		/* Capture root credentials for all future backing-file I/O. */
		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size %lx\n",
			       vnd->sc_vp, (unsigned long) vnd->sc_size);
#endif

		/* Attach the disk. */
		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Detatch the disk. */
		disk_detach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return(ENOTTY);
	}

	return (0);
}
695
/*
 * Duplicate the current processes' credentials. Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root. Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 *
 * Returns 0 on success or the error from the probe read below.
 */
int
vndsetcred(vnd, cred)
	register struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	/* Do one throwaway read of up to a block with the new credentials. */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	VOP_LOCK(vnd->sc_vp);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return (error);
}
731
732 /*
733 * Set maxactive based on FS type
734 */
735 void
736 vndthrottle(vnd, vp)
737 register struct vnd_softc *vnd;
738 struct vnode *vp;
739 {
740 #ifdef NFS
741 extern int (**nfsv2_vnodeop_p) __P((void *));
742
743 if (vp->v_op == nfsv2_vnodeop_p)
744 vnd->sc_maxactive = 2;
745 else
746 #endif
747 vnd->sc_maxactive = 8;
748
749 if (vnd->sc_maxactive < 1)
750 vnd->sc_maxactive = 1;
751 }
752
753 void
754 vndshutdown()
755 {
756 register struct vnd_softc *vnd;
757
758 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
759 if (vnd->sc_flags & VNF_INITED)
760 vndclear(vnd);
761 }
762
763 void
764 vndclear(vnd)
765 register struct vnd_softc *vnd;
766 {
767 register struct vnode *vp = vnd->sc_vp;
768 struct proc *p = curproc; /* XXX */
769
770 #ifdef DEBUG
771 if (vnddebug & VDB_FOLLOW)
772 printf("vndclear(%p): vp %p\n", vnd, vp);
773 #endif
774 vnd->sc_flags &= ~VNF_INITED;
775 if (vp == (struct vnode *)0)
776 panic("vndioctl: null vp");
777 (void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
778 crfree(vnd->sc_cred);
779 vnd->sc_vp = (struct vnode *)0;
780 vnd->sc_cred = (struct ucred *)0;
781 vnd->sc_size = 0;
782 }
783
784 int
785 vndsize(dev)
786 dev_t dev;
787 {
788 int unit = vndunit(dev);
789 register struct vnd_softc *vnd = &vnd_softc[unit];
790
791 if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
792 return(-1);
793 return(vnd->sc_size);
794 }
795
796 int
797 vnddump(dev, blkno, va, size)
798 dev_t dev;
799 daddr_t blkno;
800 caddr_t va;
801 size_t size;
802 {
803
804 /* Not implemented. */
805 return ENXIO;
806 }
807
808 /*
809 * Wait interruptibly for an exclusive lock.
810 *
811 * XXX
812 * Several drivers do this; it should be abstracted and made MP-safe.
813 */
814 static int
815 vndlock(sc)
816 struct vnd_softc *sc;
817 {
818 int error;
819
820 while ((sc->sc_flags & VNF_LOCKED) != 0) {
821 sc->sc_flags |= VNF_WANTED;
822 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
823 return (error);
824 }
825 sc->sc_flags |= VNF_LOCKED;
826 return (0);
827 }
828
829 /*
830 * Unlock and wake up any waiters.
831 */
832 static void
833 vndunlock(sc)
834 struct vnd_softc *sc;
835 {
836
837 sc->sc_flags &= ~VNF_LOCKED;
838 if ((sc->sc_flags & VNF_WANTED) != 0) {
839 sc->sc_flags &= ~VNF_WANTED;
840 wakeup(sc);
841 }
842 }
843