1 /*	$NetBSD: vnd.c,v 1.131 2006/02/01 03:15:33 cube Exp $	*/
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * from: Utah $Hdr: vn.c 1.13 94/04/02$
72 *
73 * @(#)vn.c 8.9 (Berkeley) 5/14/95
74 */
75
76 /*
77 * Copyright (c) 1988 University of Utah.
78 *
79 * This code is derived from software contributed to Berkeley by
80 * the Systems Programming Group of the University of Utah Computer
81 * Science Department.
82 *
83 * Redistribution and use in source and binary forms, with or without
84 * modification, are permitted provided that the following conditions
85 * are met:
86 * 1. Redistributions of source code must retain the above copyright
87 * notice, this list of conditions and the following disclaimer.
88 * 2. Redistributions in binary form must reproduce the above copyright
89 * notice, this list of conditions and the following disclaimer in the
90 * documentation and/or other materials provided with the distribution.
91 * 3. All advertising materials mentioning features or use of this software
92 * must display the following acknowledgement:
93 * This product includes software developed by the University of
94 * California, Berkeley and its contributors.
95 * 4. Neither the name of the University nor the names of its contributors
96 * may be used to endorse or promote products derived from this software
97 * without specific prior written permission.
98 *
99 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
100 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
101 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
102 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
103 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
104 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
105 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
106 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
107 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
108 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
109 * SUCH DAMAGE.
110 *
111 * from: Utah $Hdr: vn.c 1.13 94/04/02$
112 *
113 * @(#)vn.c 8.9 (Berkeley) 5/14/95
114 */
115
116 /*
117 * Vnode disk driver.
118 *
119 * Block/character interface to a vnode. Allows one to treat a file
120 * as a disk (e.g. build a filesystem in it, mount it, etc.).
121 *
122 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
123 * instead of a simple VOP_RDWR. We do this to avoid distorting the
124 * local buffer cache.
125 *
126 * NOTE 2: There is a security issue involved with this driver.
127 * Once mounted all access to the contents of the "mapped" file via
128 * the special file is controlled by the permissions on the special
129 * file, the protection of the mapped file is ignored (effectively,
130 * by using root credentials in all transactions).
131 *
132 * NOTE 3: Doesn't interact with leases, should it?
133 */
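
/*
 * Illustrative userland sketch (not part of this driver; device path and
 * image name are placeholders): a unit is normally configured with
 * vnconfig(8), which boils down to a VNDIOCSET ioctl on the unit's raw
 * partition, roughly as below.  The struct and field names (vnd_file,
 * vnd_flags, vnd_size) are the ones handled by vndioctl(); error handling,
 * the needed headers (<fcntl.h>, <sys/ioctl.h>, <dev/vndvar.h>) and the
 * matching VNDIOCCLR are omitted for brevity.
 *
 *	struct vnd_ioctl vio;
 *	int fd = open("/dev/rvnd0d", O_RDWR);	(raw partition; letter varies by port)
 *	memset(&vio, 0, sizeof(vio));
 *	vio.vnd_file = "/var/tmp/disk.img";	(backing file to be mapped)
 *	if (ioctl(fd, VNDIOCSET, &vio) == -1)
 *		err(1, "VNDIOCSET");
 *	printf("%llu bytes configured\n", (unsigned long long)vio.vnd_size);
 */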
134
135 #include <sys/cdefs.h>
136 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.131 2006/02/01 03:15:33 cube Exp $");
137
138 #if defined(_KERNEL_OPT)
139 #include "fs_nfs.h"
140 #include "opt_vnd.h"
141 #endif
142
143 #include <sys/param.h>
144 #include <sys/systm.h>
145 #include <sys/namei.h>
146 #include <sys/proc.h>
147 #include <sys/kthread.h>
148 #include <sys/errno.h>
149 #include <sys/buf.h>
150 #include <sys/bufq.h>
151 #include <sys/malloc.h>
152 #include <sys/ioctl.h>
153 #include <sys/disklabel.h>
154 #include <sys/device.h>
155 #include <sys/disk.h>
156 #include <sys/stat.h>
157 #include <sys/mount.h>
158 #include <sys/vnode.h>
159 #include <sys/file.h>
160 #include <sys/uio.h>
161 #include <sys/conf.h>
162 #include <net/zlib.h>
163
164 #include <miscfs/specfs/specdev.h>
165
166 #include <dev/vndvar.h>
167
168 #if defined(VNDDEBUG) && !defined(DEBUG)
169 #define DEBUG
170 #endif
171
172 #ifdef DEBUG
173 int dovndcluster = 1;
174 #define VDB_FOLLOW 0x01
175 #define VDB_INIT 0x02
176 #define VDB_IO 0x04
177 #define VDB_LABEL 0x08
178 int vnddebug = 0x00;
179 #endif
180
181 #define vndunit(x) DISKUNIT(x)
182
183 struct vndxfer {
184 struct buf vx_buf;
185 struct vnd_softc *vx_vnd;
186 };
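/*
 * vx_buf must remain the first member: VND_BUFTOXFER() below recovers the
 * enclosing vndxfer from the struct buf pointer that vndiodone() receives.
 */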
187 #define VND_BUFTOXFER(bp) ((struct vndxfer *)(void *)bp)
188
189 #define VND_GETXFER(vnd) pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
190 #define VND_PUTXFER(vnd, vx) pool_put(&(vnd)->sc_vxpool, (vx))
191
192 #define VNDLABELDEV(dev) \
193 (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
194
195 /* called by main() at boot time (XXX: and the LKM driver) */
196 void vndattach(int);
197
198 static void vndclear(struct vnd_softc *, int);
199 static int vndsetcred(struct vnd_softc *, struct ucred *);
200 static void vndthrottle(struct vnd_softc *, struct vnode *);
201 static void vndiodone(struct buf *);
202 #if 0
203 static void vndshutdown(void);
204 #endif
205
206 static void vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
207 static void vndgetdisklabel(dev_t, struct vnd_softc *);
208
209 static int vndlock(struct vnd_softc *);
210 static void vndunlock(struct vnd_softc *);
211 #ifdef VND_COMPRESSION
212 static void compstrategy(struct buf *, off_t);
213 static void *vnd_alloc(void *, u_int, u_int);
214 static void vnd_free(void *, void *);
215 #endif /* VND_COMPRESSION */
216
217 static void vndthread(void *);
218
219 static dev_type_open(vndopen);
220 static dev_type_close(vndclose);
221 static dev_type_read(vndread);
222 static dev_type_write(vndwrite);
223 static dev_type_ioctl(vndioctl);
224 static dev_type_strategy(vndstrategy);
225 static dev_type_dump(vnddump);
226 static dev_type_size(vndsize);
227
228 const struct bdevsw vnd_bdevsw = {
229 vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
230 };
231
232 const struct cdevsw vnd_cdevsw = {
233 vndopen, vndclose, vndread, vndwrite, vndioctl,
234 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
235 };
236
237 static int vnd_match(struct device *, struct cfdata *, void *);
238 static void vnd_attach(struct device *, struct device *, void *);
239 static int vnd_detach(struct device *, int);
240
241 CFATTACH_DECL(vnd, sizeof(struct vnd_softc),
242 vnd_match, vnd_attach, vnd_detach, NULL);
243 extern struct cfdriver vnd_cd;
244
245 static struct vnd_softc *vnd_spawn(int);
246
247 void
248 vndattach(int num)
249 {
250 int error;
251
252 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
253 if (error)
254 aprint_error("%s: unable to register cfattach\n",
255 vnd_cd.cd_name);
256 }
257
258 static int
259 vnd_match(struct device *self, struct cfdata *cfdata, void *aux)
260 {
261 return 1;
262 }
263
264 static void
265 vnd_attach(struct device *parent, struct device *self, void *aux)
266 {
267 struct vnd_softc *sc = (struct vnd_softc *)self;
268
269 sc->sc_comp_offsets = NULL;
270 sc->sc_comp_buff = NULL;
271 sc->sc_comp_decombuf = NULL;
272 bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
273 pseudo_disk_init(&sc->sc_dkdev);
274
275 aprint_normal("%s: vnode disk driver\n", self->dv_xname);
276 }
277
278 static int
279 vnd_detach(struct device *self, int flags)
280 {
281 struct vnd_softc *sc = (struct vnd_softc *)self;
282 if (sc->sc_flags & VNF_INITED)
283 return EBUSY;
284 return 0;
285 }
286
287 static struct vnd_softc *
288 vnd_spawn(int unit)
289 {
290 struct cfdata *cf;
291
292 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
293 cf->cf_name = vnd_cd.cd_name;
294 cf->cf_atname = vnd_cd.cd_name;
295 cf->cf_unit = unit;
296 cf->cf_fstate = FSTATE_STAR;
297
298 return (struct vnd_softc *)config_attach_pseudo(cf);
299 }
300
301 static int
302 vndopen(dev_t dev, int flags, int mode, struct lwp *l)
303 {
304 int unit = vndunit(dev);
305 struct vnd_softc *sc;
306 int error = 0, part, pmask;
307 struct disklabel *lp;
308
309 #ifdef DEBUG
310 if (vnddebug & VDB_FOLLOW)
311 printf("vndopen(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
312 #endif
313 sc = device_lookup(&vnd_cd, unit);
314 if (sc == NULL) {
315 sc = vnd_spawn(unit);
316 if (sc == NULL)
317 return ENOMEM;
318 }
319
320 if ((error = vndlock(sc)) != 0)
321 return (error);
322
323 lp = sc->sc_dkdev.dk_label;
324
325 part = DISKPART(dev);
326 pmask = (1 << part);
327
328 /*
329 * If we're initialized, check to see if there are any other
330 * open partitions. If not, then it's safe to update the
331 * in-core disklabel. Only read the disklabel if it is
332 * not already valid.
333 */
334 if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
335 sc->sc_dkdev.dk_openmask == 0)
336 vndgetdisklabel(dev, sc);
337
338 /* Check that the partition exists. */
339 if (part != RAW_PART) {
340 if (((sc->sc_flags & VNF_INITED) == 0) ||
341 ((part >= lp->d_npartitions) ||
342 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
343 error = ENXIO;
344 goto done;
345 }
346 }
347
348 /* Prevent our unit from being unconfigured while open. */
349 switch (mode) {
350 case S_IFCHR:
351 sc->sc_dkdev.dk_copenmask |= pmask;
352 break;
353
354 case S_IFBLK:
355 sc->sc_dkdev.dk_bopenmask |= pmask;
356 break;
357 }
358 sc->sc_dkdev.dk_openmask =
359 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
360
361 done:
362 vndunlock(sc);
363 return (error);
364 }
365
366 static int
367 vndclose(dev_t dev, int flags, int mode, struct lwp *l)
368 {
369 int unit = vndunit(dev);
370 struct vnd_softc *sc;
371 int error = 0, part;
372
373 #ifdef DEBUG
374 if (vnddebug & VDB_FOLLOW)
375 printf("vndclose(0x%x, 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
376 #endif
377 sc = device_lookup(&vnd_cd, unit);
378 if (sc == NULL)
379 return ENXIO;
380
381 if ((error = vndlock(sc)) != 0)
382 return (error);
383
384 part = DISKPART(dev);
385
386 /* ...that much closer to allowing unconfiguration... */
387 switch (mode) {
388 case S_IFCHR:
389 sc->sc_dkdev.dk_copenmask &= ~(1 << part);
390 break;
391
392 case S_IFBLK:
393 sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
394 break;
395 }
396 sc->sc_dkdev.dk_openmask =
397 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
398
399 vndunlock(sc);
400 return (0);
401 }
402
403 /*
404 * Queue the request, and wake up the kernel thread to handle it.
405 */
406 static void
407 vndstrategy(struct buf *bp)
408 {
409 int unit = vndunit(bp->b_dev);
410 struct vnd_softc *vnd =
411 (struct vnd_softc *)device_lookup(&vnd_cd, unit);
412 struct disklabel *lp = vnd->sc_dkdev.dk_label;
413 daddr_t blkno;
414 int s = splbio();
415
416 bp->b_resid = bp->b_bcount;
417
418 if ((vnd->sc_flags & VNF_INITED) == 0) {
419 bp->b_error = ENXIO;
420 bp->b_flags |= B_ERROR;
421 goto done;
422 }
423
424 /*
425 * The transfer must be a whole number of blocks.
426 */
427 if ((bp->b_bcount % lp->d_secsize) != 0) {
428 bp->b_error = EINVAL;
429 bp->b_flags |= B_ERROR;
430 goto done;
431 }
432
433 /*
434 * check if we're read-only.
435 */
436 if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
437 bp->b_error = EACCES;
438 bp->b_flags |= B_ERROR;
439 goto done;
440 }
441
442 /*
443 * Do bounds checking and adjust transfer. If there's an error,
444 * the bounds check will flag that for us.
445 */
446 if (DISKPART(bp->b_dev) != RAW_PART) {
447 if (bounds_check_with_label(&vnd->sc_dkdev,
448 bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
449 goto done;
450 }
451
452 /* If it's a nil transfer, wake up the top half now. */
453 if (bp->b_bcount == 0)
454 goto done;
455
456 /*
457 * Put the block number in terms of the logical blocksize
458 * of the "device".
459 */
460
461 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
462
463 /*
464 * Translate the partition-relative block number to an absolute.
465 */
466 if (DISKPART(bp->b_dev) != RAW_PART) {
467 struct partition *pp;
468
469 pp = &vnd->sc_dkdev.dk_label->d_partitions[
470 DISKPART(bp->b_dev)];
471 blkno += pp->p_offset;
472 }
473 bp->b_rawblkno = blkno;
474
475 #ifdef DEBUG
476 if (vnddebug & VDB_FOLLOW)
477 printf("vndstrategy(%p): unit %d\n", bp, unit);
478 #endif
479 BUFQ_PUT(vnd->sc_tab, bp);
480 wakeup(&vnd->sc_tab);
481 splx(s);
482 return;
483 done:
484 biodone(bp);
485 splx(s);
486 }
487
488 static void
489 vndthread(void *arg)
490 {
491 struct vnd_softc *vnd = arg;
492 struct mount *mp;
493 int s, bsize;
494 int sz, error;
495 struct disklabel *lp;
496
497 s = splbio();
498 vnd->sc_flags |= VNF_KTHREAD;
499 wakeup(&vnd->sc_kthread);
500
501 /*
502 * Dequeue requests, break them into bsize pieces and submit using
503 * VOP_BMAP/VOP_STRATEGY.
504 */
505 while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
506 struct vndxfer *vnx;
507 off_t offset;
508 int resid;
509 int skipped = 0;
510 off_t bn;
511 int flags;
512 struct buf *obp;
513 struct buf *bp;
514
515 obp = BUFQ_GET(vnd->sc_tab);
516 if (obp == NULL) {
517 tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
518 continue;
519 }
520 splx(s);
521 flags = obp->b_flags;
522 #ifdef DEBUG
523 if (vnddebug & VDB_FOLLOW)
524 printf("vndthread(%p)\n", obp);
525 #endif
526 lp = vnd->sc_dkdev.dk_label;
527
528 /* convert to a byte offset within the file. */
529 bn = obp->b_rawblkno * lp->d_secsize;
530
531 if (vnd->sc_vp->v_mount == NULL) {
532 obp->b_error = ENXIO;
533 obp->b_flags |= B_ERROR;
534 goto done;
535 }
536 #ifdef VND_COMPRESSION
537 /* handle a compressed read */
538 if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
539 compstrategy(obp, bn);
540 goto done;
541 }
542 #endif /* VND_COMPRESSION */
543
544 bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
545
546 /*
547 * Allocate a header for this transfer and link it to the
548 * buffer
549 */
550 s = splbio();
551 vnx = VND_GETXFER(vnd);
552 splx(s);
553 vnx->vx_vnd = vnd;
554
555 bp = &vnx->vx_buf;
556 BUF_INIT(bp);
557 bp->b_flags = (obp->b_flags & B_READ) | B_CALL;
558 bp->b_iodone = vndiodone;
559 bp->b_private = obp;
560 bp->b_vp = NULL;
561 bp->b_data = obp->b_data;
562 bp->b_bcount = bp->b_resid = obp->b_bcount;
563 BIO_COPYPRIO(bp, obp);
564
565 s = splbio();
566 while (vnd->sc_active >= vnd->sc_maxactive) {
567 tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
568 }
569 vnd->sc_active++;
570 splx(s);
571
572 if ((flags & B_READ) == 0)
573 vn_start_write(vnd->sc_vp, &mp, V_WAIT);
574
575 /* Instrumentation. */
576 disk_busy(&vnd->sc_dkdev);
577
578 /*
579 * Feed requests sequentially.
580 * We do it this way to keep from flooding NFS servers if we
581 * are connected to an NFS file. This places the burden on
582 * the client rather than the server.
583 */
584 error = 0;
585 for (offset = 0, resid = bp->b_resid; resid;
586 resid -= sz, offset += sz) {
587 struct buf *nbp;
588 struct vnode *vp;
589 daddr_t nbn;
590 int off, nra;
591
592 nra = 0;
593 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
594 error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
595 VOP_UNLOCK(vnd->sc_vp, 0);
596
597 if (error == 0 && (long)nbn == -1)
598 error = EIO;
599
600 /*
601 * If there was an error or a hole in the file...punt.
602 * Note that we may have to wait for any operations
603 * that we have already fired off before releasing
604 * the buffer.
605 *
606 * XXX we could deal with holes here but it would be
607 * a hassle (in the write case).
608 */
609 if (error) {
610 skipped += resid;
611 break;
612 }
613
614 #ifdef DEBUG
615 if (!dovndcluster)
616 nra = 0;
617 #endif
618
619 if ((off = bn % bsize) != 0)
620 sz = bsize - off;
621 else
622 sz = (1 + nra) * bsize;
623 if (resid < sz)
624 sz = resid;
625 #ifdef DEBUG
626 if (vnddebug & VDB_IO)
627 printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
628 " sz 0x%x\n",
629 vnd->sc_vp, vp, (long long)bn, nbn, sz);
630 #endif
631
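/*
 * Carve a child buffer covering sz bytes of the master transfer at
 * 'offset'; the master buffer completes (and vndiodone() runs) only
 * after every child I/O, plus any skipped residue, has been accounted
 * for via nestiobuf_done().
 */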
632 nbp = getiobuf();
633 nestiobuf_setup(bp, nbp, offset, sz);
634 nbp->b_blkno = nbn + btodb(off);
635
636 #if 0 /* XXX #ifdef DEBUG */
637 if (vnddebug & VDB_IO)
638 printf("vndstart(%ld): bp %p vp %p blkno "
639 "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
640 (long) (vnd-vnd_softc), &nbp->vb_buf,
641 nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
642 nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
643 nbp->vb_buf.b_bcount);
644 #endif
645 VOP_STRATEGY(vp, nbp);
646 bn += sz;
647 }
648 nestiobuf_done(bp, skipped, error);
649
650 if ((flags & B_READ) == 0)
651 vn_finished_write(mp, 0);
652
653 s = splbio();
654 continue;
655 done:
656 biodone(obp);
657 s = splbio();
658 }
659
660 vnd->sc_flags &= ~(VNF_KTHREAD | VNF_VUNCONF);
661 wakeup(&vnd->sc_kthread);
662 splx(s);
663 kthread_exit(0);
664 }
665
666 static void
667 vndiodone(struct buf *bp)
668 {
669 struct vndxfer *vnx = VND_BUFTOXFER(bp);
670 struct vnd_softc *vnd = vnx->vx_vnd;
671 struct buf *obp = bp->b_private;
672
673 KASSERT(&vnx->vx_buf == bp);
674 KASSERT(vnd->sc_active > 0);
675 #ifdef DEBUG
676 if (vnddebug & VDB_IO) {
677 printf("vndiodone1: bp %p iodone: error %d\n",
678 bp, (bp->b_flags & B_ERROR) != 0 ? bp->b_error : 0);
679 }
680 #endif
681 disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
682 (bp->b_flags & B_READ));
683 vnd->sc_active--;
684 if (vnd->sc_active == 0) {
685 wakeup(&vnd->sc_tab);
686 }
687 obp->b_flags |= bp->b_flags & B_ERROR;
688 obp->b_error = bp->b_error;
689 obp->b_resid = bp->b_resid;
690 VND_PUTXFER(vnd, vnx);
691 biodone(obp);
692 }
693
694 /* ARGSUSED */
695 static int
696 vndread(dev_t dev, struct uio *uio, int flags)
697 {
698 int unit = vndunit(dev);
699 struct vnd_softc *sc;
700
701 #ifdef DEBUG
702 if (vnddebug & VDB_FOLLOW)
703 printf("vndread(0x%x, %p)\n", dev, uio);
704 #endif
705
706 sc = device_lookup(&vnd_cd, unit);
707 if (sc == NULL)
708 return ENXIO;
709
710 if ((sc->sc_flags & VNF_INITED) == 0)
711 return (ENXIO);
712
713 return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
714 }
715
716 /* ARGSUSED */
717 static int
718 vndwrite(dev_t dev, struct uio *uio, int flags)
719 {
720 int unit = vndunit(dev);
721 struct vnd_softc *sc;
722
723 #ifdef DEBUG
724 if (vnddebug & VDB_FOLLOW)
725 printf("vndwrite(0x%x, %p)\n", dev, uio);
726 #endif
727
728 sc = device_lookup(&vnd_cd, unit);
729 if (sc == NULL)
730 return ENXIO;
731
732 if ((sc->sc_flags & VNF_INITED) == 0)
733 return (ENXIO);
734
735 return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
736 }
737
738 static int
739 vnd_cget(struct lwp *l, int unit, int *un, struct vattr *va)
740 {
741 struct vnd_softc *vnd;
742
743 if (*un == -1)
744 *un = unit;
745 if (*un < 0)
746 return EINVAL;
747
748 vnd = device_lookup(&vnd_cd, *un);
749 if (vnd == NULL)
750 /*
751 * vnconfig(8) has weird expectations when listing
752 * the devices:
753 * it stops as soon as it gets ENXIO, but
754 * keeps going if it gets something else...
755 */
756 return (*un >= vnd_cd.cd_ndevs) ? ENXIO : -1;
757
758 if ((vnd->sc_flags & VNF_INITED) == 0)
759 return -1;
760
761 return VOP_GETATTR(vnd->sc_vp, va, l->l_proc->p_ucred, l);
762 }
763
764 /* ARGSUSED */
765 static int
766 vndioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
767 {
768 int unit = vndunit(dev);
769 struct vnd_softc *vnd;
770 struct vnd_ioctl *vio;
771 struct vattr vattr;
772 struct nameidata nd;
773 int error, part, pmask;
774 size_t geomsize;
775 struct proc *p = (l != NULL) ? l->l_proc : NULL;
776 int fflags;
777 #ifdef __HAVE_OLD_DISKLABEL
778 struct disklabel newlabel;
779 #endif
780
781 #ifdef DEBUG
782 if (vnddebug & VDB_FOLLOW)
783 printf("vndioctl(0x%x, 0x%lx, %p, 0x%x, %p): unit %d\n",
784 dev, cmd, data, flag, p, unit);
785 #endif
786 vnd = device_lookup(&vnd_cd, unit);
787 if (vnd == NULL &&
788 #ifdef COMPAT_30
789 cmd != VNDIOOCGET &&
790 #endif
791 cmd != VNDIOCGET)
792 return ENXIO;
793 vio = (struct vnd_ioctl *)data;
794
795 /* Must be open for writes for these commands... */
796 switch (cmd) {
797 case VNDIOCSET:
798 case VNDIOCCLR:
799 case DIOCSDINFO:
800 case DIOCWDINFO:
801 #ifdef __HAVE_OLD_DISKLABEL
802 case ODIOCSDINFO:
803 case ODIOCWDINFO:
804 #endif
805 case DIOCKLABEL:
806 case DIOCWLABEL:
807 if ((flag & FWRITE) == 0)
808 return (EBADF);
809 }
810
811 /* Must be initialized for these... */
812 switch (cmd) {
813 case VNDIOCCLR:
814 case DIOCGDINFO:
815 case DIOCSDINFO:
816 case DIOCWDINFO:
817 case DIOCGPART:
818 case DIOCKLABEL:
819 case DIOCWLABEL:
820 case DIOCGDEFLABEL:
821 #ifdef __HAVE_OLD_DISKLABEL
822 case ODIOCGDINFO:
823 case ODIOCSDINFO:
824 case ODIOCWDINFO:
825 case ODIOCGDEFLABEL:
826 #endif
827 if ((vnd->sc_flags & VNF_INITED) == 0)
828 return (ENXIO);
829 }
830
831 switch (cmd) {
832 case VNDIOCSET:
833 if (vnd->sc_flags & VNF_INITED)
834 return (EBUSY);
835
836 if ((error = vndlock(vnd)) != 0)
837 return (error);
838
839 fflags = FREAD;
840 if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
841 fflags |= FWRITE;
842 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, l);
843 if ((error = vn_open(&nd, fflags, 0)) != 0)
844 goto unlock_and_exit;
845 error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_proc->p_ucred, l);
846 if (!error && nd.ni_vp->v_type != VREG)
847 error = EOPNOTSUPP;
848 if (error) {
849 VOP_UNLOCK(nd.ni_vp, 0);
850 goto close_and_exit;
851 }
852
853 /* If using a compressed file, initialize its info */
854 /* (or abort with an error if kernel has no compression) */
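/*
 * On-disk layout expected here: a vnd_comp_header whose block_size and
 * num_blocks fields are stored big-endian, followed by (num_blocks + 1)
 * big-endian 64-bit offsets of the compressed blocks within the file;
 * the final offset is the byte size of the compressed file.  Each block
 * inflates (via zlib) to one sc_comp_blksz-sized chunk of the virtual disk.
 */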
855 if (vio->vnd_flags & VNF_COMP) {
856 #ifdef VND_COMPRESSION
857 struct vnd_comp_header *ch;
858 int i;
859 u_int32_t comp_size;
860 u_int32_t comp_maxsize;
861
862 /* allocate space for compressed file header */
863 ch = malloc(sizeof(struct vnd_comp_header),
864 M_TEMP, M_WAITOK);
865
866 /* read compressed file header */
867 error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)ch,
868 sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
869 IO_UNIT|IO_NODELOCKED, p->p_ucred, NULL, NULL);
870 if(error) {
871 free(ch, M_TEMP);
872 VOP_UNLOCK(nd.ni_vp, 0);
873 goto close_and_exit;
874 }
875
876 /* save some header info */
877 vnd->sc_comp_blksz = ntohl(ch->block_size);
878 /* note last offset is the file byte size */
879 vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
880 free(ch, M_TEMP);
881 if(vnd->sc_comp_blksz % DEV_BSIZE !=0) {
882 VOP_UNLOCK(nd.ni_vp, 0);
883 error = EINVAL;
884 goto close_and_exit;
885 }
886 if(sizeof(struct vnd_comp_header) +
887 sizeof(u_int64_t) * vnd->sc_comp_numoffs >
888 vattr.va_size) {
889 VOP_UNLOCK(nd.ni_vp, 0);
890 error = EINVAL;
891 goto close_and_exit;
892 }
893
894 /* set decompressed file size */
895 vattr.va_size =
896 (vnd->sc_comp_numoffs - 1) * vnd->sc_comp_blksz;
897
898 /* allocate space for all the compressed offsets */
899 vnd->sc_comp_offsets =
900 malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
901 M_DEVBUF, M_WAITOK);
902
903 /* read in the offsets */
904 error = vn_rdwr(UIO_READ, nd.ni_vp,
905 (caddr_t)vnd->sc_comp_offsets,
906 sizeof(u_int64_t) * vnd->sc_comp_numoffs,
907 sizeof(struct vnd_comp_header), UIO_SYSSPACE,
908 IO_UNIT|IO_NODELOCKED, p->p_ucred, NULL, NULL);
909 if(error) {
910 VOP_UNLOCK(nd.ni_vp, 0);
911 goto close_and_exit;
912 }
913 /*
914 * find the largest block size (used as the allocation limit).
915 * Also convert the offsets to native byte order.
916 */
917 comp_maxsize = 0;
918 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
919 vnd->sc_comp_offsets[i] =
920 be64toh(vnd->sc_comp_offsets[i]);
921 comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
922 - vnd->sc_comp_offsets[i];
923 if (comp_size > comp_maxsize)
924 comp_maxsize = comp_size;
925 }
926 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
927 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
928
929 /* create compressed data buffer */
930 vnd->sc_comp_buff = malloc(comp_maxsize,
931 M_DEVBUF, M_WAITOK);
932
933 /* create decompressed buffer */
934 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
935 M_DEVBUF, M_WAITOK);
936 vnd->sc_comp_buffblk = -1;
937
938 /* Initialize decompress stream */
939 bzero(&vnd->sc_comp_stream, sizeof(z_stream));
940 vnd->sc_comp_stream.zalloc = vnd_alloc;
941 vnd->sc_comp_stream.zfree = vnd_free;
942 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
943 if(error) {
944 if(vnd->sc_comp_stream.msg)
945 printf("vnd%d: compressed file, %s\n",
946 unit, vnd->sc_comp_stream.msg);
947 VOP_UNLOCK(nd.ni_vp, 0);
948 error = EINVAL;
949 goto close_and_exit;
950 }
951
952 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
953 #else /* !VND_COMPRESSION */
954 VOP_UNLOCK(nd.ni_vp, 0);
955 error = EOPNOTSUPP;
956 goto close_and_exit;
957 #endif /* VND_COMPRESSION */
958 }
959
960 VOP_UNLOCK(nd.ni_vp, 0);
961 vnd->sc_vp = nd.ni_vp;
962 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
963
964 /*
965 * Use pseudo-geometry specified. If none was provided,
966 * use "standard" Adaptec fictitious geometry.
967 */
968 if (vio->vnd_flags & VNDIOF_HASGEOM) {
969
970 memcpy(&vnd->sc_geom, &vio->vnd_geom,
971 sizeof(vio->vnd_geom));
972
973 /*
974 * Sanity-check the sector size.
975 * XXX Don't allow secsize < DEV_BSIZE. Should
976 * XXX we?
977 */
978 if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
979 (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
980 vnd->sc_geom.vng_ncylinders == 0 ||
981 (vnd->sc_geom.vng_ntracks *
982 vnd->sc_geom.vng_nsectors) == 0) {
983 error = EINVAL;
984 goto close_and_exit;
985 }
986
987 /*
988 * Compute the size (in DEV_BSIZE blocks) specified
989 * by the geometry.
990 */
991 geomsize = (vnd->sc_geom.vng_nsectors *
992 vnd->sc_geom.vng_ntracks *
993 vnd->sc_geom.vng_ncylinders) *
994 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
995
996 /*
997 * Sanity-check the size against the specified
998 * geometry.
999 */
1000 if (vnd->sc_size < geomsize) {
1001 error = EINVAL;
1002 goto close_and_exit;
1003 }
1004 } else if (vnd->sc_size >= (32 * 64)) {
1005 /*
1006 * Size must be at least 2048 DEV_BSIZE blocks
1007 * (1M) in order to use this geometry.
1008 */
1009 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1010 vnd->sc_geom.vng_nsectors = 32;
1011 vnd->sc_geom.vng_ntracks = 64;
1012 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1013 } else {
1014 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1015 vnd->sc_geom.vng_nsectors = 1;
1016 vnd->sc_geom.vng_ntracks = 1;
1017 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1018 }
1019
1020 if (vio->vnd_flags & VNDIOF_READONLY) {
1021 vnd->sc_flags |= VNF_READONLY;
1022 }
1023
1024 if ((error = vndsetcred(vnd, p->p_ucred)) != 0)
1025 goto close_and_exit;
1026
1027 vndthrottle(vnd, vnd->sc_vp);
1028 vio->vnd_size = dbtob(vnd->sc_size);
1029 vnd->sc_flags |= VNF_INITED;
1030
1031 /* create the kernel thread, wait for it to be up */
1032 error = kthread_create1(vndthread, vnd, &vnd->sc_kthread,
1033 vnd->sc_dev.dv_xname);
1034 if (error)
1035 goto close_and_exit;
1036 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1037 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1038 }
1039 #ifdef DEBUG
1040 if (vnddebug & VDB_INIT)
1041 printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
1042 vnd->sc_vp, (unsigned long) vnd->sc_size,
1043 vnd->sc_geom.vng_secsize,
1044 vnd->sc_geom.vng_nsectors,
1045 vnd->sc_geom.vng_ntracks,
1046 vnd->sc_geom.vng_ncylinders);
1047 #endif
1048
1049 /* Attach the disk. */
1050 vnd->sc_dkdev.dk_name = vnd->sc_dev.dv_xname;
1051 pseudo_disk_attach(&vnd->sc_dkdev);
1052
1053 /* Initialize the xfer and buffer pools. */
1054 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1055 0, 0, "vndxpl", NULL);
1056
1057 /* Try and read the disklabel. */
1058 vndgetdisklabel(dev, vnd);
1059
1060 vndunlock(vnd);
1061
1062 break;
1063
1064 close_and_exit:
1065 (void) vn_close(nd.ni_vp, fflags, p->p_ucred, l);
1066 unlock_and_exit:
1067 #ifdef VND_COMPRESSION
1068 /* free any allocated memory (for compressed file) */
1069 if(vnd->sc_comp_offsets) {
1070 free(vnd->sc_comp_offsets, M_DEVBUF);
1071 vnd->sc_comp_offsets = NULL;
1072 }
1073 if(vnd->sc_comp_buff) {
1074 free(vnd->sc_comp_buff, M_DEVBUF);
1075 vnd->sc_comp_buff = NULL;
1076 }
1077 if(vnd->sc_comp_decombuf) {
1078 free(vnd->sc_comp_decombuf, M_DEVBUF);
1079 vnd->sc_comp_decombuf = NULL;
1080 }
1081 #endif /* VND_COMPRESSION */
1082 vndunlock(vnd);
1083 return (error);
1084
1085 case VNDIOCCLR:
1086 if ((error = vndlock(vnd)) != 0)
1087 return (error);
1088
1089 /*
1090 * Don't unconfigure if any other partitions are open
1091 * or if both the character and block flavors of this
1092 * partition are open.
1093 */
1094 part = DISKPART(dev);
1095 pmask = (1 << part);
1096 if (((vnd->sc_dkdev.dk_openmask & ~pmask) ||
1097 ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
1098 (vnd->sc_dkdev.dk_copenmask & pmask))) &&
1099 !(vio->vnd_flags & VNDIOF_FORCE)) {
1100 vndunlock(vnd);
1101 return (EBUSY);
1102 }
1103
1104 /*
1105 * XXX vndclear() might call vndclose() implicitly;
1106 * release lock to avoid recursion
1107 */
1108 vndunlock(vnd);
1109 vndclear(vnd, minor(dev));
1110 #ifdef DEBUG
1111 if (vnddebug & VDB_INIT)
1112 printf("vndioctl: CLRed\n");
1113 #endif
1114
1115 /* Destroy the xfer and buffer pools. */
1116 pool_destroy(&vnd->sc_vxpool);
1117
1118 /* Detach the disk. */
1119 pseudo_disk_detach(&vnd->sc_dkdev);
1120 if ((error = config_detach((struct device *)vnd, 0)) != 0) {
1121 aprint_error("%s: unable to detach instance\n",
1122 vnd->sc_dev.dv_xname);
1123 return error;
1124 }
1125
1126 break;
1127
1128 #ifdef COMPAT_30
1129 case VNDIOOCGET: {
1130 struct vnd_ouser *vnu;
1131 struct vattr va;
1132 vnu = (struct vnd_ouser *)data;
1133 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1134 case 0:
1135 vnu->vnu_dev = va.va_fsid;
1136 vnu->vnu_ino = va.va_fileid;
1137 break;
1138 case -1:
1139 /* unused is not an error */
1140 vnu->vnu_dev = 0;
1141 vnu->vnu_ino = 0;
1142 break;
1143 default:
1144 return error;
1145 }
1146 break;
1147 }
1148 #endif
1149 case VNDIOCGET: {
1150 struct vnd_user *vnu;
1151 struct vattr va;
1152 vnu = (struct vnd_user *)data;
1153 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1154 case 0:
1155 vnu->vnu_dev = va.va_fsid;
1156 vnu->vnu_ino = va.va_fileid;
1157 break;
1158 case -1:
1159 /* unused is not an error */
1160 vnu->vnu_dev = 0;
1161 vnu->vnu_ino = 0;
1162 break;
1163 default:
1164 return error;
1165 }
1166 break;
1167 }
1168
1169 case DIOCGDINFO:
1170 *(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
1171 break;
1172
1173 #ifdef __HAVE_OLD_DISKLABEL
1174 case ODIOCGDINFO:
1175 newlabel = *(vnd->sc_dkdev.dk_label);
1176 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1177 return ENOTTY;
1178 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1179 break;
1180 #endif
1181
1182 case DIOCGPART:
1183 ((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
1184 ((struct partinfo *)data)->part =
1185 &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1186 break;
1187
1188 case DIOCWDINFO:
1189 case DIOCSDINFO:
1190 #ifdef __HAVE_OLD_DISKLABEL
1191 case ODIOCWDINFO:
1192 case ODIOCSDINFO:
1193 #endif
1194 {
1195 struct disklabel *lp;
1196
1197 if ((error = vndlock(vnd)) != 0)
1198 return (error);
1199
1200 vnd->sc_flags |= VNF_LABELLING;
1201
1202 #ifdef __HAVE_OLD_DISKLABEL
1203 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1204 memset(&newlabel, 0, sizeof newlabel);
1205 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1206 lp = &newlabel;
1207 } else
1208 #endif
1209 lp = (struct disklabel *)data;
1210
1211 error = setdisklabel(vnd->sc_dkdev.dk_label,
1212 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1213 if (error == 0) {
1214 if (cmd == DIOCWDINFO
1215 #ifdef __HAVE_OLD_DISKLABEL
1216 || cmd == ODIOCWDINFO
1217 #endif
1218 )
1219 error = writedisklabel(VNDLABELDEV(dev),
1220 vndstrategy, vnd->sc_dkdev.dk_label,
1221 vnd->sc_dkdev.dk_cpulabel);
1222 }
1223
1224 vnd->sc_flags &= ~VNF_LABELLING;
1225
1226 vndunlock(vnd);
1227
1228 if (error)
1229 return (error);
1230 break;
1231 }
1232
1233 case DIOCKLABEL:
1234 if (*(int *)data != 0)
1235 vnd->sc_flags |= VNF_KLABEL;
1236 else
1237 vnd->sc_flags &= ~VNF_KLABEL;
1238 break;
1239
1240 case DIOCWLABEL:
1241 if (*(int *)data != 0)
1242 vnd->sc_flags |= VNF_WLABEL;
1243 else
1244 vnd->sc_flags &= ~VNF_WLABEL;
1245 break;
1246
1247 case DIOCGDEFLABEL:
1248 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1249 break;
1250
1251 #ifdef __HAVE_OLD_DISKLABEL
1252 case ODIOCGDEFLABEL:
1253 vndgetdefaultlabel(vnd, &newlabel);
1254 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1255 return ENOTTY;
1256 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1257 break;
1258 #endif
1259
1260 default:
1261 return (ENOTTY);
1262 }
1263
1264 return (0);
1265 }
1266
1267 /*
1268 * Duplicate the current process's credentials. Since we are called only
1269 * as the result of a SET ioctl and only root can do that, any future access
1270 * to this "disk" is essentially as root. Note that credentials may change
1271 * if some other uid can write directly to the mapped file (NFS).
1272 */
1273 static int
1274 vndsetcred(struct vnd_softc *vnd, struct ucred *cred)
1275 {
1276 struct uio auio;
1277 struct iovec aiov;
1278 char *tmpbuf;
1279 int error;
1280
1281 vnd->sc_cred = crdup(cred);
1282 tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1283
1284 /* XXX: Horrible kludge to establish credentials for NFS */
1285 aiov.iov_base = tmpbuf;
1286 aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
1287 auio.uio_iov = &aiov;
1288 auio.uio_iovcnt = 1;
1289 auio.uio_offset = 0;
1290 auio.uio_rw = UIO_READ;
1291 auio.uio_segflg = UIO_SYSSPACE;
1292 auio.uio_resid = aiov.iov_len;
1293 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1294 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1295 if (error == 0) {
1296 /*
1297 * Because vnd does all IO directly through the vnode
1298 * we need to flush (at least) the buffer from the above
1299 * VOP_READ from the buffer cache to prevent cache
1300 * incoherencies. Also, be careful to write dirty
1301 * buffers back to stable storage.
1302 */
1303 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1304 curlwp, 0, 0);
1305 }
1306 VOP_UNLOCK(vnd->sc_vp, 0);
1307
1308 free(tmpbuf, M_TEMP);
1309 return (error);
1310 }
1311
1312 /*
1313 * Set maxactive based on FS type
1314 */
1315 static void
1316 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1317 {
1318 #ifdef NFS
1319 extern int (**nfsv2_vnodeop_p)(void *);
1320
1321 if (vp->v_op == nfsv2_vnodeop_p)
1322 vnd->sc_maxactive = 2;
1323 else
1324 #endif
1325 vnd->sc_maxactive = 8;
1326
1327 if (vnd->sc_maxactive < 1)
1328 vnd->sc_maxactive = 1;
1329 }
1330
1331 #if 0
1332 static void
1333 vndshutdown(void)
1334 {
1335 struct vnd_softc *vnd;
1336
1337 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1338 if (vnd->sc_flags & VNF_INITED)
1339 vndclear(vnd);
1340 }
1341 #endif
1342
1343 static void
1344 vndclear(struct vnd_softc *vnd, int myminor)
1345 {
1346 struct vnode *vp = vnd->sc_vp;
1347 struct lwp *l = curlwp;
1348 int fflags = FREAD;
1349 int bmaj, cmaj, i, mn;
1350 int s;
1351
1352 #ifdef DEBUG
1353 if (vnddebug & VDB_FOLLOW)
1354 printf("vndclear(%p): vp %p\n", vnd, vp);
1355 #endif
1356 /* locate the major numbers */
1357 bmaj = bdevsw_lookup_major(&vnd_bdevsw);
1358 cmaj = cdevsw_lookup_major(&vnd_cdevsw);
1359
1360 /* Nuke the vnodes for any open instances */
1361 for (i = 0; i < MAXPARTITIONS; i++) {
1362 mn = DISKMINOR(vnd->sc_dev.dv_unit, i);
1363 vdevgone(bmaj, mn, mn, VBLK);
1364 if (mn != myminor) /* XXX avoid killing our own vnode */
1365 vdevgone(cmaj, mn, mn, VCHR);
1366 }
1367
1368 if ((vnd->sc_flags & VNF_READONLY) == 0)
1369 fflags |= FWRITE;
1370
1371 s = splbio();
1372 bufq_drain(vnd->sc_tab);
1373 splx(s);
1374
1375 vnd->sc_flags |= VNF_VUNCONF;
1376 wakeup(&vnd->sc_tab);
1377 while (vnd->sc_flags & VNF_KTHREAD)
1378 tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1379
1380 #ifdef VND_COMPRESSION
1381 /* free the compressed file buffers */
1382 if(vnd->sc_flags & VNF_COMP) {
1383 if(vnd->sc_comp_offsets) {
1384 free(vnd->sc_comp_offsets, M_DEVBUF);
1385 vnd->sc_comp_offsets = NULL;
1386 }
1387 if(vnd->sc_comp_buff) {
1388 free(vnd->sc_comp_buff, M_DEVBUF);
1389 vnd->sc_comp_buff = NULL;
1390 }
1391 if(vnd->sc_comp_decombuf) {
1392 free(vnd->sc_comp_decombuf, M_DEVBUF);
1393 vnd->sc_comp_decombuf = NULL;
1394 }
1395 }
1396 #endif /* VND_COMPRESSION */
1397 vnd->sc_flags &=
1398 ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
1399 | VNF_VUNCONF | VNF_COMP);
1400 if (vp == (struct vnode *)0)
1401 panic("vndclear: null vp");
1402 (void) vn_close(vp, fflags, vnd->sc_cred, l);
1403 crfree(vnd->sc_cred);
1404 vnd->sc_vp = (struct vnode *)0;
1405 vnd->sc_cred = (struct ucred *)0;
1406 vnd->sc_size = 0;
1407 }
1408
1409 static int
1410 vndsize(dev_t dev)
1411 {
1412 struct vnd_softc *sc;
1413 struct disklabel *lp;
1414 int part, unit, omask;
1415 int size;
1416
1417 unit = vndunit(dev);
1418 sc = (struct vnd_softc *)device_lookup(&vnd_cd, unit);
1419 if (sc == NULL)
1420 return -1;
1421
1422 if ((sc->sc_flags & VNF_INITED) == 0)
1423 return (-1);
1424
1425 part = DISKPART(dev);
1426 omask = sc->sc_dkdev.dk_openmask & (1 << part);
1427 lp = sc->sc_dkdev.dk_label;
1428
1429 if (omask == 0 && vndopen(dev, 0, S_IFBLK, curlwp)) /* XXX */
1430 return (-1);
1431
1432 if (lp->d_partitions[part].p_fstype != FS_SWAP)
1433 size = -1;
1434 else
1435 size = lp->d_partitions[part].p_size *
1436 (lp->d_secsize / DEV_BSIZE);
1437
1438 if (omask == 0 && vndclose(dev, 0, S_IFBLK, curlwp)) /* XXX */
1439 return (-1);
1440
1441 return (size);
1442 }
1443
1444 static int
1445 vnddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1446 {
1447
1448 /* Not implemented. */
1449 return ENXIO;
1450 }
1451
1452 static void
1453 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
1454 {
1455 struct vndgeom *vng = &sc->sc_geom;
1456 struct partition *pp;
1457
1458 memset(lp, 0, sizeof(*lp));
1459
1460 lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1461 lp->d_secsize = vng->vng_secsize;
1462 lp->d_nsectors = vng->vng_nsectors;
1463 lp->d_ntracks = vng->vng_ntracks;
1464 lp->d_ncylinders = vng->vng_ncylinders;
1465 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1466
1467 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1468 lp->d_type = DTYPE_VND;
1469 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1470 lp->d_rpm = 3600;
1471 lp->d_interleave = 1;
1472 lp->d_flags = 0;
1473
1474 pp = &lp->d_partitions[RAW_PART];
1475 pp->p_offset = 0;
1476 pp->p_size = lp->d_secperunit;
1477 pp->p_fstype = FS_UNUSED;
1478 lp->d_npartitions = RAW_PART + 1;
1479
1480 lp->d_magic = DISKMAGIC;
1481 lp->d_magic2 = DISKMAGIC;
1482 lp->d_checksum = dkcksum(lp);
1483 }
1484
1485 /*
1486 * Read the disklabel from a vnd. If one is not present, create a fake one.
1487 */
1488 static void
1489 vndgetdisklabel(dev_t dev, struct vnd_softc *sc)
1490 {
1491 const char *errstring;
1492 struct disklabel *lp = sc->sc_dkdev.dk_label;
1493 struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1494 int i;
1495
1496 memset(clp, 0, sizeof(*clp));
1497
1498 vndgetdefaultlabel(sc, lp);
1499
1500 /*
1501 * Call the generic disklabel extraction routine.
1502 */
1503 errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1504 if (errstring) {
1505 /*
1506 * Lack of disklabel is common, but we print the warning
1507 * anyway, since it might contain other useful information.
1508 */
1509 printf("%s: %s\n", sc->sc_dev.dv_xname, errstring);
1510
1511 /*
1512 * For historical reasons, if there's no disklabel
1513 * present, all partitions must be FS_BSDFFS and
1514 * occupy the entire disk.
1515 */
1516 for (i = 0; i < MAXPARTITIONS; i++) {
1517 /*
1518 * Don't wipe out a port-specific hack (such as
1519 * the DOS partition hack of the i386 port).
1520 */
1521 if (lp->d_partitions[i].p_size != 0)
1522 continue;
1523
1524 lp->d_partitions[i].p_size = lp->d_secperunit;
1525 lp->d_partitions[i].p_offset = 0;
1526 lp->d_partitions[i].p_fstype = FS_BSDFFS;
1527 }
1528
1529 strncpy(lp->d_packname, "default label",
1530 sizeof(lp->d_packname));
1531
1532 lp->d_npartitions = MAXPARTITIONS;
1533 lp->d_checksum = dkcksum(lp);
1534 }
1535
1536 /* In-core label now valid. */
1537 sc->sc_flags |= VNF_VLABEL;
1538 }
1539
1540 /*
1541 * Wait interruptibly for an exclusive lock.
1542 *
1543 * XXX
1544 * Several drivers do this; it should be abstracted and made MP-safe.
1545 */
1546 static int
1547 vndlock(struct vnd_softc *sc)
1548 {
1549 int error;
1550
1551 while ((sc->sc_flags & VNF_LOCKED) != 0) {
1552 sc->sc_flags |= VNF_WANTED;
1553 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1554 return (error);
1555 }
1556 sc->sc_flags |= VNF_LOCKED;
1557 return (0);
1558 }
1559
1560 /*
1561 * Unlock and wake up any waiters.
1562 */
1563 static void
1564 vndunlock(struct vnd_softc *sc)
1565 {
1566
1567 sc->sc_flags &= ~VNF_LOCKED;
1568 if ((sc->sc_flags & VNF_WANTED) != 0) {
1569 sc->sc_flags &= ~VNF_WANTED;
1570 wakeup(sc);
1571 }
1572 }
1573
1574 #ifdef VND_COMPRESSION
1575 /* compressed file read */
1576 static void
1577 compstrategy(struct buf *bp, off_t bn)
1578 {
1579 int error;
1580 int unit = vndunit(bp->b_dev);
1581 struct vnd_softc *vnd =
1582 (struct vnd_softc *)device_lookup(&vnd_cd, unit);
1583 u_int32_t comp_block;
1584 struct uio auio;
1585 caddr_t addr;
1586 int s;
1587
1588 /* set up constants for data move */
1589 auio.uio_rw = UIO_READ;
1590 auio.uio_segflg = UIO_SYSSPACE;
1591
1592 /* read, and transfer the data */
1593 addr = bp->b_data;
1594 s = splbio();
1595 while (bp->b_resid > 0) {
1596 unsigned length;
1597 size_t length_in_buffer;
1598 u_int32_t offset_in_buffer;
1599 struct iovec aiov;
1600
1601 /* calculate the compressed block number */
1602 comp_block = bn / (off_t)vnd->sc_comp_blksz;
1603
1604 /* check for good block number */
1605 if (comp_block >= vnd->sc_comp_numoffs) {
1606 bp->b_error = EINVAL;
1607 bp->b_flags |= B_ERROR;
1608 splx(s);
1609 return;
1610 }
1611
1612 /* read in the compressed block, if not in buffer */
1613 if (comp_block != vnd->sc_comp_buffblk) {
1614 length = vnd->sc_comp_offsets[comp_block + 1] -
1615 vnd->sc_comp_offsets[comp_block];
1616 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1617 error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
1618 length, vnd->sc_comp_offsets[comp_block],
1619 UIO_SYSSPACE, IO_UNIT, vnd->sc_cred, NULL, NULL);
1620 if (error) {
1621 bp->b_error = error;
1622 bp->b_flags |= B_ERROR;
1623 VOP_UNLOCK(vnd->sc_vp, 0);
1624 splx(s);
1625 return;
1626 }
1627 /* uncompress the buffer */
1628 vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
1629 vnd->sc_comp_stream.avail_in = length;
1630 vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
1631 vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
1632 inflateReset(&vnd->sc_comp_stream);
1633 error = inflate(&vnd->sc_comp_stream, Z_FINISH);
1634 if (error != Z_STREAM_END) {
1635 if (vnd->sc_comp_stream.msg)
1636 printf("%s: compressed file, %s\n",
1637 vnd->sc_dev.dv_xname,
1638 vnd->sc_comp_stream.msg);
1639 bp->b_error = EBADMSG;
1640 bp->b_flags |= B_ERROR;
1641 VOP_UNLOCK(vnd->sc_vp, 0);
1642 splx(s);
1643 return;
1644 }
1645 vnd->sc_comp_buffblk = comp_block;
1646 VOP_UNLOCK(vnd->sc_vp, 0);
1647 }
1648
1649 /* transfer the usable uncompressed data */
1650 offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
1651 length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
1652 if (length_in_buffer > bp->b_resid)
1653 length_in_buffer = bp->b_resid;
1654 auio.uio_iov = &aiov;
1655 auio.uio_iovcnt = 1;
1656 aiov.iov_base = addr;
1657 aiov.iov_len = length_in_buffer;
1658 auio.uio_resid = aiov.iov_len;
1659 auio.uio_offset = 0;
1660 error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
1661 length_in_buffer, &auio);
1662 if (error) {
1663 bp->b_error = error;
1664 bp->b_flags |= B_ERROR;
1665 splx(s);
1666 return;
1667 }
1668
1669 bn += length_in_buffer;
1670 addr += length_in_buffer;
1671 bp->b_resid -= length_in_buffer;
1672 }
1673 splx(s);
1674 }
1675
1676 /* compression memory allocation routines */
1677 static void *
1678 vnd_alloc(void *aux, u_int items, u_int siz)
1679 {
1680 return malloc(items * siz, M_TEMP, M_NOWAIT);
1681 }
1682
1683 static void
1684 vnd_free(void *aux, void *ptr)
1685 {
1686 free(ptr, M_TEMP);
1687 }
1688 #endif /* VND_COMPRESSION */
1689