vnd.c revision 1.210 1 /* $NetBSD: vnd.c,v 1.210 2010/06/24 21:20:23 riz Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1990, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * from: Utah $Hdr: vn.c 1.13 94/04/02$
65 *
66 * @(#)vn.c 8.9 (Berkeley) 5/14/95
67 */
68
69 /*
70 * Copyright (c) 1988 University of Utah.
71 *
72 * This code is derived from software contributed to Berkeley by
73 * the Systems Programming Group of the University of Utah Computer
74 * Science Department.
75 *
76 * Redistribution and use in source and binary forms, with or without
77 * modification, are permitted provided that the following conditions
78 * are met:
79 * 1. Redistributions of source code must retain the above copyright
80 * notice, this list of conditions and the following disclaimer.
81 * 2. Redistributions in binary form must reproduce the above copyright
82 * notice, this list of conditions and the following disclaimer in the
83 * documentation and/or other materials provided with the distribution.
84 * 3. All advertising materials mentioning features or use of this software
85 * must display the following acknowledgement:
86 * This product includes software developed by the University of
87 * California, Berkeley and its contributors.
88 * 4. Neither the name of the University nor the names of its contributors
89 * may be used to endorse or promote products derived from this software
90 * without specific prior written permission.
91 *
92 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
95 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
96 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
102 * SUCH DAMAGE.
103 *
104 * from: Utah $Hdr: vn.c 1.13 94/04/02$
105 *
106 * @(#)vn.c 8.9 (Berkeley) 5/14/95
107 */
108
109 /*
110 * Vnode disk driver.
111 *
112 * Block/character interface to a vnode. Allows one to treat a file
113 * as a disk (e.g. build a filesystem in it, mount it, etc.).
114 *
115 * NOTE 1: If the vnode supports the VOP_BMAP and VOP_STRATEGY operations,
116 * this uses them to avoid distorting the local buffer cache. If those
117 * block-level operations are not available, this falls back to the regular
118 * read and write calls. Using these may distort the cache in some cases
 * but it is better to have the driver working than to stop it working on file
120 * systems where the block-level operations are not implemented for
121 * whatever reason.
122 *
123 * NOTE 2: There is a security issue involved with this driver.
124 * Once mounted all access to the contents of the "mapped" file via
125 * the special file is controlled by the permissions on the special
126 * file, the protection of the mapped file is ignored (effectively,
127 * by using root credentials in all transactions).
128 *
129 * NOTE 3: Doesn't interact with leases, should it?
130 */
131
132 #include <sys/cdefs.h>
133 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.210 2010/06/24 21:20:23 riz Exp $");
134
135 #if defined(_KERNEL_OPT)
136 #include "opt_vnd.h"
137 #endif
138
139 #include <sys/param.h>
140 #include <sys/systm.h>
141 #include <sys/namei.h>
142 #include <sys/proc.h>
143 #include <sys/kthread.h>
144 #include <sys/errno.h>
145 #include <sys/buf.h>
146 #include <sys/bufq.h>
147 #include <sys/malloc.h>
148 #include <sys/ioctl.h>
149 #include <sys/disklabel.h>
150 #include <sys/device.h>
151 #include <sys/disk.h>
152 #include <sys/stat.h>
153 #include <sys/mount.h>
154 #include <sys/vnode.h>
155 #include <sys/file.h>
156 #include <sys/uio.h>
157 #include <sys/conf.h>
158 #include <sys/kauth.h>
159
160 #include <net/zlib.h>
161
162 #include <miscfs/genfs/genfs.h>
163 #include <miscfs/specfs/specdev.h>
164
165 #include <dev/dkvar.h>
166 #include <dev/vndvar.h>
167
168 #include <prop/proplib.h>
169
170 #if defined(VNDDEBUG) && !defined(DEBUG)
171 #define DEBUG
172 #endif
173
174 #ifdef DEBUG
175 int dovndcluster = 1;
176 #define VDB_FOLLOW 0x01
177 #define VDB_INIT 0x02
178 #define VDB_IO 0x04
179 #define VDB_LABEL 0x08
180 int vnddebug = 0x00;
181 #endif
182
/* Extract the unit number from a vnd device number. */
#define vndunit(x)	DISKUNIT(x)

/*
 * Per-transfer context: the buf handed to the backing vnode plus a
 * back-pointer to the owning softc.  vx_buf must remain the first
 * member so VND_BUFTOXFER() can recover the vndxfer from a buf pointer.
 */
struct vndxfer {
	struct buf vx_buf;
	struct vnd_softc *vx_vnd;
};
#define	VND_BUFTOXFER(bp)	((struct vndxfer *)(void *)bp)

/* Get/put a vndxfer from the per-unit pool (PR_WAITOK: may sleep). */
#define VND_GETXFER(vnd)	pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
#define VND_PUTXFER(vnd, vx)	pool_put(&(vnd)->sc_vxpool, (vx))

/* Device number of the raw partition, used for disklabel I/O. */
#define	VNDLABELDEV(dev) \
    (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
196
197 /* called by main() at boot time */
198 void vndattach(int);
199
200 static void vndclear(struct vnd_softc *, int);
201 static int vnddoclear(struct vnd_softc *, int, int, bool);
202 static int vndsetcred(struct vnd_softc *, kauth_cred_t);
203 static void vndthrottle(struct vnd_softc *, struct vnode *);
204 static void vndiodone(struct buf *);
205 #if 0
206 static void vndshutdown(void);
207 #endif
208
209 static void vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
210 static void vndgetdisklabel(dev_t, struct vnd_softc *);
211
212 static int vndlock(struct vnd_softc *);
213 static void vndunlock(struct vnd_softc *);
214 #ifdef VND_COMPRESSION
215 static void compstrategy(struct buf *, off_t);
216 static void *vnd_alloc(void *, u_int, u_int);
217 static void vnd_free(void *, void *);
218 #endif /* VND_COMPRESSION */
219
220 static void vndthread(void *);
221 static bool vnode_has_op(const struct vnode *, int);
222 static void handle_with_rdwr(struct vnd_softc *, const struct buf *,
223 struct buf *);
224 static void handle_with_strategy(struct vnd_softc *, const struct buf *,
225 struct buf *);
226 static void vnd_set_properties(struct vnd_softc *);
227
228 static dev_type_open(vndopen);
229 static dev_type_close(vndclose);
230 static dev_type_read(vndread);
231 static dev_type_write(vndwrite);
232 static dev_type_ioctl(vndioctl);
233 static dev_type_strategy(vndstrategy);
234 static dev_type_dump(vnddump);
235 static dev_type_size(vndsize);
236
/* Block-device entry points (disk-class device). */
const struct bdevsw vnd_bdevsw = {
	vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
};

/* Character-device entry points; raw reads/writes go through physio(). */
const struct cdevsw vnd_cdevsw = {
	vndopen, vndclose, vndread, vndwrite, vndioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static int	vnd_match(device_t, cfdata_t, void *);
static void	vnd_attach(device_t, device_t, void *);
static int	vnd_detach(device_t, int);

CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
    vnd_match, vnd_attach, vnd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver vnd_cd;

static struct vnd_softc	*vnd_spawn(int);
int	vnd_destroy(device_t);

/* Disk driver glue: our strategy routine and the transfer-size clamp. */
static struct dkdriver vnddkdriver = { vndstrategy, minphys };
258
259 void
260 vndattach(int num)
261 {
262 int error;
263
264 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
265 if (error)
266 aprint_error("%s: unable to register cfattach\n",
267 vnd_cd.cd_name);
268 }
269
/*
 * Autoconf match: vnd is a pseudo-device, so any probe succeeds.
 */
static int
vnd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
276
/*
 * Autoconf attach: initialize the softc, request queue and disk
 * structure for a new vnd instance.  The unit starts unconfigured;
 * a later VNDIOCSET ioctl binds it to a backing vnode.
 */
static void
vnd_attach(device_t parent, device_t self, void *aux)
{
	struct vnd_softc *sc = device_private(self);

	sc->sc_dev = self;
	/* No compressed-image state until VNDIOCSET sets one up. */
	sc->sc_comp_offsets = NULL;
	sc->sc_comp_buff = NULL;
	sc->sc_comp_decombuf = NULL;
	bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
	disk_init(&sc->sc_dkdev, device_xname(self), &vnddkdriver);
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
291
/*
 * Autoconf detach: unconfigure the unit first if needed (honoring
 * DETACH_FORCE), then release the request queue, disk structure and
 * power-management hook.
 */
static int
vnd_detach(device_t self, int flags)
{
	int error;
	struct vnd_softc *sc = device_private(self);

	/* A configured unit must be cleared before it can go away. */
	if (sc->sc_flags & VNF_INITED) {
		error = vnddoclear(sc, 0, -1, (flags & DETACH_FORCE) != 0);
		if (error != 0)
			return error;
	}

	pmf_device_deregister(self);
	bufq_free(sc->sc_tab);
	disk_destroy(&sc->sc_dkdev);

	return 0;
}
310
311 static struct vnd_softc *
312 vnd_spawn(int unit)
313 {
314 cfdata_t cf;
315
316 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
317 cf->cf_name = vnd_cd.cd_name;
318 cf->cf_atname = vnd_cd.cd_name;
319 cf->cf_unit = unit;
320 cf->cf_fstate = FSTATE_STAR;
321
322 return device_private(config_attach_pseudo(cf));
323 }
324
325 int
326 vnd_destroy(device_t dev)
327 {
328 int error;
329 cfdata_t cf;
330
331 cf = device_cfdata(dev);
332 error = config_detach(dev, DETACH_QUIET);
333 if (error)
334 return error;
335 free(cf, M_DEVBUF);
336 return 0;
337 }
338
/*
 * Open a vnd unit, auto-creating the device instance on first use.
 * Reads the in-core disklabel on the first open of a configured unit
 * and records the partition in the open masks so the unit cannot be
 * unconfigured while in use.
 */
static int
vndopen(dev_t dev, int flags, int mode, struct lwp *l)
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;
	struct disklabel *lp;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
#endif
	sc = device_lookup_private(&vnd_cd, unit);
	if (sc == NULL) {
		/* First reference to this unit: spawn it now. */
		sc = vnd_spawn(unit);
		if (sc == NULL)
			return ENOMEM;
	}

	if ((error = vndlock(sc)) != 0)
		return error;

	if ((sc->sc_flags & VNF_CLEARING) != 0) {
		/* Unit is being torn down by vnddoclear(); refuse. */
		error = ENXIO;
		goto done;
	}

	lp = sc->sc_dkdev.dk_label;

	part = DISKPART(dev);
	pmask = (1 << part);

	/*
	 * If we're initialized, check to see if there are any other
	 * open partitions.  If not, then it's safe to update the
	 * in-core disklabel.  Only read the disklabel if it is
	 * not already valid.
	 */
	if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
	    sc->sc_dkdev.dk_openmask == 0)
		vndgetdisklabel(dev, sc);

	/* Check that the partition exists (raw partition always does). */
	if (part != RAW_PART) {
		if (((sc->sc_flags & VNF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
		     (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto done;
		}
	}

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

 done:
	vndunlock(sc);
	return error;
}
408
/*
 * Close a partition of a vnd unit: clear its bit from the open masks.
 * If the unit was never configured (VNF_INITED clear), the on-demand
 * instance created by vndopen() is destroyed again.
 */
static int
vndclose(dev_t dev, int flags, int mode, struct lwp *l)
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclose(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
#endif
	sc = device_lookup_private(&vnd_cd, unit);
	if (sc == NULL)
		return ENXIO;

	if ((error = vndlock(sc)) != 0)
		return error;

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);

	/* Never-configured units exist only while open; reap them now. */
	if ((sc->sc_flags & VNF_INITED) == 0) {
		if ((error = vnd_destroy(sc->sc_dev)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}

	return 0;
}
454
/*
 * Queue the request, and wakeup the kernel thread to handle it.
 *
 * Runs at splbio().  All sanity and bounds checking happens here;
 * requests that pass are translated to absolute device blocks, queued
 * on sc_tab and served asynchronously by vndthread().
 */
static void
vndstrategy(struct buf *bp)
{
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd =
	    device_lookup_private(&vnd_cd, unit);
	struct disklabel *lp;
	daddr_t blkno;
	int s = splbio();

	if (vnd == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	lp = vnd->sc_dkdev.dk_label;

	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		goto done;
	}

	/*
	 * The transfer must be a whole number of blocks.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/*
	 * check if we're read-only.
	 */
	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
		bp->b_error = EACCES;
		goto done;
	}

	/* If it's a nil transfer, wake up the top half now. */
	if (bp->b_bcount == 0) {
		goto done;
	}

	/*
	 * Do bounds checking and adjust transfer.  If there's an error,
	 * the bounds check will flag that for us.
	 */
	if (DISKPART(bp->b_dev) == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    vnd->sc_size) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&vnd->sc_dkdev,
		    bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
			goto done;
	}

	/*
	 * Put the block number in terms of the logical blocksize
	 * of the "device".
	 */

	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);

	/*
	 * Translate the partition-relative block number to an absolute.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART) {
		struct partition *pp;

		pp = &vnd->sc_dkdev.dk_label->d_partitions[
		    DISKPART(bp->b_dev)];
		blkno += pp->p_offset;
	}
	bp->b_rawblkno = blkno;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	/* Hand the request to vndthread() and poke it awake. */
	bufq_put(vnd->sc_tab, bp);
	wakeup(&vnd->sc_tab);
	splx(s);
	return;

 done:
	/* Terminate the request here; nothing was transferred. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	splx(s);
}
547
548 static bool
549 vnode_has_strategy(struct vnd_softc *vnd)
550 {
551 return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
552 vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
553 }
554
/* XXX this function needs a reliable check to detect
 * sparse files.  Otherwise, bmap/strategy may be used
 * and fail on non-allocated blocks.  VOP_READ/VOP_WRITE
 * works on sparse files.
 */
#if notyet
/*
 * Probe whether bmap/strategy actually works on the backing vnode by
 * translating logical block 0; a hole (nbn == -1) means the fast path
 * cannot be used.  Compiled out until the sparse-file issue above is
 * addressed.
 */
static bool
vnode_strategy_probe(struct vnd_softc *vnd)
{
	int error;
	daddr_t nbn;

	if (!vnode_has_strategy(vnd))
		return false;

	/* Convert the first logical block number to its
	 * physical block number.
	 */
	error = 0;
	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
	VOP_UNLOCK(vnd->sc_vp);

	/* Test if that worked. */
	if (error == 0 && (long)nbn == -1)
		return false;

	return true;
}
#endif
585
/*
 * Kernel thread serving queued requests for one vnd unit.
 *
 * Decides once at startup whether the backing vnode supports
 * VOP_BMAP/VOP_STRATEGY; otherwise all I/O goes through
 * VOP_READ/VOP_WRITE.  Loops at splbio() pulling bufs off sc_tab until
 * VNF_VUNCONF is set during unconfiguration.
 */
static void
vndthread(void *arg)
{
	struct vnd_softc *vnd = arg;
	bool usestrategy;
	int s;

	/* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
	 * directly access the backing vnode.  If we can, use these two
	 * operations to avoid messing with the local buffer cache.
	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
	 * which are guaranteed to work with any file system. */
	usestrategy = vnode_has_strategy(vnd);

#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
		    usestrategy ?
		    "using bmap/strategy operations" :
		    "using read/write operations");
#endif

	s = splbio();
	vnd->sc_flags |= VNF_KTHREAD;
	/* Announce that the thread is up and running. */
	wakeup(&vnd->sc_kthread);

	/*
	 * Dequeue requests and serve them depending on the available
	 * vnode operations.
	 */
	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
		struct vndxfer *vnx;
		int flags;
		struct buf *obp;
		struct buf *bp;

		obp = bufq_get(vnd->sc_tab);
		if (obp == NULL) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
			continue;
		};
		splx(s);
		flags = obp->b_flags;
#ifdef DEBUG
		if (vnddebug & VDB_FOLLOW)
			printf("vndthread(%p)\n", obp);
#endif

		if (vnd->sc_vp->v_mount == NULL) {
			/* Backing file system was forcibly unmounted. */
			obp->b_error = ENXIO;
			goto done;
		}
#ifdef VND_COMPRESSION
		/* handle a compressed read */
		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
			off_t bn;

			/* Convert to a byte offset within the file. */
			bn = obp->b_rawblkno *
			    vnd->sc_dkdev.dk_label->d_secsize;

			compstrategy(obp, bn);
			goto done;
		}
#endif /* VND_COMPRESSION */

		/*
		 * Allocate a header for this transfer and link it to the
		 * buffer
		 */
		s = splbio();
		vnx = VND_GETXFER(vnd);
		splx(s);
		vnx->vx_vnd = vnd;

		/* Throttle: cap in-flight transfers at sc_maxactive;
		 * vndiodone() wakes us when one completes. */
		s = splbio();
		while (vnd->sc_active >= vnd->sc_maxactive) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
		}
		vnd->sc_active++;
		splx(s);

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

		/* Clone the request into vnx's embedded buf; vndiodone()
		 * later completes the original via bp->b_private. */
		bp = &vnx->vx_buf;
		buf_init(bp);
		bp->b_flags = (obp->b_flags & B_READ);
		bp->b_oflags = obp->b_oflags;
		bp->b_cflags = obp->b_cflags;
		bp->b_iodone = vndiodone;
		bp->b_private = obp;
		bp->b_vp = vnd->sc_vp;
		bp->b_objlock = &bp->b_vp->v_interlock;
		bp->b_data = obp->b_data;
		bp->b_bcount = obp->b_bcount;
		BIO_COPYPRIO(bp, obp);

		/* Handle the request using the appropriate operations. */
		if (usestrategy)
			handle_with_strategy(vnd, obp, bp);
		else
			handle_with_rdwr(vnd, obp, bp);

		s = splbio();
		continue;

done:
		biodone(obp);
		s = splbio();
	}

	/*
	 * NOTE(review): (~VNF_KTHREAD | VNF_VUNCONF) clears only
	 * VNF_KTHREAD and leaves VNF_VUNCONF set; if the intent was to
	 * clear both flags this would need to be
	 * ~(VNF_KTHREAD | VNF_VUNCONF) -- confirm against vndclear()'s
	 * wait logic before changing.
	 */
	vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
	wakeup(&vnd->sc_kthread);
	splx(s);
	kthread_exit(0);
}
703
704 /*
705 * Checks if the given vnode supports the requested operation.
706 * The operation is specified the offset returned by VOFFSET.
707 *
708 * XXX The test below used to determine this is quite fragile
709 * because it relies on the file system to use genfs to specify
710 * unimplemented operations. There might be another way to do
711 * it more cleanly.
712 */
713 static bool
714 vnode_has_op(const struct vnode *vp, int opoffset)
715 {
716 int (*defaultp)(void *);
717 int (*opp)(void *);
718
719 defaultp = vp->v_op[VOFFSET(vop_default)];
720 opp = vp->v_op[opoffset];
721
722 return opp != defaultp && opp != genfs_eopnotsupp &&
723 opp != genfs_badop && opp != genfs_nullop;
724 }
725
/*
 * Handles the read/write request given in 'bp' using the vnode's
 * VOP_READ and VOP_WRITE operations (via vn_rdwr).
 *
 * 'obp' is a pointer to the original request fed to the vnd device.
 */
static void
handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
{
	bool doread;
	off_t offset;
	size_t resid;
	struct vnode *vp;

	doread = bp->b_flags & B_READ;
	/* Byte offset of the request within the backing file. */
	offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
	vp = vnd->sc_vp;

#if defined(DEBUG)
	if (vnddebug & VDB_IO)
		printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
		    ", secsize %d, offset %" PRIu64
		    ", bcount %d\n",
		    vp, doread ? "read" : "write", obp->b_rawblkno,
		    vnd->sc_dkdev.dk_label->d_secsize, offset,
		    bp->b_bcount);
#endif

	/* Issue the read or write operation. */
	bp->b_error =
	    vn_rdwr(doread ? UIO_READ : UIO_WRITE,
	    vp, bp->b_data, bp->b_bcount, offset,
	    UIO_SYSSPACE, 0, vnd->sc_cred, &resid, NULL);
	bp->b_resid = resid;

	/* We need to increase the number of outputs on the vnode if
	 * there was any write to it. */
	if (!doread) {
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(&vp->v_interlock);
	}

	biodone(bp);
}
771
/*
 * Handles the read/write request given in 'bp' using the vnode's
 * VOP_BMAP and VOP_STRATEGY operations.
 *
 * 'obp' is a pointer to the original request fed to the vnd device.
 */
static void
handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
    struct buf *bp)
{
	int bsize, error, flags, skipped;
	size_t resid, sz;
	off_t bn, offset;
	struct vnode *vp;

	flags = obp->b_flags;

	/* Writes must bump v_numoutput before the I/O is fired off. */
	if (!(flags & B_READ)) {
		vp = bp->b_vp;
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(&vp->v_interlock);
	}

	/* convert to a byte offset within the file. */
	bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;

	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	skipped = 0;

	/*
	 * Break the request into bsize pieces and feed them
	 * sequentially using VOP_BMAP/VOP_STRATEGY.
	 * We do it this way to keep from flooding NFS servers if we
	 * are connected to an NFS file.  This places the burden on
	 * the client rather than the server.
	 */
	error = 0;
	bp->b_resid = bp->b_bcount;
	for (offset = 0, resid = bp->b_resid; resid;
	    resid -= sz, offset += sz) {
		struct buf *nbp;
		daddr_t nbn;
		int off, nra;

		nra = 0;
		vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);

		/* nbn == -1 from VOP_BMAP means a hole in the file. */
		if (error == 0 && (long)nbn == -1)
			error = EIO;

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we may have to wait for any operations
		 * that we have already fired off before releasing
		 * the buffer.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			skipped += resid;
			break;
		}

#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		/* nra extra contiguous blocks let us cluster this chunk. */
		off = bn % bsize;
		sz = MIN(((off_t)1 + nra) * bsize - off, resid);
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
			    " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
			    nbn, sz);
#endif

		/* Child buf completes its slice of bp via nestiobuf. */
		nbp = getiobuf(vp, true);
		nestiobuf_setup(bp, nbp, offset, sz);
		nbp->b_blkno = nbn + btodb(off);

#if 0 /* XXX #ifdef DEBUG */
		if (vnddebug & VDB_IO)
			printf("vndstart(%ld): bp %p vp %p blkno "
			    "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
			    (long) (vnd-vnd_softc), &nbp->vb_buf,
			    nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
			    nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
			    nbp->vb_buf.b_bcount);
#endif
		VOP_STRATEGY(vp, nbp);
		bn += sz;
	}
	/* Account for any skipped tail and finish the master buf. */
	nestiobuf_done(bp, skipped, error);
}
871
/*
 * Completion handler for bufs issued by vndthread(): copy error and
 * residual count back to the original request, release the transfer
 * header, and wake anyone throttled on sc_active.
 */
static void
vndiodone(struct buf *bp)
{
	struct vndxfer *vnx = VND_BUFTOXFER(bp);
	struct vnd_softc *vnd = vnx->vx_vnd;
	struct buf *obp = bp->b_private;
	int s = splbio();

	KASSERT(&vnx->vx_buf == bp);
	KASSERT(vnd->sc_active > 0);
#ifdef DEBUG
	if (vnddebug & VDB_IO) {
		printf("vndiodone1: bp %p iodone: error %d\n",
		    bp, bp->b_error);
	}
#endif
	disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	vnd->sc_active--;
	if (vnd->sc_active == 0) {
		/* Last in-flight transfer: wake vndthread()'s throttle. */
		wakeup(&vnd->sc_tab);
	}
	splx(s);
	obp->b_error = bp->b_error;
	obp->b_resid = bp->b_resid;
	buf_destroy(bp);
	VND_PUTXFER(vnd, vnx);
	biodone(obp);
}
901
902 /* ARGSUSED */
903 static int
904 vndread(dev_t dev, struct uio *uio, int flags)
905 {
906 int unit = vndunit(dev);
907 struct vnd_softc *sc;
908
909 #ifdef DEBUG
910 if (vnddebug & VDB_FOLLOW)
911 printf("vndread(0x%"PRIx64", %p)\n", dev, uio);
912 #endif
913
914 sc = device_lookup_private(&vnd_cd, unit);
915 if (sc == NULL)
916 return ENXIO;
917
918 if ((sc->sc_flags & VNF_INITED) == 0)
919 return ENXIO;
920
921 return physio(vndstrategy, NULL, dev, B_READ, minphys, uio);
922 }
923
924 /* ARGSUSED */
925 static int
926 vndwrite(dev_t dev, struct uio *uio, int flags)
927 {
928 int unit = vndunit(dev);
929 struct vnd_softc *sc;
930
931 #ifdef DEBUG
932 if (vnddebug & VDB_FOLLOW)
933 printf("vndwrite(0x%"PRIx64", %p)\n", dev, uio);
934 #endif
935
936 sc = device_lookup_private(&vnd_cd, unit);
937 if (sc == NULL)
938 return ENXIO;
939
940 if ((sc->sc_flags & VNF_INITED) == 0)
941 return ENXIO;
942
943 return physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio);
944 }
945
/*
 * Helper for the VNDIOCGET ioctl: resolve the target unit and return
 * the attributes of its backing vnode via VOP_GETATTR.
 *
 * *un == -1 selects 'unit' (the device the ioctl arrived on).  Returns
 * EINVAL for a negative unit, ENXIO past the end of the unit table, a
 * VOP_GETATTR error, or -1 (not an errno) to tell the caller the unit
 * exists but is not configured.
 */
static int
vnd_cget(struct lwp *l, int unit, int *un, struct vattr *va)
{
	struct vnd_softc *vnd;

	if (*un == -1)
		*un = unit;
	if (*un < 0)
		return EINVAL;

	vnd = device_lookup_private(&vnd_cd, *un);
	if (vnd == NULL)
		return (*un >= vnd_cd.cd_ndevs) ? ENXIO : -1;

	if ((vnd->sc_flags & VNF_INITED) == 0)
		return -1;

	return VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
}
965
/*
 * Unconfigure a vnd unit: detach it from its backing vnode and tear
 * down the disk state.  'pmask' is the partition mask being cleared
 * and 'force' (DETACH_FORCE) overrides the busy check.
 */
static int
vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
{
	int error;

	if ((error = vndlock(vnd)) != 0)
		return error;

	/*
	 * Don't unconfigure if any other partitions are open
	 * or if both the character and block flavors of this
	 * partition are open.
	 */
	if (DK_BUSY(vnd, pmask) && !force) {
		vndunlock(vnd);
		return EBUSY;
	}

	/*
	 * XXX vndclear() might call vndclose() implicitly;
	 * release lock to avoid recursion
	 *
	 * Set VNF_CLEARING to prevent vndopen() from
	 * sneaking in after we vndunlock().
	 */
	vnd->sc_flags |= VNF_CLEARING;
	vndunlock(vnd);
	vndclear(vnd, minor);
#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndioctl: CLRed\n");
#endif

	/* Destroy the xfer and buffer pools. */
	pool_destroy(&vnd->sc_vxpool);

	/* Detach the disk. */
	disk_detach(&vnd->sc_dkdev);

	return 0;
}
1007
1008 /* ARGSUSED */
1009 static int
1010 vndioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1011 {
1012 bool force;
1013 int unit = vndunit(dev);
1014 struct vnd_softc *vnd;
1015 struct vnd_ioctl *vio;
1016 struct vattr vattr;
1017 struct nameidata nd;
1018 int error, part, pmask;
1019 size_t geomsize;
1020 int fflags;
1021 #ifdef __HAVE_OLD_DISKLABEL
1022 struct disklabel newlabel;
1023 #endif
1024 struct dkwedge_info *dkw;
1025 struct dkwedge_list *dkwl;
1026
1027 #ifdef DEBUG
1028 if (vnddebug & VDB_FOLLOW)
1029 printf("vndioctl(0x%"PRIx64", 0x%lx, %p, 0x%x, %p): unit %d\n",
1030 dev, cmd, data, flag, l->l_proc, unit);
1031 #endif
1032 vnd = device_lookup_private(&vnd_cd, unit);
1033 if (vnd == NULL &&
1034 #ifdef COMPAT_30
1035 cmd != VNDIOOCGET &&
1036 #endif
1037 cmd != VNDIOCGET)
1038 return ENXIO;
1039 vio = (struct vnd_ioctl *)data;
1040
1041 /* Must be open for writes for these commands... */
1042 switch (cmd) {
1043 case VNDIOCSET:
1044 case VNDIOCCLR:
1045 #ifdef VNDIOOCSET
1046 case VNDIOOCSET:
1047 case VNDIOOCCLR:
1048 #endif
1049 case DIOCSDINFO:
1050 case DIOCWDINFO:
1051 #ifdef __HAVE_OLD_DISKLABEL
1052 case ODIOCSDINFO:
1053 case ODIOCWDINFO:
1054 #endif
1055 case DIOCKLABEL:
1056 case DIOCWLABEL:
1057 if ((flag & FWRITE) == 0)
1058 return EBADF;
1059 }
1060
1061 /* Must be initialized for these... */
1062 switch (cmd) {
1063 case VNDIOCCLR:
1064 #ifdef VNDIOOCCLR
1065 case VNDIOOCCLR:
1066 #endif
1067 case DIOCGDINFO:
1068 case DIOCSDINFO:
1069 case DIOCWDINFO:
1070 case DIOCGPART:
1071 case DIOCKLABEL:
1072 case DIOCWLABEL:
1073 case DIOCGDEFLABEL:
1074 case DIOCCACHESYNC:
1075 #ifdef __HAVE_OLD_DISKLABEL
1076 case ODIOCGDINFO:
1077 case ODIOCSDINFO:
1078 case ODIOCWDINFO:
1079 case ODIOCGDEFLABEL:
1080 #endif
1081 if ((vnd->sc_flags & VNF_INITED) == 0)
1082 return ENXIO;
1083 }
1084
1085 switch (cmd) {
1086 #ifdef VNDIOOCSET
1087 case VNDIOOCSET:
1088 #endif
1089 case VNDIOCSET:
1090 if (vnd->sc_flags & VNF_INITED)
1091 return EBUSY;
1092
1093 if ((error = vndlock(vnd)) != 0)
1094 return error;
1095
1096 fflags = FREAD;
1097 if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
1098 fflags |= FWRITE;
1099 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file);
1100 if ((error = vn_open(&nd, fflags, 0)) != 0)
1101 goto unlock_and_exit;
1102 KASSERT(l);
1103 error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_cred);
1104 if (!error && nd.ni_vp->v_type != VREG)
1105 error = EOPNOTSUPP;
1106 if (!error && vattr.va_bytes < vattr.va_size)
1107 /* File is definitely sparse, reject here */
1108 error = EINVAL;
1109 if (error) {
1110 VOP_UNLOCK(nd.ni_vp);
1111 goto close_and_exit;
1112 }
1113
1114 /* If using a compressed file, initialize its info */
1115 /* (or abort with an error if kernel has no compression) */
1116 if (vio->vnd_flags & VNF_COMP) {
1117 #ifdef VND_COMPRESSION
1118 struct vnd_comp_header *ch;
1119 int i;
1120 u_int32_t comp_size;
1121 u_int32_t comp_maxsize;
1122
1123 /* allocate space for compresed file header */
1124 ch = malloc(sizeof(struct vnd_comp_header),
1125 M_TEMP, M_WAITOK);
1126
1127 /* read compressed file header */
1128 error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ch,
1129 sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
1130 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1131 if (error) {
1132 free(ch, M_TEMP);
1133 VOP_UNLOCK(nd.ni_vp);
1134 goto close_and_exit;
1135 }
1136
1137 /* save some header info */
1138 vnd->sc_comp_blksz = ntohl(ch->block_size);
1139 /* note last offset is the file byte size */
1140 vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
1141 free(ch, M_TEMP);
1142 if (vnd->sc_comp_blksz == 0 ||
1143 vnd->sc_comp_blksz % DEV_BSIZE !=0) {
1144 VOP_UNLOCK(nd.ni_vp);
1145 error = EINVAL;
1146 goto close_and_exit;
1147 }
1148 if (sizeof(struct vnd_comp_header) +
1149 sizeof(u_int64_t) * vnd->sc_comp_numoffs >
1150 vattr.va_size) {
1151 VOP_UNLOCK(nd.ni_vp);
1152 error = EINVAL;
1153 goto close_and_exit;
1154 }
1155
1156 /* set decompressed file size */
1157 vattr.va_size =
1158 ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1159 (u_quad_t)vnd->sc_comp_blksz;
1160
1161 /* allocate space for all the compressed offsets */
1162 vnd->sc_comp_offsets =
1163 malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1164 M_DEVBUF, M_WAITOK);
1165
1166 /* read in the offsets */
1167 error = vn_rdwr(UIO_READ, nd.ni_vp,
1168 (void *)vnd->sc_comp_offsets,
1169 sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1170 sizeof(struct vnd_comp_header), UIO_SYSSPACE,
1171 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1172 if (error) {
1173 VOP_UNLOCK(nd.ni_vp);
1174 goto close_and_exit;
1175 }
1176 /*
1177 * find largest block size (used for allocation limit).
1178 * Also convert offset to native byte order.
1179 */
1180 comp_maxsize = 0;
1181 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1182 vnd->sc_comp_offsets[i] =
1183 be64toh(vnd->sc_comp_offsets[i]);
1184 comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
1185 - vnd->sc_comp_offsets[i];
1186 if (comp_size > comp_maxsize)
1187 comp_maxsize = comp_size;
1188 }
1189 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1190 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
1191
1192 /* create compressed data buffer */
1193 vnd->sc_comp_buff = malloc(comp_maxsize,
1194 M_DEVBUF, M_WAITOK);
1195
1196 /* create decompressed buffer */
1197 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1198 M_DEVBUF, M_WAITOK);
1199 vnd->sc_comp_buffblk = -1;
1200
1201 /* Initialize decompress stream */
1202 memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1203 vnd->sc_comp_stream.zalloc = vnd_alloc;
1204 vnd->sc_comp_stream.zfree = vnd_free;
1205 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1206 if (error) {
1207 if (vnd->sc_comp_stream.msg)
1208 printf("vnd%d: compressed file, %s\n",
1209 unit, vnd->sc_comp_stream.msg);
1210 VOP_UNLOCK(nd.ni_vp);
1211 error = EINVAL;
1212 goto close_and_exit;
1213 }
1214
1215 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
1216 #else /* !VND_COMPRESSION */
1217 VOP_UNLOCK(nd.ni_vp);
1218 error = EOPNOTSUPP;
1219 goto close_and_exit;
1220 #endif /* VND_COMPRESSION */
1221 }
1222
1223 VOP_UNLOCK(nd.ni_vp);
1224 vnd->sc_vp = nd.ni_vp;
1225 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
1226
1227 /*
1228 * Use pseudo-geometry specified. If none was provided,
1229 * use "standard" Adaptec fictitious geometry.
1230 */
1231 if (vio->vnd_flags & VNDIOF_HASGEOM) {
1232
1233 memcpy(&vnd->sc_geom, &vio->vnd_geom,
1234 sizeof(vio->vnd_geom));
1235
1236 /*
1237 * Sanity-check the sector size.
1238 * XXX Don't allow secsize < DEV_BSIZE. Should
1239 * XXX we?
1240 */
1241 if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
1242 (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
1243 vnd->sc_geom.vng_ncylinders == 0 ||
1244 (vnd->sc_geom.vng_ntracks *
1245 vnd->sc_geom.vng_nsectors) == 0) {
1246 error = EINVAL;
1247 goto close_and_exit;
1248 }
1249
1250 /*
1251 * Compute the size (in DEV_BSIZE blocks) specified
1252 * by the geometry.
1253 */
1254 geomsize = (vnd->sc_geom.vng_nsectors *
1255 vnd->sc_geom.vng_ntracks *
1256 vnd->sc_geom.vng_ncylinders) *
1257 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1258
1259 /*
1260 * Sanity-check the size against the specified
1261 * geometry.
1262 */
1263 if (vnd->sc_size < geomsize) {
1264 error = EINVAL;
1265 goto close_and_exit;
1266 }
1267 } else if (vnd->sc_size >= (32 * 64)) {
1268 /*
1269 * Size must be at least 2048 DEV_BSIZE blocks
1270 * (1M) in order to use this geometry.
1271 */
1272 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1273 vnd->sc_geom.vng_nsectors = 32;
1274 vnd->sc_geom.vng_ntracks = 64;
1275 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1276 } else {
1277 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1278 vnd->sc_geom.vng_nsectors = 1;
1279 vnd->sc_geom.vng_ntracks = 1;
1280 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1281 }
1282
1283 vnd_set_properties(vnd);
1284
1285 if (vio->vnd_flags & VNDIOF_READONLY) {
1286 vnd->sc_flags |= VNF_READONLY;
1287 }
1288
1289 if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1290 goto close_and_exit;
1291
1292 vndthrottle(vnd, vnd->sc_vp);
1293 vio->vnd_osize = dbtob(vnd->sc_size);
1294 #ifdef VNDIOOCSET
1295 if (cmd != VNDIOOCSET)
1296 #endif
1297 vio->vnd_size = dbtob(vnd->sc_size);
1298 vnd->sc_flags |= VNF_INITED;
1299
1300 /* create the kernel thread, wait for it to be up */
1301 error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1302 &vnd->sc_kthread, device_xname(vnd->sc_dev));
1303 if (error)
1304 goto close_and_exit;
1305 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1306 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1307 }
1308 #ifdef DEBUG
1309 if (vnddebug & VDB_INIT)
1310 printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
1311 vnd->sc_vp, (unsigned long) vnd->sc_size,
1312 vnd->sc_geom.vng_secsize,
1313 vnd->sc_geom.vng_nsectors,
1314 vnd->sc_geom.vng_ntracks,
1315 vnd->sc_geom.vng_ncylinders);
1316 #endif
1317
1318 /* Attach the disk. */
1319 disk_attach(&vnd->sc_dkdev);
1320 disk_blocksize(&vnd->sc_dkdev, vnd->sc_geom.vng_secsize);
1321
1322 /* Initialize the xfer and buffer pools. */
1323 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1324 0, 0, "vndxpl", NULL, IPL_BIO);
1325
1326 /* Try and read the disklabel. */
1327 vndgetdisklabel(dev, vnd);
1328
1329 vndunlock(vnd);
1330
1331 break;
1332
1333 close_and_exit:
1334 (void) vn_close(nd.ni_vp, fflags, l->l_cred);
1335 unlock_and_exit:
1336 #ifdef VND_COMPRESSION
1337 /* free any allocated memory (for compressed file) */
1338 if (vnd->sc_comp_offsets) {
1339 free(vnd->sc_comp_offsets, M_DEVBUF);
1340 vnd->sc_comp_offsets = NULL;
1341 }
1342 if (vnd->sc_comp_buff) {
1343 free(vnd->sc_comp_buff, M_DEVBUF);
1344 vnd->sc_comp_buff = NULL;
1345 }
1346 if (vnd->sc_comp_decombuf) {
1347 free(vnd->sc_comp_decombuf, M_DEVBUF);
1348 vnd->sc_comp_decombuf = NULL;
1349 }
1350 #endif /* VND_COMPRESSION */
1351 vndunlock(vnd);
1352 return error;
1353
1354 #ifdef VNDIOOCCLR
1355 case VNDIOOCCLR:
1356 #endif
1357 case VNDIOCCLR:
1358 part = DISKPART(dev);
1359 pmask = (1 << part);
1360 force = (vio->vnd_flags & VNDIOF_FORCE) != 0;
1361
1362 if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1363 return error;
1364
1365 break;
1366
1367 #ifdef COMPAT_30
1368 case VNDIOOCGET: {
1369 struct vnd_ouser *vnu;
1370 struct vattr va;
1371 vnu = (struct vnd_ouser *)data;
1372 KASSERT(l);
1373 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1374 case 0:
1375 vnu->vnu_dev = va.va_fsid;
1376 vnu->vnu_ino = va.va_fileid;
1377 break;
1378 case -1:
1379 /* unused is not an error */
1380 vnu->vnu_dev = 0;
1381 vnu->vnu_ino = 0;
1382 break;
1383 default:
1384 return error;
1385 }
1386 break;
1387 }
1388 #endif
1389 case VNDIOCGET: {
1390 struct vnd_user *vnu;
1391 struct vattr va;
1392 vnu = (struct vnd_user *)data;
1393 KASSERT(l);
1394 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1395 case 0:
1396 vnu->vnu_dev = va.va_fsid;
1397 vnu->vnu_ino = va.va_fileid;
1398 break;
1399 case -1:
1400 /* unused is not an error */
1401 vnu->vnu_dev = 0;
1402 vnu->vnu_ino = 0;
1403 break;
1404 default:
1405 return error;
1406 }
1407 break;
1408 }
1409
1410 case DIOCGDINFO:
1411 *(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
1412 break;
1413
1414 #ifdef __HAVE_OLD_DISKLABEL
1415 case ODIOCGDINFO:
1416 newlabel = *(vnd->sc_dkdev.dk_label);
1417 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1418 return ENOTTY;
1419 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1420 break;
1421 #endif
1422
1423 case DIOCGPART:
1424 ((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
1425 ((struct partinfo *)data)->part =
1426 &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1427 break;
1428
1429 case DIOCWDINFO:
1430 case DIOCSDINFO:
1431 #ifdef __HAVE_OLD_DISKLABEL
1432 case ODIOCWDINFO:
1433 case ODIOCSDINFO:
1434 #endif
1435 {
1436 struct disklabel *lp;
1437
1438 if ((error = vndlock(vnd)) != 0)
1439 return error;
1440
1441 vnd->sc_flags |= VNF_LABELLING;
1442
1443 #ifdef __HAVE_OLD_DISKLABEL
1444 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1445 memset(&newlabel, 0, sizeof newlabel);
1446 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1447 lp = &newlabel;
1448 } else
1449 #endif
1450 lp = (struct disklabel *)data;
1451
1452 error = setdisklabel(vnd->sc_dkdev.dk_label,
1453 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1454 if (error == 0) {
1455 if (cmd == DIOCWDINFO
1456 #ifdef __HAVE_OLD_DISKLABEL
1457 || cmd == ODIOCWDINFO
1458 #endif
1459 )
1460 error = writedisklabel(VNDLABELDEV(dev),
1461 vndstrategy, vnd->sc_dkdev.dk_label,
1462 vnd->sc_dkdev.dk_cpulabel);
1463 }
1464
1465 vnd->sc_flags &= ~VNF_LABELLING;
1466
1467 vndunlock(vnd);
1468
1469 if (error)
1470 return error;
1471 break;
1472 }
1473
1474 case DIOCKLABEL:
1475 if (*(int *)data != 0)
1476 vnd->sc_flags |= VNF_KLABEL;
1477 else
1478 vnd->sc_flags &= ~VNF_KLABEL;
1479 break;
1480
1481 case DIOCWLABEL:
1482 if (*(int *)data != 0)
1483 vnd->sc_flags |= VNF_WLABEL;
1484 else
1485 vnd->sc_flags &= ~VNF_WLABEL;
1486 break;
1487
1488 case DIOCGDEFLABEL:
1489 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1490 break;
1491
1492 #ifdef __HAVE_OLD_DISKLABEL
1493 case ODIOCGDEFLABEL:
1494 vndgetdefaultlabel(vnd, &newlabel);
1495 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1496 return ENOTTY;
1497 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1498 break;
1499 #endif
1500
1501 case DIOCCACHESYNC:
1502 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1503 error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1504 FSYNC_WAIT | FSYNC_DATAONLY | FSYNC_CACHE, 0, 0);
1505 VOP_UNLOCK(vnd->sc_vp);
1506 return error;
1507
1508 case DIOCAWEDGE:
1509 dkw = (void *) data;
1510
1511 if ((flag & FWRITE) == 0)
1512 return EBADF;
1513
1514 /* If the ioctl happens here, the parent is us. */
1515 strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev),
1516 sizeof(dkw->dkw_parent));
1517 return dkwedge_add(dkw);
1518
1519 case DIOCDWEDGE:
1520 dkw = (void *) data;
1521
1522 if ((flag & FWRITE) == 0)
1523 return EBADF;
1524
1525 /* If the ioctl happens here, the parent is us. */
1526 strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev),
1527 sizeof(dkw->dkw_parent));
1528 return dkwedge_del(dkw);
1529
1530 case DIOCLWEDGES:
1531 dkwl = (void *) data;
1532
1533 return dkwedge_list(&vnd->sc_dkdev, dkwl, l);
1534
1535 default:
1536 return ENOTTY;
1537 }
1538
1539 return 0;
1540 }
1541
1542 /*
1543 * Duplicate the current processes' credentials. Since we are called only
1544 * as the result of a SET ioctl and only root can do that, any future access
1545 * to this "disk" is essentially as root. Note that credentials may change
1546 * if some other uid can write directly to the mapped file (NFS).
1547 */
1548 static int
1549 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1550 {
1551 struct uio auio;
1552 struct iovec aiov;
1553 char *tmpbuf;
1554 int error;
1555
1556 vnd->sc_cred = kauth_cred_dup(cred);
1557 tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1558
1559 /* XXX: Horrible kludge to establish credentials for NFS */
1560 aiov.iov_base = tmpbuf;
1561 aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
1562 auio.uio_iov = &aiov;
1563 auio.uio_iovcnt = 1;
1564 auio.uio_offset = 0;
1565 auio.uio_rw = UIO_READ;
1566 auio.uio_resid = aiov.iov_len;
1567 UIO_SETUP_SYSSPACE(&auio);
1568 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1569 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1570 if (error == 0) {
1571 /*
1572 * Because vnd does all IO directly through the vnode
1573 * we need to flush (at least) the buffer from the above
1574 * VOP_READ from the buffer cache to prevent cache
1575 * incoherencies. Also, be careful to write dirty
1576 * buffers back to stable storage.
1577 */
1578 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1579 curlwp, 0, 0);
1580 }
1581 VOP_UNLOCK(vnd->sc_vp);
1582
1583 free(tmpbuf, M_TEMP);
1584 return error;
1585 }
1586
1587 /*
1588 * Set maxactive based on FS type
1589 */
1590 static void
1591 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1592 {
1593
1594 if (vp->v_tag == VT_NFS)
1595 vnd->sc_maxactive = 2;
1596 else
1597 vnd->sc_maxactive = 8;
1598
1599 if (vnd->sc_maxactive < 1)
1600 vnd->sc_maxactive = 1;
1601 }
1602
#if 0
/*
 * Unconfigure every initialized vnd instance.  Currently disabled
 * (compiled out with #if 0) and unreferenced; kept as a sketch of a
 * shutdown hook.  NOTE(review): references vnd_softc[]/numvnd arrays
 * not visible in this file chunk — confirm before re-enabling.
 */
static void
vndshutdown(void)
{
	struct vnd_softc *vnd;

	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
		if (vnd->sc_flags & VNF_INITED)
			vndclear(vnd);
}
#endif
1614
/*
 * Tear down a configured unit: revoke open device vnodes, drain and stop
 * the worker thread, release compression state, and close the backing
 * vnode.  "myminor" is the character-device minor performing the clear;
 * its own vnode is spared from revocation so the ioctl can complete.
 * The ordering below matters — do not reorder without care.
 */
static void
vndclear(struct vnd_softc *vnd, int myminor)
{
	struct vnode *vp = vnd->sc_vp;
	int fflags = FREAD;
	int bmaj, cmaj, i, mn;
	int s;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclear(%p): vp %p\n", vnd, vp);
#endif
	/* locate the major number */
	bmaj = bdevsw_lookup_major(&vnd_bdevsw);
	cmaj = cdevsw_lookup_major(&vnd_cdevsw);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(vnd->sc_dev), i);
		vdevgone(bmaj, mn, mn, VBLK);
		if (mn != myminor) /* XXX avoid to kill own vnode */
			vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Close for write too unless the unit was configured read-only. */
	if ((vnd->sc_flags & VNF_READONLY) == 0)
		fflags |= FWRITE;

	/* Discard any queued transfers before asking the thread to exit. */
	s = splbio();
	bufq_drain(vnd->sc_tab);
	splx(s);

	/* Signal vndthread and wait until it drops VNF_KTHREAD. */
	vnd->sc_flags |= VNF_VUNCONF;
	wakeup(&vnd->sc_tab);
	while (vnd->sc_flags & VNF_KTHREAD)
		tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);

#ifdef VND_COMPRESSION
	/* free the compressed file buffers */
	if (vnd->sc_flags & VNF_COMP) {
		if (vnd->sc_comp_offsets) {
			free(vnd->sc_comp_offsets, M_DEVBUF);
			vnd->sc_comp_offsets = NULL;
		}
		if (vnd->sc_comp_buff) {
			free(vnd->sc_comp_buff, M_DEVBUF);
			vnd->sc_comp_buff = NULL;
		}
		if (vnd->sc_comp_decombuf) {
			free(vnd->sc_comp_decombuf, M_DEVBUF);
			vnd->sc_comp_decombuf = NULL;
		}
	}
#endif /* VND_COMPRESSION */
	/* Mark the unit unconfigured, then release vnode and credentials. */
	vnd->sc_flags &=
	    ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
	      | VNF_VUNCONF | VNF_COMP | VNF_CLEARING);
	if (vp == NULL)
		panic("vndclear: null vp");
	(void) vn_close(vp, fflags, vnd->sc_cred);
	kauth_cred_free(vnd->sc_cred);
	vnd->sc_vp = NULL;
	vnd->sc_cred = NULL;
	vnd->sc_size = 0;
}
1679
1680 static int
1681 vndsize(dev_t dev)
1682 {
1683 struct vnd_softc *sc;
1684 struct disklabel *lp;
1685 int part, unit, omask;
1686 int size;
1687
1688 unit = vndunit(dev);
1689 sc = device_lookup_private(&vnd_cd, unit);
1690 if (sc == NULL)
1691 return -1;
1692
1693 if ((sc->sc_flags & VNF_INITED) == 0)
1694 return -1;
1695
1696 part = DISKPART(dev);
1697 omask = sc->sc_dkdev.dk_openmask & (1 << part);
1698 lp = sc->sc_dkdev.dk_label;
1699
1700 if (omask == 0 && vndopen(dev, 0, S_IFBLK, curlwp)) /* XXX */
1701 return -1;
1702
1703 if (lp->d_partitions[part].p_fstype != FS_SWAP)
1704 size = -1;
1705 else
1706 size = lp->d_partitions[part].p_size *
1707 (lp->d_secsize / DEV_BSIZE);
1708
1709 if (omask == 0 && vndclose(dev, 0, S_IFBLK, curlwp)) /* XXX */
1710 return -1;
1711
1712 return size;
1713 }
1714
1715 static int
1716 vnddump(dev_t dev, daddr_t blkno, void *va,
1717 size_t size)
1718 {
1719
1720 /* Not implemented. */
1721 return ENXIO;
1722 }
1723
1724 static void
1725 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
1726 {
1727 struct vndgeom *vng = &sc->sc_geom;
1728 struct partition *pp;
1729
1730 memset(lp, 0, sizeof(*lp));
1731
1732 lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1733 lp->d_secsize = vng->vng_secsize;
1734 lp->d_nsectors = vng->vng_nsectors;
1735 lp->d_ntracks = vng->vng_ntracks;
1736 lp->d_ncylinders = vng->vng_ncylinders;
1737 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1738
1739 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1740 lp->d_type = DTYPE_VND;
1741 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1742 lp->d_rpm = 3600;
1743 lp->d_interleave = 1;
1744 lp->d_flags = 0;
1745
1746 pp = &lp->d_partitions[RAW_PART];
1747 pp->p_offset = 0;
1748 pp->p_size = lp->d_secperunit;
1749 pp->p_fstype = FS_UNUSED;
1750 lp->d_npartitions = RAW_PART + 1;
1751
1752 lp->d_magic = DISKMAGIC;
1753 lp->d_magic2 = DISKMAGIC;
1754 lp->d_checksum = dkcksum(lp);
1755 }
1756
1757 /*
1758 * Read the disklabel from a vnd. If one is not present, create a fake one.
1759 */
1760 static void
1761 vndgetdisklabel(dev_t dev, struct vnd_softc *sc)
1762 {
1763 const char *errstring;
1764 struct disklabel *lp = sc->sc_dkdev.dk_label;
1765 struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1766 int i;
1767
1768 memset(clp, 0, sizeof(*clp));
1769
1770 vndgetdefaultlabel(sc, lp);
1771
1772 /*
1773 * Call the generic disklabel extraction routine.
1774 */
1775 errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1776 if (errstring) {
1777 /*
1778 * Lack of disklabel is common, but we print the warning
1779 * anyway, since it might contain other useful information.
1780 */
1781 aprint_normal_dev(sc->sc_dev, "%s\n", errstring);
1782
1783 /*
1784 * For historical reasons, if there's no disklabel
1785 * present, all partitions must be FS_BSDFFS and
1786 * occupy the entire disk.
1787 */
1788 for (i = 0; i < MAXPARTITIONS; i++) {
1789 /*
1790 * Don't wipe out port specific hack (such as
1791 * dos partition hack of i386 port).
1792 */
1793 if (lp->d_partitions[i].p_size != 0)
1794 continue;
1795
1796 lp->d_partitions[i].p_size = lp->d_secperunit;
1797 lp->d_partitions[i].p_offset = 0;
1798 lp->d_partitions[i].p_fstype = FS_BSDFFS;
1799 }
1800
1801 strncpy(lp->d_packname, "default label",
1802 sizeof(lp->d_packname));
1803
1804 lp->d_npartitions = MAXPARTITIONS;
1805 lp->d_checksum = dkcksum(lp);
1806 }
1807
1808 /* In-core label now valid. */
1809 sc->sc_flags |= VNF_VLABEL;
1810 }
1811
1812 /*
1813 * Wait interruptibly for an exclusive lock.
1814 *
1815 * XXX
1816 * Several drivers do this; it should be abstracted and made MP-safe.
1817 */
1818 static int
1819 vndlock(struct vnd_softc *sc)
1820 {
1821 int error;
1822
1823 while ((sc->sc_flags & VNF_LOCKED) != 0) {
1824 sc->sc_flags |= VNF_WANTED;
1825 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1826 return error;
1827 }
1828 sc->sc_flags |= VNF_LOCKED;
1829 return 0;
1830 }
1831
1832 /*
1833 * Unlock and wake up any waiters.
1834 */
1835 static void
1836 vndunlock(struct vnd_softc *sc)
1837 {
1838
1839 sc->sc_flags &= ~VNF_LOCKED;
1840 if ((sc->sc_flags & VNF_WANTED) != 0) {
1841 sc->sc_flags &= ~VNF_WANTED;
1842 wakeup(sc);
1843 }
1844 }
1845
1846 #ifdef VND_COMPRESSION
1847 /* compressed file read */
/*
 * Service a read request against a compressed backing file.  Walks the
 * request block by compressed block: reads and inflates each needed
 * block into sc_comp_decombuf (cached via sc_comp_buffblk), then copies
 * the relevant slice into the caller's buffer with uiomove().  On any
 * failure, sets bp->b_error and returns with b_resid reflecting the
 * untransferred remainder.  Runs at splbio() for the duration.
 */
static void
compstrategy(struct buf *bp, off_t bn)
{
	int error;
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd =
	    device_lookup_private(&vnd_cd, unit);
	u_int32_t comp_block;
	struct uio auio;
	char *addr;
	int s;

	/* set up constants for data move */
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);

	/* read, and transfer the data */
	addr = bp->b_data;
	bp->b_resid = bp->b_bcount;
	s = splbio();
	while (bp->b_resid > 0) {
		unsigned length;
		size_t length_in_buffer;
		u_int32_t offset_in_buffer;
		struct iovec aiov;

		/* calculate the compressed block number */
		comp_block = bn / (off_t)vnd->sc_comp_blksz;

		/* check for good block number */
		if (comp_block >= vnd->sc_comp_numoffs) {
			bp->b_error = EINVAL;
			splx(s);
			return;
		}

		/* read in the compressed block, if not in buffer */
		if (comp_block != vnd->sc_comp_buffblk) {
			/* Compressed length = distance between offsets. */
			length = vnd->sc_comp_offsets[comp_block + 1] -
			    vnd->sc_comp_offsets[comp_block];
			vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
			    length, vnd->sc_comp_offsets[comp_block],
			    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
			    NULL, NULL);
			if (error) {
				bp->b_error = error;
				VOP_UNLOCK(vnd->sc_vp);
				splx(s);
				return;
			}
			/* uncompress the buffer */
			vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
			vnd->sc_comp_stream.avail_in = length;
			vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
			vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
			inflateReset(&vnd->sc_comp_stream);
			error = inflate(&vnd->sc_comp_stream, Z_FINISH);
			if (error != Z_STREAM_END) {
				/* Corrupt or truncated compressed data. */
				if (vnd->sc_comp_stream.msg)
					aprint_normal_dev(vnd->sc_dev,
					    "compressed file, %s\n",
					    vnd->sc_comp_stream.msg);
				bp->b_error = EBADMSG;
				VOP_UNLOCK(vnd->sc_vp);
				splx(s);
				return;
			}
			/* Remember which block the decompress cache holds. */
			vnd->sc_comp_buffblk = comp_block;
			VOP_UNLOCK(vnd->sc_vp);
		}

		/* transfer the usable uncompressed data */
		offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
		length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
		if (length_in_buffer > bp->b_resid)
			length_in_buffer = bp->b_resid;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		aiov.iov_base = addr;
		aiov.iov_len = length_in_buffer;
		auio.uio_resid = aiov.iov_len;
		auio.uio_offset = 0;
		error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
		    length_in_buffer, &auio);
		if (error) {
			bp->b_error = error;
			splx(s);
			return;
		}

		/* Advance to the next slice of the request. */
		bn += length_in_buffer;
		addr += length_in_buffer;
		bp->b_resid -= length_in_buffer;
	}
	splx(s);
}
1945
1946 /* compression memory allocation routines */
1947 static void *
1948 vnd_alloc(void *aux, u_int items, u_int siz)
1949 {
1950 return malloc(items * siz, M_TEMP, M_NOWAIT);
1951 }
1952
1953 static void
1954 vnd_free(void *aux, void *ptr)
1955 {
1956 free(ptr, M_TEMP);
1957 }
1958 #endif /* VND_COMPRESSION */
1959
1960 static void
1961 vnd_set_properties(struct vnd_softc *vnd)
1962 {
1963 prop_dictionary_t disk_info, odisk_info, geom;
1964
1965 disk_info = prop_dictionary_create();
1966
1967 geom = prop_dictionary_create();
1968
1969 prop_dictionary_set_uint64(geom, "sectors-per-unit",
1970 vnd->sc_geom.vng_nsectors * vnd->sc_geom.vng_ntracks *
1971 vnd->sc_geom.vng_ncylinders);
1972
1973 prop_dictionary_set_uint32(geom, "sector-size",
1974 vnd->sc_geom.vng_secsize);
1975
1976 prop_dictionary_set_uint16(geom, "sectors-per-track",
1977 vnd->sc_geom.vng_nsectors);
1978
1979 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
1980 vnd->sc_geom.vng_ntracks);
1981
1982 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
1983 vnd->sc_geom.vng_ncylinders);
1984
1985 prop_dictionary_set(disk_info, "geometry", geom);
1986 prop_object_release(geom);
1987
1988 prop_dictionary_set(device_properties(vnd->sc_dev),
1989 "disk-info", disk_info);
1990
1991 /*
1992 * Don't release disk_info here; we keep a reference to it.
1993 * disk_detach() will release it when we go away.
1994 */
1995
1996 odisk_info = vnd->sc_dkdev.dk_info;
1997 vnd->sc_dkdev.dk_info = disk_info;
1998 if (odisk_info)
1999 prop_object_release(odisk_info);
2000 }
2001
2002 #ifdef _MODULE
2003
2004 #include <sys/module.h>
2005
2006 MODULE(MODULE_CLASS_DRIVER, vnd, NULL);
2007 CFDRIVER_DECL(vnd, DV_DISK, NULL);
2008
2009 static int
2010 vnd_modcmd(modcmd_t cmd, void *arg)
2011 {
2012 int bmajor = -1, cmajor = -1, error = 0;
2013
2014 switch (cmd) {
2015 case MODULE_CMD_INIT:
2016 error = config_cfdriver_attach(&vnd_cd);
2017 if (error)
2018 break;
2019
2020 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
2021 if (error) {
2022 config_cfdriver_detach(&vnd_cd);
2023 aprint_error("%s: unable to register cfattach\n",
2024 vnd_cd.cd_name);
2025 break;
2026 }
2027
2028 error = devsw_attach("vnd", &vnd_bdevsw, &bmajor,
2029 &vnd_cdevsw, &cmajor);
2030 if (error) {
2031 config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2032 config_cfdriver_detach(&vnd_cd);
2033 break;
2034 }
2035
2036 break;
2037
2038 case MODULE_CMD_FINI:
2039 error = config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2040 if (error)
2041 break;
2042 config_cfdriver_detach(&vnd_cd);
2043 devsw_detach(&vnd_bdevsw, &vnd_cdevsw);
2044 break;
2045
2046 case MODULE_CMD_STAT:
2047 return ENOTTY;
2048
2049 default:
2050 return ENOTTY;
2051 }
2052
2053 return error;
2054 }
2055
2056 #endif
2057