1 /*	$NetBSD: vnd.c,v 1.209 2010/06/24 13:03:08 hannken Exp $	*/
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1990, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * from: Utah $Hdr: vn.c 1.13 94/04/02$
65 *
66 * @(#)vn.c 8.9 (Berkeley) 5/14/95
67 */
68
69 /*
70 * Copyright (c) 1988 University of Utah.
71 *
72 * This code is derived from software contributed to Berkeley by
73 * the Systems Programming Group of the University of Utah Computer
74 * Science Department.
75 *
76 * Redistribution and use in source and binary forms, with or without
77 * modification, are permitted provided that the following conditions
78 * are met:
79 * 1. Redistributions of source code must retain the above copyright
80 * notice, this list of conditions and the following disclaimer.
81 * 2. Redistributions in binary form must reproduce the above copyright
82 * notice, this list of conditions and the following disclaimer in the
83 * documentation and/or other materials provided with the distribution.
84 * 3. All advertising materials mentioning features or use of this software
85 * must display the following acknowledgement:
86 * This product includes software developed by the University of
87 * California, Berkeley and its contributors.
88 * 4. Neither the name of the University nor the names of its contributors
89 * may be used to endorse or promote products derived from this software
90 * without specific prior written permission.
91 *
92 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
95 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
96 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
102 * SUCH DAMAGE.
103 *
104 * from: Utah $Hdr: vn.c 1.13 94/04/02$
105 *
106 * @(#)vn.c 8.9 (Berkeley) 5/14/95
107 */
108
109 /*
110 * Vnode disk driver.
111 *
112 * Block/character interface to a vnode. Allows one to treat a file
113 * as a disk (e.g. build a filesystem in it, mount it, etc.).
114 *
115 * NOTE 1: If the vnode supports the VOP_BMAP and VOP_STRATEGY operations,
116 * this uses them to avoid distorting the local buffer cache. If those
117 * block-level operations are not available, this falls back to the regular
118  * read and write calls. Using these may distort the cache in some
119  * cases, but it is better to have the driver work than to prevent it
120  * from working on file systems where the block-level operations are
121  * not implemented for whatever reason.
122 *
123 * NOTE 2: There is a security issue involved with this driver.
124  * Once mounted, all access to the contents of the "mapped" file via
125  * the special file is controlled by the permissions on the special
126  * file; the protection of the mapped file itself is ignored
127  * (effectively, root credentials are used in all transactions).
128 *
129  * NOTE 3: Doesn't interact with leases; should it?
130 */
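
/*
 * A file is typically mapped to a unit with vnconfig(8), e.g.
 * "vnconfig vnd0 /path/to/disk.img", after which /dev/vnd0* can be
 * labelled, mounted and used like any other disk; "vnconfig -u vnd0"
 * unconfigures it again.  (The image path above is only an example.)
 */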
131
132 #include <sys/cdefs.h>
133 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.209 2010/06/24 13:03:08 hannken Exp $");
134
135 #if defined(_KERNEL_OPT)
136 #include "opt_vnd.h"
137 #endif
138
139 #include <sys/param.h>
140 #include <sys/systm.h>
141 #include <sys/namei.h>
142 #include <sys/proc.h>
143 #include <sys/kthread.h>
144 #include <sys/errno.h>
145 #include <sys/buf.h>
146 #include <sys/bufq.h>
147 #include <sys/malloc.h>
148 #include <sys/ioctl.h>
149 #include <sys/disklabel.h>
150 #include <sys/device.h>
151 #include <sys/disk.h>
152 #include <sys/stat.h>
153 #include <sys/mount.h>
154 #include <sys/vnode.h>
155 #include <sys/file.h>
156 #include <sys/uio.h>
157 #include <sys/conf.h>
158 #include <sys/kauth.h>
159
160 #include <net/zlib.h>
161
162 #include <miscfs/genfs/genfs.h>
163 #include <miscfs/specfs/specdev.h>
164
165 #include <dev/dkvar.h>
166 #include <dev/vndvar.h>
167
168 #include <prop/proplib.h>
169
170 #if defined(VNDDEBUG) && !defined(DEBUG)
171 #define DEBUG
172 #endif
173
174 #ifdef DEBUG
175 int dovndcluster = 1;
176 #define VDB_FOLLOW 0x01
177 #define VDB_INIT 0x02
178 #define VDB_IO 0x04
179 #define VDB_LABEL 0x08
180 int vnddebug = 0x00;
181 #endif
182
183 #define vndunit(x) DISKUNIT(x)
184
185 struct vndxfer {
186 struct buf vx_buf;
187 struct vnd_softc *vx_vnd;
188 };
189 #define VND_BUFTOXFER(bp) ((struct vndxfer *)(void *)bp)
190
191 #define VND_GETXFER(vnd) pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
192 #define VND_PUTXFER(vnd, vx) pool_put(&(vnd)->sc_vxpool, (vx))
193
194 #define VNDLABELDEV(dev) \
195 (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
196
197 /* called by main() at boot time */
198 void vndattach(int);
199
200 static void vndclear(struct vnd_softc *, int);
201 static int vnddoclear(struct vnd_softc *, int, int, bool);
202 static int vndsetcred(struct vnd_softc *, kauth_cred_t);
203 static void vndthrottle(struct vnd_softc *, struct vnode *);
204 static void vndiodone(struct buf *);
205 #if 0
206 static void vndshutdown(void);
207 #endif
208
209 static void vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
210 static void vndgetdisklabel(dev_t, struct vnd_softc *);
211
212 static int vndlock(struct vnd_softc *);
213 static void vndunlock(struct vnd_softc *);
214 #ifdef VND_COMPRESSION
215 static void compstrategy(struct buf *, off_t);
216 static void *vnd_alloc(void *, u_int, u_int);
217 static void vnd_free(void *, void *);
218 #endif /* VND_COMPRESSION */
219
220 static void vndthread(void *);
221 static bool vnode_has_op(const struct vnode *, int);
222 static void handle_with_rdwr(struct vnd_softc *, const struct buf *,
223 struct buf *);
224 static void handle_with_strategy(struct vnd_softc *, const struct buf *,
225 struct buf *);
226 static void vnd_set_properties(struct vnd_softc *);
227
228 static dev_type_open(vndopen);
229 static dev_type_close(vndclose);
230 static dev_type_read(vndread);
231 static dev_type_write(vndwrite);
232 static dev_type_ioctl(vndioctl);
233 static dev_type_strategy(vndstrategy);
234 static dev_type_dump(vnddump);
235 static dev_type_size(vndsize);
236
237 const struct bdevsw vnd_bdevsw = {
238 vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
239 };
240
241 const struct cdevsw vnd_cdevsw = {
242 vndopen, vndclose, vndread, vndwrite, vndioctl,
243 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
244 };
245
246 static int vnd_match(device_t, cfdata_t, void *);
247 static void vnd_attach(device_t, device_t, void *);
248 static int vnd_detach(device_t, int);
249
250 CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
251 vnd_match, vnd_attach, vnd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
252 extern struct cfdriver vnd_cd;
253
254 static struct vnd_softc *vnd_spawn(int);
255 int vnd_destroy(device_t);
256
257 void
258 vndattach(int num)
259 {
260 int error;
261
262 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
263 if (error)
264 aprint_error("%s: unable to register cfattach\n",
265 vnd_cd.cd_name);
266 }
267
268 static int
269 vnd_match(device_t self, cfdata_t cfdata, void *aux)
270 {
271
272 return 1;
273 }
274
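/*
 * Attach a vnd instance: set up the softc, allocate the buffer queue
 * and register with the disk(9) and pmf(9) frameworks.  The backing
 * vnode is not associated until a VNDIOCSET ioctl configures the unit.
 */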
275 static void
276 vnd_attach(device_t parent, device_t self, void *aux)
277 {
278 struct vnd_softc *sc = device_private(self);
279
280 sc->sc_dev = self;
281 sc->sc_comp_offsets = NULL;
282 sc->sc_comp_buff = NULL;
283 sc->sc_comp_decombuf = NULL;
284 bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
285 disk_init(&sc->sc_dkdev, device_xname(self), NULL);
286 if (!pmf_device_register(self, NULL, NULL))
287 aprint_error_dev(self, "couldn't establish power handler\n");
288 }
289
290 static int
291 vnd_detach(device_t self, int flags)
292 {
293 int error;
294 struct vnd_softc *sc = device_private(self);
295
296 if (sc->sc_flags & VNF_INITED) {
297 error = vnddoclear(sc, 0, -1, (flags & DETACH_FORCE) != 0);
298 if (error != 0)
299 return error;
300 }
301
302 pmf_device_deregister(self);
303 bufq_free(sc->sc_tab);
304 disk_destroy(&sc->sc_dkdev);
305
306 return 0;
307 }
308
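/*
 * Create (attach) a new unit on demand; called from vndopen() when a
 * unit that has not been configured yet is first opened.
 */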
309 static struct vnd_softc *
310 vnd_spawn(int unit)
311 {
312 cfdata_t cf;
313
314 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
315 cf->cf_name = vnd_cd.cd_name;
316 cf->cf_atname = vnd_cd.cd_name;
317 cf->cf_unit = unit;
318 cf->cf_fstate = FSTATE_STAR;
319
320 return device_private(config_attach_pseudo(cf));
321 }
322
323 int
324 vnd_destroy(device_t dev)
325 {
326 int error;
327 cfdata_t cf;
328
329 cf = device_cfdata(dev);
330 error = config_detach(dev, DETACH_QUIET);
331 if (error)
332 return error;
333 free(cf, M_DEVBUF);
334 return 0;
335 }
336
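/*
 * Open the block or character device: auto-create the unit if it does
 * not exist yet, refresh the in-core disklabel on the first open and
 * note which partition is open so the unit cannot be unconfigured
 * underneath us.
 */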
337 static int
338 vndopen(dev_t dev, int flags, int mode, struct lwp *l)
339 {
340 int unit = vndunit(dev);
341 struct vnd_softc *sc;
342 int error = 0, part, pmask;
343 struct disklabel *lp;
344
345 #ifdef DEBUG
346 if (vnddebug & VDB_FOLLOW)
347 printf("vndopen(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
348 #endif
349 sc = device_lookup_private(&vnd_cd, unit);
350 if (sc == NULL) {
351 sc = vnd_spawn(unit);
352 if (sc == NULL)
353 return ENOMEM;
354 }
355
356 if ((error = vndlock(sc)) != 0)
357 return error;
358
359 if ((sc->sc_flags & VNF_CLEARING) != 0) {
360 error = ENXIO;
361 goto done;
362 }
363
364 lp = sc->sc_dkdev.dk_label;
365
366 part = DISKPART(dev);
367 pmask = (1 << part);
368
369 /*
370 * If we're initialized, check to see if there are any other
371 * open partitions. If not, then it's safe to update the
372 * in-core disklabel. Only read the disklabel if it is
373 * not already valid.
374 */
375 if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
376 sc->sc_dkdev.dk_openmask == 0)
377 vndgetdisklabel(dev, sc);
378
379 	/* Check that the partition exists. */
380 if (part != RAW_PART) {
381 if (((sc->sc_flags & VNF_INITED) == 0) ||
382 ((part >= lp->d_npartitions) ||
383 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
384 error = ENXIO;
385 goto done;
386 }
387 }
388
389 /* Prevent our unit from being unconfigured while open. */
390 switch (mode) {
391 case S_IFCHR:
392 sc->sc_dkdev.dk_copenmask |= pmask;
393 break;
394
395 case S_IFBLK:
396 sc->sc_dkdev.dk_bopenmask |= pmask;
397 break;
398 }
399 sc->sc_dkdev.dk_openmask =
400 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
401
402 done:
403 vndunlock(sc);
404 return error;
405 }
406
407 static int
408 vndclose(dev_t dev, int flags, int mode, struct lwp *l)
409 {
410 int unit = vndunit(dev);
411 struct vnd_softc *sc;
412 int error = 0, part;
413
414 #ifdef DEBUG
415 if (vnddebug & VDB_FOLLOW)
416 printf("vndclose(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
417 #endif
418 sc = device_lookup_private(&vnd_cd, unit);
419 if (sc == NULL)
420 return ENXIO;
421
422 if ((error = vndlock(sc)) != 0)
423 return error;
424
425 part = DISKPART(dev);
426
427 /* ...that much closer to allowing unconfiguration... */
428 switch (mode) {
429 case S_IFCHR:
430 sc->sc_dkdev.dk_copenmask &= ~(1 << part);
431 break;
432
433 case S_IFBLK:
434 sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
435 break;
436 }
437 sc->sc_dkdev.dk_openmask =
438 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
439
440 vndunlock(sc);
441
442 if ((sc->sc_flags & VNF_INITED) == 0) {
443 if ((error = vnd_destroy(sc->sc_dev)) != 0) {
444 aprint_error_dev(sc->sc_dev,
445 "unable to detach instance\n");
446 return error;
447 }
448 }
449
450 return 0;
451 }
452
453 /*
454 * Queue the request, and wakeup the kernel thread to handle it.
455 */
456 static void
457 vndstrategy(struct buf *bp)
458 {
459 int unit = vndunit(bp->b_dev);
460 struct vnd_softc *vnd =
461 device_lookup_private(&vnd_cd, unit);
462 struct disklabel *lp;
463 daddr_t blkno;
464 int s = splbio();
465
466 if (vnd == NULL) {
467 bp->b_error = ENXIO;
468 goto done;
469 }
470 lp = vnd->sc_dkdev.dk_label;
471
472 if ((vnd->sc_flags & VNF_INITED) == 0) {
473 bp->b_error = ENXIO;
474 goto done;
475 }
476
477 /*
478 * The transfer must be a whole number of blocks.
479 */
480 if ((bp->b_bcount % lp->d_secsize) != 0) {
481 bp->b_error = EINVAL;
482 goto done;
483 }
484
485 /*
486 * check if we're read-only.
487 */
488 if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
489 bp->b_error = EACCES;
490 goto done;
491 }
492
493 /* If it's a nil transfer, wake up the top half now. */
494 if (bp->b_bcount == 0) {
495 goto done;
496 }
497
498 /*
499 * Do bounds checking and adjust transfer. If there's an error,
500 * the bounds check will flag that for us.
501 */
502 if (DISKPART(bp->b_dev) == RAW_PART) {
503 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
504 vnd->sc_size) <= 0)
505 goto done;
506 } else {
507 if (bounds_check_with_label(&vnd->sc_dkdev,
508 bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
509 goto done;
510 }
511
512 /*
513 * Put the block number in terms of the logical blocksize
514 * of the "device".
515 */
516
517 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
518
519 /*
520 * Translate the partition-relative block number to an absolute.
521 */
522 if (DISKPART(bp->b_dev) != RAW_PART) {
523 struct partition *pp;
524
525 pp = &vnd->sc_dkdev.dk_label->d_partitions[
526 DISKPART(bp->b_dev)];
527 blkno += pp->p_offset;
528 }
529 bp->b_rawblkno = blkno;
530
531 #ifdef DEBUG
532 if (vnddebug & VDB_FOLLOW)
533 printf("vndstrategy(%p): unit %d\n", bp, unit);
534 #endif
535 bufq_put(vnd->sc_tab, bp);
536 wakeup(&vnd->sc_tab);
537 splx(s);
538 return;
539
540 done:
541 bp->b_resid = bp->b_bcount;
542 biodone(bp);
543 splx(s);
544 }
545
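/*
 * Check whether the backing vnode implements both VOP_BMAP and
 * VOP_STRATEGY, i.e. whether the block-level I/O path can be used.
 */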
546 static bool
547 vnode_has_strategy(struct vnd_softc *vnd)
548 {
549 return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
550 vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
551 }
552
553 /* XXX this function needs a reliable check to detect
554 * sparse files. Otherwise, bmap/strategy may be used
555 * and fail on non-allocated blocks. VOP_READ/VOP_WRITE
556 * works on sparse files.
557 */
558 #if notyet
559 static bool
560 vnode_strategy_probe(struct vnd_softc *vnd)
561 {
562 int error;
563 daddr_t nbn;
564
565 if (!vnode_has_strategy(vnd))
566 return false;
567
568 /* Convert the first logical block number to its
569 * physical block number.
570 */
571 error = 0;
572 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
573 error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
574 VOP_UNLOCK(vnd->sc_vp);
575
576 /* Test if that worked. */
577 if (error == 0 && (long)nbn == -1)
578 return false;
579
580 return true;
581 }
582 #endif
583
584 static void
585 vndthread(void *arg)
586 {
587 struct vnd_softc *vnd = arg;
588 bool usestrategy;
589 int s;
590
591 /* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
592 * directly access the backing vnode. If we can, use these two
593 * operations to avoid messing with the local buffer cache.
594 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
595 * which are guaranteed to work with any file system. */
596 usestrategy = vnode_has_strategy(vnd);
597
598 #ifdef DEBUG
599 if (vnddebug & VDB_INIT)
600 printf("vndthread: vp %p, %s\n", vnd->sc_vp,
601 usestrategy ?
602 "using bmap/strategy operations" :
603 "using read/write operations");
604 #endif
605
606 s = splbio();
607 vnd->sc_flags |= VNF_KTHREAD;
608 wakeup(&vnd->sc_kthread);
609
610 /*
611 * Dequeue requests and serve them depending on the available
612 * vnode operations.
613 */
614 while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
615 struct vndxfer *vnx;
616 int flags;
617 struct buf *obp;
618 struct buf *bp;
619
620 obp = bufq_get(vnd->sc_tab);
621 if (obp == NULL) {
622 tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
623 continue;
624 		}
625 splx(s);
626 flags = obp->b_flags;
627 #ifdef DEBUG
628 if (vnddebug & VDB_FOLLOW)
629 printf("vndthread(%p)\n", obp);
630 #endif
631
632 if (vnd->sc_vp->v_mount == NULL) {
633 obp->b_error = ENXIO;
634 goto done;
635 }
636 #ifdef VND_COMPRESSION
637 /* handle a compressed read */
638 if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
639 off_t bn;
640
641 /* Convert to a byte offset within the file. */
642 bn = obp->b_rawblkno *
643 vnd->sc_dkdev.dk_label->d_secsize;
644
645 compstrategy(obp, bn);
646 goto done;
647 }
648 #endif /* VND_COMPRESSION */
649
650 /*
651 * Allocate a header for this transfer and link it to the
652 * buffer
653 */
654 s = splbio();
655 vnx = VND_GETXFER(vnd);
656 splx(s);
657 vnx->vx_vnd = vnd;
658
659 s = splbio();
660 while (vnd->sc_active >= vnd->sc_maxactive) {
661 tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
662 }
663 vnd->sc_active++;
664 splx(s);
665
666 /* Instrumentation. */
667 disk_busy(&vnd->sc_dkdev);
668
669 bp = &vnx->vx_buf;
670 buf_init(bp);
671 bp->b_flags = (obp->b_flags & B_READ);
672 bp->b_oflags = obp->b_oflags;
673 bp->b_cflags = obp->b_cflags;
674 bp->b_iodone = vndiodone;
675 bp->b_private = obp;
676 bp->b_vp = vnd->sc_vp;
677 bp->b_objlock = &bp->b_vp->v_interlock;
678 bp->b_data = obp->b_data;
679 bp->b_bcount = obp->b_bcount;
680 BIO_COPYPRIO(bp, obp);
681
682 /* Handle the request using the appropriate operations. */
683 if (usestrategy)
684 handle_with_strategy(vnd, obp, bp);
685 else
686 handle_with_rdwr(vnd, obp, bp);
687
688 s = splbio();
689 continue;
690
691 done:
692 biodone(obp);
693 s = splbio();
694 }
695
696 vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
697 wakeup(&vnd->sc_kthread);
698 splx(s);
699 kthread_exit(0);
700 }
701
702 /*
703  * Checks if the given vnode supports the requested operation.
704  * The operation is specified by the offset returned by VOFFSET.
705  *
706  * XXX The test used below to determine this is quite fragile
707 * because it relies on the file system to use genfs to specify
708 * unimplemented operations. There might be another way to do
709 * it more cleanly.
710 */
711 static bool
712 vnode_has_op(const struct vnode *vp, int opoffset)
713 {
714 int (*defaultp)(void *);
715 int (*opp)(void *);
716
717 defaultp = vp->v_op[VOFFSET(vop_default)];
718 opp = vp->v_op[opoffset];
719
720 return opp != defaultp && opp != genfs_eopnotsupp &&
721 opp != genfs_badop && opp != genfs_nullop;
722 }
723
724 /*
725  * Handles the read/write request given in 'bp' using the vnode's VOP_READ
726 * and VOP_WRITE operations.
727 *
728 * 'obp' is a pointer to the original request fed to the vnd device.
729 */
730 static void
731 handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
732 {
733 bool doread;
734 off_t offset;
735 size_t resid;
736 struct vnode *vp;
737
738 doread = bp->b_flags & B_READ;
739 offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
740 vp = vnd->sc_vp;
741
742 #if defined(DEBUG)
743 if (vnddebug & VDB_IO)
744 printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
745 ", secsize %d, offset %" PRIu64
746 ", bcount %d\n",
747 vp, doread ? "read" : "write", obp->b_rawblkno,
748 vnd->sc_dkdev.dk_label->d_secsize, offset,
749 bp->b_bcount);
750 #endif
751
752 /* Issue the read or write operation. */
753 bp->b_error =
754 vn_rdwr(doread ? UIO_READ : UIO_WRITE,
755 vp, bp->b_data, bp->b_bcount, offset,
756 UIO_SYSSPACE, 0, vnd->sc_cred, &resid, NULL);
757 bp->b_resid = resid;
758
759 /* We need to increase the number of outputs on the vnode if
760 * there was any write to it. */
761 if (!doread) {
762 mutex_enter(&vp->v_interlock);
763 vp->v_numoutput++;
764 mutex_exit(&vp->v_interlock);
765 }
766
767 biodone(bp);
768 }
769
770 /*
771  * Handles the read/write request given in 'bp' using the vnode's VOP_BMAP
772 * and VOP_STRATEGY operations.
773 *
774 * 'obp' is a pointer to the original request fed to the vnd device.
775 */
776 static void
777 handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
778 struct buf *bp)
779 {
780 int bsize, error, flags, skipped;
781 size_t resid, sz;
782 off_t bn, offset;
783 struct vnode *vp;
784
785 flags = obp->b_flags;
786
787 if (!(flags & B_READ)) {
788 vp = bp->b_vp;
789 mutex_enter(&vp->v_interlock);
790 vp->v_numoutput++;
791 mutex_exit(&vp->v_interlock);
792 }
793
794 /* convert to a byte offset within the file. */
795 bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
796
797 bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
798 skipped = 0;
799
800 /*
801 * Break the request into bsize pieces and feed them
802 * sequentially using VOP_BMAP/VOP_STRATEGY.
803 * We do it this way to keep from flooding NFS servers if we
804 * are connected to an NFS file. This places the burden on
805 * the client rather than the server.
806 */
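	/*
	 * For example, with an 8 KB f_iosize and a 64 KB request this
	 * loop issues eight nested 8 KB buffers (fewer, larger ones when
	 * VOP_BMAP reports contiguous read-ahead blocks via 'nra').
	 */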
807 error = 0;
808 bp->b_resid = bp->b_bcount;
809 for (offset = 0, resid = bp->b_resid; resid;
810 resid -= sz, offset += sz) {
811 struct buf *nbp;
812 daddr_t nbn;
813 int off, nra;
814
815 nra = 0;
816 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
817 error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
818 VOP_UNLOCK(vnd->sc_vp);
819
820 if (error == 0 && (long)nbn == -1)
821 error = EIO;
822
823 /*
824 * If there was an error or a hole in the file...punt.
825 * Note that we may have to wait for any operations
826 * that we have already fired off before releasing
827 * the buffer.
828 *
829 * XXX we could deal with holes here but it would be
830 * a hassle (in the write case).
831 */
832 if (error) {
833 skipped += resid;
834 break;
835 }
836
837 #ifdef DEBUG
838 if (!dovndcluster)
839 nra = 0;
840 #endif
841
842 off = bn % bsize;
843 sz = MIN(((off_t)1 + nra) * bsize - off, resid);
844 #ifdef DEBUG
845 if (vnddebug & VDB_IO)
846 printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
847 " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
848 nbn, sz);
849 #endif
850
851 nbp = getiobuf(vp, true);
852 nestiobuf_setup(bp, nbp, offset, sz);
853 nbp->b_blkno = nbn + btodb(off);
854
855 #if 0 /* XXX #ifdef DEBUG */
856 if (vnddebug & VDB_IO)
857 printf("vndstart(%ld): bp %p vp %p blkno "
858 "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
859 (long) (vnd-vnd_softc), &nbp->vb_buf,
860 nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
861 nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
862 nbp->vb_buf.b_bcount);
863 #endif
864 VOP_STRATEGY(vp, nbp);
865 bn += sz;
866 }
867 nestiobuf_done(bp, skipped, error);
868 }
869
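/*
 * Completion handler for the per-transfer buffer set up by the worker
 * thread: update the disk statistics, propagate the error and residual
 * count to the original request and release the transfer header.
 */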
870 static void
871 vndiodone(struct buf *bp)
872 {
873 struct vndxfer *vnx = VND_BUFTOXFER(bp);
874 struct vnd_softc *vnd = vnx->vx_vnd;
875 struct buf *obp = bp->b_private;
876 int s = splbio();
877
878 KASSERT(&vnx->vx_buf == bp);
879 KASSERT(vnd->sc_active > 0);
880 #ifdef DEBUG
881 if (vnddebug & VDB_IO) {
882 printf("vndiodone1: bp %p iodone: error %d\n",
883 bp, bp->b_error);
884 }
885 #endif
886 disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
887 (bp->b_flags & B_READ));
888 vnd->sc_active--;
889 if (vnd->sc_active == 0) {
890 wakeup(&vnd->sc_tab);
891 }
892 splx(s);
893 obp->b_error = bp->b_error;
894 obp->b_resid = bp->b_resid;
895 buf_destroy(bp);
896 VND_PUTXFER(vnd, vnx);
897 biodone(obp);
898 }
899
900 /* ARGSUSED */
901 static int
902 vndread(dev_t dev, struct uio *uio, int flags)
903 {
904 int unit = vndunit(dev);
905 struct vnd_softc *sc;
906
907 #ifdef DEBUG
908 if (vnddebug & VDB_FOLLOW)
909 printf("vndread(0x%"PRIx64", %p)\n", dev, uio);
910 #endif
911
912 sc = device_lookup_private(&vnd_cd, unit);
913 if (sc == NULL)
914 return ENXIO;
915
916 if ((sc->sc_flags & VNF_INITED) == 0)
917 return ENXIO;
918
919 return physio(vndstrategy, NULL, dev, B_READ, minphys, uio);
920 }
921
922 /* ARGSUSED */
923 static int
924 vndwrite(dev_t dev, struct uio *uio, int flags)
925 {
926 int unit = vndunit(dev);
927 struct vnd_softc *sc;
928
929 #ifdef DEBUG
930 if (vnddebug & VDB_FOLLOW)
931 printf("vndwrite(0x%"PRIx64", %p)\n", dev, uio);
932 #endif
933
934 sc = device_lookup_private(&vnd_cd, unit);
935 if (sc == NULL)
936 return ENXIO;
937
938 if ((sc->sc_flags & VNF_INITED) == 0)
939 return ENXIO;
940
941 return physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio);
942 }
943
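/*
 * Common helper for the VNDIOCGET (and compat VNDIOOCGET) ioctls:
 * look up the requested unit and return the attributes of its backing
 * vnode.  Returns -1, which the callers do not treat as an error, for
 * units that are not configured.
 */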
944 static int
945 vnd_cget(struct lwp *l, int unit, int *un, struct vattr *va)
946 {
947 struct vnd_softc *vnd;
948
949 if (*un == -1)
950 *un = unit;
951 if (*un < 0)
952 return EINVAL;
953
954 vnd = device_lookup_private(&vnd_cd, *un);
955 if (vnd == NULL)
956 return (*un >= vnd_cd.cd_ndevs) ? ENXIO : -1;
957
958 if ((vnd->sc_flags & VNF_INITED) == 0)
959 return -1;
960
961 return VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
962 }
963
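/*
 * Unconfigure a unit: fail with EBUSY if other partitions are still
 * open (unless forced), then tear down the backing vnode and worker
 * thread via vndclear() and destroy the transfer pool and disk
 * structure.
 */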
964 static int
965 vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
966 {
967 int error;
968
969 if ((error = vndlock(vnd)) != 0)
970 return error;
971
972 /*
973 * Don't unconfigure if any other partitions are open
974 * or if both the character and block flavors of this
975 * partition are open.
976 */
977 if (DK_BUSY(vnd, pmask) && !force) {
978 vndunlock(vnd);
979 return EBUSY;
980 }
981
982 /*
983 * XXX vndclear() might call vndclose() implicitly;
984 * release lock to avoid recursion
985 *
986 * Set VNF_CLEARING to prevent vndopen() from
987 * sneaking in after we vndunlock().
988 */
989 vnd->sc_flags |= VNF_CLEARING;
990 vndunlock(vnd);
991 vndclear(vnd, minor);
992 #ifdef DEBUG
993 if (vnddebug & VDB_INIT)
994 printf("vndioctl: CLRed\n");
995 #endif
996
997 /* Destroy the xfer and buffer pools. */
998 pool_destroy(&vnd->sc_vxpool);
999
1000 /* Detach the disk. */
1001 disk_detach(&vnd->sc_dkdev);
1002
1003 return 0;
1004 }
1005
1006 /* ARGSUSED */
1007 static int
1008 vndioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1009 {
1010 bool force;
1011 int unit = vndunit(dev);
1012 struct vnd_softc *vnd;
1013 struct vnd_ioctl *vio;
1014 struct vattr vattr;
1015 struct nameidata nd;
1016 int error, part, pmask;
1017 size_t geomsize;
1018 int fflags;
1019 #ifdef __HAVE_OLD_DISKLABEL
1020 struct disklabel newlabel;
1021 #endif
1022
1023 #ifdef DEBUG
1024 if (vnddebug & VDB_FOLLOW)
1025 printf("vndioctl(0x%"PRIx64", 0x%lx, %p, 0x%x, %p): unit %d\n",
1026 dev, cmd, data, flag, l->l_proc, unit);
1027 #endif
1028 vnd = device_lookup_private(&vnd_cd, unit);
1029 if (vnd == NULL &&
1030 #ifdef COMPAT_30
1031 cmd != VNDIOOCGET &&
1032 #endif
1033 cmd != VNDIOCGET)
1034 return ENXIO;
1035 vio = (struct vnd_ioctl *)data;
1036
1037 /* Must be open for writes for these commands... */
1038 switch (cmd) {
1039 case VNDIOCSET:
1040 case VNDIOCCLR:
1041 #ifdef VNDIOOCSET
1042 case VNDIOOCSET:
1043 case VNDIOOCCLR:
1044 #endif
1045 case DIOCSDINFO:
1046 case DIOCWDINFO:
1047 #ifdef __HAVE_OLD_DISKLABEL
1048 case ODIOCSDINFO:
1049 case ODIOCWDINFO:
1050 #endif
1051 case DIOCKLABEL:
1052 case DIOCWLABEL:
1053 if ((flag & FWRITE) == 0)
1054 return EBADF;
1055 }
1056
1057 /* Must be initialized for these... */
1058 switch (cmd) {
1059 case VNDIOCCLR:
1060 #ifdef VNDIOOCCLR
1061 case VNDIOOCCLR:
1062 #endif
1063 case DIOCGDINFO:
1064 case DIOCSDINFO:
1065 case DIOCWDINFO:
1066 case DIOCGPART:
1067 case DIOCKLABEL:
1068 case DIOCWLABEL:
1069 case DIOCGDEFLABEL:
1070 case DIOCCACHESYNC:
1071 #ifdef __HAVE_OLD_DISKLABEL
1072 case ODIOCGDINFO:
1073 case ODIOCSDINFO:
1074 case ODIOCWDINFO:
1075 case ODIOCGDEFLABEL:
1076 #endif
1077 if ((vnd->sc_flags & VNF_INITED) == 0)
1078 return ENXIO;
1079 }
1080
1081 switch (cmd) {
1082 #ifdef VNDIOOCSET
1083 case VNDIOOCSET:
1084 #endif
1085 case VNDIOCSET:
1086 if (vnd->sc_flags & VNF_INITED)
1087 return EBUSY;
1088
1089 if ((error = vndlock(vnd)) != 0)
1090 return error;
1091
1092 fflags = FREAD;
1093 if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
1094 fflags |= FWRITE;
1095 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file);
1096 if ((error = vn_open(&nd, fflags, 0)) != 0)
1097 goto unlock_and_exit;
1098 KASSERT(l);
1099 error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_cred);
1100 if (!error && nd.ni_vp->v_type != VREG)
1101 error = EOPNOTSUPP;
1102 if (!error && vattr.va_bytes < vattr.va_size)
1103 /* File is definitely sparse, reject here */
1104 error = EINVAL;
1105 if (error) {
1106 VOP_UNLOCK(nd.ni_vp);
1107 goto close_and_exit;
1108 }
1109
1110 /* If using a compressed file, initialize its info */
1111 		/* (or abort with an error if the kernel has no compression) */
1112 if (vio->vnd_flags & VNF_COMP) {
1113 #ifdef VND_COMPRESSION
1114 struct vnd_comp_header *ch;
1115 int i;
1116 u_int32_t comp_size;
1117 u_int32_t comp_maxsize;
1118
1119 			/* allocate space for compressed file header */
1120 ch = malloc(sizeof(struct vnd_comp_header),
1121 M_TEMP, M_WAITOK);
1122
1123 /* read compressed file header */
1124 error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ch,
1125 sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
1126 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1127 if (error) {
1128 free(ch, M_TEMP);
1129 VOP_UNLOCK(nd.ni_vp);
1130 goto close_and_exit;
1131 }
1132
1133 /* save some header info */
1134 vnd->sc_comp_blksz = ntohl(ch->block_size);
1135 /* note last offset is the file byte size */
1136 vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
1137 free(ch, M_TEMP);
1138 if (vnd->sc_comp_blksz == 0 ||
1139 			    vnd->sc_comp_blksz % DEV_BSIZE != 0) {
1140 VOP_UNLOCK(nd.ni_vp);
1141 error = EINVAL;
1142 goto close_and_exit;
1143 }
1144 if (sizeof(struct vnd_comp_header) +
1145 sizeof(u_int64_t) * vnd->sc_comp_numoffs >
1146 vattr.va_size) {
1147 VOP_UNLOCK(nd.ni_vp);
1148 error = EINVAL;
1149 goto close_and_exit;
1150 }
1151
1152 /* set decompressed file size */
1153 vattr.va_size =
1154 ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1155 (u_quad_t)vnd->sc_comp_blksz;
1156
1157 /* allocate space for all the compressed offsets */
1158 vnd->sc_comp_offsets =
1159 malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1160 M_DEVBUF, M_WAITOK);
1161
1162 /* read in the offsets */
1163 error = vn_rdwr(UIO_READ, nd.ni_vp,
1164 (void *)vnd->sc_comp_offsets,
1165 sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1166 sizeof(struct vnd_comp_header), UIO_SYSSPACE,
1167 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1168 if (error) {
1169 VOP_UNLOCK(nd.ni_vp);
1170 goto close_and_exit;
1171 }
1172 			/*
1173 			 * Find the largest block size (used as the allocation limit).
1174 			 * Also convert the offsets to native byte order.
1175 			 */
1176 comp_maxsize = 0;
1177 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1178 vnd->sc_comp_offsets[i] =
1179 be64toh(vnd->sc_comp_offsets[i]);
1180 comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
1181 - vnd->sc_comp_offsets[i];
1182 if (comp_size > comp_maxsize)
1183 comp_maxsize = comp_size;
1184 }
1185 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1186 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
1187
1188 /* create compressed data buffer */
1189 vnd->sc_comp_buff = malloc(comp_maxsize,
1190 M_DEVBUF, M_WAITOK);
1191
1192 /* create decompressed buffer */
1193 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1194 M_DEVBUF, M_WAITOK);
1195 vnd->sc_comp_buffblk = -1;
1196
1197 /* Initialize decompress stream */
1198 memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1199 vnd->sc_comp_stream.zalloc = vnd_alloc;
1200 vnd->sc_comp_stream.zfree = vnd_free;
1201 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1202 if (error) {
1203 if (vnd->sc_comp_stream.msg)
1204 printf("vnd%d: compressed file, %s\n",
1205 unit, vnd->sc_comp_stream.msg);
1206 VOP_UNLOCK(nd.ni_vp);
1207 error = EINVAL;
1208 goto close_and_exit;
1209 }
1210
1211 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
1212 #else /* !VND_COMPRESSION */
1213 VOP_UNLOCK(nd.ni_vp);
1214 error = EOPNOTSUPP;
1215 goto close_and_exit;
1216 #endif /* VND_COMPRESSION */
1217 }
1218
1219 VOP_UNLOCK(nd.ni_vp);
1220 vnd->sc_vp = nd.ni_vp;
1221 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
1222
1223 /*
1224 * Use pseudo-geometry specified. If none was provided,
1225 * use "standard" Adaptec fictitious geometry.
1226 */
1227 if (vio->vnd_flags & VNDIOF_HASGEOM) {
1228
1229 memcpy(&vnd->sc_geom, &vio->vnd_geom,
1230 sizeof(vio->vnd_geom));
1231
1232 /*
1233 * Sanity-check the sector size.
1234 * XXX Don't allow secsize < DEV_BSIZE. Should
1235 * XXX we?
1236 */
1237 if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
1238 (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
1239 vnd->sc_geom.vng_ncylinders == 0 ||
1240 (vnd->sc_geom.vng_ntracks *
1241 vnd->sc_geom.vng_nsectors) == 0) {
1242 error = EINVAL;
1243 goto close_and_exit;
1244 }
1245
1246 /*
1247 * Compute the size (in DEV_BSIZE blocks) specified
1248 * by the geometry.
1249 */
1250 geomsize = (vnd->sc_geom.vng_nsectors *
1251 vnd->sc_geom.vng_ntracks *
1252 vnd->sc_geom.vng_ncylinders) *
1253 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1254
1255 /*
1256 * Sanity-check the size against the specified
1257 * geometry.
1258 */
1259 if (vnd->sc_size < geomsize) {
1260 error = EINVAL;
1261 goto close_and_exit;
1262 }
1263 } else if (vnd->sc_size >= (32 * 64)) {
1264 /*
1265 * Size must be at least 2048 DEV_BSIZE blocks
1266 * (1M) in order to use this geometry.
1267 */
1268 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1269 vnd->sc_geom.vng_nsectors = 32;
1270 vnd->sc_geom.vng_ntracks = 64;
1271 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1272 } else {
1273 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1274 vnd->sc_geom.vng_nsectors = 1;
1275 vnd->sc_geom.vng_ntracks = 1;
1276 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1277 }
1278
1279 vnd_set_properties(vnd);
1280
1281 if (vio->vnd_flags & VNDIOF_READONLY) {
1282 vnd->sc_flags |= VNF_READONLY;
1283 }
1284
1285 if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1286 goto close_and_exit;
1287
1288 vndthrottle(vnd, vnd->sc_vp);
1289 vio->vnd_osize = dbtob(vnd->sc_size);
1290 #ifdef VNDIOOCSET
1291 if (cmd != VNDIOOCSET)
1292 #endif
1293 vio->vnd_size = dbtob(vnd->sc_size);
1294 vnd->sc_flags |= VNF_INITED;
1295
1296 /* create the kernel thread, wait for it to be up */
1297 error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1298 &vnd->sc_kthread, device_xname(vnd->sc_dev));
1299 if (error)
1300 goto close_and_exit;
1301 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1302 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1303 }
1304 #ifdef DEBUG
1305 if (vnddebug & VDB_INIT)
1306 printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
1307 vnd->sc_vp, (unsigned long) vnd->sc_size,
1308 vnd->sc_geom.vng_secsize,
1309 vnd->sc_geom.vng_nsectors,
1310 vnd->sc_geom.vng_ntracks,
1311 vnd->sc_geom.vng_ncylinders);
1312 #endif
1313
1314 /* Attach the disk. */
1315 disk_attach(&vnd->sc_dkdev);
1316 disk_blocksize(&vnd->sc_dkdev, vnd->sc_geom.vng_secsize);
1317
1318 /* Initialize the xfer and buffer pools. */
1319 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1320 0, 0, "vndxpl", NULL, IPL_BIO);
1321
1322 /* Try and read the disklabel. */
1323 vndgetdisklabel(dev, vnd);
1324
1325 vndunlock(vnd);
1326
1327 break;
1328
1329 close_and_exit:
1330 (void) vn_close(nd.ni_vp, fflags, l->l_cred);
1331 unlock_and_exit:
1332 #ifdef VND_COMPRESSION
1333 /* free any allocated memory (for compressed file) */
1334 if (vnd->sc_comp_offsets) {
1335 free(vnd->sc_comp_offsets, M_DEVBUF);
1336 vnd->sc_comp_offsets = NULL;
1337 }
1338 if (vnd->sc_comp_buff) {
1339 free(vnd->sc_comp_buff, M_DEVBUF);
1340 vnd->sc_comp_buff = NULL;
1341 }
1342 if (vnd->sc_comp_decombuf) {
1343 free(vnd->sc_comp_decombuf, M_DEVBUF);
1344 vnd->sc_comp_decombuf = NULL;
1345 }
1346 #endif /* VND_COMPRESSION */
1347 vndunlock(vnd);
1348 return error;
1349
1350 #ifdef VNDIOOCCLR
1351 case VNDIOOCCLR:
1352 #endif
1353 case VNDIOCCLR:
1354 part = DISKPART(dev);
1355 pmask = (1 << part);
1356 force = (vio->vnd_flags & VNDIOF_FORCE) != 0;
1357
1358 if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1359 return error;
1360
1361 break;
1362
1363 #ifdef COMPAT_30
1364 case VNDIOOCGET: {
1365 struct vnd_ouser *vnu;
1366 struct vattr va;
1367 vnu = (struct vnd_ouser *)data;
1368 KASSERT(l);
1369 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1370 case 0:
1371 vnu->vnu_dev = va.va_fsid;
1372 vnu->vnu_ino = va.va_fileid;
1373 break;
1374 case -1:
1375 /* unused is not an error */
1376 vnu->vnu_dev = 0;
1377 vnu->vnu_ino = 0;
1378 break;
1379 default:
1380 return error;
1381 }
1382 break;
1383 }
1384 #endif
1385 case VNDIOCGET: {
1386 struct vnd_user *vnu;
1387 struct vattr va;
1388 vnu = (struct vnd_user *)data;
1389 KASSERT(l);
1390 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1391 case 0:
1392 vnu->vnu_dev = va.va_fsid;
1393 vnu->vnu_ino = va.va_fileid;
1394 break;
1395 case -1:
1396 /* unused is not an error */
1397 vnu->vnu_dev = 0;
1398 vnu->vnu_ino = 0;
1399 break;
1400 default:
1401 return error;
1402 }
1403 break;
1404 }
1405
1406 case DIOCGDINFO:
1407 *(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
1408 break;
1409
1410 #ifdef __HAVE_OLD_DISKLABEL
1411 case ODIOCGDINFO:
1412 newlabel = *(vnd->sc_dkdev.dk_label);
1413 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1414 return ENOTTY;
1415 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1416 break;
1417 #endif
1418
1419 case DIOCGPART:
1420 ((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
1421 ((struct partinfo *)data)->part =
1422 &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1423 break;
1424
1425 case DIOCWDINFO:
1426 case DIOCSDINFO:
1427 #ifdef __HAVE_OLD_DISKLABEL
1428 case ODIOCWDINFO:
1429 case ODIOCSDINFO:
1430 #endif
1431 {
1432 struct disklabel *lp;
1433
1434 if ((error = vndlock(vnd)) != 0)
1435 return error;
1436
1437 vnd->sc_flags |= VNF_LABELLING;
1438
1439 #ifdef __HAVE_OLD_DISKLABEL
1440 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1441 memset(&newlabel, 0, sizeof newlabel);
1442 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1443 lp = &newlabel;
1444 } else
1445 #endif
1446 lp = (struct disklabel *)data;
1447
1448 error = setdisklabel(vnd->sc_dkdev.dk_label,
1449 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1450 if (error == 0) {
1451 if (cmd == DIOCWDINFO
1452 #ifdef __HAVE_OLD_DISKLABEL
1453 || cmd == ODIOCWDINFO
1454 #endif
1455 )
1456 error = writedisklabel(VNDLABELDEV(dev),
1457 vndstrategy, vnd->sc_dkdev.dk_label,
1458 vnd->sc_dkdev.dk_cpulabel);
1459 }
1460
1461 vnd->sc_flags &= ~VNF_LABELLING;
1462
1463 vndunlock(vnd);
1464
1465 if (error)
1466 return error;
1467 break;
1468 }
1469
1470 case DIOCKLABEL:
1471 if (*(int *)data != 0)
1472 vnd->sc_flags |= VNF_KLABEL;
1473 else
1474 vnd->sc_flags &= ~VNF_KLABEL;
1475 break;
1476
1477 case DIOCWLABEL:
1478 if (*(int *)data != 0)
1479 vnd->sc_flags |= VNF_WLABEL;
1480 else
1481 vnd->sc_flags &= ~VNF_WLABEL;
1482 break;
1483
1484 case DIOCGDEFLABEL:
1485 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1486 break;
1487
1488 #ifdef __HAVE_OLD_DISKLABEL
1489 case ODIOCGDEFLABEL:
1490 vndgetdefaultlabel(vnd, &newlabel);
1491 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1492 return ENOTTY;
1493 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1494 break;
1495 #endif
1496
1497 case DIOCCACHESYNC:
1498 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1499 error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1500 FSYNC_WAIT | FSYNC_DATAONLY | FSYNC_CACHE, 0, 0);
1501 VOP_UNLOCK(vnd->sc_vp);
1502 return error;
1503
1504 default:
1505 return ENOTTY;
1506 }
1507
1508 return 0;
1509 }
1510
1511 /*
1512  * Duplicate the current process's credentials. Since we are called only
1513 * as the result of a SET ioctl and only root can do that, any future access
1514 * to this "disk" is essentially as root. Note that credentials may change
1515 * if some other uid can write directly to the mapped file (NFS).
1516 */
1517 static int
1518 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1519 {
1520 struct uio auio;
1521 struct iovec aiov;
1522 char *tmpbuf;
1523 int error;
1524
1525 vnd->sc_cred = kauth_cred_dup(cred);
1526 tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1527
1528 /* XXX: Horrible kludge to establish credentials for NFS */
1529 aiov.iov_base = tmpbuf;
1530 aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
1531 auio.uio_iov = &aiov;
1532 auio.uio_iovcnt = 1;
1533 auio.uio_offset = 0;
1534 auio.uio_rw = UIO_READ;
1535 auio.uio_resid = aiov.iov_len;
1536 UIO_SETUP_SYSSPACE(&auio);
1537 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1538 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1539 if (error == 0) {
1540 /*
1541 * Because vnd does all IO directly through the vnode
1542 		 * we need to flush (at least) the buffer read by the
1543 		 * above VOP_READ out of the buffer cache to prevent
1544 		 * cache incoherencies.  Also, be careful to write dirty
1545 * buffers back to stable storage.
1546 */
1547 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1548 curlwp, 0, 0);
1549 }
1550 VOP_UNLOCK(vnd->sc_vp);
1551
1552 free(tmpbuf, M_TEMP);
1553 return error;
1554 }
1555
1556 /*
1557 * Set maxactive based on FS type
1558 */
1559 static void
1560 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1561 {
1562
1563 if (vp->v_tag == VT_NFS)
1564 vnd->sc_maxactive = 2;
1565 else
1566 vnd->sc_maxactive = 8;
1567
1568 if (vnd->sc_maxactive < 1)
1569 vnd->sc_maxactive = 1;
1570 }
1571
1572 #if 0
1573 static void
1574 vndshutdown(void)
1575 {
1576 struct vnd_softc *vnd;
1577
1578 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1579 if (vnd->sc_flags & VNF_INITED)
1580 vndclear(vnd);
1581 }
1582 #endif
1583
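/*
 * Release everything held by a configured unit: revoke the device
 * vnodes, drain the queue, stop the worker thread, free any
 * compression state and close the backing vnode.
 */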
1584 static void
1585 vndclear(struct vnd_softc *vnd, int myminor)
1586 {
1587 struct vnode *vp = vnd->sc_vp;
1588 int fflags = FREAD;
1589 int bmaj, cmaj, i, mn;
1590 int s;
1591
1592 #ifdef DEBUG
1593 if (vnddebug & VDB_FOLLOW)
1594 printf("vndclear(%p): vp %p\n", vnd, vp);
1595 #endif
1596 /* locate the major number */
1597 bmaj = bdevsw_lookup_major(&vnd_bdevsw);
1598 cmaj = cdevsw_lookup_major(&vnd_cdevsw);
1599
1600 /* Nuke the vnodes for any open instances */
1601 for (i = 0; i < MAXPARTITIONS; i++) {
1602 mn = DISKMINOR(device_unit(vnd->sc_dev), i);
1603 vdevgone(bmaj, mn, mn, VBLK);
1604 		if (mn != myminor) /* XXX avoid killing our own vnode */
1605 vdevgone(cmaj, mn, mn, VCHR);
1606 }
1607
1608 if ((vnd->sc_flags & VNF_READONLY) == 0)
1609 fflags |= FWRITE;
1610
1611 s = splbio();
1612 bufq_drain(vnd->sc_tab);
1613 splx(s);
1614
1615 vnd->sc_flags |= VNF_VUNCONF;
1616 wakeup(&vnd->sc_tab);
1617 while (vnd->sc_flags & VNF_KTHREAD)
1618 tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1619
1620 #ifdef VND_COMPRESSION
1621 /* free the compressed file buffers */
1622 if (vnd->sc_flags & VNF_COMP) {
1623 if (vnd->sc_comp_offsets) {
1624 free(vnd->sc_comp_offsets, M_DEVBUF);
1625 vnd->sc_comp_offsets = NULL;
1626 }
1627 if (vnd->sc_comp_buff) {
1628 free(vnd->sc_comp_buff, M_DEVBUF);
1629 vnd->sc_comp_buff = NULL;
1630 }
1631 if (vnd->sc_comp_decombuf) {
1632 free(vnd->sc_comp_decombuf, M_DEVBUF);
1633 vnd->sc_comp_decombuf = NULL;
1634 }
1635 }
1636 #endif /* VND_COMPRESSION */
1637 vnd->sc_flags &=
1638 ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
1639 | VNF_VUNCONF | VNF_COMP | VNF_CLEARING);
1640 if (vp == NULL)
1641 panic("vndclear: null vp");
1642 (void) vn_close(vp, fflags, vnd->sc_cred);
1643 kauth_cred_free(vnd->sc_cred);
1644 vnd->sc_vp = NULL;
1645 vnd->sc_cred = NULL;
1646 vnd->sc_size = 0;
1647 }
1648
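/*
 * Return the size of the given partition in DEV_BSIZE units for
 * swapping/dumping; only FS_SWAP partitions report a size.
 */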
1649 static int
1650 vndsize(dev_t dev)
1651 {
1652 struct vnd_softc *sc;
1653 struct disklabel *lp;
1654 int part, unit, omask;
1655 int size;
1656
1657 unit = vndunit(dev);
1658 sc = device_lookup_private(&vnd_cd, unit);
1659 if (sc == NULL)
1660 return -1;
1661
1662 if ((sc->sc_flags & VNF_INITED) == 0)
1663 return -1;
1664
1665 part = DISKPART(dev);
1666 omask = sc->sc_dkdev.dk_openmask & (1 << part);
1667 lp = sc->sc_dkdev.dk_label;
1668
1669 if (omask == 0 && vndopen(dev, 0, S_IFBLK, curlwp)) /* XXX */
1670 return -1;
1671
1672 if (lp->d_partitions[part].p_fstype != FS_SWAP)
1673 size = -1;
1674 else
1675 size = lp->d_partitions[part].p_size *
1676 (lp->d_secsize / DEV_BSIZE);
1677
1678 if (omask == 0 && vndclose(dev, 0, S_IFBLK, curlwp)) /* XXX */
1679 return -1;
1680
1681 return size;
1682 }
1683
1684 static int
1685 vnddump(dev_t dev, daddr_t blkno, void *va,
1686 size_t size)
1687 {
1688
1689 /* Not implemented. */
1690 return ENXIO;
1691 }
1692
1693 static void
1694 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
1695 {
1696 struct vndgeom *vng = &sc->sc_geom;
1697 struct partition *pp;
1698
1699 memset(lp, 0, sizeof(*lp));
1700
1701 lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1702 lp->d_secsize = vng->vng_secsize;
1703 lp->d_nsectors = vng->vng_nsectors;
1704 lp->d_ntracks = vng->vng_ntracks;
1705 lp->d_ncylinders = vng->vng_ncylinders;
1706 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1707
1708 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1709 lp->d_type = DTYPE_VND;
1710 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1711 lp->d_rpm = 3600;
1712 lp->d_interleave = 1;
1713 lp->d_flags = 0;
1714
1715 pp = &lp->d_partitions[RAW_PART];
1716 pp->p_offset = 0;
1717 pp->p_size = lp->d_secperunit;
1718 pp->p_fstype = FS_UNUSED;
1719 lp->d_npartitions = RAW_PART + 1;
1720
1721 lp->d_magic = DISKMAGIC;
1722 lp->d_magic2 = DISKMAGIC;
1723 lp->d_checksum = dkcksum(lp);
1724 }
1725
1726 /*
1727 * Read the disklabel from a vnd. If one is not present, create a fake one.
1728 */
1729 static void
1730 vndgetdisklabel(dev_t dev, struct vnd_softc *sc)
1731 {
1732 const char *errstring;
1733 struct disklabel *lp = sc->sc_dkdev.dk_label;
1734 struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1735 int i;
1736
1737 memset(clp, 0, sizeof(*clp));
1738
1739 vndgetdefaultlabel(sc, lp);
1740
1741 /*
1742 * Call the generic disklabel extraction routine.
1743 */
1744 errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1745 if (errstring) {
1746 /*
1747 * Lack of disklabel is common, but we print the warning
1748 * anyway, since it might contain other useful information.
1749 */
1750 aprint_normal_dev(sc->sc_dev, "%s\n", errstring);
1751
1752 /*
1753 * For historical reasons, if there's no disklabel
1754 * present, all partitions must be FS_BSDFFS and
1755 * occupy the entire disk.
1756 */
1757 for (i = 0; i < MAXPARTITIONS; i++) {
1758 /*
1759 			 * Don't wipe out port-specific hacks (such as the
1760 			 * DOS partition hack of the i386 port).
1761 */
1762 if (lp->d_partitions[i].p_size != 0)
1763 continue;
1764
1765 lp->d_partitions[i].p_size = lp->d_secperunit;
1766 lp->d_partitions[i].p_offset = 0;
1767 lp->d_partitions[i].p_fstype = FS_BSDFFS;
1768 }
1769
1770 strncpy(lp->d_packname, "default label",
1771 sizeof(lp->d_packname));
1772
1773 lp->d_npartitions = MAXPARTITIONS;
1774 lp->d_checksum = dkcksum(lp);
1775 }
1776
1777 /* In-core label now valid. */
1778 sc->sc_flags |= VNF_VLABEL;
1779 }
1780
1781 /*
1782 * Wait interruptibly for an exclusive lock.
1783 *
1784 * XXX
1785 * Several drivers do this; it should be abstracted and made MP-safe.
1786 */
1787 static int
1788 vndlock(struct vnd_softc *sc)
1789 {
1790 int error;
1791
1792 while ((sc->sc_flags & VNF_LOCKED) != 0) {
1793 sc->sc_flags |= VNF_WANTED;
1794 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1795 return error;
1796 }
1797 sc->sc_flags |= VNF_LOCKED;
1798 return 0;
1799 }
1800
1801 /*
1802 * Unlock and wake up any waiters.
1803 */
1804 static void
1805 vndunlock(struct vnd_softc *sc)
1806 {
1807
1808 sc->sc_flags &= ~VNF_LOCKED;
1809 if ((sc->sc_flags & VNF_WANTED) != 0) {
1810 sc->sc_flags &= ~VNF_WANTED;
1811 wakeup(sc);
1812 }
1813 }
1814
1815 #ifdef VND_COMPRESSION
1816 /* compressed file read */
1817 static void
1818 compstrategy(struct buf *bp, off_t bn)
1819 {
1820 int error;
1821 int unit = vndunit(bp->b_dev);
1822 struct vnd_softc *vnd =
1823 device_lookup_private(&vnd_cd, unit);
1824 u_int32_t comp_block;
1825 struct uio auio;
1826 char *addr;
1827 int s;
1828
1829 /* set up constants for data move */
1830 auio.uio_rw = UIO_READ;
1831 UIO_SETUP_SYSSPACE(&auio);
1832
1833 /* read, and transfer the data */
1834 addr = bp->b_data;
1835 bp->b_resid = bp->b_bcount;
1836 s = splbio();
1837 while (bp->b_resid > 0) {
1838 unsigned length;
1839 size_t length_in_buffer;
1840 u_int32_t offset_in_buffer;
1841 struct iovec aiov;
1842
1843 /* calculate the compressed block number */
1844 comp_block = bn / (off_t)vnd->sc_comp_blksz;
1845
1846 /* check for good block number */
1847 if (comp_block >= vnd->sc_comp_numoffs) {
1848 bp->b_error = EINVAL;
1849 splx(s);
1850 return;
1851 }
1852
1853 /* read in the compressed block, if not in buffer */
1854 if (comp_block != vnd->sc_comp_buffblk) {
1855 length = vnd->sc_comp_offsets[comp_block + 1] -
1856 vnd->sc_comp_offsets[comp_block];
1857 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1858 error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
1859 length, vnd->sc_comp_offsets[comp_block],
1860 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
1861 NULL, NULL);
1862 if (error) {
1863 bp->b_error = error;
1864 VOP_UNLOCK(vnd->sc_vp);
1865 splx(s);
1866 return;
1867 }
1868 /* uncompress the buffer */
1869 vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
1870 vnd->sc_comp_stream.avail_in = length;
1871 vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
1872 vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
1873 inflateReset(&vnd->sc_comp_stream);
1874 error = inflate(&vnd->sc_comp_stream, Z_FINISH);
1875 if (error != Z_STREAM_END) {
1876 if (vnd->sc_comp_stream.msg)
1877 aprint_normal_dev(vnd->sc_dev,
1878 "compressed file, %s\n",
1879 vnd->sc_comp_stream.msg);
1880 bp->b_error = EBADMSG;
1881 VOP_UNLOCK(vnd->sc_vp);
1882 splx(s);
1883 return;
1884 }
1885 vnd->sc_comp_buffblk = comp_block;
1886 VOP_UNLOCK(vnd->sc_vp);
1887 }
1888
1889 /* transfer the usable uncompressed data */
1890 offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
1891 length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
1892 if (length_in_buffer > bp->b_resid)
1893 length_in_buffer = bp->b_resid;
1894 auio.uio_iov = &aiov;
1895 auio.uio_iovcnt = 1;
1896 aiov.iov_base = addr;
1897 aiov.iov_len = length_in_buffer;
1898 auio.uio_resid = aiov.iov_len;
1899 auio.uio_offset = 0;
1900 error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
1901 length_in_buffer, &auio);
1902 if (error) {
1903 bp->b_error = error;
1904 splx(s);
1905 return;
1906 }
1907
1908 bn += length_in_buffer;
1909 addr += length_in_buffer;
1910 bp->b_resid -= length_in_buffer;
1911 }
1912 splx(s);
1913 }
1914
1915 /* compression memory allocation routines */
1916 static void *
1917 vnd_alloc(void *aux, u_int items, u_int siz)
1918 {
1919 return malloc(items * siz, M_TEMP, M_NOWAIT);
1920 }
1921
1922 static void
1923 vnd_free(void *aux, void *ptr)
1924 {
1925 free(ptr, M_TEMP);
1926 }
1927 #endif /* VND_COMPRESSION */
1928
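/*
 * Publish the configured geometry in the device's "disk-info"
 * property dictionary so it can be queried through proplib.
 */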
1929 static void
1930 vnd_set_properties(struct vnd_softc *vnd)
1931 {
1932 prop_dictionary_t disk_info, odisk_info, geom;
1933
1934 disk_info = prop_dictionary_create();
1935
1936 geom = prop_dictionary_create();
1937
1938 prop_dictionary_set_uint64(geom, "sectors-per-unit",
1939 vnd->sc_geom.vng_nsectors * vnd->sc_geom.vng_ntracks *
1940 vnd->sc_geom.vng_ncylinders);
1941
1942 prop_dictionary_set_uint32(geom, "sector-size",
1943 vnd->sc_geom.vng_secsize);
1944
1945 prop_dictionary_set_uint16(geom, "sectors-per-track",
1946 vnd->sc_geom.vng_nsectors);
1947
1948 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
1949 vnd->sc_geom.vng_ntracks);
1950
1951 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
1952 vnd->sc_geom.vng_ncylinders);
1953
1954 prop_dictionary_set(disk_info, "geometry", geom);
1955 prop_object_release(geom);
1956
1957 prop_dictionary_set(device_properties(vnd->sc_dev),
1958 "disk-info", disk_info);
1959
1960 /*
1961 * Don't release disk_info here; we keep a reference to it.
1962 * disk_detach() will release it when we go away.
1963 */
1964
1965 odisk_info = vnd->sc_dkdev.dk_info;
1966 vnd->sc_dkdev.dk_info = disk_info;
1967 if (odisk_info)
1968 prop_object_release(odisk_info);
1969 }
1970
1971 #ifdef _MODULE
1972
1973 #include <sys/module.h>
1974
1975 MODULE(MODULE_CLASS_DRIVER, vnd, NULL);
1976 CFDRIVER_DECL(vnd, DV_DISK, NULL);
1977
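/*
 * Module control: register the cfdriver, cfattach and device switch
 * entries on load and undo the registration on unload.
 */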
1978 static int
1979 vnd_modcmd(modcmd_t cmd, void *arg)
1980 {
1981 int bmajor = -1, cmajor = -1, error = 0;
1982
1983 switch (cmd) {
1984 case MODULE_CMD_INIT:
1985 error = config_cfdriver_attach(&vnd_cd);
1986 if (error)
1987 break;
1988
1989 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
1990 if (error) {
1991 config_cfdriver_detach(&vnd_cd);
1992 aprint_error("%s: unable to register cfattach\n",
1993 vnd_cd.cd_name);
1994 break;
1995 }
1996
1997 error = devsw_attach("vnd", &vnd_bdevsw, &bmajor,
1998 &vnd_cdevsw, &cmajor);
1999 if (error) {
2000 config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2001 config_cfdriver_detach(&vnd_cd);
2002 break;
2003 }
2004
2005 break;
2006
2007 case MODULE_CMD_FINI:
2008 error = config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
2009 if (error)
2010 break;
2011 config_cfdriver_detach(&vnd_cd);
2012 devsw_detach(&vnd_bdevsw, &vnd_cdevsw);
2013 break;
2014
2015 case MODULE_CMD_STAT:
2016 return ENOTTY;
2017
2018 default:
2019 return ENOTTY;
2020 }
2021
2022 return error;
2023 }
2024
2025 #endif
2026