/*	$NetBSD: vnd.c,v 1.200 2009/05/06 22:38:42 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1990, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * from: Utah $Hdr: vn.c 1.13 94/04/02$
65 *
66 * @(#)vn.c 8.9 (Berkeley) 5/14/95
67 */
68
69 /*
70 * Copyright (c) 1988 University of Utah.
71 *
72 * This code is derived from software contributed to Berkeley by
73 * the Systems Programming Group of the University of Utah Computer
74 * Science Department.
75 *
76 * Redistribution and use in source and binary forms, with or without
77 * modification, are permitted provided that the following conditions
78 * are met:
79 * 1. Redistributions of source code must retain the above copyright
80 * notice, this list of conditions and the following disclaimer.
81 * 2. Redistributions in binary form must reproduce the above copyright
82 * notice, this list of conditions and the following disclaimer in the
83 * documentation and/or other materials provided with the distribution.
84 * 3. All advertising materials mentioning features or use of this software
85 * must display the following acknowledgement:
86 * This product includes software developed by the University of
87 * California, Berkeley and its contributors.
88 * 4. Neither the name of the University nor the names of its contributors
89 * may be used to endorse or promote products derived from this software
90 * without specific prior written permission.
91 *
92 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
95 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
96 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
102 * SUCH DAMAGE.
103 *
104 * from: Utah $Hdr: vn.c 1.13 94/04/02$
105 *
106 * @(#)vn.c 8.9 (Berkeley) 5/14/95
107 */
108
109 /*
110 * Vnode disk driver.
111 *
112 * Block/character interface to a vnode. Allows one to treat a file
113 * as a disk (e.g. build a filesystem in it, mount it, etc.).
114 *
115 * NOTE 1: If the vnode supports the VOP_BMAP and VOP_STRATEGY operations,
116 * this uses them to avoid distorting the local buffer cache. If those
117 * block-level operations are not available, this falls back to the regular
 * read and write calls. Using these may distort the cache in some cases,
 * but it is better to have the driver working than to prevent it from
 * working on file systems where the block-level operations are not
 * implemented for whatever reason.
122 *
123 * NOTE 2: There is a security issue involved with this driver.
 * Once mounted, all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file; the protection of the mapped file is ignored (effectively,
 * by using root credentials in all transactions).
128 *
129 * NOTE 3: Doesn't interact with leases, should it?
130 */
131
132 #include <sys/cdefs.h>
133 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.200 2009/05/06 22:38:42 ad Exp $");
134
135 #if defined(_KERNEL_OPT)
136 #include "fs_nfs.h"
137 #include "opt_vnd.h"
138 #endif
139
140 #include <sys/param.h>
141 #include <sys/systm.h>
142 #include <sys/namei.h>
143 #include <sys/proc.h>
144 #include <sys/kthread.h>
145 #include <sys/errno.h>
146 #include <sys/buf.h>
147 #include <sys/bufq.h>
148 #include <sys/malloc.h>
149 #include <sys/ioctl.h>
150 #include <sys/disklabel.h>
151 #include <sys/device.h>
152 #include <sys/disk.h>
153 #include <sys/stat.h>
154 #include <sys/mount.h>
155 #include <sys/vnode.h>
156 #include <sys/file.h>
157 #include <sys/uio.h>
158 #include <sys/conf.h>
159 #include <sys/kauth.h>
160
161 #include <net/zlib.h>
162
163 #include <miscfs/genfs/genfs.h>
164 #include <miscfs/specfs/specdev.h>
165
166 #include <dev/vndvar.h>
167
168 #include <prop/proplib.h>
169
170 #if defined(VNDDEBUG) && !defined(DEBUG)
171 #define DEBUG
172 #endif
173
174 #ifdef DEBUG
175 int dovndcluster = 1;
176 #define VDB_FOLLOW 0x01
177 #define VDB_INIT 0x02
178 #define VDB_IO 0x04
179 #define VDB_LABEL 0x08
180 int vnddebug = 0x00;
181 #endif
182
183 #define vndunit(x) DISKUNIT(x)
184
185 struct vndxfer {
186 struct buf vx_buf;
187 struct vnd_softc *vx_vnd;
188 };
189 #define VND_BUFTOXFER(bp) ((struct vndxfer *)(void *)bp)
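/*
 * VND_BUFTOXFER relies on vx_buf being the first member of struct vndxfer:
 * the buf passed to vndiodone() is the vx_buf embedded in a vndxfer, so
 * casting the buf pointer back recovers the enclosing transfer header
 * (vndiodone() asserts this with KASSERT(&vnx->vx_buf == bp)).
 */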
190
191 #define VND_GETXFER(vnd) pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
192 #define VND_PUTXFER(vnd, vx) pool_put(&(vnd)->sc_vxpool, (vx))
193
194 #define VNDLABELDEV(dev) \
195 (MAKEDISKDEV(major((dev)), vndunit((dev)), RAW_PART))
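/*
 * VNDLABELDEV maps any vnd partition device to the raw partition of the
 * same unit; it is the device handed to readdisklabel()/writedisklabel()
 * when the on-"disk" label is read or written below.
 */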
196
197 /* called by main() at boot time */
198 void vndattach(int);
199
200 static void vndclear(struct vnd_softc *, int);
201 static int vnddoclear(struct vnd_softc *, int, int, bool);
202 static int vndsetcred(struct vnd_softc *, kauth_cred_t);
203 static void vndthrottle(struct vnd_softc *, struct vnode *);
204 static void vndiodone(struct buf *);
205 #if 0
206 static void vndshutdown(void);
207 #endif
208
209 static void vndgetdefaultlabel(struct vnd_softc *, struct disklabel *);
210 static void vndgetdisklabel(dev_t, struct vnd_softc *);
211
212 static int vndlock(struct vnd_softc *);
213 static void vndunlock(struct vnd_softc *);
214 #ifdef VND_COMPRESSION
215 static void compstrategy(struct buf *, off_t);
216 static void *vnd_alloc(void *, u_int, u_int);
217 static void vnd_free(void *, void *);
218 #endif /* VND_COMPRESSION */
219
220 static void vndthread(void *);
221 static bool vnode_has_op(const struct vnode *, int);
222 static void handle_with_rdwr(struct vnd_softc *, const struct buf *,
223 struct buf *);
224 static void handle_with_strategy(struct vnd_softc *, const struct buf *,
225 struct buf *);
226 static void vnd_set_properties(struct vnd_softc *);
227
228 static dev_type_open(vndopen);
229 static dev_type_close(vndclose);
230 static dev_type_read(vndread);
231 static dev_type_write(vndwrite);
232 static dev_type_ioctl(vndioctl);
233 static dev_type_strategy(vndstrategy);
234 static dev_type_dump(vnddump);
235 static dev_type_size(vndsize);
236
237 const struct bdevsw vnd_bdevsw = {
238 vndopen, vndclose, vndstrategy, vndioctl, vnddump, vndsize, D_DISK
239 };
240
241 const struct cdevsw vnd_cdevsw = {
242 vndopen, vndclose, vndread, vndwrite, vndioctl,
243 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
244 };
245
246 static int vnd_match(device_t, cfdata_t, void *);
247 static void vnd_attach(device_t, device_t, void *);
248 static int vnd_detach(device_t, int);
249
250 CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
251 vnd_match, vnd_attach, vnd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
252 extern struct cfdriver vnd_cd;
253
254 static struct vnd_softc *vnd_spawn(int);
255 int vnd_destroy(device_t);
256
257 void
258 vndattach(int num)
259 {
260 int error;
261
262 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
263 if (error)
264 aprint_error("%s: unable to register cfattach\n",
265 vnd_cd.cd_name);
266 }
267
268 static int
269 vnd_match(device_t self, cfdata_t cfdata, void *aux)
270 {
271
272 return 1;
273 }
274
275 static void
276 vnd_attach(device_t parent, device_t self, void *aux)
277 {
278 struct vnd_softc *sc = device_private(self);
279
280 sc->sc_dev = self;
281 sc->sc_comp_offsets = NULL;
282 sc->sc_comp_buff = NULL;
283 sc->sc_comp_decombuf = NULL;
284 bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
285 disk_init(&sc->sc_dkdev, device_xname(self), NULL);
286 if (!pmf_device_register(self, NULL, NULL))
287 aprint_error_dev(self, "couldn't establish power handler\n");
288 }
289
290 static int
291 vnd_detach(device_t self, int flags)
292 {
293 int error;
294 struct vnd_softc *sc = device_private(self);
295
296 if (sc->sc_flags & VNF_INITED) {
297 error = vnddoclear(sc, 0, -1, (flags & DETACH_FORCE) != 0);
298 if (error != 0)
299 return error;
300 }
301
302 pmf_device_deregister(self);
303 bufq_free(sc->sc_tab);
304 disk_destroy(&sc->sc_dkdev);
305
306 return 0;
307 }
308
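/*
 * Create a vnd instance on demand.  vndopen() calls this when a unit that
 * does not yet exist is opened, building the cfdata by hand and attaching
 * it as a pseudo-device, so units need not be pre-declared in the kernel
 * configuration.
 */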
309 static struct vnd_softc *
310 vnd_spawn(int unit)
311 {
312 struct cfdata *cf;
313
314 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
315 cf->cf_name = vnd_cd.cd_name;
316 cf->cf_atname = vnd_cd.cd_name;
317 cf->cf_unit = unit;
318 cf->cf_fstate = FSTATE_STAR;
319
320 return device_private(config_attach_pseudo(cf));
321 }
322
323 int
324 vnd_destroy(device_t dev)
325 {
326 int error;
327 cfdata_t cf;
328
329 cf = device_cfdata(dev);
330 error = config_detach(dev, DETACH_QUIET);
331 if (error)
332 return error;
333 free(cf, M_DEVBUF);
334 return 0;
335 }
336
337 static int
338 vndopen(dev_t dev, int flags, int mode, struct lwp *l)
339 {
340 int unit = vndunit(dev);
341 struct vnd_softc *sc;
342 int error = 0, part, pmask;
343 struct disklabel *lp;
344
345 #ifdef DEBUG
346 if (vnddebug & VDB_FOLLOW)
347 printf("vndopen(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
348 #endif
349 sc = device_lookup_private(&vnd_cd, unit);
350 if (sc == NULL) {
351 sc = vnd_spawn(unit);
352 if (sc == NULL)
353 return ENOMEM;
354 }
355
356 if ((error = vndlock(sc)) != 0)
357 return error;
358
	if ((sc->sc_flags & VNF_CLEARING) != 0) {
		error = ENXIO;
		goto done;
	}
361
362 lp = sc->sc_dkdev.dk_label;
363
364 part = DISKPART(dev);
365 pmask = (1 << part);
366
367 /*
368 * If we're initialized, check to see if there are any other
369 * open partitions. If not, then it's safe to update the
370 * in-core disklabel. Only read the disklabel if it is
371 * not already valid.
372 */
373 if ((sc->sc_flags & (VNF_INITED|VNF_VLABEL)) == VNF_INITED &&
374 sc->sc_dkdev.dk_openmask == 0)
375 vndgetdisklabel(dev, sc);
376
/* Check that the partition exists. */
378 if (part != RAW_PART) {
379 if (((sc->sc_flags & VNF_INITED) == 0) ||
380 ((part >= lp->d_npartitions) ||
381 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
382 error = ENXIO;
383 goto done;
384 }
385 }
386
387 /* Prevent our unit from being unconfigured while open. */
388 switch (mode) {
389 case S_IFCHR:
390 sc->sc_dkdev.dk_copenmask |= pmask;
391 break;
392
393 case S_IFBLK:
394 sc->sc_dkdev.dk_bopenmask |= pmask;
395 break;
396 }
397 sc->sc_dkdev.dk_openmask =
398 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
399
400 done:
401 vndunlock(sc);
402 return error;
403 }
404
405 static int
406 vndclose(dev_t dev, int flags, int mode, struct lwp *l)
407 {
408 int unit = vndunit(dev);
409 struct vnd_softc *sc;
410 int error = 0, part;
411
412 #ifdef DEBUG
413 if (vnddebug & VDB_FOLLOW)
414 printf("vndclose(0x%"PRIx64", 0x%x, 0x%x, %p)\n", dev, flags, mode, l);
415 #endif
416 sc = device_lookup_private(&vnd_cd, unit);
417 if (sc == NULL)
418 return ENXIO;
419
420 if ((error = vndlock(sc)) != 0)
421 return error;
422
423 part = DISKPART(dev);
424
425 /* ...that much closer to allowing unconfiguration... */
426 switch (mode) {
427 case S_IFCHR:
428 sc->sc_dkdev.dk_copenmask &= ~(1 << part);
429 break;
430
431 case S_IFBLK:
432 sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
433 break;
434 }
435 sc->sc_dkdev.dk_openmask =
436 sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;
437
438 vndunlock(sc);
439
440 if ((sc->sc_flags & VNF_INITED) == 0) {
441 if ((error = vnd_destroy(sc->sc_dev)) != 0) {
442 aprint_error_dev(sc->sc_dev,
443 "unable to detach instance\n");
444 return error;
445 }
446 }
447
448 return 0;
449 }
450
451 /*
452 * Queue the request, and wakeup the kernel thread to handle it.
453 */
454 static void
455 vndstrategy(struct buf *bp)
456 {
457 int unit = vndunit(bp->b_dev);
458 struct vnd_softc *vnd =
459 device_lookup_private(&vnd_cd, unit);
460 struct disklabel *lp;
461 daddr_t blkno;
462 int s = splbio();
463
464 if (vnd == NULL) {
465 bp->b_error = ENXIO;
466 goto done;
467 }
468 lp = vnd->sc_dkdev.dk_label;
469
470 if ((vnd->sc_flags & VNF_INITED) == 0) {
471 bp->b_error = ENXIO;
472 goto done;
473 }
474
475 /*
476 * The transfer must be a whole number of blocks.
477 */
478 if ((bp->b_bcount % lp->d_secsize) != 0) {
479 bp->b_error = EINVAL;
480 goto done;
481 }
482
483 /*
484 * check if we're read-only.
485 */
486 if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
487 bp->b_error = EACCES;
488 goto done;
489 }
490
491 /* If it's a nil transfer, wake up the top half now. */
492 if (bp->b_bcount == 0) {
493 goto done;
494 }
495
496 /*
497 * Do bounds checking and adjust transfer. If there's an error,
498 * the bounds check will flag that for us.
499 */
500 if (DISKPART(bp->b_dev) == RAW_PART) {
501 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
502 vnd->sc_size) <= 0)
503 goto done;
504 } else {
505 if (bounds_check_with_label(&vnd->sc_dkdev,
506 bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
507 goto done;
508 }
509
510 /*
511 * Put the block number in terms of the logical blocksize
512 * of the "device".
513 */
514
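/*
 * b_blkno counts DEV_BSIZE (normally 512-byte) units, so dividing by the
 * number of DEV_BSIZE chunks per sector converts it to d_secsize units;
 * e.g. with 2048-byte sectors the divisor is 4.
 */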
515 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
516
517 /*
518 * Translate the partition-relative block number to an absolute.
519 */
520 if (DISKPART(bp->b_dev) != RAW_PART) {
521 struct partition *pp;
522
523 pp = &vnd->sc_dkdev.dk_label->d_partitions[
524 DISKPART(bp->b_dev)];
525 blkno += pp->p_offset;
526 }
527 bp->b_rawblkno = blkno;
528
529 #ifdef DEBUG
530 if (vnddebug & VDB_FOLLOW)
531 printf("vndstrategy(%p): unit %d\n", bp, unit);
532 #endif
533 bufq_put(vnd->sc_tab, bp);
534 wakeup(&vnd->sc_tab);
535 splx(s);
536 return;
537
538 done:
539 bp->b_resid = bp->b_bcount;
540 biodone(bp);
541 splx(s);
542 }
543
544 static bool
545 vnode_has_strategy(struct vnd_softc *vnd)
546 {
547 return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
548 vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
549 }
550
551 /* XXX this function needs a reliable check to detect
552 * sparse files. Otherwise, bmap/strategy may be used
553 * and fail on non-allocated blocks. VOP_READ/VOP_WRITE
554 * works on sparse files.
555 */
556 #if notyet
557 static bool
558 vnode_strategy_probe(struct vnd_softc *vnd)
559 {
560 int error;
561 daddr_t nbn;
562
563 if (!vnode_has_strategy(vnd))
564 return false;
565
566 /* Convert the first logical block number to its
567 * physical block number.
568 */
569 error = 0;
570 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
571 error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
572 VOP_UNLOCK(vnd->sc_vp, 0);
573
574 /* Test if that worked. */
575 if (error == 0 && (long)nbn == -1)
576 return false;
577
578 return true;
579 }
580 #endif
581
582 static void
583 vndthread(void *arg)
584 {
585 struct vnd_softc *vnd = arg;
586 bool usestrategy;
587 int s;
588
589 /* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
590 * directly access the backing vnode. If we can, use these two
591 * operations to avoid messing with the local buffer cache.
592 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
593 * which are guaranteed to work with any file system. */
594 usestrategy = vnode_has_strategy(vnd);
595
596 #ifdef DEBUG
597 if (vnddebug & VDB_INIT)
598 printf("vndthread: vp %p, %s\n", vnd->sc_vp,
599 usestrategy ?
600 "using bmap/strategy operations" :
601 "using read/write operations");
602 #endif
603
604 s = splbio();
605 vnd->sc_flags |= VNF_KTHREAD;
606 wakeup(&vnd->sc_kthread);
607
608 /*
609 * Dequeue requests and serve them depending on the available
610 * vnode operations.
611 */
612 while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
613 struct vndxfer *vnx;
614 int flags;
615 struct buf *obp;
616 struct buf *bp;
617
618 obp = bufq_get(vnd->sc_tab);
619 if (obp == NULL) {
620 tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
621 continue;
}
623 splx(s);
624 flags = obp->b_flags;
625 #ifdef DEBUG
626 if (vnddebug & VDB_FOLLOW)
627 printf("vndthread(%p)\n", obp);
628 #endif
629
630 if (vnd->sc_vp->v_mount == NULL) {
631 obp->b_error = ENXIO;
632 goto done;
633 }
634 #ifdef VND_COMPRESSION
635 /* handle a compressed read */
636 if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
637 off_t bn;
638
639 /* Convert to a byte offset within the file. */
640 bn = obp->b_rawblkno *
641 vnd->sc_dkdev.dk_label->d_secsize;
642
643 compstrategy(obp, bn);
644 goto done;
645 }
646 #endif /* VND_COMPRESSION */
647
648 /*
649 * Allocate a header for this transfer and link it to the
650 * buffer
651 */
652 s = splbio();
653 vnx = VND_GETXFER(vnd);
654 splx(s);
655 vnx->vx_vnd = vnd;
656
657 s = splbio();
658 while (vnd->sc_active >= vnd->sc_maxactive) {
659 tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
660 }
661 vnd->sc_active++;
662 splx(s);
663
664 /* Instrumentation. */
665 disk_busy(&vnd->sc_dkdev);
666
667 bp = &vnx->vx_buf;
668 buf_init(bp);
669 bp->b_flags = (obp->b_flags & B_READ);
670 bp->b_oflags = obp->b_oflags;
671 bp->b_cflags = obp->b_cflags;
672 bp->b_iodone = vndiodone;
673 bp->b_private = obp;
674 bp->b_vp = vnd->sc_vp;
675 bp->b_objlock = &bp->b_vp->v_interlock;
676 bp->b_data = obp->b_data;
677 bp->b_bcount = obp->b_bcount;
678 BIO_COPYPRIO(bp, obp);
679
680 /* Handle the request using the appropriate operations. */
681 if (usestrategy)
682 handle_with_strategy(vnd, obp, bp);
683 else
684 handle_with_rdwr(vnd, obp, bp);
685
686 s = splbio();
687 continue;
688
689 done:
690 biodone(obp);
691 s = splbio();
692 }
693
vnd->sc_flags &= ~(VNF_KTHREAD | VNF_VUNCONF);
695 wakeup(&vnd->sc_kthread);
696 splx(s);
697 kthread_exit(0);
698 }
699
700 /*
701 * Checks if the given vnode supports the requested operation.
 * The operation is specified by the offset returned by VOFFSET.
703 *
704 * XXX The test below used to determine this is quite fragile
705 * because it relies on the file system to use genfs to specify
706 * unimplemented operations. There might be another way to do
707 * it more cleanly.
708 */
709 static bool
710 vnode_has_op(const struct vnode *vp, int opoffset)
711 {
712 int (*defaultp)(void *);
713 int (*opp)(void *);
714
715 defaultp = vp->v_op[VOFFSET(vop_default)];
716 opp = vp->v_op[opoffset];
717
718 return opp != defaultp && opp != genfs_eopnotsupp &&
719 opp != genfs_badop && opp != genfs_nullop;
720 }
721
722 /*
 * Handles the read/write request given in 'bp' using the vnode's VOP_READ
724 * and VOP_WRITE operations.
725 *
726 * 'obp' is a pointer to the original request fed to the vnd device.
727 */
728 static void
729 handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
730 {
731 bool doread;
732 off_t offset;
733 size_t resid;
734 struct vnode *vp;
735
736 doread = bp->b_flags & B_READ;
737 offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
738 vp = vnd->sc_vp;
739
740 #if defined(DEBUG)
741 if (vnddebug & VDB_IO)
742 printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
743 ", secsize %d, offset %" PRIu64
744 ", bcount %d\n",
745 vp, doread ? "read" : "write", obp->b_rawblkno,
746 vnd->sc_dkdev.dk_label->d_secsize, offset,
747 bp->b_bcount);
748 #endif
749
750 /* Issue the read or write operation. */
751 bp->b_error =
752 vn_rdwr(doread ? UIO_READ : UIO_WRITE,
753 vp, bp->b_data, bp->b_bcount, offset,
754 UIO_SYSSPACE, 0, vnd->sc_cred, &resid, NULL);
755 bp->b_resid = resid;
756
757 /* We need to increase the number of outputs on the vnode if
758 * there was any write to it. */
759 if (!doread) {
760 mutex_enter(&vp->v_interlock);
761 vp->v_numoutput++;
762 mutex_exit(&vp->v_interlock);
763 }
764
765 biodone(bp);
766 }
767
768 /*
 * Handles the read/write request given in 'bp' using the vnode's VOP_BMAP
770 * and VOP_STRATEGY operations.
771 *
772 * 'obp' is a pointer to the original request fed to the vnd device.
773 */
774 static void
775 handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
776 struct buf *bp)
777 {
778 int bsize, error, flags, skipped;
779 size_t resid, sz;
780 off_t bn, offset;
781 struct vnode *vp;
782
783 flags = obp->b_flags;
784
785 if (!(flags & B_READ)) {
786 vp = bp->b_vp;
787 mutex_enter(&vp->v_interlock);
788 vp->v_numoutput++;
789 mutex_exit(&vp->v_interlock);
790 }
791
792 /* convert to a byte offset within the file. */
793 bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
794
795 bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
796 skipped = 0;
797
798 /*
799 * Break the request into bsize pieces and feed them
800 * sequentially using VOP_BMAP/VOP_STRATEGY.
801 * We do it this way to keep from flooding NFS servers if we
802 * are connected to an NFS file. This places the burden on
803 * the client rather than the server.
804 */
805 error = 0;
806 bp->b_resid = bp->b_bcount;
807 for (offset = 0, resid = bp->b_resid; resid;
808 resid -= sz, offset += sz) {
809 struct buf *nbp;
810 daddr_t nbn;
811 int off, nra;
812
813 nra = 0;
814 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
815 error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
816 VOP_UNLOCK(vnd->sc_vp, 0);
817
818 if (error == 0 && (long)nbn == -1)
819 error = EIO;
820
821 /*
822 * If there was an error or a hole in the file...punt.
823 * Note that we may have to wait for any operations
824 * that we have already fired off before releasing
825 * the buffer.
826 *
827 * XXX we could deal with holes here but it would be
828 * a hassle (in the write case).
829 */
830 if (error) {
831 skipped += resid;
832 break;
833 }
834
835 #ifdef DEBUG
836 if (!dovndcluster)
837 nra = 0;
838 #endif
839
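/*
 * VOP_BMAP reported nra contiguous blocks of read-ahead beyond the block
 * we asked for, so one nested buffer can cover up to (1 + nra) file system
 * blocks, minus the offset into the first block, capped by what remains of
 * the original request.
 */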
840 off = bn % bsize;
841 sz = MIN(((off_t)1 + nra) * bsize - off, resid);
842 #ifdef DEBUG
843 if (vnddebug & VDB_IO)
844 printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
845 " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
846 nbn, sz);
847 #endif
848
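/*
 * Each chunk is issued as a buffer nested under bp: nestiobuf_setup() ties
 * nbp's completion back to bp, and the nestiobuf_done() after the loop
 * accounts for any bytes skipped because of an error or a hole.
 */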
849 nbp = getiobuf(vp, true);
850 nestiobuf_setup(bp, nbp, offset, sz);
851 nbp->b_blkno = nbn + btodb(off);
852
853 #if 0 /* XXX #ifdef DEBUG */
854 if (vnddebug & VDB_IO)
855 printf("vndstart(%ld): bp %p vp %p blkno "
856 "0x%" PRIx64 " flags %x addr %p cnt 0x%x\n",
857 (long) (vnd-vnd_softc), &nbp->vb_buf,
858 nbp->vb_buf.b_vp, nbp->vb_buf.b_blkno,
859 nbp->vb_buf.b_flags, nbp->vb_buf.b_data,
860 nbp->vb_buf.b_bcount);
861 #endif
862 VOP_STRATEGY(vp, nbp);
863 bn += sz;
864 }
865 nestiobuf_done(bp, skipped, error);
866 }
867
868 static void
869 vndiodone(struct buf *bp)
870 {
871 struct vndxfer *vnx = VND_BUFTOXFER(bp);
872 struct vnd_softc *vnd = vnx->vx_vnd;
873 struct buf *obp = bp->b_private;
874
875 KASSERT(&vnx->vx_buf == bp);
876 KASSERT(vnd->sc_active > 0);
877 #ifdef DEBUG
878 if (vnddebug & VDB_IO) {
879 printf("vndiodone1: bp %p iodone: error %d\n",
880 bp, bp->b_error);
881 }
882 #endif
883 disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
884 (bp->b_flags & B_READ));
885 vnd->sc_active--;
886 if (vnd->sc_active == 0) {
887 wakeup(&vnd->sc_tab);
888 }
889 obp->b_error = bp->b_error;
890 obp->b_resid = bp->b_resid;
891 buf_destroy(bp);
892 VND_PUTXFER(vnd, vnx);
893 biodone(obp);
894 }
895
896 /* ARGSUSED */
897 static int
898 vndread(dev_t dev, struct uio *uio, int flags)
899 {
900 int unit = vndunit(dev);
901 struct vnd_softc *sc;
902
903 #ifdef DEBUG
904 if (vnddebug & VDB_FOLLOW)
905 printf("vndread(0x%"PRIx64", %p)\n", dev, uio);
906 #endif
907
908 sc = device_lookup_private(&vnd_cd, unit);
909 if (sc == NULL)
910 return ENXIO;
911
912 if ((sc->sc_flags & VNF_INITED) == 0)
913 return ENXIO;
914
915 return physio(vndstrategy, NULL, dev, B_READ, minphys, uio);
916 }
917
918 /* ARGSUSED */
919 static int
920 vndwrite(dev_t dev, struct uio *uio, int flags)
921 {
922 int unit = vndunit(dev);
923 struct vnd_softc *sc;
924
925 #ifdef DEBUG
926 if (vnddebug & VDB_FOLLOW)
927 printf("vndwrite(0x%"PRIx64", %p)\n", dev, uio);
928 #endif
929
930 sc = device_lookup_private(&vnd_cd, unit);
931 if (sc == NULL)
932 return ENXIO;
933
934 if ((sc->sc_flags & VNF_INITED) == 0)
935 return ENXIO;
936
937 return physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio);
938 }
939
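/*
 * Common helper for VNDIOCGET/VNDIOOCGET: look up the unit named by *un
 * (or the unit the ioctl was issued on, when *un is -1) and fetch the
 * attributes of its backing vnode.  Returns 0 on success, -1 if the unit
 * is not configured, or an errno on failure.
 */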
940 static int
941 vnd_cget(struct lwp *l, int unit, int *un, struct vattr *va)
942 {
943 struct vnd_softc *vnd;
944
945 if (*un == -1)
946 *un = unit;
947 if (*un < 0)
948 return EINVAL;
949
950 vnd = device_lookup_private(&vnd_cd, *un);
951 if (vnd == NULL)
952 return (*un >= vnd_cd.cd_ndevs) ? ENXIO : -1;
953
954 if ((vnd->sc_flags & VNF_INITED) == 0)
955 return -1;
956
957 return VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
958 }
959
960 static int
961 vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
962 {
963 int error;
964
965 if ((error = vndlock(vnd)) != 0)
966 return error;
967
968 /*
969 * Don't unconfigure if any other partitions are open
970 * or if both the character and block flavors of this
971 * partition are open.
972 */
973 if (((vnd->sc_dkdev.dk_openmask & ~pmask) ||
974 ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
975 (vnd->sc_dkdev.dk_copenmask & pmask))) && !force) {
976 vndunlock(vnd);
977 return EBUSY;
978 }
979
980 /*
981 * XXX vndclear() might call vndclose() implicitly;
982 * release lock to avoid recursion
983 *
984 * Set VNF_CLEARING to prevent vndopen() from
985 * sneaking in after we vndunlock().
986 */
987 vnd->sc_flags |= VNF_CLEARING;
988 vndunlock(vnd);
989 vndclear(vnd, minor);
990 #ifdef DEBUG
991 if (vnddebug & VDB_INIT)
992 printf("vndioctl: CLRed\n");
993 #endif
994
995 /* Destroy the xfer and buffer pools. */
996 pool_destroy(&vnd->sc_vxpool);
997
998 /* Detach the disk. */
999 disk_detach(&vnd->sc_dkdev);
1000
1001 return 0;
1002 }
1003
1004 /* ARGSUSED */
1005 static int
1006 vndioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1007 {
1008 bool force;
1009 int unit = vndunit(dev);
1010 struct vnd_softc *vnd;
1011 struct vnd_ioctl *vio;
1012 struct vattr vattr;
1013 struct nameidata nd;
1014 int error, part, pmask;
1015 size_t geomsize;
1016 int fflags;
1017 #ifdef __HAVE_OLD_DISKLABEL
1018 struct disklabel newlabel;
1019 #endif
1020
1021 #ifdef DEBUG
1022 if (vnddebug & VDB_FOLLOW)
1023 printf("vndioctl(0x%"PRIx64", 0x%lx, %p, 0x%x, %p): unit %d\n",
1024 dev, cmd, data, flag, l->l_proc, unit);
1025 #endif
1026 vnd = device_lookup_private(&vnd_cd, unit);
1027 if (vnd == NULL &&
1028 #ifdef COMPAT_30
1029 cmd != VNDIOOCGET &&
1030 #endif
1031 cmd != VNDIOCGET)
1032 return ENXIO;
1033 vio = (struct vnd_ioctl *)data;
1034
1035 /* Must be open for writes for these commands... */
1036 switch (cmd) {
1037 case VNDIOCSET:
1038 case VNDIOCCLR:
1039 case DIOCSDINFO:
1040 case DIOCWDINFO:
1041 #ifdef __HAVE_OLD_DISKLABEL
1042 case ODIOCSDINFO:
1043 case ODIOCWDINFO:
1044 #endif
1045 case DIOCKLABEL:
1046 case DIOCWLABEL:
1047 if ((flag & FWRITE) == 0)
1048 return EBADF;
1049 }
1050
1051 /* Must be initialized for these... */
1052 switch (cmd) {
1053 case VNDIOCCLR:
1054 case DIOCGDINFO:
1055 case DIOCSDINFO:
1056 case DIOCWDINFO:
1057 case DIOCGPART:
1058 case DIOCKLABEL:
1059 case DIOCWLABEL:
1060 case DIOCGDEFLABEL:
1061 case DIOCCACHESYNC:
1062 #ifdef __HAVE_OLD_DISKLABEL
1063 case ODIOCGDINFO:
1064 case ODIOCSDINFO:
1065 case ODIOCWDINFO:
1066 case ODIOCGDEFLABEL:
1067 #endif
1068 if ((vnd->sc_flags & VNF_INITED) == 0)
1069 return ENXIO;
1070 }
1071
1072 switch (cmd) {
1073 case VNDIOCSET:
1074 if (vnd->sc_flags & VNF_INITED)
1075 return EBUSY;
1076
1077 if ((error = vndlock(vnd)) != 0)
1078 return error;
1079
1080 fflags = FREAD;
1081 if ((vio->vnd_flags & VNDIOF_READONLY) == 0)
1082 fflags |= FWRITE;
1083 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file);
1084 if ((error = vn_open(&nd, fflags, 0)) != 0)
1085 goto unlock_and_exit;
1086 KASSERT(l);
1087 error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_cred);
1088 if (!error && nd.ni_vp->v_type != VREG)
1089 error = EOPNOTSUPP;
1090 if (error) {
1091 VOP_UNLOCK(nd.ni_vp, 0);
1092 goto close_and_exit;
1093 }
1094
1095 /* If using a compressed file, initialize its info */
1096 /* (or abort with an error if kernel has no compression) */
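/*
 * Layout of a compressed image, as read below: a vnd_comp_header giving
 * the uncompressed block size and block count, then num_blocks + 1
 * big-endian 64-bit file offsets (the last one is the file size), then
 * the zlib-compressed blocks themselves.
 */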
1097 if (vio->vnd_flags & VNF_COMP) {
1098 #ifdef VND_COMPRESSION
1099 struct vnd_comp_header *ch;
1100 int i;
1101 u_int32_t comp_size;
1102 u_int32_t comp_maxsize;
1103
/* allocate space for compressed file header */
1105 ch = malloc(sizeof(struct vnd_comp_header),
1106 M_TEMP, M_WAITOK);
1107
1108 /* read compressed file header */
1109 error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ch,
1110 sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE,
1111 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1112 if (error) {
1113 free(ch, M_TEMP);
1114 VOP_UNLOCK(nd.ni_vp, 0);
1115 goto close_and_exit;
1116 }
1117
1118 /* save some header info */
1119 vnd->sc_comp_blksz = ntohl(ch->block_size);
1120 /* note last offset is the file byte size */
1121 vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1;
1122 free(ch, M_TEMP);
1123 if (vnd->sc_comp_blksz == 0 ||
vnd->sc_comp_blksz % DEV_BSIZE != 0) {
1125 VOP_UNLOCK(nd.ni_vp, 0);
1126 error = EINVAL;
1127 goto close_and_exit;
1128 }
1129 if (sizeof(struct vnd_comp_header) +
1130 sizeof(u_int64_t) * vnd->sc_comp_numoffs >
1131 vattr.va_size) {
1132 VOP_UNLOCK(nd.ni_vp, 0);
1133 error = EINVAL;
1134 goto close_and_exit;
1135 }
1136
1137 /* set decompressed file size */
1138 vattr.va_size =
1139 ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1140 (u_quad_t)vnd->sc_comp_blksz;
1141
1142 /* allocate space for all the compressed offsets */
1143 vnd->sc_comp_offsets =
1144 malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1145 M_DEVBUF, M_WAITOK);
1146
1147 /* read in the offsets */
1148 error = vn_rdwr(UIO_READ, nd.ni_vp,
1149 (void *)vnd->sc_comp_offsets,
1150 sizeof(u_int64_t) * vnd->sc_comp_numoffs,
1151 sizeof(struct vnd_comp_header), UIO_SYSSPACE,
1152 IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL);
1153 if (error) {
1154 VOP_UNLOCK(nd.ni_vp, 0);
1155 goto close_and_exit;
1156 }
1157 /*
 * Find the largest block size (used for allocation limit).
 * Also convert offsets to native byte order.
1160 */
1161 comp_maxsize = 0;
1162 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1163 vnd->sc_comp_offsets[i] =
1164 be64toh(vnd->sc_comp_offsets[i]);
1165 comp_size = be64toh(vnd->sc_comp_offsets[i + 1])
1166 - vnd->sc_comp_offsets[i];
1167 if (comp_size > comp_maxsize)
1168 comp_maxsize = comp_size;
1169 }
1170 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1171 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]);
1172
1173 /* create compressed data buffer */
1174 vnd->sc_comp_buff = malloc(comp_maxsize,
1175 M_DEVBUF, M_WAITOK);
1176
1177 /* create decompressed buffer */
1178 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1179 M_DEVBUF, M_WAITOK);
1180 vnd->sc_comp_buffblk = -1;
1181
1182 /* Initialize decompress stream */
1183 memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1184 vnd->sc_comp_stream.zalloc = vnd_alloc;
1185 vnd->sc_comp_stream.zfree = vnd_free;
1186 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1187 if (error) {
1188 if (vnd->sc_comp_stream.msg)
1189 printf("vnd%d: compressed file, %s\n",
1190 unit, vnd->sc_comp_stream.msg);
1191 VOP_UNLOCK(nd.ni_vp, 0);
1192 error = EINVAL;
1193 goto close_and_exit;
1194 }
1195
1196 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
1197 #else /* !VND_COMPRESSION */
1198 VOP_UNLOCK(nd.ni_vp, 0);
1199 error = EOPNOTSUPP;
1200 goto close_and_exit;
1201 #endif /* VND_COMPRESSION */
1202 }
1203
1204 VOP_UNLOCK(nd.ni_vp, 0);
1205 vnd->sc_vp = nd.ni_vp;
1206 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
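/*
 * btodb() rounds down, so a trailing partial DEV_BSIZE block in the
 * backing file is not exposed through the vnd device.
 */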
1207
1208 /*
1209 * Use pseudo-geometry specified. If none was provided,
1210 * use "standard" Adaptec fictitious geometry.
1211 */
1212 if (vio->vnd_flags & VNDIOF_HASGEOM) {
1213
1214 memcpy(&vnd->sc_geom, &vio->vnd_geom,
1215 sizeof(vio->vnd_geom));
1216
1217 /*
1218 * Sanity-check the sector size.
1219 * XXX Don't allow secsize < DEV_BSIZE. Should
1220 * XXX we?
1221 */
1222 if (vnd->sc_geom.vng_secsize < DEV_BSIZE ||
1223 (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 ||
1224 vnd->sc_geom.vng_ncylinders == 0 ||
1225 (vnd->sc_geom.vng_ntracks *
1226 vnd->sc_geom.vng_nsectors) == 0) {
1227 error = EINVAL;
1228 goto close_and_exit;
1229 }
1230
1231 /*
1232 * Compute the size (in DEV_BSIZE blocks) specified
1233 * by the geometry.
1234 */
1235 geomsize = (vnd->sc_geom.vng_nsectors *
1236 vnd->sc_geom.vng_ntracks *
1237 vnd->sc_geom.vng_ncylinders) *
1238 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1239
1240 /*
1241 * Sanity-check the size against the specified
1242 * geometry.
1243 */
1244 if (vnd->sc_size < geomsize) {
1245 error = EINVAL;
1246 goto close_and_exit;
1247 }
1248 } else if (vnd->sc_size >= (32 * 64)) {
1249 /*
1250 * Size must be at least 2048 DEV_BSIZE blocks
1251 * (1M) in order to use this geometry.
1252 */
1253 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1254 vnd->sc_geom.vng_nsectors = 32;
1255 vnd->sc_geom.vng_ntracks = 64;
1256 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1257 } else {
1258 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1259 vnd->sc_geom.vng_nsectors = 1;
1260 vnd->sc_geom.vng_ntracks = 1;
1261 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1262 }
1263
1264 vnd_set_properties(vnd);
1265
1266 if (vio->vnd_flags & VNDIOF_READONLY) {
1267 vnd->sc_flags |= VNF_READONLY;
1268 }
1269
1270 if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1271 goto close_and_exit;
1272
1273 vndthrottle(vnd, vnd->sc_vp);
1274 vio->vnd_size = dbtob(vnd->sc_size);
1275 vnd->sc_flags |= VNF_INITED;
1276
1277 /* create the kernel thread, wait for it to be up */
1278 error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1279 &vnd->sc_kthread, device_xname(vnd->sc_dev));
1280 if (error)
1281 goto close_and_exit;
1282 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1283 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1284 }
1285 #ifdef DEBUG
1286 if (vnddebug & VDB_INIT)
1287 printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n",
1288 vnd->sc_vp, (unsigned long) vnd->sc_size,
1289 vnd->sc_geom.vng_secsize,
1290 vnd->sc_geom.vng_nsectors,
1291 vnd->sc_geom.vng_ntracks,
1292 vnd->sc_geom.vng_ncylinders);
1293 #endif
1294
1295 /* Attach the disk. */
1296 disk_attach(&vnd->sc_dkdev);
1297
1298 /* Initialize the xfer and buffer pools. */
1299 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1300 0, 0, "vndxpl", NULL, IPL_BIO);
1301
1302 /* Try and read the disklabel. */
1303 vndgetdisklabel(dev, vnd);
1304
1305 vndunlock(vnd);
1306
1307 break;
1308
1309 close_and_exit:
1310 (void) vn_close(nd.ni_vp, fflags, l->l_cred);
1311 unlock_and_exit:
1312 #ifdef VND_COMPRESSION
1313 /* free any allocated memory (for compressed file) */
1314 if (vnd->sc_comp_offsets) {
1315 free(vnd->sc_comp_offsets, M_DEVBUF);
1316 vnd->sc_comp_offsets = NULL;
1317 }
1318 if (vnd->sc_comp_buff) {
1319 free(vnd->sc_comp_buff, M_DEVBUF);
1320 vnd->sc_comp_buff = NULL;
1321 }
1322 if (vnd->sc_comp_decombuf) {
1323 free(vnd->sc_comp_decombuf, M_DEVBUF);
1324 vnd->sc_comp_decombuf = NULL;
1325 }
1326 #endif /* VND_COMPRESSION */
1327 vndunlock(vnd);
1328 return error;
1329
1330 case VNDIOCCLR:
1331 part = DISKPART(dev);
1332 pmask = (1 << part);
1333 force = (vio->vnd_flags & VNDIOF_FORCE) != 0;
1334
1335 if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1336 return error;
1337
1338 break;
1339
1340 #ifdef COMPAT_30
1341 case VNDIOOCGET: {
1342 struct vnd_ouser *vnu;
1343 struct vattr va;
1344 vnu = (struct vnd_ouser *)data;
1345 KASSERT(l);
1346 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1347 case 0:
1348 vnu->vnu_dev = va.va_fsid;
1349 vnu->vnu_ino = va.va_fileid;
1350 break;
1351 case -1:
1352 /* unused is not an error */
1353 vnu->vnu_dev = 0;
1354 vnu->vnu_ino = 0;
1355 break;
1356 default:
1357 return error;
1358 }
1359 break;
1360 }
1361 #endif
1362 case VNDIOCGET: {
1363 struct vnd_user *vnu;
1364 struct vattr va;
1365 vnu = (struct vnd_user *)data;
1366 KASSERT(l);
1367 switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) {
1368 case 0:
1369 vnu->vnu_dev = va.va_fsid;
1370 vnu->vnu_ino = va.va_fileid;
1371 break;
1372 case -1:
1373 /* unused is not an error */
1374 vnu->vnu_dev = 0;
1375 vnu->vnu_ino = 0;
1376 break;
1377 default:
1378 return error;
1379 }
1380 break;
1381 }
1382
1383 case DIOCGDINFO:
1384 *(struct disklabel *)data = *(vnd->sc_dkdev.dk_label);
1385 break;
1386
1387 #ifdef __HAVE_OLD_DISKLABEL
1388 case ODIOCGDINFO:
1389 newlabel = *(vnd->sc_dkdev.dk_label);
1390 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1391 return ENOTTY;
1392 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1393 break;
1394 #endif
1395
1396 case DIOCGPART:
1397 ((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label;
1398 ((struct partinfo *)data)->part =
1399 &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1400 break;
1401
1402 case DIOCWDINFO:
1403 case DIOCSDINFO:
1404 #ifdef __HAVE_OLD_DISKLABEL
1405 case ODIOCWDINFO:
1406 case ODIOCSDINFO:
1407 #endif
1408 {
1409 struct disklabel *lp;
1410
1411 if ((error = vndlock(vnd)) != 0)
1412 return error;
1413
1414 vnd->sc_flags |= VNF_LABELLING;
1415
1416 #ifdef __HAVE_OLD_DISKLABEL
1417 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1418 memset(&newlabel, 0, sizeof newlabel);
1419 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1420 lp = &newlabel;
1421 } else
1422 #endif
1423 lp = (struct disklabel *)data;
1424
1425 error = setdisklabel(vnd->sc_dkdev.dk_label,
1426 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1427 if (error == 0) {
1428 if (cmd == DIOCWDINFO
1429 #ifdef __HAVE_OLD_DISKLABEL
1430 || cmd == ODIOCWDINFO
1431 #endif
1432 )
1433 error = writedisklabel(VNDLABELDEV(dev),
1434 vndstrategy, vnd->sc_dkdev.dk_label,
1435 vnd->sc_dkdev.dk_cpulabel);
1436 }
1437
1438 vnd->sc_flags &= ~VNF_LABELLING;
1439
1440 vndunlock(vnd);
1441
1442 if (error)
1443 return error;
1444 break;
1445 }
1446
1447 case DIOCKLABEL:
1448 if (*(int *)data != 0)
1449 vnd->sc_flags |= VNF_KLABEL;
1450 else
1451 vnd->sc_flags &= ~VNF_KLABEL;
1452 break;
1453
1454 case DIOCWLABEL:
1455 if (*(int *)data != 0)
1456 vnd->sc_flags |= VNF_WLABEL;
1457 else
1458 vnd->sc_flags &= ~VNF_WLABEL;
1459 break;
1460
1461 case DIOCGDEFLABEL:
1462 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1463 break;
1464
1465 #ifdef __HAVE_OLD_DISKLABEL
1466 case ODIOCGDEFLABEL:
1467 vndgetdefaultlabel(vnd, &newlabel);
1468 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1469 return ENOTTY;
1470 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1471 break;
1472 #endif
1473
1474 case DIOCCACHESYNC:
1475 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1476 error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1477 FSYNC_WAIT | FSYNC_DATAONLY | FSYNC_CACHE, 0, 0);
1478 VOP_UNLOCK(vnd->sc_vp, 0);
1479 return error;
1480
1481 default:
1482 return ENOTTY;
1483 }
1484
1485 return 0;
1486 }
1487
1488 /*
 * Duplicate the current process's credentials. Since we are called only
1490 * as the result of a SET ioctl and only root can do that, any future access
1491 * to this "disk" is essentially as root. Note that credentials may change
1492 * if some other uid can write directly to the mapped file (NFS).
1493 */
1494 static int
1495 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1496 {
1497 struct uio auio;
1498 struct iovec aiov;
1499 char *tmpbuf;
1500 int error;
1501
1502 vnd->sc_cred = kauth_cred_dup(cred);
1503 tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);
1504
1505 /* XXX: Horrible kludge to establish credentials for NFS */
1506 aiov.iov_base = tmpbuf;
1507 aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
1508 auio.uio_iov = &aiov;
1509 auio.uio_iovcnt = 1;
1510 auio.uio_offset = 0;
1511 auio.uio_rw = UIO_READ;
1512 auio.uio_resid = aiov.iov_len;
1513 UIO_SETUP_SYSSPACE(&auio);
1514 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1515 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1516 if (error == 0) {
1517 /*
1518 * Because vnd does all IO directly through the vnode
1519 * we need to flush (at least) the buffer from the above
1520 * VOP_READ from the buffer cache to prevent cache
1521 * incoherencies. Also, be careful to write dirty
1522 * buffers back to stable storage.
1523 */
1524 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1525 curlwp, 0, 0);
1526 }
1527 VOP_UNLOCK(vnd->sc_vp, 0);
1528
1529 free(tmpbuf, M_TEMP);
1530 return error;
1531 }
1532
1533 /*
1534 * Set maxactive based on FS type
1535 */
1536 static void
1537 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1538 {
1539 #ifdef NFS
1540 extern int (**nfsv2_vnodeop_p)(void *);
1541
1542 if (vp->v_op == nfsv2_vnodeop_p)
1543 vnd->sc_maxactive = 2;
1544 else
1545 #endif
1546 vnd->sc_maxactive = 8;
1547
1548 if (vnd->sc_maxactive < 1)
1549 vnd->sc_maxactive = 1;
1550 }
1551
1552 #if 0
1553 static void
1554 vndshutdown(void)
1555 {
1556 struct vnd_softc *vnd;
1557
1558 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1559 if (vnd->sc_flags & VNF_INITED)
1560 vndclear(vnd);
1561 }
1562 #endif
1563
1564 static void
1565 vndclear(struct vnd_softc *vnd, int myminor)
1566 {
1567 struct vnode *vp = vnd->sc_vp;
1568 int fflags = FREAD;
1569 int bmaj, cmaj, i, mn;
1570 int s;
1571
1572 #ifdef DEBUG
1573 if (vnddebug & VDB_FOLLOW)
1574 printf("vndclear(%p): vp %p\n", vnd, vp);
1575 #endif
1576 /* locate the major number */
1577 bmaj = bdevsw_lookup_major(&vnd_bdevsw);
1578 cmaj = cdevsw_lookup_major(&vnd_cdevsw);
1579
1580 /* Nuke the vnodes for any open instances */
1581 for (i = 0; i < MAXPARTITIONS; i++) {
1582 mn = DISKMINOR(device_unit(vnd->sc_dev), i);
1583 vdevgone(bmaj, mn, mn, VBLK);
1584 if (mn != myminor) /* XXX avoid to kill own vnode */
1585 vdevgone(cmaj, mn, mn, VCHR);
1586 }
1587
1588 if ((vnd->sc_flags & VNF_READONLY) == 0)
1589 fflags |= FWRITE;
1590
1591 s = splbio();
1592 bufq_drain(vnd->sc_tab);
1593 splx(s);
1594
1595 vnd->sc_flags |= VNF_VUNCONF;
1596 wakeup(&vnd->sc_tab);
1597 while (vnd->sc_flags & VNF_KTHREAD)
1598 tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1599
1600 #ifdef VND_COMPRESSION
1601 /* free the compressed file buffers */
1602 if (vnd->sc_flags & VNF_COMP) {
1603 if (vnd->sc_comp_offsets) {
1604 free(vnd->sc_comp_offsets, M_DEVBUF);
1605 vnd->sc_comp_offsets = NULL;
1606 }
1607 if (vnd->sc_comp_buff) {
1608 free(vnd->sc_comp_buff, M_DEVBUF);
1609 vnd->sc_comp_buff = NULL;
1610 }
1611 if (vnd->sc_comp_decombuf) {
1612 free(vnd->sc_comp_decombuf, M_DEVBUF);
1613 vnd->sc_comp_decombuf = NULL;
1614 }
1615 }
1616 #endif /* VND_COMPRESSION */
1617 vnd->sc_flags &=
1618 ~(VNF_INITED | VNF_READONLY | VNF_VLABEL
1619 | VNF_VUNCONF | VNF_COMP);
1620 if (vp == NULL)
1621 panic("vndclear: null vp");
1622 (void) vn_close(vp, fflags, vnd->sc_cred);
1623 kauth_cred_free(vnd->sc_cred);
1624 vnd->sc_vp = NULL;
1625 vnd->sc_cred = NULL;
1626 vnd->sc_size = 0;
1627 }
1628
1629 static int
1630 vndsize(dev_t dev)
1631 {
1632 struct vnd_softc *sc;
1633 struct disklabel *lp;
1634 int part, unit, omask;
1635 int size;
1636
1637 unit = vndunit(dev);
1638 sc = device_lookup_private(&vnd_cd, unit);
1639 if (sc == NULL)
1640 return -1;
1641
1642 if ((sc->sc_flags & VNF_INITED) == 0)
1643 return -1;
1644
1645 part = DISKPART(dev);
1646 omask = sc->sc_dkdev.dk_openmask & (1 << part);
1647 lp = sc->sc_dkdev.dk_label;
1648
1649 if (omask == 0 && vndopen(dev, 0, S_IFBLK, curlwp)) /* XXX */
1650 return -1;
1651
1652 if (lp->d_partitions[part].p_fstype != FS_SWAP)
1653 size = -1;
1654 else
1655 size = lp->d_partitions[part].p_size *
1656 (lp->d_secsize / DEV_BSIZE);
1657
1658 if (omask == 0 && vndclose(dev, 0, S_IFBLK, curlwp)) /* XXX */
1659 return -1;
1660
1661 return size;
1662 }
1663
1664 static int
1665 vnddump(dev_t dev, daddr_t blkno, void *va,
1666 size_t size)
1667 {
1668
1669 /* Not implemented. */
1670 return ENXIO;
1671 }
1672
1673 static void
1674 vndgetdefaultlabel(struct vnd_softc *sc, struct disklabel *lp)
1675 {
1676 struct vndgeom *vng = &sc->sc_geom;
1677 struct partition *pp;
1678
1679 memset(lp, 0, sizeof(*lp));
1680
1681 lp->d_secperunit = sc->sc_size / (vng->vng_secsize / DEV_BSIZE);
1682 lp->d_secsize = vng->vng_secsize;
1683 lp->d_nsectors = vng->vng_nsectors;
1684 lp->d_ntracks = vng->vng_ntracks;
1685 lp->d_ncylinders = vng->vng_ncylinders;
1686 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1687
1688 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1689 lp->d_type = DTYPE_VND;
1690 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1691 lp->d_rpm = 3600;
1692 lp->d_interleave = 1;
1693 lp->d_flags = 0;
1694
1695 pp = &lp->d_partitions[RAW_PART];
1696 pp->p_offset = 0;
1697 pp->p_size = lp->d_secperunit;
1698 pp->p_fstype = FS_UNUSED;
1699 lp->d_npartitions = RAW_PART + 1;
1700
1701 lp->d_magic = DISKMAGIC;
1702 lp->d_magic2 = DISKMAGIC;
1703 lp->d_checksum = dkcksum(lp);
1704 }
1705
1706 /*
1707 * Read the disklabel from a vnd. If one is not present, create a fake one.
1708 */
1709 static void
1710 vndgetdisklabel(dev_t dev, struct vnd_softc *sc)
1711 {
1712 const char *errstring;
1713 struct disklabel *lp = sc->sc_dkdev.dk_label;
1714 struct cpu_disklabel *clp = sc->sc_dkdev.dk_cpulabel;
1715 int i;
1716
1717 memset(clp, 0, sizeof(*clp));
1718
1719 vndgetdefaultlabel(sc, lp);
1720
1721 /*
1722 * Call the generic disklabel extraction routine.
1723 */
1724 errstring = readdisklabel(VNDLABELDEV(dev), vndstrategy, lp, clp);
1725 if (errstring) {
1726 /*
1727 * Lack of disklabel is common, but we print the warning
1728 * anyway, since it might contain other useful information.
1729 */
1730 aprint_normal_dev(sc->sc_dev, "%s\n", errstring);
1731
1732 /*
1733 * For historical reasons, if there's no disklabel
1734 * present, all partitions must be FS_BSDFFS and
1735 * occupy the entire disk.
1736 */
1737 for (i = 0; i < MAXPARTITIONS; i++) {
1738 /*
1739 * Don't wipe out port specific hack (such as
1740 * dos partition hack of i386 port).
1741 */
1742 if (lp->d_partitions[i].p_size != 0)
1743 continue;
1744
1745 lp->d_partitions[i].p_size = lp->d_secperunit;
1746 lp->d_partitions[i].p_offset = 0;
1747 lp->d_partitions[i].p_fstype = FS_BSDFFS;
1748 }
1749
1750 strncpy(lp->d_packname, "default label",
1751 sizeof(lp->d_packname));
1752
1753 lp->d_npartitions = MAXPARTITIONS;
1754 lp->d_checksum = dkcksum(lp);
1755 }
1756
1757 /* In-core label now valid. */
1758 sc->sc_flags |= VNF_VLABEL;
1759 }
1760
1761 /*
1762 * Wait interruptibly for an exclusive lock.
1763 *
1764 * XXX
1765 * Several drivers do this; it should be abstracted and made MP-safe.
1766 */
1767 static int
1768 vndlock(struct vnd_softc *sc)
1769 {
1770 int error;
1771
1772 while ((sc->sc_flags & VNF_LOCKED) != 0) {
1773 sc->sc_flags |= VNF_WANTED;
1774 if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
1775 return error;
1776 }
1777 sc->sc_flags |= VNF_LOCKED;
1778 return 0;
1779 }
1780
1781 /*
1782 * Unlock and wake up any waiters.
1783 */
1784 static void
1785 vndunlock(struct vnd_softc *sc)
1786 {
1787
1788 sc->sc_flags &= ~VNF_LOCKED;
1789 if ((sc->sc_flags & VNF_WANTED) != 0) {
1790 sc->sc_flags &= ~VNF_WANTED;
1791 wakeup(sc);
1792 }
1793 }
1794
1795 #ifdef VND_COMPRESSION
1796 /* compressed file read */
1797 static void
1798 compstrategy(struct buf *bp, off_t bn)
1799 {
1800 int error;
1801 int unit = vndunit(bp->b_dev);
1802 struct vnd_softc *vnd =
1803 device_lookup_private(&vnd_cd, unit);
1804 u_int32_t comp_block;
1805 struct uio auio;
1806 char *addr;
1807 int s;
1808
1809 /* set up constants for data move */
1810 auio.uio_rw = UIO_READ;
1811 UIO_SETUP_SYSSPACE(&auio);
1812
1813 /* read, and transfer the data */
1814 addr = bp->b_data;
1815 bp->b_resid = bp->b_bcount;
1816 s = splbio();
1817 while (bp->b_resid > 0) {
1818 unsigned length;
1819 size_t length_in_buffer;
1820 u_int32_t offset_in_buffer;
1821 struct iovec aiov;
1822
1823 /* calculate the compressed block number */
1824 comp_block = bn / (off_t)vnd->sc_comp_blksz;
1825
1826 /* check for good block number */
1827 if (comp_block >= vnd->sc_comp_numoffs) {
1828 bp->b_error = EINVAL;
1829 splx(s);
1830 return;
1831 }
1832
1833 /* read in the compressed block, if not in buffer */
1834 if (comp_block != vnd->sc_comp_buffblk) {
1835 length = vnd->sc_comp_offsets[comp_block + 1] -
1836 vnd->sc_comp_offsets[comp_block];
1837 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1838 error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
1839 length, vnd->sc_comp_offsets[comp_block],
1840 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
1841 NULL, NULL);
1842 if (error) {
1843 bp->b_error = error;
1844 VOP_UNLOCK(vnd->sc_vp, 0);
1845 splx(s);
1846 return;
1847 }
1848 /* uncompress the buffer */
1849 vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
1850 vnd->sc_comp_stream.avail_in = length;
1851 vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
1852 vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
1853 inflateReset(&vnd->sc_comp_stream);
1854 error = inflate(&vnd->sc_comp_stream, Z_FINISH);
1855 if (error != Z_STREAM_END) {
1856 if (vnd->sc_comp_stream.msg)
1857 aprint_normal_dev(vnd->sc_dev,
1858 "compressed file, %s\n",
1859 vnd->sc_comp_stream.msg);
1860 bp->b_error = EBADMSG;
1861 VOP_UNLOCK(vnd->sc_vp, 0);
1862 splx(s);
1863 return;
1864 }
1865 vnd->sc_comp_buffblk = comp_block;
1866 VOP_UNLOCK(vnd->sc_vp, 0);
1867 }
1868
1869 /* transfer the usable uncompressed data */
1870 offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
1871 length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
1872 if (length_in_buffer > bp->b_resid)
1873 length_in_buffer = bp->b_resid;
1874 auio.uio_iov = &aiov;
1875 auio.uio_iovcnt = 1;
1876 aiov.iov_base = addr;
1877 aiov.iov_len = length_in_buffer;
1878 auio.uio_resid = aiov.iov_len;
1879 auio.uio_offset = 0;
1880 error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
1881 length_in_buffer, &auio);
1882 if (error) {
1883 bp->b_error = error;
1884 splx(s);
1885 return;
1886 }
1887
1888 bn += length_in_buffer;
1889 addr += length_in_buffer;
1890 bp->b_resid -= length_in_buffer;
1891 }
1892 splx(s);
1893 }
1894
1895 /* compression memory allocation routines */
1896 static void *
1897 vnd_alloc(void *aux, u_int items, u_int siz)
1898 {
1899 return malloc(items * siz, M_TEMP, M_NOWAIT);
1900 }
1901
1902 static void
1903 vnd_free(void *aux, void *ptr)
1904 {
1905 free(ptr, M_TEMP);
1906 }
1907 #endif /* VND_COMPRESSION */
1908
1909 static void
1910 vnd_set_properties(struct vnd_softc *vnd)
1911 {
1912 prop_dictionary_t disk_info, odisk_info, geom;
1913
1914 disk_info = prop_dictionary_create();
1915
1916 geom = prop_dictionary_create();
1917
1918 prop_dictionary_set_uint64(geom, "sectors-per-unit",
1919 vnd->sc_geom.vng_nsectors * vnd->sc_geom.vng_ntracks *
1920 vnd->sc_geom.vng_ncylinders);
1921
1922 prop_dictionary_set_uint32(geom, "sector-size",
1923 vnd->sc_geom.vng_secsize);
1924
1925 prop_dictionary_set_uint16(geom, "sectors-per-track",
1926 vnd->sc_geom.vng_nsectors);
1927
1928 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
1929 vnd->sc_geom.vng_ntracks);
1930
1931 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
1932 vnd->sc_geom.vng_ncylinders);
1933
1934 prop_dictionary_set(disk_info, "geometry", geom);
1935 prop_object_release(geom);
1936
1937 prop_dictionary_set(device_properties(vnd->sc_dev),
1938 "disk-info", disk_info);
1939
1940 /*
1941 * Don't release disk_info here; we keep a reference to it.
1942 * disk_detach() will release it when we go away.
1943 */
1944
1945 odisk_info = vnd->sc_dkdev.dk_info;
1946 vnd->sc_dkdev.dk_info = disk_info;
1947 if (odisk_info)
1948 prop_object_release(odisk_info);
1949 }
1950
1951 #ifdef _MODULE
1952
1953 #include <sys/module.h>
1954
1955 MODULE(MODULE_CLASS_DRIVER, vnd, NULL);
1956 CFDRIVER_DECL(vnd, DV_DISK, NULL);
1957
1958 static int
1959 vnd_modcmd(modcmd_t cmd, void *arg)
1960 {
1961 int bmajor = -1, cmajor = -1, error = 0;
1962
1963 switch (cmd) {
1964 case MODULE_CMD_INIT:
1965 error = config_cfdriver_attach(&vnd_cd);
1966 if (error)
1967 break;
1968
1969 error = config_cfattach_attach(vnd_cd.cd_name, &vnd_ca);
1970 if (error) {
1971 config_cfdriver_detach(&vnd_cd);
1972 aprint_error("%s: unable to register cfattach\n",
1973 vnd_cd.cd_name);
1974 break;
1975 }
1976
1977 error = devsw_attach("vnd", &vnd_bdevsw, &bmajor,
1978 &vnd_cdevsw, &cmajor);
1979 if (error) {
1980 config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
1981 config_cfdriver_detach(&vnd_cd);
1982 break;
1983 }
1984
1985 break;
1986
1987 case MODULE_CMD_FINI:
1988 error = config_cfattach_detach(vnd_cd.cd_name, &vnd_ca);
1989 if (error)
1990 break;
1991 config_cfdriver_detach(&vnd_cd);
1992 devsw_detach(&vnd_bdevsw, &vnd_cdevsw);
1993 break;
1994
1995 case MODULE_CMD_STAT:
1996 return ENOTTY;
1997
1998 default:
1999 return ENOTTY;
2000 }
2001
2002 return error;
2003 }
2004
2005 #endif
2006