cgd.c revision 1.108.2.15
      1 /* $NetBSD: cgd.c,v 1.108.2.15 2016/07/26 03:24:20 pgoyette Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Roland C. Dowdeswell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.108.2.15 2016/07/26 03:24:20 pgoyette Exp $");
     34 
     35 #include <sys/types.h>
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/proc.h>
     39 #include <sys/errno.h>
     40 #include <sys/buf.h>
     41 #include <sys/bufq.h>
     42 #include <sys/malloc.h>
     43 #include <sys/module.h>
     44 #include <sys/pool.h>
     45 #include <sys/ioctl.h>
     46 #include <sys/device.h>
     47 #include <sys/disk.h>
     48 #include <sys/disklabel.h>
     49 #include <sys/fcntl.h>
     50 #include <sys/namei.h> /* for pathbuf */
     51 #include <sys/vnode.h>
     52 #include <sys/conf.h>
     53 #include <sys/syslog.h>
     54 #include <sys/localcount.h>
     55 
     56 #include <dev/dkvar.h>
     57 #include <dev/cgdvar.h>
     58 
     59 #include <miscfs/specfs/specdev.h> /* for v_rdev */
     60 
     61 #include "ioconf.h"
     62 
     63 /* Entry Point Functions */
     64 
     65 static dev_type_open(cgdopen);
     66 static dev_type_close(cgdclose);
     67 static dev_type_read(cgdread);
     68 static dev_type_write(cgdwrite);
     69 static dev_type_ioctl(cgdioctl);
     70 static dev_type_strategy(cgdstrategy);
     71 static dev_type_dump(cgddump);
     72 static dev_type_size(cgdsize);
     73 
     74 const struct bdevsw cgd_bdevsw = {
     75 	LOCALCOUNT_INITIALIZER
     76 	.d_open = cgdopen,
     77 	.d_close = cgdclose,
     78 	.d_strategy = cgdstrategy,
     79 	.d_ioctl = cgdioctl,
     80 	.d_dump = cgddump,
     81 	.d_psize = cgdsize,
     82 	.d_discard = nodiscard,
     83 	.d_flag = D_DISK
     84 };
     85 
     86 const struct cdevsw cgd_cdevsw = {
     87 	LOCALCOUNT_INITIALIZER
     88 	.d_open = cgdopen,
     89 	.d_close = cgdclose,
     90 	.d_read = cgdread,
     91 	.d_write = cgdwrite,
     92 	.d_ioctl = cgdioctl,
     93 	.d_stop = nostop,
     94 	.d_tty = notty,
     95 	.d_poll = nopoll,
     96 	.d_mmap = nommap,
     97 	.d_kqfilter = nokqfilter,
     98 	.d_discard = nodiscard,
     99 	.d_flag = D_DISK
    100 };
    101 
    102 static int cgd_match(device_t, cfdata_t, void *);
    103 static void cgd_attach(device_t, device_t, void *);
    104 static int cgd_detach(device_t, int);
    105 static struct cgd_softc	*cgd_spawn(int, device_t *);
    106 static int cgd_destroy(device_t);
    107 
    108 /* Internal Functions */
    109 
    110 static int	cgd_diskstart(device_t, struct buf *);
    111 static void	cgdiodone(struct buf *);
    112 static int	cgd_dumpblocks(device_t, void *, daddr_t, int);
    113 
    114 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
    115 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
    116 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
    117 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
    118 			struct lwp *);
    119 static void	cgd_cipher(struct cgd_softc *, void *, void *,
    120 			   size_t, daddr_t, size_t, int);
    121 
    122 static struct dkdriver cgddkdriver = {
    123         .d_minphys  = minphys,
    124         .d_open = cgdopen,
    125         .d_close = cgdclose,
    126         .d_strategy = cgdstrategy,
    127         .d_iosize = NULL,
    128         .d_diskstart = cgd_diskstart,
    129         .d_dumpblocks = cgd_dumpblocks,
    130         .d_lastclose = NULL
    131 };
    132 
    133 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    134     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    135 extern struct cfdriver cgd_cd;
    136 
    137 /* DIAGNOSTIC and DEBUG definitions */
    138 
    139 #if defined(CGDDEBUG) && !defined(DEBUG)
    140 #define DEBUG
    141 #endif
    142 
    143 #ifdef DEBUG
    144 int cgddebug = 0;
    145 
    146 #define CGDB_FOLLOW	0x1
    147 #define CGDB_IO	0x2
    148 #define CGDB_CRYPTO	0x4
    149 
    150 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
    151 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
    152 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
    153 
    154 static void	hexprint(const char *, void *, int);
    155 
    156 #else
    157 #define IFDEBUG(x,y)
    158 #define DPRINTF(x,y)
    159 #define DPRINTF_FOLLOW(y)
    160 #endif
    161 
    162 #ifdef DIAGNOSTIC
    163 #define DIAGPANIC(x)		panic x
    164 #define DIAGCONDPANIC(x,y)	if (x) panic y
    165 #else
    166 #define DIAGPANIC(x)
    167 #define DIAGCONDPANIC(x,y)
    168 #endif
    169 
    170 /* Global variables */
    171 
    172 /* Utility Functions */
    173 
    174 #define CGDUNIT(x)		DISKUNIT(x)
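
         /*
          * GETCGD_SOFTC() looks up (spawning on demand) the unit of "x",
          * acquiring a reference to its device_t in "_dv"; on failure it
          * returns ENXIO from the calling function.  On success the caller
          * must eventually drop the reference with device_release().
          */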
    175 #define GETCGD_SOFTC(_cs, x, _dv)			\
    176 	if (((_cs) = getcgd_softc(x, &_dv)) == NULL) {	\
    177 		return ENXIO;				\
    178 	}
    179 
    180 /* The code */
    181 
    182 /*
     183  * Look up the device and return its softc.  If the device doesn't
    184  * exist, spawn it.
    185  *
    186  * In either case, the device is "acquired", and must be "released"
    187  * by the caller after it is finished with the softc.
    188  */
    189 static struct cgd_softc *
    190 getcgd_softc(dev_t dev, device_t *self)
    191 {
    192 	int	unit = CGDUNIT(dev);
    193 	struct cgd_softc *sc;
    194 
    195 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
    196 
    197 	*self = device_lookup_acquire(&cgd_cd, unit);
    198 
    199 	if (*self == NULL) {
    200 		sc = cgd_spawn(unit, self);
    201 	} else {
    202 		sc = device_private(*self);
    203 	}
    204 
    205 	return sc;
    206 }
    207 
    208 static int
    209 cgd_match(device_t self, cfdata_t cfdata, void *aux)
    210 {
    211 
    212 	return 1;
    213 }
    214 
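         /*
          * Attach a cgd pseudo-device instance: initialize the softc lock
          * and the dk(4)/disk(9) state.  The unit remains unconfigured
          * (and unusable) until a CGDIOCSET ioctl supplies a target device
          * and key.
          */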
    215 static void
    216 cgd_attach(device_t parent, device_t self, void *aux)
    217 {
    218 	struct cgd_softc *sc;
    219 
    220 	sc = device_private(self);
    221 
    222 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
    223 	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
    224 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
    225 
    226 	if (!pmf_device_register(self, NULL, NULL))
    227 		aprint_error_dev(self,
    228 		    "unable to register power management hooks\n");
    229 }
    230 
    231 
    232 /*
     233  * The caller must hold a reference to the device's localcount.  The
    234  * reference is released if the device is available for detach.
    235  */
    236 static int
    237 cgd_detach(device_t self, int flags)
    238 {
    239 	int ret;
    240 	const int pmask = 1 << RAW_PART;
    241 	struct cgd_softc *sc = device_private(self);
    242 	struct dk_softc *dksc = &sc->sc_dksc;
    243 
    244 	if (DK_BUSY(dksc, pmask))
    245 		return EBUSY;
    246 
    247 	if (DK_ATTACHED(dksc) &&
    248 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
    249 		return ret;
    250 
    251 	disk_destroy(&dksc->sc_dkdev);
    252 	mutex_destroy(&sc->sc_lock);
    253 
    254 	device_release(self);
    255 	return 0;
    256 }
    257 
    258 void
    259 cgdattach(int num)
    260 {
    261 	int error;
    262 
    263 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
    264 	if (error != 0)
    265 		aprint_error("%s: unable to register cfattach\n",
    266 		    cgd_cd.cd_name);
    267 }
    268 
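         /*
          * Create the pseudo-device instance for "unit" on demand and
          * return its softc; on success a reference to the new device is
          * held (see the comment below) and must be released by the caller.
          */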
    269 static struct cgd_softc *
    270 cgd_spawn(int unit, device_t *self)
    271 {
    272 	cfdata_t cf;
    273 	struct cgd_softc *sc;
    274 
    275 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
    276 	cf->cf_name = cgd_cd.cd_name;
    277 	cf->cf_atname = cgd_cd.cd_name;
    278 	cf->cf_unit = unit;
    279 	cf->cf_fstate = FSTATE_STAR;
    280 
    281 	if (config_attach_pseudo(cf) == NULL)
    282 		return NULL;
    283 
    284 	*self = device_lookup_acquire(&cgd_cd, unit);
     285 	if (*self == NULL)
    286 		return NULL;
    287 	else {
    288 		/*
    289 		 * Note that we return while still holding a reference
    290 		 * to the device!
    291 		 */
    292 		sc = device_private(*self);
    293 		return sc;
    294 	}
    295 }
    296 
    297 static int
    298 cgd_destroy(device_t dev)
    299 {
    300 	int error;
    301 	cfdata_t cf;
    302 
    303 	cf = device_cfdata(dev);
    304 	error = config_detach(dev, DETACH_QUIET);
    305 	if (error == 0)
    306 		free(cf, M_DEVBUF);
    307 
    308 	return error;
    309 }
    310 
    311 static int
    312 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
    313 {
    314 	device_t self;
    315 	int	error;
    316 	struct	cgd_softc *cs;
    317 
    318 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
    319 	GETCGD_SOFTC(cs, dev, self);
    320 	error = dk_open(&cs->sc_dksc, dev, flags, fmt, l);
    321 	device_release(self);
    322 	return error;
    323 }
    324 
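         /*
          * Close; if this leaves an unconfigured unit, destroy the
          * pseudo-device as well.  In that case cgd_detach() drops the
          * device reference for us, otherwise we drop it here.
          */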
    325 static int
    326 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
    327 {
    328 	int error;
    329 	device_t self;
    330 	struct	cgd_softc *cs;
    331 	struct	dk_softc *dksc;
    332 
    333 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
    334 	GETCGD_SOFTC(cs, dev, self);
    335 	dksc = &cs->sc_dksc;
    336 	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0) {
    337 		device_release(self);
    338 		return error;
    339 	}
    340 
    341 	if (!DK_ATTACHED(dksc)) {
    342 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
    343 			aprint_error_dev(dksc->sc_dev,
    344 			    "unable to detach instance\n");
    345 			return error;
    346 		}
    347 	} else
    348 		device_release(self);
    349 	return 0;
    350 }
    351 
    352 static void
    353 cgdstrategy(struct buf *bp)
    354 {
    355 	device_t self;
    356 	struct	cgd_softc *cs = getcgd_softc(bp->b_dev, &self);
    357 	struct	dk_softc *dksc = &cs->sc_dksc;
    358 	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    359 
    360 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
    361 	    (long)bp->b_bcount));
    362 
    363 	/*
     364 	 * Reject unaligned requests.  We can encrypt and decrypt only
    365 	 * complete disk sectors, and we let the ciphers require their
    366 	 * buffers to be aligned to 32-bit boundaries.
    367 	 */
    368 	if (bp->b_blkno < 0 ||
    369 	    (bp->b_bcount % dg->dg_secsize) != 0 ||
    370 	    ((uintptr_t)bp->b_data & 3) != 0) {
    371 		bp->b_error = EINVAL;
    372 		bp->b_resid = bp->b_bcount;
    373 		biodone(bp);
    374 		device_release(self);
    375 		return;
    376 	}
    377 
    378 	/* XXXrcd: Should we test for (cs != NULL)? */
    379 	dk_strategy(&cs->sc_dksc, bp);
    380 	device_release(self);
    381 	return;
    382 }
    383 
    384 static int
    385 cgdsize(dev_t dev)
    386 {
    387 	int retval;
    388 	device_t self;
    389 	struct cgd_softc *cs = getcgd_softc(dev, &self);
    390 
    391 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
     392 	if (!cs)
     393 		return -1;	/* lookup/spawn failed: no reference to release */
     394 
     395 	retval = dk_size(&cs->sc_dksc, dev);
     396 
     397 	device_release(self);
     398 	return retval;
    399 }
    400 
    401 /*
     402  * cgd_{get,put}data allocate and release the buffer that holds the
     403  * newly encrypted data.  Each device keeps one preallocated buffer
     404  * so that at least one transaction can always be in flight; we use
     405  * that buffer first so that less malloc'ed data is outstanding at
     406  * any given point.
    407  */
    408 
    409 static void *
    410 cgd_getdata(struct dk_softc *dksc, unsigned long size)
    411 {
    412 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
    413 	void *	data = NULL;
    414 
    415 	mutex_enter(&cs->sc_lock);
    416 	if (cs->sc_data_used == 0) {
    417 		cs->sc_data_used = 1;
    418 		data = cs->sc_data;
    419 	}
    420 	mutex_exit(&cs->sc_lock);
    421 
    422 	if (data)
    423 		return data;
    424 
    425 	return malloc(size, M_DEVBUF, M_NOWAIT);
    426 }
    427 
    428 static void
    429 cgd_putdata(struct dk_softc *dksc, void *data)
    430 {
    431 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
    432 
    433 	if (data == cs->sc_data) {
    434 		mutex_enter(&cs->sc_lock);
    435 		cs->sc_data_used = 0;
    436 		mutex_exit(&cs->sc_lock);
    437 	} else {
    438 		free(data, M_DEVBUF);
    439 	}
    440 }
    441 
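         /*
          * dk(4) start routine: wrap the request in a new iobuf, encrypting
          * writes into a per-device bounce buffer first, and pass it down
          * to the underlying vnode via VOP_STRATEGY().
          */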
    442 static int
    443 cgd_diskstart(device_t dev, struct buf *bp)
    444 {
    445 	struct	cgd_softc *cs = device_private(dev);
    446 	struct	dk_softc *dksc = &cs->sc_dksc;
    447 	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    448 	struct	buf *nbp;
    449 	void *	addr;
    450 	void *	newaddr;
    451 	daddr_t	bn;
    452 	struct	vnode *vp;
    453 
    454 	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
    455 
    456 	bn = bp->b_rawblkno;
    457 
    458 	/*
    459 	 * We attempt to allocate all of our resources up front, so that
    460 	 * we can fail quickly if they are unavailable.
    461 	 */
    462 	nbp = getiobuf(cs->sc_tvn, false);
    463 	if (nbp == NULL)
    464 		return EAGAIN;
    465 
    466 	/*
    467 	 * If we are writing, then we need to encrypt the outgoing
    468 	 * block into a new block of memory.
    469 	 */
    470 	newaddr = addr = bp->b_data;
    471 	if ((bp->b_flags & B_READ) == 0) {
    472 		newaddr = cgd_getdata(dksc, bp->b_bcount);
    473 		if (!newaddr) {
    474 			putiobuf(nbp);
    475 			return EAGAIN;
    476 		}
    477 		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
    478 		    dg->dg_secsize, CGD_CIPHER_ENCRYPT);
    479 	}
    480 
    481 	nbp->b_data = newaddr;
    482 	nbp->b_flags = bp->b_flags;
    483 	nbp->b_oflags = bp->b_oflags;
    484 	nbp->b_cflags = bp->b_cflags;
    485 	nbp->b_iodone = cgdiodone;
    486 	nbp->b_proc = bp->b_proc;
    487 	nbp->b_blkno = btodb(bn * dg->dg_secsize);
    488 	nbp->b_bcount = bp->b_bcount;
    489 	nbp->b_private = bp;
    490 
    491 	BIO_COPYPRIO(nbp, bp);
    492 
    493 	if ((nbp->b_flags & B_READ) == 0) {
    494 		vp = nbp->b_vp;
    495 		mutex_enter(vp->v_interlock);
    496 		vp->v_numoutput++;
    497 		mutex_exit(vp->v_interlock);
    498 	}
    499 	VOP_STRATEGY(cs->sc_tvn, nbp);
    500 
    501 	return 0;
    502 }
    503 
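         /*
          * Completion handler for the buffer issued by cgd_diskstart():
          * decrypt in place on reads, release the bounce buffer if one was
          * used, finish the original request and restart the queue.
          */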
    504 static void
    505 cgdiodone(struct buf *nbp)
    506 {
    507 	device_t self;
    508 	struct	buf *obp = nbp->b_private;
    509 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev, &self);
    510 	struct	dk_softc *dksc = &cs->sc_dksc;
    511 	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    512 	daddr_t	bn;
    513 
    514 	KDASSERT(cs);
    515 
    516 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
    517 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
    518 	    obp, obp->b_bcount, obp->b_resid));
    519 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
    520 	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
    521 		nbp->b_bcount));
    522 	if (nbp->b_error != 0) {
    523 		obp->b_error = nbp->b_error;
    524 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
    525 		    obp->b_error));
    526 	}
    527 
    528 	/* Perform the decryption if we are reading.
    529 	 *
     530 	 * Note: use the block number from nbp, since it is what
    531 	 *       we used to encrypt the blocks.
    532 	 */
    533 
    534 	if (nbp->b_flags & B_READ) {
    535 		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
    536 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
    537 		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
    538 	}
    539 
    540 	/* If we allocated memory, free it now... */
    541 	if (nbp->b_data != obp->b_data)
    542 		cgd_putdata(dksc, nbp->b_data);
    543 
    544 	putiobuf(nbp);
    545 
    546 	/* Request is complete for whatever reason */
    547 	obp->b_resid = 0;
    548 	if (obp->b_error != 0)
    549 		obp->b_resid = obp->b_bcount;
    550 
    551 	dk_done(dksc, obp);
    552 	device_release(self);
    553 
    554 	dk_start(dksc, NULL);
    555 }
    556 
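         /*
          * Crash-dump path: encrypt the caller's data into the bounce
          * buffer and hand it to the underlying block device's dump entry.
          */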
    557 static int
    558 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
    559 {
    560 	struct cgd_softc *sc = device_private(dev);
    561 	struct dk_softc *dksc = &sc->sc_dksc;
    562 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    563 	size_t nbytes, blksize;
    564 	void *buf;
    565 	int error;
    566 
    567 	/*
    568 	 * dk_dump gives us units of disklabel sectors.  Everything
    569 	 * else in cgd uses units of diskgeom sectors.  These had
    570 	 * better agree; otherwise we need to figure out how to convert
    571 	 * between them.
    572 	 */
    573 	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
    574 	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
    575 	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
    576 	blksize = dg->dg_secsize;
    577 
    578 	/*
    579 	 * Compute the number of bytes in this request, which dk_dump
    580 	 * has `helpfully' converted to a number of blocks for us.
    581 	 */
    582 	nbytes = nblk*blksize;
    583 
    584 	/* Try to acquire a buffer to store the ciphertext.  */
    585 	buf = cgd_getdata(dksc, nbytes);
    586 	if (buf == NULL)
    587 		/* Out of memory: give up.  */
    588 		return ENOMEM;
    589 
    590 	/* Encrypt the caller's data into the temporary buffer.  */
    591 	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
    592 
    593 	/* Pass it on to the underlying disk device.  */
    594 	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
    595 
    596 	/* Release the buffer.  */
    597 	cgd_putdata(dksc, buf);
    598 
    599 	/* Return any error from the underlying disk device.  */
    600 	return error;
    601 }
    602 
    603 /* XXX: we should probably put these into dksubr.c, mostly */
    604 static int
    605 cgdread(dev_t dev, struct uio *uio, int flags)
    606 {
    607 	device_t self;
    608 	int	error;
    609 	struct	cgd_softc *cs;
    610 	struct	dk_softc *dksc;
    611 
    612 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
    613 	    (unsigned long long)dev, uio, flags));
    614 	GETCGD_SOFTC(cs, dev, self);
    615 	dksc = &cs->sc_dksc;
    616 	if (!DK_ATTACHED(dksc)) {
    617 		device_release(self);
    618 		return ENXIO;
    619 	}
    620 	error = physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
    621 	device_release(self);
    622 	return error;
    623 }
    624 
    625 /* XXX: we should probably put these into dksubr.c, mostly */
    626 static int
    627 cgdwrite(dev_t dev, struct uio *uio, int flags)
    628 {
    629 	device_t self;
    630 	int	error;
    631 	struct	cgd_softc *cs;
    632 	struct	dk_softc *dksc;
    633 
    634 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
    635 	GETCGD_SOFTC(cs, dev, self);
    636 	dksc = &cs->sc_dksc;
    637 	if (!DK_ATTACHED(dksc)) {
    638 		device_release(self);
    639 		return ENXIO;
    640 	}
    641 	error = physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
    642 	device_release(self);
    643 	return error;
    644 }
    645 
    646 static int
    647 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    648 {
    649 	device_t self;
    650 	struct	cgd_softc *cs;
    651 	struct	dk_softc *dksc;
    652 	int	part = DISKPART(dev);
    653 	int	pmask = 1 << part;
    654 	int	error = 0;
    655 
    656 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
    657 	    dev, cmd, data, flag, l));
    658 
    659 	switch (cmd) {
    660 	case CGDIOCGET:
    661 		return cgd_ioctl_get(dev, data, l);
    662 	case CGDIOCSET:
    663 	case CGDIOCCLR:
    664 		if ((flag & FWRITE) == 0)
    665 			return EBADF;
    666 		/* FALLTHROUGH */
    667 	default:
    668 		GETCGD_SOFTC(cs, dev, self);
    669 		dksc = &cs->sc_dksc;
    670 		break;
    671 	}
    672 
    673 	switch (cmd) {
    674 	case CGDIOCSET:
    675 		if (DK_ATTACHED(dksc))
    676 			error = EBUSY;
    677 		else
    678 			error = cgd_ioctl_set(cs, data, l);
    679 		break;
    680 	case CGDIOCCLR:
    681 		if (DK_BUSY(&cs->sc_dksc, pmask))
    682 			error = EBUSY;
    683 		else
    684 			error = cgd_ioctl_clr(cs, l);
    685 		break;
    686 	case DIOCCACHESYNC:
    687 		/*
    688 		 * XXX Do we really need to care about having a writable
    689 		 * file descriptor here?
    690 		 */
    691 		if ((flag & FWRITE) == 0)
    692 			error = (EBADF);
    693 
    694 		/*
    695 		 * We pass this call down to the underlying disk.
    696 		 */
    697 		else
    698 			error = VOP_IOCTL(cs->sc_tvn, cmd, data, flag,
    699 			    l->l_cred);
    700 		break;
    701 	case DIOCGSTRATEGY:
    702 	case DIOCSSTRATEGY:
    703 		if (!DK_ATTACHED(dksc)) {
    704 			error = ENOENT;
    705 			break;
    706 		}
    707 		/*FALLTHROUGH*/
    708 	default:
    709 		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
    710 		break;
    711 	case CGDIOCGET:
    712 		KASSERT(0);
    713 		error = EINVAL;
    714 		break;
    715 	}
    716 	device_release(self);
    717 	return error;
    718 }
    719 
    720 static int
    721 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    722 {
    723 	device_t self;
    724 	int	error;
    725 	struct	cgd_softc *cs;
    726 
    727 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
    728 	    dev, blkno, va, (unsigned long)size));
    729 	GETCGD_SOFTC(cs, dev, self);
    730 	error = dk_dump(&cs->sc_dksc, dev, blkno, va, size);
    731 	device_release(self);
    732 	return error;
    733 }
    734 
    735 /*
    736  * XXXrcd:
    737  *  for now we hardcode the maximum key length.
    738  */
    739 #define MAX_KEYSIZE	1024
    740 
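         /*
          * Recognized IV methods.  "encblkno" is the traditional name for
          * "encblkno8"; the "d" member is the divisor later applied to the
          * cipher blocksize (see the compatibility note in cgd_ioctl_set()).
          */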
    741 static const struct {
    742 	const char *n;
    743 	int v;
    744 	int d;
    745 } encblkno[] = {
    746 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
    747 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
    748 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
    749 };
    750 
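         /*
          * CGDIOCSET: open the underlying device, select the cipher and IV
          * method, copy in the key, and attach the dk(4) disk so that the
          * unit becomes usable.
          */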
    751 /* ARGSUSED */
    752 static int
    753 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
    754 {
    755 	struct	 cgd_ioctl *ci = data;
    756 	struct	 vnode *vp;
    757 	int	 ret;
    758 	size_t	 i;
    759 	size_t	 keybytes;			/* key length in bytes */
    760 	const char *cp;
    761 	struct pathbuf *pb;
    762 	char	 *inbuf;
    763 	struct dk_softc *dksc = &cs->sc_dksc;
    764 
    765 	cp = ci->ci_disk;
    766 
    767 	ret = pathbuf_copyin(ci->ci_disk, &pb);
    768 	if (ret != 0) {
    769 		return ret;
    770 	}
    771 	ret = dk_lookup(pb, l, &vp);
    772 	pathbuf_destroy(pb);
    773 	if (ret != 0) {
    774 		return ret;
    775 	}
    776 
    777 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
    778 
    779 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
    780 		goto bail;
    781 
    782 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    783 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
    784 	if (ret)
    785 		goto bail;
    786 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
    787 	if (!cs->sc_cfuncs) {
    788 		ret = EINVAL;
    789 		goto bail;
    790 	}
    791 
    792 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    793 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
    794 	if (ret)
    795 		goto bail;
    796 
    797 	for (i = 0; i < __arraycount(encblkno); i++)
    798 		if (strcmp(encblkno[i].n, inbuf) == 0)
    799 			break;
    800 
    801 	if (i == __arraycount(encblkno)) {
    802 		ret = EINVAL;
    803 		goto bail;
    804 	}
    805 
    806 	keybytes = ci->ci_keylen / 8 + 1;
    807 	if (keybytes > MAX_KEYSIZE) {
    808 		ret = EINVAL;
    809 		goto bail;
    810 	}
    811 
    812 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    813 	ret = copyin(ci->ci_key, inbuf, keybytes);
    814 	if (ret)
    815 		goto bail;
    816 
    817 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
    818 	cs->sc_cdata.cf_mode = encblkno[i].v;
    819 	cs->sc_cdata.cf_keylen = ci->ci_keylen;
    820 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
    821 	    &cs->sc_cdata.cf_blocksize);
    822 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
    823 	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
    824 		cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
    825 	    cs->sc_cdata.cf_priv = NULL;
    826 	}
    827 
    828 	/*
    829 	 * The blocksize is supposed to be in bytes. Unfortunately originally
    830 	 * it was expressed in bits. For compatibility we maintain encblkno
    831 	 * and encblkno8.
    832 	 */
    833 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
    834 	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
    835 	if (!cs->sc_cdata.cf_priv) {
    836 		ret = EINVAL;		/* XXX is this the right error? */
    837 		goto bail;
    838 	}
    839 	free(inbuf, M_TEMP);
    840 
    841 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
    842 
    843 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
    844 	cs->sc_data_used = 0;
    845 
    846 	/* Attach the disk. */
    847 	dk_attach(dksc);
    848 	disk_attach(&dksc->sc_dkdev);
    849 
    850 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
    851 
    852 	/* Discover wedges on this disk. */
    853 	dkwedge_discover(&dksc->sc_dkdev);
    854 
    855 	return 0;
    856 
    857 bail:
    858 	free(inbuf, M_TEMP);
    859 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
    860 	return ret;
    861 }
    862 
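         /*
          * CGDIOCCLR: undo cgd_ioctl_set(): drain and free the buffer
          * queue, close the underlying vnode, destroy the cipher context
          * and detach the disk.
          */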
    863 /* ARGSUSED */
    864 static int
    865 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
    866 {
    867 	struct	dk_softc *dksc = &cs->sc_dksc;
    868 
    869 	if (!DK_ATTACHED(dksc))
    870 		return ENXIO;
    871 
    872 	/* Delete all of our wedges. */
    873 	dkwedge_delall(&dksc->sc_dkdev);
    874 
    875 	/* Kill off any queued buffers. */
    876 	dk_drain(dksc);
    877 	bufq_free(dksc->sc_bufq);
    878 
    879 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
    880 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
    881 	free(cs->sc_tpath, M_DEVBUF);
    882 	free(cs->sc_data, M_DEVBUF);
    883 	cs->sc_data_used = 0;
    884 	dk_detach(dksc);
    885 	disk_detach(&dksc->sc_dkdev);
    886 
    887 	return 0;
    888 }
    889 
    890 static int
    891 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
    892 {
    893 	device_t self;
    894 	struct cgd_softc *cs = getcgd_softc(dev, &self);
    895 	struct cgd_user *cgu;
    896 	int unit;
    897 	struct	dk_softc *dksc = &cs->sc_dksc;
    898 
    899 	unit = CGDUNIT(dev);
    900 	cgu = (struct cgd_user *)data;
    901 
    902 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
    903 			   dev, unit, data, l));
    904 
    905 	if (cgu->cgu_unit == -1)
    906 		cgu->cgu_unit = unit;
    907 
    908 	if (cgu->cgu_unit < 0) {
    909 		device_release(self);
    910 		return EINVAL;	/* XXX: should this be ENXIO? */
    911 	}
    912 
    913 	/*
    914 	 * XXX This appears to be redundant, given the initialization
    915 	 * XXX when it was declared.  Leave it for now, but don't
    916 	 * XXX take an extra reference to the device!
    917 	 */
    918 	cs = device_lookup_private(&cgd_cd, unit);
    919 	if (cs == NULL || !DK_ATTACHED(dksc)) {
    920 		cgu->cgu_dev = 0;
    921 		cgu->cgu_alg[0] = '\0';
    922 		cgu->cgu_blocksize = 0;
    923 		cgu->cgu_mode = 0;
    924 		cgu->cgu_keylen = 0;
    925 	}
    926 	else {
    927 		cgu->cgu_dev = cs->sc_tdev;
    928 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
    929 		    sizeof(cgu->cgu_alg));
    930 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
    931 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
    932 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
    933 	}
    934 	device_release(self);
    935 	return 0;
    936 }
    937 
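         /*
          * Record the target vnode and path and derive an initial disk
          * geometry from the underlying device's size and sector size.
          */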
    938 static int
    939 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
    940 	struct lwp *l)
    941 {
    942 	struct	disk_geom *dg;
    943 	int	ret;
    944 	char	*tmppath;
    945 	uint64_t psize;
    946 	unsigned secsize;
    947 	struct dk_softc *dksc = &cs->sc_dksc;
    948 
    949 	cs->sc_tvn = vp;
    950 	cs->sc_tpath = NULL;
    951 
    952 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
    953 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
    954 	if (ret)
    955 		goto bail;
    956 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
    957 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
    958 
    959 	cs->sc_tdev = vp->v_rdev;
    960 
    961 	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
    962 		goto bail;
    963 
    964 	if (psize == 0) {
    965 		ret = ENODEV;
    966 		goto bail;
    967 	}
    968 
    969 	/*
    970 	 * XXX here we should probe the underlying device.  If we
    971 	 *     are accessing a partition of type RAW_PART, then
    972 	 *     we should populate our initial geometry with the
    973 	 *     geometry that we discover from the device.
    974 	 */
    975 	dg = &dksc->sc_dkdev.dk_geom;
    976 	memset(dg, 0, sizeof(*dg));
    977 	dg->dg_secperunit = psize;
    978 	dg->dg_secsize = secsize;
    979 	dg->dg_ntracks = 1;
    980 	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
    981 	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
    982 
    983 bail:
    984 	free(tmppath, M_TEMP);
    985 	if (ret && cs->sc_tpath)
    986 		free(cs->sc_tpath, M_DEVBUF);
    987 	return ret;
    988 }
    989 
    990 /*
    991  * Our generic cipher entry point.  This takes care of the
    992  * IV mode and passes off the work to the specific cipher.
    993  * We implement here the IV method ``encrypted block
    994  * number''.
    995  *
    996  * For the encryption case, we accomplish this by setting
    997  * up a struct uio where the first iovec of the source is
     998  * the block number and the first iovec of the dest is a
    999  * sink.  We then call the cipher with an IV of zero, and
   1000  * the right thing happens.
   1001  *
   1002  * For the decryption case, we use the same basic mechanism
   1003  * for symmetry, but we encrypt the block number in the
   1004  * first iovec.
   1005  *
   1006  * We mainly do this to avoid requiring the definition of
   1007  * an ECB mode.
   1008  *
   1009  * XXXrcd: for now we rely on our own crypto framework defined
   1010  *         in dev/cgd_crypto.c.  This will change when we
   1011  *         get a generic kernel crypto framework.
   1012  */
   1013 
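         /*
          * Concretely, for each sector the loop below amounts to (sketch):
          *
          *	iv  = E_key(le_encode(blkno))	(first iovec, zero IV, sunk)
          *	dst = CBC_key(iv, src sector)	(second iovec)
          *
          * i.e. the IV is the encrypted sector number, obtained without
          * needing a separate ECB entry point.
          */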
   1014 static void
   1015 blkno2blkno_buf(char *sbuf, daddr_t blkno)
   1016 {
   1017 	int	i;
   1018 
   1019 	/* Set up the blkno in blkno_buf, here we do not care much
   1020 	 * about the final layout of the information as long as we
   1021 	 * can guarantee that each sector will have a different IV
   1022 	 * and that the endianness of the machine will not affect
   1023 	 * the representation that we have chosen.
   1024 	 *
   1025 	 * We choose this representation, because it does not rely
   1026 	 * on the size of buf (which is the blocksize of the cipher),
   1027 	 * but allows daddr_t to grow without breaking existing
   1028 	 * disks.
   1029 	 *
   1030 	 * Note that blkno2blkno_buf does not take a size as input,
   1031 	 * and hence must be called on a pre-zeroed buffer of length
   1032 	 * greater than or equal to sizeof(daddr_t).
   1033 	 */
   1034 	for (i=0; i < sizeof(daddr_t); i++) {
   1035 		*sbuf++ = blkno & 0xff;
   1036 		blkno >>= 8;
   1037 	}
   1038 }
   1039 
   1040 static void
   1041 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
   1042     size_t len, daddr_t blkno, size_t secsize, int dir)
   1043 {
   1044 	char		*dst = dstv;
   1045 	char 		*src = srcv;
   1046 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
   1047 	struct uio	dstuio;
   1048 	struct uio	srcuio;
   1049 	struct iovec	dstiov[2];
   1050 	struct iovec	srciov[2];
   1051 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
   1052 	size_t		todo;
   1053 	char		sink[CGD_MAXBLOCKSIZE];
   1054 	char		zero_iv[CGD_MAXBLOCKSIZE];
   1055 	char		blkno_buf[CGD_MAXBLOCKSIZE];
   1056 
   1057 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
   1058 
   1059 	DIAGCONDPANIC(len % blocksize != 0,
   1060 	    ("cgd_cipher: len %% blocksize != 0"));
   1061 
   1062 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
   1063 	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
   1064 	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));
   1065 
   1066 	memset(zero_iv, 0x0, blocksize);
   1067 
   1068 	dstuio.uio_iov = dstiov;
   1069 	dstuio.uio_iovcnt = 2;
   1070 
   1071 	srcuio.uio_iov = srciov;
   1072 	srcuio.uio_iovcnt = 2;
   1073 
   1074 	dstiov[0].iov_base = sink;
   1075 	dstiov[0].iov_len  = blocksize;
   1076 	srciov[0].iov_base = blkno_buf;
   1077 	srciov[0].iov_len  = blocksize;
   1078 
   1079 	for (; len > 0; len -= todo) {
   1080 		todo = MIN(len, secsize);
   1081 
   1082 		dstiov[1].iov_base = dst;
   1083 		srciov[1].iov_base = src;
   1084 		dstiov[1].iov_len  = todo;
   1085 		srciov[1].iov_len  = todo;
   1086 
   1087 		memset(blkno_buf, 0x0, blocksize);
   1088 		blkno2blkno_buf(blkno_buf, blkno);
   1089 		if (dir == CGD_CIPHER_DECRYPT) {
   1090 			dstuio.uio_iovcnt = 1;
   1091 			srcuio.uio_iovcnt = 1;
   1092 			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
   1093 			    blkno_buf, blocksize));
   1094 			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
   1095 			    zero_iv, CGD_CIPHER_ENCRYPT);
   1096 			memcpy(blkno_buf, sink, blocksize);
   1097 			dstuio.uio_iovcnt = 2;
   1098 			srcuio.uio_iovcnt = 2;
   1099 		}
   1100 
   1101 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
   1102 		    blkno_buf, blocksize));
   1103 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
   1104 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
   1105 		    sink, blocksize));
   1106 
   1107 		dst += todo;
   1108 		src += todo;
   1109 		blkno++;
   1110 	}
   1111 }
   1112 
   1113 #ifdef DEBUG
   1114 static void
   1115 hexprint(const char *start, void *buf, int len)
   1116 {
   1117 	char	*c = buf;
   1118 
   1119 	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
   1120 	printf("%s: len=%06d 0x", start, len);
   1121 	while (len--)
   1122 		printf("%02x", (unsigned char) *c++);
   1123 }
   1124 #endif
   1125 
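         /*
          * Module glue: on load register the cfdriver, cfattach and the
          * block/character devsw entries; on unload remove them in reverse
          * order, rolling back if a later step fails.
          */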
   1126 MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");
   1127 
   1128 #ifdef _MODULE
   1129 CFDRIVER_DECL(cgd, DV_DISK, NULL);
   1130 
   1131 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
   1132 #endif
   1133 
   1134 static int
   1135 cgd_modcmd(modcmd_t cmd, void *arg)
   1136 {
   1137 	int error = 0;
   1138 
   1139 	switch (cmd) {
   1140 	case MODULE_CMD_INIT:
   1141 #ifdef _MODULE
   1142 		error = config_cfdriver_attach(&cgd_cd);
   1143 		if (error)
   1144 			break;
   1145 
   1146 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1147 	        if (error) {
   1148 			config_cfdriver_detach(&cgd_cd);
   1149 			aprint_error("%s: unable to register cfattach for "
   1150 			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
   1151 			break;
   1152 		}
   1153 		/*
   1154 		 * Attach the {b,c}devsw's
   1155 		 */
   1156 		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1157 		    &cgd_cdevsw, &cgd_cmajor);
   1164 
   1165 		/*
   1166 		 * If devsw_attach fails, remove from autoconf database
   1167 		 */
   1168 		if (error) {
   1169 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1170 			config_cfdriver_detach(&cgd_cd);
   1171 			aprint_error("%s: unable to attach %s devsw, "
   1172 			    "error %d", __func__, cgd_cd.cd_name, error);
   1173 			break;
   1174 		}
   1175 #endif
   1176 		break;
   1177 
   1178 	case MODULE_CMD_FINI:
   1179 #ifdef _MODULE
   1180 		/*
   1181 		 * Remove {b,c}devsw's
   1182 		 */
   1183 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
   1184 
   1185 		/*
   1186 		 * Now remove device from autoconf database
   1187 		 */
   1188 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1189 		if (error) {
   1190 			error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1191 			    &cgd_cdevsw, &cgd_cmajor);
   1192 			aprint_error("%s: failed to detach %s cfattach, "
   1193 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1194 			break;
   1195 		}
   1196 		error = config_cfdriver_detach(&cgd_cd);
   1197 		if (error) {
   1198 			config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1199 			devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1200 			    &cgd_cdevsw, &cgd_cmajor);
   1201 			aprint_error("%s: failed to detach %s cfdriver, "
   1202 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1203 			break;
   1204 		}
   1205 #endif
   1206 		break;
   1207 
   1208 	case MODULE_CMD_STAT:
   1209 		error = ENOTTY;
   1210 		break;
   1211 	default:
   1212 		error = ENOTTY;
   1213 		break;
   1214 	}
   1215 
   1216 	return error;
   1217 }
   1218