cgd.c revision 1.118.2.1
      1 /* $NetBSD: cgd.c,v 1.118.2.1 2020/01/17 21:47:30 ad Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Roland C. Dowdeswell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.118.2.1 2020/01/17 21:47:30 ad Exp $");
     34 
     35 #include <sys/types.h>
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/proc.h>
     39 #include <sys/errno.h>
     40 #include <sys/buf.h>
     41 #include <sys/bufq.h>
     42 #include <sys/malloc.h>
     43 #include <sys/module.h>
     44 #include <sys/pool.h>
     45 #include <sys/ioctl.h>
     46 #include <sys/device.h>
     47 #include <sys/disk.h>
     48 #include <sys/disklabel.h>
     49 #include <sys/fcntl.h>
     50 #include <sys/namei.h> /* for pathbuf */
     51 #include <sys/vnode.h>
     52 #include <sys/conf.h>
     53 #include <sys/syslog.h>
     54 
     55 #include <dev/dkvar.h>
     56 #include <dev/cgdvar.h>
     57 
     58 #include <miscfs/specfs/specdev.h> /* for v_rdev */
     59 
     60 #include "ioconf.h"
     61 
     62 struct selftest_params {
     63 	const char *alg;
     64 	int blocksize;	/* number of bytes */
     65 	int secsize;
     66 	daddr_t blkno;
     67 	int keylen;	/* number of bits */
     68 	int txtlen;	/* number of bytes */
     69 	const uint8_t *key;
     70 	const uint8_t *ptxt;
     71 	const uint8_t *ctxt;
     72 };
     73 
     74 /* Entry Point Functions */
     75 
     76 static dev_type_open(cgdopen);
     77 static dev_type_close(cgdclose);
     78 static dev_type_read(cgdread);
     79 static dev_type_write(cgdwrite);
     80 static dev_type_ioctl(cgdioctl);
     81 static dev_type_strategy(cgdstrategy);
     82 static dev_type_dump(cgddump);
     83 static dev_type_size(cgdsize);
     84 
     85 const struct bdevsw cgd_bdevsw = {
     86 	.d_open = cgdopen,
     87 	.d_close = cgdclose,
     88 	.d_strategy = cgdstrategy,
     89 	.d_ioctl = cgdioctl,
     90 	.d_dump = cgddump,
     91 	.d_psize = cgdsize,
     92 	.d_discard = nodiscard,
     93 	.d_flag = D_DISK
     94 };
     95 
     96 const struct cdevsw cgd_cdevsw = {
     97 	.d_open = cgdopen,
     98 	.d_close = cgdclose,
     99 	.d_read = cgdread,
    100 	.d_write = cgdwrite,
    101 	.d_ioctl = cgdioctl,
    102 	.d_stop = nostop,
    103 	.d_tty = notty,
    104 	.d_poll = nopoll,
    105 	.d_mmap = nommap,
    106 	.d_kqfilter = nokqfilter,
    107 	.d_discard = nodiscard,
    108 	.d_flag = D_DISK
    109 };
    110 
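/*
 * Self-test vectors.  Each ptxt/ctxt pair below comes from the IEEE
 * P1619 XTS-AES test vectors, together with the key and sector
 * parameters needed to reproduce the ciphertext; selftest() runs them
 * when the module is initialized.  The key arrays carry one extra
 * trailing zero byte, apparently to mirror the keylen/8 + 1 byte
 * layout that cgd_ioctl_set() copies in from userland.
 */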
    111 /*
    112  * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
    113  */
    114 static const uint8_t selftest_aes_xts_256_ptxt[64] = {
    115 	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
    116 	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
    117 	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
    118 	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
    119 	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
    120 	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
    121 	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
    122 	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
    123 };
    124 
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
    126 	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
    127 	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
    128 	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
    129 	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
    130 	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
    131 	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
    132 	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
    133 	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
    134 };
    135 
    136 static const uint8_t selftest_aes_xts_256_key[33] = {
    137 	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
    138 	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
    139 	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
    140 	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
    141 	0
    142 };
    143 
    144 /*
    145  * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
    146  */
    147 static const uint8_t selftest_aes_xts_512_ptxt[64] = {
    148 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    149 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    150 	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
    151 	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    152 	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
    153 	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
    154 	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
    155 	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
    156 };
    157 
    158 static const uint8_t selftest_aes_xts_512_ctxt[64] = {
    159 	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
    160 	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
    161 	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
    162 	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
    163 	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
    164 	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
    165 	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
    166 	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
    167 };
    168 
    169 static const uint8_t selftest_aes_xts_512_key[65] = {
    170 	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
    171 	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
    172 	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
    173 	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
    174 	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
    175 	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
    176 	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
    177 	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
    178 	0
    179 };
    180 
    181 const struct selftest_params selftests[] = {
    182 	{
    183 		.alg = "aes-xts",
    184 		.blocksize = 16,
    185 		.secsize = 512,
    186 		.blkno = 1,
    187 		.keylen = 256,
    188 		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
    189 		.key  = selftest_aes_xts_256_key,
    190 		.ptxt = selftest_aes_xts_256_ptxt,
    191 		.ctxt = selftest_aes_xts_256_ctxt
    192 	},
    193 	{
    194 		.alg = "aes-xts",
    195 		.blocksize = 16,
    196 		.secsize = 512,
    197 		.blkno = 0xffff,
    198 		.keylen = 512,
    199 		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
    200 		.key  = selftest_aes_xts_512_key,
    201 		.ptxt = selftest_aes_xts_512_ptxt,
    202 		.ctxt = selftest_aes_xts_512_ctxt
    203 	}
    204 };
    205 
    206 static int cgd_match(device_t, cfdata_t, void *);
    207 static void cgd_attach(device_t, device_t, void *);
    208 static int cgd_detach(device_t, int);
    209 static struct cgd_softc	*cgd_spawn(int);
    210 static int cgd_destroy(device_t);
    211 
    212 /* Internal Functions */
    213 
    214 static int	cgd_diskstart(device_t, struct buf *);
    215 static void	cgdiodone(struct buf *);
    216 static int	cgd_dumpblocks(device_t, void *, daddr_t, int);
    217 
    218 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
    219 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
    220 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
    221 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
    222 			struct lwp *);
    223 static void	cgd_cipher(struct cgd_softc *, void *, void *,
    224 			   size_t, daddr_t, size_t, int);
    225 
static struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};
    236 
    237 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    238     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    239 
    240 /* DIAGNOSTIC and DEBUG definitions */
    241 
    242 #if defined(CGDDEBUG) && !defined(DEBUG)
    243 #define DEBUG
    244 #endif
    245 
    246 #ifdef DEBUG
    247 int cgddebug = 0;
    248 
    249 #define CGDB_FOLLOW	0x1
    250 #define CGDB_IO	0x2
    251 #define CGDB_CRYPTO	0x4
    252 
    253 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
    254 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
    255 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
    256 
    257 static void	hexprint(const char *, void *, int);
    258 
    259 #else
    260 #define IFDEBUG(x,y)
    261 #define DPRINTF(x,y)
    262 #define DPRINTF_FOLLOW(y)
    263 #endif
    264 
    265 /* Global variables */
    266 
    267 /* Utility Functions */
    268 
    269 #define CGDUNIT(x)		DISKUNIT(x)
    270 #define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
    271 
    272 /* The code */
    273 
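/*
 * Look up the softc for a unit.  If the device has not been attached
 * yet, try to spawn a pseudo-device instance on the fly; NULL is
 * returned if that fails.
 */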
    274 static struct cgd_softc *
    275 getcgd_softc(dev_t dev)
    276 {
    277 	int	unit = CGDUNIT(dev);
    278 	struct cgd_softc *sc;
    279 
    280 	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
    281 
    282 	sc = device_lookup_private(&cgd_cd, unit);
    283 	if (sc == NULL)
    284 		sc = cgd_spawn(unit);
    285 	return sc;
    286 }
    287 
    288 static int
    289 cgd_match(device_t self, cfdata_t cfdata, void *aux)
    290 {
    291 
    292 	return 1;
    293 }
    294 
    295 static void
    296 cgd_attach(device_t parent, device_t self, void *aux)
    297 {
    298 	struct cgd_softc *sc = device_private(self);
    299 
    300 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
    301 	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
    302 	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
    303 
    304 	if (!pmf_device_register(self, NULL, NULL))
    305 		aprint_error_dev(self,
    306 		    "unable to register power management hooks\n");
    307 }
    308 
    309 
    310 static int
    311 cgd_detach(device_t self, int flags)
    312 {
    313 	int ret;
    314 	const int pmask = 1 << RAW_PART;
    315 	struct cgd_softc *sc = device_private(self);
    316 	struct dk_softc *dksc = &sc->sc_dksc;
    317 
    318 	if (DK_BUSY(dksc, pmask))
    319 		return EBUSY;
    320 
    321 	if (DK_ATTACHED(dksc) &&
    322 	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
    323 		return ret;
    324 
    325 	disk_destroy(&dksc->sc_dkdev);
    326 	mutex_destroy(&sc->sc_lock);
    327 
    328 	return 0;
    329 }
    330 
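/*
 * Pseudo-device attach hook.  The unit count hint is ignored because
 * instances are created on demand by cgd_spawn(); all that is needed
 * here is to register the cfattach.
 */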
    331 void
    332 cgdattach(int num)
    333 {
    334 	int error;
    335 
    336 	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
    337 	if (error != 0)
    338 		aprint_error("%s: unable to register cfattach\n",
    339 		    cgd_cd.cd_name);
    340 }
    341 
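/*
 * Create a cgd instance on demand: fabricate a cfdata record for the
 * requested unit and attach it through autoconf.  The cfdata is freed
 * again in cgd_destroy().
 */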
    342 static struct cgd_softc *
    343 cgd_spawn(int unit)
    344 {
    345 	cfdata_t cf;
    346 
    347 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
    348 	cf->cf_name = cgd_cd.cd_name;
    349 	cf->cf_atname = cgd_cd.cd_name;
    350 	cf->cf_unit = unit;
    351 	cf->cf_fstate = FSTATE_STAR;
    352 
    353 	return device_private(config_attach_pseudo(cf));
    354 }
    355 
    356 static int
    357 cgd_destroy(device_t dev)
    358 {
    359 	int error;
    360 	cfdata_t cf;
    361 
    362 	cf = device_cfdata(dev);
    363 	error = config_detach(dev, DETACH_QUIET);
    364 	if (error)
    365 		return error;
    366 	free(cf, M_DEVBUF);
    367 	return 0;
    368 }
    369 
    370 static int
    371 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
    372 {
    373 	struct	cgd_softc *cs;
    374 
    375 	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
    376 	GETCGD_SOFTC(cs, dev);
    377 	return dk_open(&cs->sc_dksc, dev, flags, fmt, l);
    378 }
    379 
    380 static int
    381 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
    382 {
    383 	int error;
    384 	struct	cgd_softc *cs;
    385 	struct	dk_softc *dksc;
    386 
    387 	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
    388 	GETCGD_SOFTC(cs, dev);
    389 	dksc = &cs->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
    391 		return error;
    392 
    393 	if (!DK_ATTACHED(dksc)) {
    394 		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
    395 			aprint_error_dev(dksc->sc_dev,
    396 			    "unable to detach instance\n");
    397 			return error;
    398 		}
    399 	}
    400 	return 0;
    401 }
    402 
    403 static void
    404 cgdstrategy(struct buf *bp)
    405 {
    406 	struct	cgd_softc *cs;
    407 
    408 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
    409 	    (long)bp->b_bcount));
    410 
    411 	cs = getcgd_softc(bp->b_dev);
    412 	if (!cs) {
    413 		bp->b_error = ENXIO;
    414 		goto bail;
    415 	}
    416 
	/*
	 * Reject misaligned buffers: both reads and writes need
	 * b_data to be 32-bit aligned.
	 */
    420 	if (((uintptr_t)bp->b_data & 3) != 0) {
    421 		bp->b_error = EINVAL;
    422 		goto bail;
    423 	}
    424 
    425 	dk_strategy(&cs->sc_dksc, bp);
    426 	return;
    427 
    428 bail:
    429 	bp->b_resid = bp->b_bcount;
    430 	biodone(bp);
    431 	return;
    432 }
    433 
    434 static int
    435 cgdsize(dev_t dev)
    436 {
    437 	struct cgd_softc *cs = getcgd_softc(dev);
    438 
    439 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
    440 	if (!cs)
    441 		return -1;
    442 	return dk_size(&cs->sc_dksc, dev);
    443 }
    444 
    445 /*
    446  * cgd_{get,put}data are functions that deal with getting a buffer
    447  * for the new encrypted data.  We have a buffer per device so that
    448  * we can ensure that we can always have a transaction in flight.
    449  * We use this buffer first so that we have one less piece of
    450  * malloc'ed data at any given point.
    451  */
    452 
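/*
 * Note that cgd_getdata() may return NULL when the per-device buffer
 * is busy and the M_NOWAIT allocation fails; callers such as
 * cgd_diskstart() handle this by backing off with EAGAIN.
 */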
    453 static void *
    454 cgd_getdata(struct dk_softc *dksc, unsigned long size)
    455 {
    456 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
    457 	void *	data = NULL;
    458 
    459 	mutex_enter(&cs->sc_lock);
    460 	if (cs->sc_data_used == 0) {
    461 		cs->sc_data_used = 1;
    462 		data = cs->sc_data;
    463 	}
    464 	mutex_exit(&cs->sc_lock);
    465 
    466 	if (data)
    467 		return data;
    468 
    469 	return malloc(size, M_DEVBUF, M_NOWAIT);
    470 }
    471 
    472 static void
    473 cgd_putdata(struct dk_softc *dksc, void *data)
    474 {
    475 	struct	cgd_softc *cs = (struct cgd_softc *)dksc;
    476 
    477 	if (data == cs->sc_data) {
    478 		mutex_enter(&cs->sc_lock);
    479 		cs->sc_data_used = 0;
    480 		mutex_exit(&cs->sc_lock);
    481 	} else {
    482 		free(data, M_DEVBUF);
    483 	}
    484 }
    485 
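/*
 * Start one transfer on behalf of dk_start().  For writes, the data is
 * first encrypted into a scratch buffer so the caller's pages are left
 * untouched; the resulting buffer is then handed to the backing vnode
 * with VOP_STRATEGY().  Reads are decrypted later, in cgdiodone().
 */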
    486 static int
    487 cgd_diskstart(device_t dev, struct buf *bp)
    488 {
    489 	struct	cgd_softc *cs = device_private(dev);
    490 	struct	dk_softc *dksc = &cs->sc_dksc;
    491 	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    492 	struct	buf *nbp;
    493 	void *	addr;
    494 	void *	newaddr;
    495 	daddr_t	bn;
    496 	struct	vnode *vp;
    497 
    498 	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
    499 
    500 	bn = bp->b_rawblkno;
    501 
    502 	/*
    503 	 * We attempt to allocate all of our resources up front, so that
    504 	 * we can fail quickly if they are unavailable.
    505 	 */
    506 	nbp = getiobuf(cs->sc_tvn, false);
    507 	if (nbp == NULL)
    508 		return EAGAIN;
    509 
    510 	/*
    511 	 * If we are writing, then we need to encrypt the outgoing
    512 	 * block into a new block of memory.
    513 	 */
    514 	newaddr = addr = bp->b_data;
    515 	if ((bp->b_flags & B_READ) == 0) {
    516 		newaddr = cgd_getdata(dksc, bp->b_bcount);
    517 		if (!newaddr) {
    518 			putiobuf(nbp);
    519 			return EAGAIN;
    520 		}
    521 		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
    522 		    dg->dg_secsize, CGD_CIPHER_ENCRYPT);
    523 	}
    524 
    525 	nbp->b_data = newaddr;
    526 	nbp->b_flags = bp->b_flags;
    527 	nbp->b_oflags = bp->b_oflags;
    528 	nbp->b_cflags = bp->b_cflags;
    529 	nbp->b_iodone = cgdiodone;
    530 	nbp->b_proc = bp->b_proc;
    531 	nbp->b_blkno = btodb(bn * dg->dg_secsize);
    532 	nbp->b_bcount = bp->b_bcount;
    533 	nbp->b_private = bp;
    534 
    535 	BIO_COPYPRIO(nbp, bp);
    536 
    537 	if ((nbp->b_flags & B_READ) == 0) {
    538 		vp = nbp->b_vp;
    539 		mutex_enter(vp->v_interlock);
    540 		vp->v_numoutput++;
    541 		mutex_exit(vp->v_interlock);
    542 	}
    543 	VOP_STRATEGY(cs->sc_tvn, nbp);
    544 
    545 	return 0;
    546 }
    547 
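/*
 * Completion handler for the nested buffer issued by cgd_diskstart().
 * Decrypt freshly read data in place, release any scratch buffer that
 * was used for a write, and complete the original request.
 */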
    548 static void
    549 cgdiodone(struct buf *nbp)
    550 {
    551 	struct	buf *obp = nbp->b_private;
    552 	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
    553 	struct	dk_softc *dksc = &cs->sc_dksc;
    554 	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    555 	daddr_t	bn;
    556 
    557 	KDASSERT(cs);
    558 
    559 	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
    560 	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
    561 	    obp, obp->b_bcount, obp->b_resid));
    562 	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
    563 	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
    564 		nbp->b_bcount));
    565 	if (nbp->b_error != 0) {
    566 		obp->b_error = nbp->b_error;
    567 		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
    568 		    obp->b_error));
    569 	}
    570 
	/*
	 * Perform the decryption if we are reading.
	 *
	 * Note: use the block number from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */
    576 
    577 	if (nbp->b_flags & B_READ) {
    578 		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
    579 		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
    580 		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
    581 	}
    582 
    583 	/* If we allocated memory, free it now... */
    584 	if (nbp->b_data != obp->b_data)
    585 		cgd_putdata(dksc, nbp->b_data);
    586 
    587 	putiobuf(nbp);
    588 
    589 	/* Request is complete for whatever reason */
    590 	obp->b_resid = 0;
    591 	if (obp->b_error != 0)
    592 		obp->b_resid = obp->b_bcount;
    593 
    594 	KERNEL_LOCK(1, NULL);		/* XXXSMP */
    595 	dk_done(dksc, obp);
    596 	dk_start(dksc, NULL);
    597 	KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
    598 }
    599 
    600 static int
    601 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
    602 {
    603 	struct cgd_softc *sc = device_private(dev);
    604 	struct dk_softc *dksc = &sc->sc_dksc;
    605 	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
    606 	size_t nbytes, blksize;
    607 	void *buf;
    608 	int error;
    609 
    610 	/*
    611 	 * dk_dump gives us units of disklabel sectors.  Everything
    612 	 * else in cgd uses units of diskgeom sectors.  These had
    613 	 * better agree; otherwise we need to figure out how to convert
    614 	 * between them.
    615 	 */
    616 	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
    617 	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
    618 	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
    619 	blksize = dg->dg_secsize;
    620 
    621 	/*
    622 	 * Compute the number of bytes in this request, which dk_dump
    623 	 * has `helpfully' converted to a number of blocks for us.
    624 	 */
    625 	nbytes = nblk*blksize;
    626 
    627 	/* Try to acquire a buffer to store the ciphertext.  */
    628 	buf = cgd_getdata(dksc, nbytes);
    629 	if (buf == NULL)
    630 		/* Out of memory: give up.  */
    631 		return ENOMEM;
    632 
    633 	/* Encrypt the caller's data into the temporary buffer.  */
    634 	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
    635 
    636 	/* Pass it on to the underlying disk device.  */
    637 	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
    638 
    639 	/* Release the buffer.  */
    640 	cgd_putdata(dksc, buf);
    641 
    642 	/* Return any error from the underlying disk device.  */
    643 	return error;
    644 }
    645 
    646 /* XXX: we should probably put these into dksubr.c, mostly */
    647 static int
    648 cgdread(dev_t dev, struct uio *uio, int flags)
    649 {
    650 	struct	cgd_softc *cs;
    651 	struct	dk_softc *dksc;
    652 
    653 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
    654 	    (unsigned long long)dev, uio, flags));
    655 	GETCGD_SOFTC(cs, dev);
    656 	dksc = &cs->sc_dksc;
    657 	if (!DK_ATTACHED(dksc))
    658 		return ENXIO;
    659 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
    660 }
    661 
    662 /* XXX: we should probably put these into dksubr.c, mostly */
    663 static int
    664 cgdwrite(dev_t dev, struct uio *uio, int flags)
    665 {
    666 	struct	cgd_softc *cs;
    667 	struct	dk_softc *dksc;
    668 
    669 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
    670 	GETCGD_SOFTC(cs, dev);
    671 	dksc = &cs->sc_dksc;
    672 	if (!DK_ATTACHED(dksc))
    673 		return ENXIO;
    674 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
    675 }
    676 
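/*
 * ioctl entry point.  CGDIOCSET configures a unit, CGDIOCCLR tears it
 * down, CGDIOCGET reports its state; most other commands fall through
 * to dk_ioctl() or the backing device.
 *
 * An illustrative, simplified userland sketch (normally hidden behind
 * cgdconfig(8)); the values shown are made up:
 *
 *	struct cgd_ioctl ci = { 0 };
 *	ci.ci_disk = "/dev/wd0e";
 *	ci.ci_alg = "aes-xts";
 *	ci.ci_ivmethod = "encblkno1";
 *	ci.ci_key = key;
 *	ci.ci_keylen = 256;
 *	...
 *	ioctl(fd, CGDIOCSET, &ci);
 */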
    677 static int
    678 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    679 {
    680 	struct	cgd_softc *cs;
    681 	struct	dk_softc *dksc;
    682 	int	part = DISKPART(dev);
    683 	int	pmask = 1 << part;
    684 
    685 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
    686 	    dev, cmd, data, flag, l));
    687 
    688 	switch (cmd) {
    689 	case CGDIOCGET:
    690 		return cgd_ioctl_get(dev, data, l);
    691 	case CGDIOCSET:
    692 	case CGDIOCCLR:
    693 		if ((flag & FWRITE) == 0)
    694 			return EBADF;
    695 		/* FALLTHROUGH */
    696 	default:
    697 		GETCGD_SOFTC(cs, dev);
    698 		dksc = &cs->sc_dksc;
    699 		break;
    700 	}
    701 
    702 	switch (cmd) {
    703 	case CGDIOCSET:
    704 		if (DK_ATTACHED(dksc))
    705 			return EBUSY;
    706 		return cgd_ioctl_set(cs, data, l);
    707 	case CGDIOCCLR:
    708 		if (DK_BUSY(&cs->sc_dksc, pmask))
    709 			return EBUSY;
    710 		return cgd_ioctl_clr(cs, l);
    711 	case DIOCGCACHE:
    712 	case DIOCCACHESYNC:
    713 		if (!DK_ATTACHED(dksc))
    714 			return ENOENT;
    715 		/*
    716 		 * We pass this call down to the underlying disk.
    717 		 */
    718 		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
    719 	case DIOCGSTRATEGY:
    720 	case DIOCSSTRATEGY:
    721 		if (!DK_ATTACHED(dksc))
    722 			return ENOENT;
    723 		/*FALLTHROUGH*/
    724 	default:
    725 		return dk_ioctl(dksc, dev, cmd, data, flag, l);
    726 	case CGDIOCGET:
    727 		KASSERT(0);
    728 		return EINVAL;
    729 	}
    730 }
    731 
    732 static int
    733 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    734 {
    735 	struct	cgd_softc *cs;
    736 
    737 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
    738 	    dev, blkno, va, (unsigned long)size));
    739 	GETCGD_SOFTC(cs, dev);
    740 	return dk_dump(&cs->sc_dksc, dev, blkno, va, size);
    741 }
    742 
    743 /*
    744  * XXXrcd:
    745  *  for now we hardcode the maximum key length.
    746  */
    747 #define MAX_KEYSIZE	1024
    748 
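/*
 * Recognized IV methods.  "encblkno" is an alias for the historical
 * "encblkno8" behaviour; the `d' member is the divisor later applied
 * to the blocksize to paper over the old bits-versus-bytes confusion
 * (see the comment in cgd_ioctl_set()).
 */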
    749 static const struct {
    750 	const char *n;
    751 	int v;
    752 	int d;
    753 } encblkno[] = {
    754 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
    755 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
    756 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
    757 };
    758 
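/*
 * Configure a unit (CGDIOCSET): open the backing device, copy in the
 * algorithm name, IV method and key from userland, initialize the
 * cipher, then attach the disk and discover wedges.
 */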
    759 /* ARGSUSED */
    760 static int
    761 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
    762 {
    763 	struct	 cgd_ioctl *ci = data;
    764 	struct	 vnode *vp;
    765 	int	 ret;
    766 	size_t	 i;
    767 	size_t	 keybytes;			/* key length in bytes */
    768 	const char *cp;
    769 	struct pathbuf *pb;
    770 	char	 *inbuf;
    771 	struct dk_softc *dksc = &cs->sc_dksc;
    772 
    773 	cp = ci->ci_disk;
    774 
    775 	ret = pathbuf_copyin(ci->ci_disk, &pb);
    776 	if (ret != 0) {
    777 		return ret;
    778 	}
    779 	ret = vn_bdev_openpath(pb, &vp, l);
    780 	pathbuf_destroy(pb);
    781 	if (ret != 0) {
    782 		return ret;
    783 	}
    784 
    785 	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
    786 
    787 	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
    788 		goto bail;
    789 
    790 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    791 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
    792 	if (ret)
    793 		goto bail;
    794 	cs->sc_cfuncs = cryptfuncs_find(inbuf);
    795 	if (!cs->sc_cfuncs) {
    796 		ret = EINVAL;
    797 		goto bail;
    798 	}
    799 
    800 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    801 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
    802 	if (ret)
    803 		goto bail;
    804 
    805 	for (i = 0; i < __arraycount(encblkno); i++)
    806 		if (strcmp(encblkno[i].n, inbuf) == 0)
    807 			break;
    808 
    809 	if (i == __arraycount(encblkno)) {
    810 		ret = EINVAL;
    811 		goto bail;
    812 	}
    813 
    814 	keybytes = ci->ci_keylen / 8 + 1;
    815 	if (keybytes > MAX_KEYSIZE) {
    816 		ret = EINVAL;
    817 		goto bail;
    818 	}
    819 
    820 	(void)memset(inbuf, 0, MAX_KEYSIZE);
    821 	ret = copyin(ci->ci_key, inbuf, keybytes);
    822 	if (ret)
    823 		goto bail;
    824 
    825 	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
    826 	cs->sc_cdata.cf_mode = encblkno[i].v;
    827 	cs->sc_cdata.cf_keylen = ci->ci_keylen;
    828 	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
    829 	    &cs->sc_cdata.cf_blocksize);
    830 	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING,
		    "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
    834 	}
    835 
    836 	/*
    837 	 * The blocksize is supposed to be in bytes. Unfortunately originally
    838 	 * it was expressed in bits. For compatibility we maintain encblkno
    839 	 * and encblkno8.
    840 	 */
    841 	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
    842 	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
    843 	if (!cs->sc_cdata.cf_priv) {
    844 		ret = EINVAL;		/* XXX is this the right error? */
    845 		goto bail;
    846 	}
    847 	free(inbuf, M_TEMP);
    848 
    849 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
    850 
    851 	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
    852 	cs->sc_data_used = 0;
    853 
    854 	/* Attach the disk. */
    855 	dk_attach(dksc);
    856 	disk_attach(&dksc->sc_dkdev);
    857 
    858 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
    859 
    860 	/* Discover wedges on this disk. */
    861 	dkwedge_discover(&dksc->sc_dkdev);
    862 
    863 	return 0;
    864 
    865 bail:
    866 	free(inbuf, M_TEMP);
    867 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
    868 	return ret;
    869 }
    870 
    871 /* ARGSUSED */
    872 static int
    873 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
    874 {
    875 	struct	dk_softc *dksc = &cs->sc_dksc;
    876 
    877 	if (!DK_ATTACHED(dksc))
    878 		return ENXIO;
    879 
    880 	/* Delete all of our wedges. */
    881 	dkwedge_delall(&dksc->sc_dkdev);
    882 
    883 	/* Kill off any queued buffers. */
    884 	dk_drain(dksc);
    885 	bufq_free(dksc->sc_bufq);
    886 
    887 	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
    888 	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
    889 	free(cs->sc_tpath, M_DEVBUF);
    890 	free(cs->sc_data, M_DEVBUF);
    891 	cs->sc_data_used = 0;
    892 	dk_detach(dksc);
    893 	disk_detach(&dksc->sc_dkdev);
    894 
    895 	return 0;
    896 }
    897 
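/*
 * Report the configuration of a unit (CGDIOCGET).  An unconfigured or
 * nonexistent unit is not an error here; it is reported back with
 * cgu_dev == 0 so that userland can probe units cheaply.
 */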
    898 static int
    899 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
    900 {
	struct cgd_softc *cs;
	struct cgd_user *cgu;
	int unit;
    905 
    906 	unit = CGDUNIT(dev);
    907 	cgu = (struct cgd_user *)data;
    908 
    909 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
    910 			   dev, unit, data, l));
    911 
    912 	if (cgu->cgu_unit == -1)
    913 		cgu->cgu_unit = unit;
    914 
    915 	if (cgu->cgu_unit < 0)
    916 		return EINVAL;	/* XXX: should this be ENXIO? */
    917 
	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || !DK_ATTACHED(&cs->sc_dksc)) {
    920 		cgu->cgu_dev = 0;
    921 		cgu->cgu_alg[0] = '\0';
    922 		cgu->cgu_blocksize = 0;
    923 		cgu->cgu_mode = 0;
    924 		cgu->cgu_keylen = 0;
    925 	}
    926 	else {
    927 		cgu->cgu_dev = cs->sc_tdev;
    928 		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
    929 		    sizeof(cgu->cgu_alg));
    930 		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
    931 		cgu->cgu_mode = cs->sc_cdata.cf_mode;
    932 		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
    933 	}
    934 	return 0;
    935 }
    936 
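/*
 * Record the backing device (path, vnode, dev_t) and synthesize an
 * initial geometry from its size.  The cylinder/track figures are
 * largely arbitrary; the sector size and total sector count are what
 * the rest of the driver relies on.
 */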
    937 static int
    938 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
    939 	struct lwp *l)
    940 {
    941 	struct	disk_geom *dg;
    942 	int	ret;
    943 	char	*tmppath;
    944 	uint64_t psize;
    945 	unsigned secsize;
    946 	struct dk_softc *dksc = &cs->sc_dksc;
    947 
    948 	cs->sc_tvn = vp;
    949 	cs->sc_tpath = NULL;
    950 
    951 	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
    952 	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
    953 	if (ret)
    954 		goto bail;
    955 	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
    956 	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
    957 
    958 	cs->sc_tdev = vp->v_rdev;
    959 
    960 	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
    961 		goto bail;
    962 
    963 	if (psize == 0) {
    964 		ret = ENODEV;
    965 		goto bail;
    966 	}
    967 
    968 	/*
    969 	 * XXX here we should probe the underlying device.  If we
    970 	 *     are accessing a partition of type RAW_PART, then
    971 	 *     we should populate our initial geometry with the
    972 	 *     geometry that we discover from the device.
    973 	 */
    974 	dg = &dksc->sc_dkdev.dk_geom;
    975 	memset(dg, 0, sizeof(*dg));
    976 	dg->dg_secperunit = psize;
    977 	dg->dg_secsize = secsize;
    978 	dg->dg_ntracks = 1;
    979 	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
    980 	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
    981 
    982 bail:
    983 	free(tmppath, M_TEMP);
    984 	if (ret && cs->sc_tpath)
    985 		free(cs->sc_tpath, M_DEVBUF);
    986 	return ret;
    987 }
    988 
    989 /*
    990  * Our generic cipher entry point.  This takes care of the
    991  * IV mode and passes off the work to the specific cipher.
    992  * We implement here the IV method ``encrypted block
    993  * number''.
    994  *
    995  * XXXrcd: for now we rely on our own crypto framework defined
    996  *         in dev/cgd_crypto.c.  This will change when we
    997  *         get a generic kernel crypto framework.
    998  */
    999 
   1000 static void
   1001 blkno2blkno_buf(char *sbuf, daddr_t blkno)
   1002 {
   1003 	int	i;
   1004 
	/* Set up the blkno in blkno_buf.  Here we do not care much
   1006 	 * about the final layout of the information as long as we
   1007 	 * can guarantee that each sector will have a different IV
   1008 	 * and that the endianness of the machine will not affect
   1009 	 * the representation that we have chosen.
   1010 	 *
   1011 	 * We choose this representation, because it does not rely
   1012 	 * on the size of buf (which is the blocksize of the cipher),
   1013 	 * but allows daddr_t to grow without breaking existing
   1014 	 * disks.
   1015 	 *
   1016 	 * Note that blkno2blkno_buf does not take a size as input,
   1017 	 * and hence must be called on a pre-zeroed buffer of length
   1018 	 * greater than or equal to sizeof(daddr_t).
   1019 	 */
   1020 	for (i=0; i < sizeof(daddr_t); i++) {
   1021 		*sbuf++ = blkno & 0xff;
   1022 		blkno >>= 8;
   1023 	}
   1024 }
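/*
 * Example: with a 64-bit daddr_t, blkno 0x0102 is laid out in
 * blkno_buf as 02 01 00 00 00 00 00 00, i.e. least significant byte
 * first; the caller's memset() has already zero-padded the buffer out
 * to the cipher blocksize.
 */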
   1025 
   1026 static void
   1027 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
   1028     size_t len, daddr_t blkno, size_t secsize, int dir)
   1029 {
   1030 	char		*dst = dstv;
   1031 	char		*src = srcv;
   1032 	cfunc_cipher_prep	*ciprep = cs->sc_cfuncs->cf_cipher_prep;
   1033 	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
   1034 	struct uio	dstuio;
   1035 	struct uio	srcuio;
   1036 	struct iovec	dstiov[2];
   1037 	struct iovec	srciov[2];
   1038 	size_t		blocksize = cs->sc_cdata.cf_blocksize;
   1039 	size_t		todo;
   1040 	char		blkno_buf[CGD_MAXBLOCKSIZE], *iv;
   1041 
   1042 	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
   1043 
   1044 	KASSERTMSG(len % blocksize == 0,
   1045 	    "cgd_cipher: len %% blocksize != 0");
   1046 
   1047 	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
   1048 	KASSERTMSG(sizeof(daddr_t) <= blocksize,
   1049 	    "cgd_cipher: sizeof(daddr_t) > blocksize");
   1050 
   1051 	KASSERTMSG(blocksize <= CGD_MAXBLOCKSIZE,
   1052 	    "cgd_cipher: blocksize > CGD_MAXBLOCKSIZE");
   1053 
   1054 	dstuio.uio_iov = dstiov;
   1055 	dstuio.uio_iovcnt = 1;
   1056 
   1057 	srcuio.uio_iov = srciov;
   1058 	srcuio.uio_iovcnt = 1;
   1059 
   1060 	for (; len > 0; len -= todo) {
   1061 		todo = MIN(len, secsize);
   1062 
   1063 		dstiov[0].iov_base = dst;
   1064 		srciov[0].iov_base = src;
   1065 		dstiov[0].iov_len  = todo;
   1066 		srciov[0].iov_len  = todo;
   1067 
   1068 		memset(blkno_buf, 0x0, blocksize);
   1069 		blkno2blkno_buf(blkno_buf, blkno);
   1070 		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
   1071 		    blkno_buf, blocksize));
   1072 
   1073 		/*
   1074 		 * Compute an initial IV. All ciphers
   1075 		 * can convert blkno_buf in-place.
   1076 		 */
   1077 		iv = blkno_buf;
   1078 		ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
   1079 		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));
   1080 
   1081 		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);
   1082 
   1083 		dst += todo;
   1084 		src += todo;
   1085 		blkno++;
   1086 	}
   1087 }
   1088 
   1089 #ifdef DEBUG
   1090 static void
   1091 hexprint(const char *start, void *buf, int len)
   1092 {
   1093 	char	*c = buf;
   1094 
   1095 	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
   1096 	printf("%s: len=%06d 0x", start, len);
   1097 	while (len--)
   1098 		printf("%02x", (unsigned char) *c++);
   1099 }
   1100 #endif
   1101 
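/*
 * Encrypt and decrypt each built-in test vector using a throw-away
 * softc, panicking on any mismatch.  Called from cgd_modcmd() before
 * the driver registers itself.
 */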
   1102 static void
   1103 selftest(void)
   1104 {
   1105 	struct cgd_softc cs;
   1106 	void *buf;
   1107 
   1108 	printf("running cgd selftest ");
   1109 
   1110 	for (size_t i = 0; i < __arraycount(selftests); i++) {
   1111 		const char *alg = selftests[i].alg;
   1112 		const uint8_t *key = selftests[i].key;
   1113 		int keylen = selftests[i].keylen;
   1114 		int txtlen = selftests[i].txtlen;
   1115 
   1116 		printf("%s-%d ", alg, keylen);
   1117 
   1118 		memset(&cs, 0, sizeof(cs));
   1119 
   1120 		cs.sc_cfuncs = cryptfuncs_find(alg);
   1121 		if (cs.sc_cfuncs == NULL)
   1122 			panic("%s not implemented", alg);
   1123 
   1124 		cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
   1125 		cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
   1126 		cs.sc_cdata.cf_keylen = keylen;
   1127 
   1128 		cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen,
   1129 		    key, &cs.sc_cdata.cf_blocksize);
   1130 		if (cs.sc_cdata.cf_priv == NULL)
   1131 			panic("cf_priv is NULL");
   1132 		if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
   1133 			panic("bad block size %zu", cs.sc_cdata.cf_blocksize);
   1134 
   1135 		cs.sc_cdata.cf_blocksize /= 8;
   1136 
   1137 		buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
   1138 		memcpy(buf, selftests[i].ptxt, txtlen);
   1139 
   1140 		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
   1141 				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
   1142 		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
   1143 			panic("encryption is broken");
   1144 
   1145 		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
   1146 				selftests[i].secsize, CGD_CIPHER_DECRYPT);
   1147 		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
   1148 			panic("decryption is broken");
   1149 
   1150 		free(buf, M_DEVBUF);
   1151 		cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv);
   1152 	}
   1153 
   1154 	printf("done\n");
   1155 }
   1156 
   1157 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");
   1158 
   1159 #ifdef _MODULE
   1160 CFDRIVER_DECL(cgd, DV_DISK, NULL);
   1161 
   1162 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
   1163 #endif
   1164 
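/*
 * Module control.  On load, run the cipher self-test and, when built
 * as a loadable module, register the cfdriver, cfattach and devsw
 * entries; on unload, undo those registrations in reverse order.
 */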
   1165 static int
   1166 cgd_modcmd(modcmd_t cmd, void *arg)
   1167 {
   1168 	int error = 0;
   1169 
   1170 	switch (cmd) {
   1171 	case MODULE_CMD_INIT:
   1172 		selftest();
   1173 #ifdef _MODULE
   1174 		error = config_cfdriver_attach(&cgd_cd);
   1175 		if (error)
   1176 			break;
   1177 
   1178 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach for "
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
   1183 			break;
   1184 		}
   1185 		/*
   1186 		 * Attach the {b,c}devsw's
   1187 		 */
   1188 		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1189 		    &cgd_cdevsw, &cgd_cmajor);
   1190 
   1191 		/*
   1192 		 * If devsw_attach fails, remove from autoconf database
   1193 		 */
   1194 		if (error) {
   1195 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1196 			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to attach %s devsw, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1199 			break;
   1200 		}
   1201 #endif
   1202 		break;
   1203 
   1204 	case MODULE_CMD_FINI:
   1205 #ifdef _MODULE
   1206 		/*
   1207 		 * Remove {b,c}devsw's
   1208 		 */
   1209 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
   1210 
   1211 		/*
   1212 		 * Now remove device from autoconf database
   1213 		 */
   1214 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1215 		if (error) {
   1216 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1217 			    &cgd_cdevsw, &cgd_cmajor);
   1218 			aprint_error("%s: failed to detach %s cfattach, "
   1219 			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
   1221 		}
   1222 		error = config_cfdriver_detach(&cgd_cd);
   1223 		if (error) {
   1224 			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1225 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1226 			    &cgd_cdevsw, &cgd_cmajor);
   1227 			aprint_error("%s: failed to detach %s cfdriver, "
   1228 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1229 			break;
   1230 		}
   1231 #endif
   1232 		break;
   1233 
   1234 	case MODULE_CMD_STAT:
   1235 		error = ENOTTY;
   1236 		break;
   1237 	default:
   1238 		error = ENOTTY;
   1239 		break;
   1240 	}
   1241 
   1242 	return error;
   1243 }
   1244