/* cgd.c, revision 1.116.10.5 (NetBSD cryptographic disk driver) */
      1 /* $NetBSD: cgd.c,v 1.116.10.5 2021/12/30 12:38:22 martin Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Roland C. Dowdeswell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.116.10.5 2021/12/30 12:38:22 martin Exp $");
     34 
     35 #include <sys/types.h>
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/proc.h>
     39 #include <sys/errno.h>
     40 #include <sys/buf.h>
     41 #include <sys/bufq.h>
     42 #include <sys/kmem.h>
     43 #include <sys/module.h>
     44 #include <sys/pool.h>
     45 #include <sys/ioctl.h>
     46 #include <sys/device.h>
     47 #include <sys/disk.h>
     48 #include <sys/disklabel.h>
     49 #include <sys/fcntl.h>
     50 #include <sys/namei.h> /* for pathbuf */
     51 #include <sys/vnode.h>
     52 #include <sys/conf.h>
     53 #include <sys/syslog.h>
     54 #include <sys/workqueue.h>
     55 #include <sys/cpu.h>
     56 
     57 #include <dev/dkvar.h>
     58 #include <dev/cgdvar.h>
     59 
     60 #include <miscfs/specfs/specdev.h> /* for v_rdev */
     61 
     62 #include "ioconf.h"
     63 
/*
 * One cipher known-answer self-test case: encrypting `ptxt' with
 * `key' at sector `blkno' must yield `ctxt'.
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int blocksize;	/* number of bytes */
	int secsize;	/* sector size in bytes */
	daddr_t blkno;	/* sector number fed to the cipher as tweak/IV */
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;	/* key material (keylen bits) */
	const uint8_t *ptxt;	/* plaintext, txtlen bytes */
	const uint8_t *ctxt;	/* expected ciphertext, txtlen bytes */
};
     75 
/* Entry Point Functions */

/*
 * devsw entry points; the dev_type_*() macros expand to the standard
 * prototypes for each block/character device handler.
 */
static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);
     86 
/* Block device switch: cgd is an MP-safe disk-class driver. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
     97 
/* Character device switch (raw access); shares handlers with bdevsw. */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
    112 
/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 * Plaintext input for the 256-bit AES-XTS self-test below.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
    126 
/*
 * Expected ciphertext for the 256-bit AES-XTS vector above (64
 * bytes).  Sized to match the plaintext/txtlen; it was previously
 * declared [512], which silently zero-padded 448 unused trailing
 * bytes that the self-test never compares.
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
    137 
/*
 * 256-bit (32-byte) key for the vector above, plus a trailing NUL
 * byte (hence [33]); keylen in the test record is given in bits.
 */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
    145 
/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 * Plaintext input for the 512-bit AES-XTS self-test below.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};
    159 
/* Expected ciphertext for the 512-bit AES-XTS vector above. */
static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};
    170 
/*
 * 512-bit (64-byte) key for the vector above, plus a trailing NUL
 * byte (hence [65]).
 */
static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
    182 
/*
 * Table of known-answer self-tests, one per supported key size of
 * aes-xts.  Each entry binds the key/plaintext/ciphertext arrays
 * above to the cipher parameters used to run them.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	}
};
    207 
/* Autoconf glue and unit/worker lifecycle helpers. */
static int cgd_match(device_t, cfdata_t, void *);
static void cgd_attach(device_t, device_t, void *);
static int cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static struct cgd_worker *cgd_create_one_worker(void);
static void cgd_destroy_one_worker(struct cgd_worker *);
static struct cgd_worker *cgd_create_worker(void);
static void cgd_destroy_worker(struct cgd_worker *);
static int cgd_destroy(device_t);
    217 
/* Internal Functions */

/* I/O path: dk(4) diskstart hook, completion handlers, work queue. */
static int	cgd_diskstart(device_t, struct buf *);
static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
static void	cgdiodone(struct buf *);
static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_process(struct work *, void *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

/* Configuration ioctls and cipher invocation. */
static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);
    235 
/* dk(4) driver hooks; unused optional callbacks are left NULL. */
static struct dkdriver cgddkdriver = {
        .d_minphys  = minphys,
        .d_open = cgdopen,
        .d_close = cgdclose,
        .d_strategy = cgdstrategy,
        .d_iosize = NULL,
        .d_diskstart = cgd_diskstart,
        .d_dumpblocks = cgd_dumpblocks,
        .d_lastclose = NULL
};
    246 
/*
 * Register the cgd attachment with autoconf; DVF_DETACH_SHUTDOWN
 * permits detaching instances at system shutdown.
 */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    249 
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
/* Bitmask of enabled debug categories; patchable at run time. */
int cgddebug = 0;

#define CGDB_FOLLOW	0x1	/* trace entry-point calls */
#define CGDB_IO	0x2	/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* trace cipher operations */

/* Run `y' only when category `x' is enabled in cgddebug. */
#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
/* Non-DEBUG builds: all debug macros compile to nothing. */
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif
    282 
/* Global variables */

static kmutex_t cgd_spawning_mtx;	/* protects the three fields below */
static kcondvar_t cgd_spawning_cv;	/* signalled when cgd_spawning clears */
static bool cgd_spawning;		/* unit create/destroy in progress */
static struct cgd_worker *cgd_worker;	/* shared worker, see cgd_refcnt */
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

/* Map a dev_t to its cgd unit number. */
#define CGDUNIT(x)		DISKUNIT(x)
    294 
    295 /* The code */
    296 
    297 static int
    298 cgd_lock(bool intr)
    299 {
    300 	int error = 0;
    301 
    302 	mutex_enter(&cgd_spawning_mtx);
    303 	while (cgd_spawning) {
    304 		if (intr)
    305 			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
    306 		else
    307 			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
    308 	}
    309 	if (error == 0)
    310 		cgd_spawning = true;
    311 	mutex_exit(&cgd_spawning_mtx);
    312 	return error;
    313 }
    314 
/* Release the "spawning" flag and wake all waiters in cgd_lock(). */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
    323 
/*
 * Look up the softc for a dev_t; returns NULL if the unit does not
 * exist (callers must check).
 */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}
    329 
/* Autoconf match: a pseudo-device always matches. */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
    336 
/*
 * Autoconf attach: initialize per-unit lock/condvar and the dk(4)
 * disk framework state; the unit is configured later via CGDIOCSET.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
    351 
    352 
/*
 * Autoconf detach: refuse while any partition is open; unconfigure
 * the cipher state first if still attached, then tear down the
 * disk/lock resources set up by cgd_attach().
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, 0))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
    373 
/*
 * Pseudo-device bootstrap, called from autoconf with the configured
 * unit count.  In the modular build this setup is done by the module
 * init code instead, hence the !_MODULE guard.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif
}
    389 
    390 static struct cgd_softc *
    391 cgd_spawn(int unit)
    392 {
    393 	cfdata_t cf;
    394 	struct cgd_worker *cw;
    395 	struct cgd_softc *sc;
    396 
    397 	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
    398 	cf->cf_name = cgd_cd.cd_name;
    399 	cf->cf_atname = cgd_cd.cd_name;
    400 	cf->cf_unit = unit;
    401 	cf->cf_fstate = FSTATE_STAR;
    402 
    403 	cw = cgd_create_one_worker();
    404 	if (cw == NULL) {
    405 		kmem_free(cf, sizeof(*cf));
    406 		return NULL;
    407 	}
    408 
    409 	sc = device_private(config_attach_pseudo(cf));
    410 	if (sc == NULL) {
    411 		cgd_destroy_one_worker(cw);
    412 		return NULL;
    413 	}
    414 
    415 	sc->sc_worker = cw;
    416 
    417 	return sc;
    418 }
    419 
/*
 * Destroy a unit created by cgd_spawn(): detach the pseudo-device,
 * drop the worker reference, and free the cfdata record.  Caller
 * must hold the spawning flag (cgd_lock()).
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Save cf before detach; sc is gone once config_detach() returns. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
    438 
/* Mark the unit busy, waiting for any current holder (ioctl mutex). */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
    449 
/* Clear the busy flag set by cgd_busy() and wake all waiters. */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
    459 
/*
 * Take a reference on the single shared worker, creating it on the
 * first reference.  Serialized by the spawning flag (asserted).
 */
static struct cgd_worker *
cgd_create_one_worker(void)
{
	KASSERT(cgd_spawning);

	if (cgd_refcnt++ == 0) {
		KASSERT(cgd_worker == NULL);
		cgd_worker = cgd_create_worker();
	}

	KASSERT(cgd_worker != NULL);
	return cgd_worker;
}
    473 
/*
 * Drop a reference taken by cgd_create_one_worker(); the last
 * reference destroys the shared worker.  Serialized by the spawning
 * flag (asserted).
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
    485 
/*
 * Allocate a worker: a per-CPU MP-safe workqueue running
 * cgd_process() at PRI_BIO/IPL_BIO, plus a pool of cgd_xfer
 * descriptors.  Returns NULL if the workqueue cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	                         PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		/* Unwind the two kmem allocations on failure. */
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
    514 
/* Tear down a worker created by cgd_create_worker(). */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{

	/*
	 * Wait for all worker threads to complete before destroying
	 * the rest of the cgd_worker.
	 */
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}

	kmem_free(cw, sizeof(struct cgd_worker));
}
    535 
/*
 * Open entry point: auto-create the unit on first open (cgd_spawn),
 * then hand off to the dk(4) open path.  The spawning lock is taken
 * interruptibly since open runs in user context.
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
    556 
/*
 * Close entry point: run the dk(4) close path, and if that left the
 * unit unconfigured, destroy the auto-created instance.  The
 * spawning lock is taken uninterruptibly (close must not fail with
 * EINTR), so cgd_lock(false) always returns 0 here.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Last close of an unconfigured unit: reclaim the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
    592 
    593 static void
    594 cgdstrategy(struct buf *bp)
    595 {
    596 	struct	cgd_softc *sc = getcgd_softc(bp->b_dev);
    597 
    598 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
    599 	    (long)bp->b_bcount));
    600 
    601 	/*
    602 	 * Reject unaligned writes.
    603 	 */
    604 	if (((uintptr_t)bp->b_data & 3) != 0) {
    605 		bp->b_error = EINVAL;
    606 		goto bail;
    607 	}
    608 
    609 	dk_strategy(&sc->sc_dksc, bp);
    610 	return;
    611 
    612 bail:
    613 	bp->b_resid = bp->b_bcount;
    614 	biodone(bp);
    615 	return;
    616 }
    617 
    618 static int
    619 cgdsize(dev_t dev)
    620 {
    621 	struct cgd_softc *sc = getcgd_softc(dev);
    622 
    623 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
    624 	if (!sc)
    625 		return -1;
    626 	return dk_size(&sc->sc_dksc, dev);
    627 }
    628 
    629 /*
    630  * cgd_{get,put}data are functions that deal with getting a buffer
    631  * for the new encrypted data.
    632  * We can no longer have a buffer per device, we need a buffer per
    633  * work queue...
    634  */
    635 
    636 static void *
    637 cgd_getdata(struct cgd_softc *sc, unsigned long size)
    638 {
    639 	void *data = NULL;
    640 
    641 	mutex_enter(&sc->sc_lock);
    642 	if (!sc->sc_data_used) {
    643 		sc->sc_data_used = true;
    644 		data = sc->sc_data;
    645 	}
    646 	mutex_exit(&sc->sc_lock);
    647 
    648 	if (data)
    649 		return data;
    650 
    651 	return kmem_intr_alloc(size, KM_NOSLEEP);
    652 }
    653 
    654 static void
    655 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
    656 {
    657 
    658 	if (data == sc->sc_data) {
    659 		mutex_enter(&sc->sc_lock);
    660 		sc->sc_data_used = false;
    661 		mutex_exit(&sc->sc_lock);
    662 	} else
    663 		kmem_intr_free(data, size);
    664 }
    665 
/*
 * dk(4) diskstart hook: begin one transfer.  Allocates the nested
 * buf and transfer descriptor up front; returns EAGAIN (so dk will
 * retry later) if any resource is unavailable.  Writes are queued to
 * the worker for encryption first; reads go straight to the
 * underlying device and are decrypted on completion (cgdiodone).
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *sc = device_private(dev);
	struct	cgd_worker *cw = sc->sc_worker;
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	cgd_xfer *cx;
	struct	buf *nbp;
	void *	newaddr;
	daddr_t	bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the workqueue; cgd_diskstart2 runs after. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	/* Read: no cipher work yet, issue the I/O immediately. */
	cgd_diskstart2(sc, cx);
	return 0;
}
    726 
/*
 * Second half of cgd_diskstart: fill in the nested buf (pointing at
 * the ciphertext for writes, the caller's buffer for reads) and
 * issue it to the underlying device.  Completion lands in cgdiodone.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct	vnode *vp;
	struct	buf *bp;
	struct	buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Convert the sector address to the DEV_BSIZE units bufs use. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for themselves in v_numoutput. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
    757 
/*
 * Completion handler for the nested buf issued by cgd_diskstart2.
 * Propagates any error to the original request; reads are queued to
 * the worker for decryption, everything else finishes via
 * cgd_iodone2.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	cgd_xfer *cx = nbp->b_private;
	struct	buf *obp = cx->cx_obp;
	struct	cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
		nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Back from DEV_BSIZE units to cipher sector numbers. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp     = obp;
		cx->cx_nbp     = nbp;
		cx->cx_dstv    = obp->b_data;
		cx->cx_srcv    = obp->b_data;
		cx->cx_len     = obp->b_bcount;
		cx->cx_blkno   = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir     = CGD_CIPHER_DECRYPT;

		/* Decrypt on the workqueue; cgd_iodone2 runs after. */
		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
    806 
/*
 * Final completion: release the transfer descriptor, the ciphertext
 * buffer (if one was allocated for a write) and the nested buf, then
 * complete the original request and kick the dk queue for more work.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
    831 
/*
 * dk(4) dumpblocks hook (crash dumps): encrypt the caller's data
 * into a temporary buffer and pass it to the underlying device's
 * dump routine.  Runs synchronously, bypassing the workqueue.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext.  */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up.  */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer.  */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device.  */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer.  */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device.  */
	return error;
}
    877 
    878 /* XXX: we should probably put these into dksubr.c, mostly */
    879 static int
    880 cgdread(dev_t dev, struct uio *uio, int flags)
    881 {
    882 	struct	cgd_softc *sc;
    883 	struct	dk_softc *dksc;
    884 
    885 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
    886 	    (unsigned long long)dev, uio, flags));
    887 	sc = getcgd_softc(dev);
    888 	if (sc == NULL)
    889 		return ENXIO;
    890 	dksc = &sc->sc_dksc;
    891 	if (!DK_ATTACHED(dksc))
    892 		return ENXIO;
    893 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
    894 }
    895 
    896 /* XXX: we should probably put these into dksubr.c, mostly */
    897 static int
    898 cgdwrite(dev_t dev, struct uio *uio, int flags)
    899 {
    900 	struct	cgd_softc *sc;
    901 	struct	dk_softc *dksc;
    902 
    903 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
    904 	sc = getcgd_softc(dev);
    905 	if (sc == NULL)
    906 		return ENXIO;
    907 	dksc = &sc->sc_dksc;
    908 	if (!DK_ATTACHED(dksc))
    909 		return ENXIO;
    910 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
    911 }
    912 
/*
 * ioctl entry point.  The first switch dispatches CGDIOCGET before
 * the softc lookup (it must work on unconfigured units) and enforces
 * FWRITE for configuration changes; the second switch handles each
 * command with the unit marked busy where needed.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	int	error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configuration changes require the write permission. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		/* Configure the unit, unless already attached. */
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		/* Unconfigure, unless some partition is still open. */
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment.  */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary.  */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/* Handled by the first switch; cannot be reached. */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
   1022 
   1023 static int
   1024 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
   1025 {
   1026 	struct	cgd_softc *sc;
   1027 
   1028 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
   1029 	    dev, blkno, va, (unsigned long)size));
   1030 	sc = getcgd_softc(dev);
   1031 	if (sc == NULL)
   1032 		return ENXIO;
   1033 	return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
   1034 }
   1035 
   1036 /*
   1037  * XXXrcd:
   1038  *  for now we hardcode the maximum key length.
   1039  */
   1040 #define MAX_KEYSIZE	1024
   1041 
   1042 static const struct {
   1043 	const char *n;
   1044 	int v;
   1045 	int d;
   1046 } encblkno[] = {
   1047 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
   1048 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
   1049 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
   1050 };
   1051 
   1052 /* ARGSUSED */
   1053 static int
   1054 cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
   1055 {
   1056 	struct	 cgd_ioctl *ci = data;
   1057 	struct	 vnode *vp;
   1058 	int	 ret;
   1059 	size_t	 i;
   1060 	size_t	 keybytes;			/* key length in bytes */
   1061 	const char *cp;
   1062 	struct pathbuf *pb;
   1063 	char	 *inbuf;
   1064 	struct dk_softc *dksc = &sc->sc_dksc;
   1065 
   1066 	cp = ci->ci_disk;
   1067 
   1068 	ret = pathbuf_copyin(ci->ci_disk, &pb);
   1069 	if (ret != 0) {
   1070 		return ret;
   1071 	}
   1072 	ret = dk_lookup(pb, l, &vp);
   1073 	pathbuf_destroy(pb);
   1074 	if (ret != 0) {
   1075 		return ret;
   1076 	}
   1077 
   1078 	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
   1079 
   1080 	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
   1081 		goto bail;
   1082 
   1083 	(void)memset(inbuf, 0, MAX_KEYSIZE);
   1084 	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
   1085 	if (ret)
   1086 		goto bail;
   1087 	sc->sc_cfuncs = cryptfuncs_find(inbuf);
   1088 	if (!sc->sc_cfuncs) {
   1089 		ret = EINVAL;
   1090 		goto bail;
   1091 	}
   1092 
   1093 	(void)memset(inbuf, 0, MAX_KEYSIZE);
   1094 	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
   1095 	if (ret)
   1096 		goto bail;
   1097 
   1098 	for (i = 0; i < __arraycount(encblkno); i++)
   1099 		if (strcmp(encblkno[i].n, inbuf) == 0)
   1100 			break;
   1101 
   1102 	if (i == __arraycount(encblkno)) {
   1103 		ret = EINVAL;
   1104 		goto bail;
   1105 	}
   1106 
   1107 	keybytes = ci->ci_keylen / 8 + 1;
   1108 	if (keybytes > MAX_KEYSIZE) {
   1109 		ret = EINVAL;
   1110 		goto bail;
   1111 	}
   1112 
   1113 	(void)memset(inbuf, 0, MAX_KEYSIZE);
   1114 	ret = copyin(ci->ci_key, inbuf, keybytes);
   1115 	if (ret)
   1116 		goto bail;
   1117 
   1118 	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
   1119 	sc->sc_cdata.cf_mode = encblkno[i].v;
   1120 	sc->sc_cdata.cf_keylen = ci->ci_keylen;
   1121 	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
   1122 	    &sc->sc_cdata.cf_blocksize);
   1123 	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
   1124 	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
   1125 		sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
   1126 	    sc->sc_cdata.cf_priv = NULL;
   1127 	}
   1128 
   1129 	/*
   1130 	 * The blocksize is supposed to be in bytes. Unfortunately originally
   1131 	 * it was expressed in bits. For compatibility we maintain encblkno
   1132 	 * and encblkno8.
   1133 	 */
   1134 	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
   1135 	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
   1136 	if (!sc->sc_cdata.cf_priv) {
   1137 		ret = EINVAL;		/* XXX is this the right error? */
   1138 		goto bail;
   1139 	}
   1140 	kmem_free(inbuf, MAX_KEYSIZE);
   1141 
   1142 	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
   1143 
   1144 	sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
   1145 	sc->sc_data_used = false;
   1146 
   1147 	/* Attach the disk. */
   1148 	dk_attach(dksc);
   1149 	disk_attach(&dksc->sc_dkdev);
   1150 
   1151 	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
   1152 
   1153 	/* Discover wedges on this disk. */
   1154 	dkwedge_discover(&dksc->sc_dkdev);
   1155 
   1156 	return 0;
   1157 
   1158 bail:
   1159 	kmem_free(inbuf, MAX_KEYSIZE);
   1160 	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
   1161 	return ret;
   1162 }
   1163 
/* ARGSUSED */
/*
 * cgd_ioctl_clr: unconfigure a cgd unit (CGDIOCCLR handler).
 *
 * The caller (cgdioctl) has already verified the device is not busy.
 * Releases resources in teardown order: wedges, queued buffers, the
 * backing vnode, cipher private state, saved path and bounce buffer,
 * and finally detaches the dk(4) disk.  Returns ENXIO if the unit
 * was never configured.
 */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct	dk_softc *dksc = &sc->sc_dksc;

	/* Nothing to clear if the unit is not configured. */
	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Close the backing device and destroy the cipher state. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
   1190 
   1191 static int
   1192 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
   1193 {
   1194 	struct cgd_softc *sc;
   1195 	struct cgd_user *cgu;
   1196 	int unit, error;
   1197 
   1198 	unit = CGDUNIT(dev);
   1199 	cgu = (struct cgd_user *)data;
   1200 
   1201 	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
   1202 			   dev, unit, data, l));
   1203 
   1204 	/* XXX, we always return this units data, so if cgu_unit is
   1205 	 * not -1, that field doesn't match the rest
   1206 	 */
   1207 	if (cgu->cgu_unit == -1)
   1208 		cgu->cgu_unit = unit;
   1209 
   1210 	if (cgu->cgu_unit < 0)
   1211 		return EINVAL;	/* XXX: should this be ENXIO? */
   1212 
   1213 	error = cgd_lock(false);
   1214 	if (error)
   1215 		return error;
   1216 
   1217 	sc = device_lookup_private(&cgd_cd, unit);
   1218 	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
   1219 		cgu->cgu_dev = 0;
   1220 		cgu->cgu_alg[0] = '\0';
   1221 		cgu->cgu_blocksize = 0;
   1222 		cgu->cgu_mode = 0;
   1223 		cgu->cgu_keylen = 0;
   1224 	}
   1225 	else {
   1226 		mutex_enter(&sc->sc_lock);
   1227 		cgu->cgu_dev = sc->sc_tdev;
   1228 		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
   1229 		    sizeof(cgu->cgu_alg));
   1230 		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
   1231 		cgu->cgu_mode = sc->sc_cdata.cf_mode;
   1232 		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
   1233 		mutex_exit(&sc->sc_lock);
   1234 	}
   1235 
   1236 	cgd_unlock();
   1237 	return 0;
   1238 }
   1239 
/*
 * cgdinit: record the backing device for this unit and set up an
 * initial fake disk geometry from its size and sector size.
 *
 * `cpath' is a userland pointer to the backing device's path name
 * (copied in here and retained in sc_tpath); `vp' is the already
 * opened vnode for that device.  On failure any allocated path
 * storage is freed; closing `vp' is the caller's responsibility.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	int	ret;
	char	*tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Keep a kernel copy of the backing device path. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	/* A zero-sized backing device cannot hold a cgd. */
	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	/* Fake 1 MiB "tracks"; assumes secsize is nonzero and
	 * no larger than 1 MiB — TODO confirm getdisksize guarantees this. */
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
   1291 
   1292 /*
   1293  * Our generic cipher entry point.  This takes care of the
   1294  * IV mode and passes off the work to the specific cipher.
   1295  * We implement here the IV method ``encrypted block
   1296  * number''.
   1297  *
   1298  * XXXrcd: for now we rely on our own crypto framework defined
   1299  *         in dev/cgd_crypto.c.  This will change when we
   1300  *         get a generic kernel crypto framework.
   1301  */
   1302 
   1303 static void
   1304 blkno2blkno_buf(char *sbuf, daddr_t blkno)
   1305 {
   1306 	int	i;
   1307 
   1308 	/* Set up the blkno in blkno_buf, here we do not care much
   1309 	 * about the final layout of the information as long as we
   1310 	 * can guarantee that each sector will have a different IV
   1311 	 * and that the endianness of the machine will not affect
   1312 	 * the representation that we have chosen.
   1313 	 *
   1314 	 * We choose this representation, because it does not rely
   1315 	 * on the size of buf (which is the blocksize of the cipher),
   1316 	 * but allows daddr_t to grow without breaking existing
   1317 	 * disks.
   1318 	 *
   1319 	 * Note that blkno2blkno_buf does not take a size as input,
   1320 	 * and hence must be called on a pre-zeroed buffer of length
   1321 	 * greater than or equal to sizeof(daddr_t).
   1322 	 */
   1323 	for (i=0; i < sizeof(daddr_t); i++) {
   1324 		*sbuf++ = blkno & 0xff;
   1325 		blkno >>= 8;
   1326 	}
   1327 }
   1328 
   1329 static struct cpu_info *
   1330 cgd_cpu(struct cgd_softc *sc)
   1331 {
   1332 	struct cgd_worker *cw = sc->sc_worker;
   1333 	struct cpu_info *ci = NULL;
   1334 	u_int cidx, i;
   1335 
   1336 	if (cw->cw_busy == 0) {
   1337 		cw->cw_last = cpu_index(curcpu());
   1338 		return NULL;
   1339 	}
   1340 
   1341 	for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
   1342 		if (cidx >= maxcpus)
   1343 			cidx = 0;
   1344 		ci = cpu_lookup(cidx);
   1345 		if (ci) {
   1346 			cw->cw_last = cidx;
   1347 			break;
   1348 		}
   1349 	}
   1350 
   1351 	return ci;
   1352 }
   1353 
   1354 static void
   1355 cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
   1356 {
   1357 	struct cgd_worker *cw = sc->sc_worker;
   1358 	struct cpu_info *ci;
   1359 
   1360 	mutex_enter(&cw->cw_lock);
   1361 	ci = cgd_cpu(sc);
   1362 	cw->cw_busy++;
   1363 	mutex_exit(&cw->cw_lock);
   1364 
   1365 	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
   1366 }
   1367 
   1368 static void
   1369 cgd_process(struct work *wk, void *arg)
   1370 {
   1371 	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
   1372 	struct cgd_softc *sc = cx->cx_sc;
   1373 	struct cgd_worker *cw = sc->sc_worker;
   1374 
   1375 	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
   1376 	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
   1377 
   1378 	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
   1379 		cgd_diskstart2(sc, cx);
   1380 	} else {
   1381 		cgd_iodone2(sc, cx);
   1382 	}
   1383 
   1384 	mutex_enter(&cw->cw_lock);
   1385 	if (cw->cw_busy > 0)
   1386 		cw->cw_busy--;
   1387 	mutex_exit(&cw->cw_lock);
   1388 }
   1389 
/*
 * cgd_cipher: en/decrypt `len' bytes from srcv into dstv, starting
 * at sector `blkno' of `secsize'-byte sectors, in direction `dir'
 * (CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT).  Each sector gets its
 * own IV derived from its block number (the "encblkno" IV method).
 * src and dst may be the same buffer (see selftest()).
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher_prep	*ciprep = sc->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher	*cipher = sc->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = sc->sc_cdata.cf_blocksize;
	size_t		todo;
	char		blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
	    ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	/* Process one sector per iteration, re-deriving the IV each time. */
	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len  = todo;
		srciov[0].iov_len  = todo;

		/* blkno_buf must be zeroed beyond sizeof(daddr_t);
		 * see blkno2blkno_buf(). */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV. All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
   1452 
   1453 #ifdef DEBUG
/*
 * Debug helper: print `len' bytes at `buf' in hex, prefixed by the
 * string `start'.  No trailing newline is emitted.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
   1464 #endif
   1465 
   1466 static void
   1467 selftest(void)
   1468 {
   1469 	struct cgd_softc sc;
   1470 	void *buf;
   1471 
   1472 	printf("running cgd selftest ");
   1473 
   1474 	for (size_t i = 0; i < __arraycount(selftests); i++) {
   1475 		const char *alg = selftests[i].alg;
   1476 		const uint8_t *key = selftests[i].key;
   1477 		int keylen = selftests[i].keylen;
   1478 		int txtlen = selftests[i].txtlen;
   1479 
   1480 		printf("%s-%d ", alg, keylen);
   1481 
   1482 		memset(&sc, 0, sizeof(sc));
   1483 
   1484 		sc.sc_cfuncs = cryptfuncs_find(alg);
   1485 		if (sc.sc_cfuncs == NULL)
   1486 			panic("%s not implemented", alg);
   1487 
   1488 		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
   1489 		sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
   1490 		sc.sc_cdata.cf_keylen = keylen;
   1491 
   1492 		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
   1493 		    key, &sc.sc_cdata.cf_blocksize);
   1494 		if (sc.sc_cdata.cf_priv == NULL)
   1495 			panic("cf_priv is NULL");
   1496 		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
   1497 			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
   1498 
   1499 		sc.sc_cdata.cf_blocksize /= 8;
   1500 
   1501 		buf = kmem_alloc(txtlen, KM_SLEEP);
   1502 		memcpy(buf, selftests[i].ptxt, txtlen);
   1503 
   1504 		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
   1505 				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
   1506 		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
   1507 			panic("encryption is broken");
   1508 
   1509 		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
   1510 				selftests[i].secsize, CGD_CIPHER_DECRYPT);
   1511 		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
   1512 			panic("decryption is broken");
   1513 
   1514 		kmem_free(buf, txtlen);
   1515 		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
   1516 	}
   1517 
   1518 	printf("done\n");
   1519 }
   1520 
   1521 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");
   1522 
   1523 #ifdef _MODULE
   1524 CFDRIVER_DECL(cgd, DV_DISK, NULL);
   1525 
   1526 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
   1527 #endif
   1528 
   1529 static int
   1530 cgd_modcmd(modcmd_t cmd, void *arg)
   1531 {
   1532 	int error = 0;
   1533 
   1534 	switch (cmd) {
   1535 	case MODULE_CMD_INIT:
   1536 		selftest();
   1537 #ifdef _MODULE
   1538 		mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
   1539 		cv_init(&cgd_spawning_cv, "cgspwn");
   1540 
   1541 		error = config_cfdriver_attach(&cgd_cd);
   1542 		if (error)
   1543 			break;
   1544 
   1545 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1546 	        if (error) {
   1547 			config_cfdriver_detach(&cgd_cd);
   1548 			aprint_error("%s: unable to register cfattach for"
   1549 			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
   1550 			break;
   1551 		}
   1552 		/*
   1553 		 * Attach the {b,c}devsw's
   1554 		 */
   1555 		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1556 		    &cgd_cdevsw, &cgd_cmajor);
   1557 
   1558 		/*
   1559 		 * If devsw_attach fails, remove from autoconf database
   1560 		 */
   1561 		if (error) {
   1562 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1563 			config_cfdriver_detach(&cgd_cd);
   1564 			aprint_error("%s: unable to attach %s devsw, "
   1565 			    "error %d", __func__, cgd_cd.cd_name, error);
   1566 			break;
   1567 		}
   1568 #endif
   1569 		break;
   1570 
   1571 	case MODULE_CMD_FINI:
   1572 #ifdef _MODULE
   1573 		/*
   1574 		 * Remove {b,c}devsw's
   1575 		 */
   1576 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
   1577 
   1578 		/*
   1579 		 * Now remove device from autoconf database
   1580 		 */
   1581 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1582 		if (error) {
   1583 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1584 			    &cgd_cdevsw, &cgd_cmajor);
   1585 			aprint_error("%s: failed to detach %s cfattach, "
   1586 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1587  			break;
   1588 		}
   1589 		error = config_cfdriver_detach(&cgd_cd);
   1590 		if (error) {
   1591 			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1592 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1593 			    &cgd_cdevsw, &cgd_cmajor);
   1594 			aprint_error("%s: failed to detach %s cfdriver, "
   1595 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1596 			break;
   1597 		}
   1598 
   1599 		cv_destroy(&cgd_spawning_cv);
   1600 		mutex_destroy(&cgd_spawning_mtx);
   1601 #endif
   1602 		break;
   1603 
   1604 	case MODULE_CMD_STAT:
   1605 		error = ENOTTY;
   1606 		break;
   1607 	default:
   1608 		error = ENOTTY;
   1609 		break;
   1610 	}
   1611 
   1612 	return error;
   1613 }
   1614