/* cgd.c — NetBSD revision 1.130 (code-viewer navigation residue removed) */
      1 /* $NetBSD: cgd.c,v 1.130 2020/06/13 18:40:44 riastradh Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Roland C. Dowdeswell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.130 2020/06/13 18:40:44 riastradh Exp $");
     34 
     35 #include <sys/types.h>
     36 #include <sys/param.h>
     37 #include <sys/buf.h>
     38 #include <sys/bufq.h>
     39 #include <sys/conf.h>
     40 #include <sys/cpu.h>
     41 #include <sys/device.h>
     42 #include <sys/disk.h>
     43 #include <sys/disklabel.h>
     44 #include <sys/errno.h>
     45 #include <sys/fcntl.h>
     46 #include <sys/ioctl.h>
     47 #include <sys/kmem.h>
     48 #include <sys/module.h>
     49 #include <sys/namei.h> /* for pathbuf */
     50 #include <sys/pool.h>
     51 #include <sys/proc.h>
     52 #include <sys/syslog.h>
     53 #include <sys/systm.h>
     54 #include <sys/vnode.h>
     55 #include <sys/workqueue.h>
     56 
     57 #include <dev/cgd_crypto.h>
     58 #include <dev/cgdvar.h>
     59 #include <dev/dkvar.h>
     60 
     61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
     62 
     63 #include "ioconf.h"
     64 
/*
 * One encryption self-test vector: a cipher configuration plus the
 * key, plaintext, and expected ciphertext for a single disk block.
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int blocksize;	/* number of bytes */
	int secsize;	/* sector size used for the test transfer */
	daddr_t blkno;	/* block number used for the test transfer */
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;	/* key material (keylen bits; see arrays below) */
	const uint8_t *ptxt;	/* plaintext, txtlen bytes */
	const uint8_t *ctxt;	/* expected ciphertext, txtlen bytes */
};
     76 
     77 /* Entry Point Functions */
     78 
     79 static dev_type_open(cgdopen);
     80 static dev_type_close(cgdclose);
     81 static dev_type_read(cgdread);
     82 static dev_type_write(cgdwrite);
     83 static dev_type_ioctl(cgdioctl);
     84 static dev_type_strategy(cgdstrategy);
     85 static dev_type_dump(cgddump);
     86 static dev_type_size(cgdsize);
     87 
/*
 * Block device switch: cgd presents as a disk (D_DISK) and its entry
 * points may be called without the kernel lock (D_MPSAFE).
 */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
     98 
/*
 * Character (raw) device switch; operations a disk does not support
 * use the standard no-op stubs (nostop, notty, ...).
 */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
    113 
/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 * Plaintext half of the aes-xts 256-bit self-test.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
    127 
    128 static const uint8_t selftest_aes_xts_256_ctxt[512] = {
    129 	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
    130 	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
    131 	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
    132 	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
    133 	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
    134 	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
    135 	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
    136 	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
    137 };
    138 
/* 256-bit aes-xts key (32 bytes) plus a trailing 0 byte. */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
    146 
/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

/* Expected ciphertext for the aes-xts 512-bit self-test. */
static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

/* 512-bit aes-xts key (64 bytes) plus a trailing 0 byte. */
static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
    183 
/*
 * Key material shared by the aes-cbc self-tests; the 128-bit test
 * uses keylen = 128 of it (see selftests[]).
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

/* aes-cbc 128-bit self-test plaintext. */
static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = { /* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

/* aes-cbc 256-bit self-test plaintext. */
static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = { /* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
    234 
/* 3des-cbc self-test: 192-bit key (including parity bits). */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected 3des-cbc ciphertext (blkno = 1, see selftests[]). */
static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
    262 
/* blowfish-cbc self-test: 448-bit key. */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected blowfish-cbc ciphertext (blkno = 1, see selftests[]). */
static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
    294 
/*
 * Table of cipher self-test vectors: each entry pairs a cipher
 * configuration with known plaintext/ciphertext for one disk block.
 * The consumer of this table lives elsewhere in this file (outside
 * this chunk), presumably run at driver/module initialization.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192,	/* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key  = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key  = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
};
    363 
    364 static int cgd_match(device_t, cfdata_t, void *);
    365 static void cgd_attach(device_t, device_t, void *);
    366 static int cgd_detach(device_t, int);
    367 static struct cgd_softc	*cgd_spawn(int);
    368 static struct cgd_worker *cgd_create_one_worker(void);
    369 static void cgd_destroy_one_worker(struct cgd_worker *);
    370 static struct cgd_worker *cgd_create_worker(void);
    371 static void cgd_destroy_worker(struct cgd_worker *);
    372 static int cgd_destroy(device_t);
    373 
    374 /* Internal Functions */
    375 
    376 static int	cgd_diskstart(device_t, struct buf *);
    377 static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
    378 static void	cgdiodone(struct buf *);
    379 static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
    380 static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
    381 static void	cgd_process(struct work *, void *);
    382 static int	cgd_dumpblocks(device_t, void *, daddr_t, int);
    383 
    384 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
    385 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
    386 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
    387 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
    388 			struct lwp *);
    389 static void	cgd_cipher(struct cgd_softc *, void *, void *,
    390 			   size_t, daddr_t, size_t, int);
    391 
/*
 * dk(4) driver hooks: the common disk framework calls back into cgd
 * for open/close, strategy, per-request start, and crash dumps.
 */
static const struct dkdriver cgddkdriver = {
        .d_minphys  = minphys,
        .d_open = cgdopen,
        .d_close = cgdclose,
        .d_strategy = cgdstrategy,
        .d_iosize = NULL,
        .d_diskstart = cgd_diskstart,
        .d_dumpblocks = cgd_dumpblocks,
        .d_lastclose = NULL
};
    402 
/* Autoconf attachment for the cgd pseudo-device; may be detached at shutdown. */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    405 
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;		/* bitmask of CGDB_* trace categories */

#define CGDB_FOLLOW	0x1	/* trace function entry */
#define CGDB_IO	0x2		/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* trace bit for crypto paths */

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

/* Global variables */

static kmutex_t cgd_spawning_mtx;	/* protects cgd_spawning */
static kcondvar_t cgd_spawning_cv;	/* signalled when cgd_spawning clears */
static bool cgd_spawning;		/* a spawn/destroy is in progress */
static struct cgd_worker *cgd_worker;	/* shared worker; see cgd_refcnt */
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)	/* unit number from a dev_t */

/* The code */
    444 
/*
 * Wait until no other thread is spawning or destroying an instance,
 * then claim that right by setting cgd_spawning.  If "intr" is true
 * the wait may be interrupted by a signal; in that case the error is
 * returned and cgd_spawning is NOT claimed.  Release with cgd_unlock().
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	/* Only take ownership if no wait was interrupted. */
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
    462 
/*
 * Drop the spawn lock taken by cgd_lock() and wake all waiters.
 */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
    471 
    472 static struct cgd_softc *
    473 getcgd_softc(dev_t dev)
    474 {
    475 	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
    476 }
    477 
/*
 * Autoconf match hook: cgd is a pseudo-device, so every probe succeeds.
 */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
    484 
/*
 * Autoconf attach hook: initialize the per-instance lock, condvar,
 * and dk(4)/disk(9) state.  The cipher configuration is supplied
 * later via ioctl (cgd_ioctl_set).
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
    499 
    500 
/*
 * Autoconf detach hook: fail with EBUSY while the raw partition is
 * open; otherwise clear any active cipher configuration and release
 * the disk and per-instance synchronization state.
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	/* If a configuration is still attached, undo it first. */
	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
    522 
/*
 * Pseudo-device attach routine, called at boot with the count from
 * the kernel configuration.  For the built-in (non-module) case this
 * sets up the global spawn lock and registers the cfattach; when
 * built as a module the body is empty (the module init path
 * presumably performs the equivalent — not visible in this chunk).
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif
}
    538 
    539 static struct cgd_softc *
    540 cgd_spawn(int unit)
    541 {
    542 	cfdata_t cf;
    543 	struct cgd_worker *cw;
    544 	struct cgd_softc *sc;
    545 
    546 	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
    547 	cf->cf_name = cgd_cd.cd_name;
    548 	cf->cf_atname = cgd_cd.cd_name;
    549 	cf->cf_unit = unit;
    550 	cf->cf_fstate = FSTATE_STAR;
    551 
    552 	cw = cgd_create_one_worker();
    553 	if (cw == NULL) {
    554 		kmem_free(cf, sizeof(*cf));
    555 		return NULL;
    556 	}
    557 
    558 	sc = device_private(config_attach_pseudo(cf));
    559 	if (sc == NULL) {
    560 		cgd_destroy_one_worker(cw);
    561 		return NULL;
    562 	}
    563 
    564 	sc->sc_worker = cw;
    565 
    566 	return sc;
    567 }
    568 
/*
 * Tear down an instance created by cgd_spawn(): detach the device,
 * drop the worker reference, and free the fabricated cfdata.
 * Called with the spawn lock held.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab the cfdata before detach; it is freed below on success. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
    587 
/*
 * Serialize operations on this instance: wait until no other thread
 * holds sc_busy, then claim it.  Release with cgd_unbusy().
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
    598 
/*
 * Release the busy flag taken by cgd_busy() and wake all waiters.
 */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
    608 
    609 static struct cgd_worker *
    610 cgd_create_one_worker(void)
    611 {
    612 	KASSERT(cgd_spawning);
    613 
    614 	if (cgd_refcnt++ == 0) {
    615 		KASSERT(cgd_worker == NULL);
    616 		cgd_worker = cgd_create_worker();
    617 	}
    618 
    619 	KASSERT(cgd_worker != NULL);
    620 	return cgd_worker;
    621 }
    622 
/*
 * Drop a reference on the shared worker, destroying it when the last
 * reference goes away.  Called with the spawn lock held (asserted).
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
    634 
/*
 * Allocate the shared worker: a per-CPU, MP-safe workqueue that runs
 * cgd_process() at PRI_BIO/IPL_BIO, plus a pool of cgd_xfer transfer
 * contexts.  Returns NULL if the workqueue cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	                         PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		/* Workqueue creation failed: release everything. */
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
    663 
/*
 * Free a worker created by cgd_create_worker().  The pool and
 * workqueue pointers are checked defensively before teardown.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
    678 
/*
 * open(2) entry point: look up the unit's softc, spawning a fresh
 * instance on first open, then defer to the dk(4) open logic.  The
 * spawn-lock wait is interruptible since this runs on behalf of a
 * user process.
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
    699 
/*
 * close(2) entry point: let dk(4) do the close bookkeeping, then, if
 * this was the last close of an unconfigured unit, destroy the
 * pseudo-device that cgdopen() spawned.  The spawn lock is taken
 * uninterruptibly so the close cannot fail because of a signal.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* No configuration attached any more: reap the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
    735 
    736 static void
    737 cgdstrategy(struct buf *bp)
    738 {
    739 	struct	cgd_softc *sc = getcgd_softc(bp->b_dev);
    740 
    741 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
    742 	    (long)bp->b_bcount));
    743 
    744 	/*
    745 	 * Reject unaligned writes.
    746 	 */
    747 	if (((uintptr_t)bp->b_data & 3) != 0) {
    748 		bp->b_error = EINVAL;
    749 		goto bail;
    750 	}
    751 
    752 	dk_strategy(&sc->sc_dksc, bp);
    753 	return;
    754 
    755 bail:
    756 	bp->b_resid = bp->b_bcount;
    757 	biodone(bp);
    758 	return;
    759 }
    760 
    761 static int
    762 cgdsize(dev_t dev)
    763 {
    764 	struct cgd_softc *sc = getcgd_softc(dev);
    765 
    766 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
    767 	if (!sc)
    768 		return -1;
    769 	return dk_size(&sc->sc_dksc, dev);
    770 }
    771 
    772 /*
    773  * cgd_{get,put}data are functions that deal with getting a buffer
    774  * for the new encrypted data.
    775  * We can no longer have a buffer per device, we need a buffer per
    776  * work queue...
    777  */
    778 
    779 static void *
    780 cgd_getdata(struct cgd_softc *sc, unsigned long size)
    781 {
    782 	void *data = NULL;
    783 
    784 	mutex_enter(&sc->sc_lock);
    785 	if (!sc->sc_data_used) {
    786 		sc->sc_data_used = true;
    787 		data = sc->sc_data;
    788 	}
    789 	mutex_exit(&sc->sc_lock);
    790 
    791 	if (data)
    792 		return data;
    793 
    794 	return kmem_intr_alloc(size, KM_NOSLEEP);
    795 }
    796 
    797 static void
    798 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
    799 {
    800 
    801 	if (data == sc->sc_data) {
    802 		mutex_enter(&sc->sc_lock);
    803 		sc->sc_data_used = false;
    804 		mutex_exit(&sc->sc_lock);
    805 	} else
    806 		kmem_intr_free(data, size);
    807 }
    808 
/*
 * dk(4) per-request start routine.  All resources (nested buffer,
 * transfer context, and — for writes — a ciphertext buffer) are
 * allocated up front; EAGAIN is returned if any are unavailable.
 * Writes are queued for encryption first; reads go straight down and
 * are decrypted on completion in cgdiodone().
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *sc = device_private(dev);
	struct	cgd_worker *cw = sc->sc_worker;
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	cgd_xfer *cx;
	struct	buf *nbp;
	void *	newaddr;
	daddr_t	bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	/* Fill in the transfer context; by default crypt in place. */
	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the worker; cgd_diskstart2() runs after. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	cgd_diskstart2(sc, cx);
	return 0;
}
    869 
/*
 * Second half of request start, run once any encryption has finished:
 * point the nested buffer at the (possibly encrypted) data and issue
 * it against the backing vnode.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct	vnode *vp;
	struct	buf *bp;
	struct	buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Convert the sector address to DEV_BSIZE units. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes in flight are accounted on the backing vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
    900 
/*
 * Completion callback for the nested buffer.  Propagates any error to
 * the original request; reads are queued for decryption (using the
 * block number the data was encrypted under), writes finish directly
 * in cgd_iodone2().
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	cgd_xfer *cx = nbp->b_private;
	struct	buf *obp = cx->cx_obp;
	struct	cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
		nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Convert back from DEV_BSIZE units to sectors. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp     = obp;
		cx->cx_nbp     = nbp;
		cx->cx_dstv    = obp->b_data;
		cx->cx_srcv    = obp->b_data;
		cx->cx_len     = obp->b_bcount;
		cx->cx_blkno   = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir     = CGD_CIPHER_DECRYPT;

		/* Decrypt on the worker; cgd_iodone2() runs after. */
		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
    949 
/*
 * cgd_iodone2(sc, cx)
 *
 *	Final completion of a transfer: release the cgd_xfer and the
 *	nested buffer, set the residual count, and notify the disk
 *	layer so the next queued request can start.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	/* Hand the original buffer back and kick the queue. */
	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
    974 
/*
 * cgd_dumpblocks(dev, va, blkno, nblk)
 *
 *	Dump entry point: encrypt nblk sectors starting at va into a
 *	scratch buffer and hand the ciphertext to the underlying
 *	disk's dump routine, so crash dumps reach the disk encrypted.
 *	Returns 0 on success or an errno (ENOMEM if no scratch buffer
 *	could be obtained).
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext.  */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up.  */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer.  */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device.  */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer.  */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device.  */
	return error;
}
   1020 
   1021 /* XXX: we should probably put these into dksubr.c, mostly */
   1022 static int
   1023 cgdread(dev_t dev, struct uio *uio, int flags)
   1024 {
   1025 	struct	cgd_softc *sc;
   1026 	struct	dk_softc *dksc;
   1027 
   1028 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
   1029 	    (unsigned long long)dev, uio, flags));
   1030 	sc = getcgd_softc(dev);
   1031 	if (sc == NULL)
   1032 		return ENXIO;
   1033 	dksc = &sc->sc_dksc;
   1034 	if (!DK_ATTACHED(dksc))
   1035 		return ENXIO;
   1036 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
   1037 }
   1038 
   1039 /* XXX: we should probably put these into dksubr.c, mostly */
   1040 static int
   1041 cgdwrite(dev_t dev, struct uio *uio, int flags)
   1042 {
   1043 	struct	cgd_softc *sc;
   1044 	struct	dk_softc *dksc;
   1045 
   1046 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
   1047 	sc = getcgd_softc(dev);
   1048 	if (sc == NULL)
   1049 		return ENXIO;
   1050 	dksc = &sc->sc_dksc;
   1051 	if (!DK_ATTACHED(dksc))
   1052 		return ENXIO;
   1053 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
   1054 }
   1055 
   1056 static int
   1057 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   1058 {
   1059 	struct	cgd_softc *sc;
   1060 	struct	dk_softc *dksc;
   1061 	int	part = DISKPART(dev);
   1062 	int	pmask = 1 << part;
   1063 	int	error;
   1064 
   1065 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
   1066 	    dev, cmd, data, flag, l));
   1067 
   1068 	switch (cmd) {
   1069 	case CGDIOCGET:
   1070 		return cgd_ioctl_get(dev, data, l);
   1071 	case CGDIOCSET:
   1072 	case CGDIOCCLR:
   1073 		if ((flag & FWRITE) == 0)
   1074 			return EBADF;
   1075 		/* FALLTHROUGH */
   1076 	default:
   1077 		sc = getcgd_softc(dev);
   1078 		if (sc == NULL)
   1079 			return ENXIO;
   1080 		dksc = &sc->sc_dksc;
   1081 		break;
   1082 	}
   1083 
   1084 	switch (cmd) {
   1085 	case CGDIOCSET:
   1086 		cgd_busy(sc);
   1087 		if (DK_ATTACHED(dksc))
   1088 			error = EBUSY;
   1089 		else
   1090 			error = cgd_ioctl_set(sc, data, l);
   1091 		cgd_unbusy(sc);
   1092 		break;
   1093 	case CGDIOCCLR:
   1094 		cgd_busy(sc);
   1095 		if (DK_BUSY(&sc->sc_dksc, pmask))
   1096 			error = EBUSY;
   1097 		else
   1098 			error = cgd_ioctl_clr(sc, l);
   1099 		cgd_unbusy(sc);
   1100 		break;
   1101 	case DIOCGCACHE:
   1102 	case DIOCCACHESYNC:
   1103 		cgd_busy(sc);
   1104 		if (!DK_ATTACHED(dksc)) {
   1105 			cgd_unbusy(sc);
   1106 			error = ENOENT;
   1107 			break;
   1108 		}
   1109 		/*
   1110 		 * We pass this call down to the underlying disk.
   1111 		 */
   1112 		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
   1113 		cgd_unbusy(sc);
   1114 		break;
   1115 	case DIOCGSECTORALIGN: {
   1116 		struct disk_sectoralign *dsa = data;
   1117 
   1118 		cgd_busy(sc);
   1119 		if (!DK_ATTACHED(dksc)) {
   1120 			cgd_unbusy(sc);
   1121 			error = ENOENT;
   1122 			break;
   1123 		}
   1124 
   1125 		/* Get the underlying disk's sector alignment.  */
   1126 		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
   1127 		if (error) {
   1128 			cgd_unbusy(sc);
   1129 			break;
   1130 		}
   1131 
   1132 		/* Adjust for the disklabel partition if necessary.  */
   1133 		if (part != RAW_PART) {
   1134 			struct disklabel *lp = dksc->sc_dkdev.dk_label;
   1135 			daddr_t offset = lp->d_partitions[part].p_offset;
   1136 			uint32_t r = offset % dsa->dsa_alignment;
   1137 
   1138 			if (r < dsa->dsa_firstaligned)
   1139 				dsa->dsa_firstaligned = dsa->dsa_firstaligned
   1140 				    - r;
   1141 			else
   1142 				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
   1143 				    + dsa->dsa_alignment) - r;
   1144 		}
   1145 		cgd_unbusy(sc);
   1146 		break;
   1147 	}
   1148 	case DIOCGSTRATEGY:
   1149 	case DIOCSSTRATEGY:
   1150 		if (!DK_ATTACHED(dksc)) {
   1151 			error = ENOENT;
   1152 			break;
   1153 		}
   1154 		/*FALLTHROUGH*/
   1155 	default:
   1156 		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
   1157 		break;
   1158 	case CGDIOCGET:
   1159 		KASSERT(0);
   1160 		error = EINVAL;
   1161 	}
   1162 
   1163 	return error;
   1164 }
   1165 
   1166 static int
   1167 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
   1168 {
   1169 	struct	cgd_softc *sc;
   1170 
   1171 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
   1172 	    dev, blkno, va, (unsigned long)size));
   1173 	sc = getcgd_softc(dev);
   1174 	if (sc == NULL)
   1175 		return ENXIO;
   1176 	return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
   1177 }
   1178 
   1179 /*
   1180  * XXXrcd:
   1181  *  for now we hardcode the maximum key length.
   1182  */
   1183 #define MAX_KEYSIZE	1024
   1184 
   1185 static const struct {
   1186 	const char *n;
   1187 	int v;
   1188 	int d;
   1189 } encblkno[] = {
   1190 	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
   1191 	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
   1192 	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
   1193 };
   1194 
/* ARGSUSED */
/*
 * cgd_ioctl_set(sc, data, l)
 *
 *	CGDIOCSET: configure a cgd unit.  Opens the underlying block
 *	device, copies in the algorithm name, IV method, and key from
 *	userland, initializes the cipher, and attaches the disk.
 *	The caller (cgdioctl) has already verified that the unit is
 *	not currently configured.  Returns 0 or an errno; on error
 *	the backing vnode is closed again.
 */
static int
cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;
	struct dk_softc *dksc = &sc->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	/* Open the underlying block device by path. */
	ret = vn_bdev_openpath(pb, &vp, l);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	/* Scratch buffer for all copied-in strings and the raw key. */
	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);

	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
		goto bail;

	/* Look up the cipher by its user-supplied name. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	sc->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!sc->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* Look up the IV method; only the encblkno family is known. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/*
	 * ci_keylen is in bits (cf. the /8 here and in selftest).
	 * NOTE(review): the +1 copies one byte beyond the rounded-down
	 * length -- historical behavior, confirm before changing.
	 */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	/* Initialize the cipher-private state from the key. */
	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
	sc->sc_cdata.cf_mode = encblkno[i].v;
	sc->sc_cdata.cf_keylen = ci->ci_keylen;
	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &sc->sc_cdata.cf_blocksize);
	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    sc->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
	/* Zeroize the key copy; explicit_memset is not optimized away. */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!sc->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	kmem_free(inbuf, MAX_KEYSIZE);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
	sc->sc_data_used = false;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	kmem_free(inbuf, MAX_KEYSIZE);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
   1306 
/* ARGSUSED */
/*
 * cgd_ioctl_clr(sc, l)
 *
 *	CGDIOCCLR: unconfigure a cgd unit.  Wedges and queued buffers
 *	are torn down first, then the backing vnode is closed and the
 *	cipher state and buffers released.  Returns ENXIO if the unit
 *	is not configured.
 */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct	dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Close the backing device and destroy the cipher state. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
   1333 
/*
 * cgd_ioctl_get(dev, data, l)
 *
 *	CGDIOCGET: report a unit's configuration into the struct
 *	cgd_user at data.  Works on unconfigured and nonexistent
 *	units, which report all-zero fields.
 */
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
			   dev, unit, data, l));

	/* XXX, we always return this units data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		/* Unit missing or unconfigured: report zeroes. */
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		/* NOTE(review): strncpy leaves cgu_alg unterminated if
		 * cf_name fills it exactly -- confirm userland copes. */
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}
   1382 
/*
 * cgdinit(sc, cpath, vp, l)
 *
 *	Record the backing device (vnode, dev_t, and a copy of the
 *	user-supplied path) in the softc and synthesize an initial
 *	geometry from the device's size and sector size.  On error
 *	the path copy is freed again; closing vp remains the
 *	caller's responsibility.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	int	ret;
	char	*tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Keep our own exact-length copy of the device path. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	/* Fake geometry: one track, 1 MB worth of sectors per cylinder. */
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
   1434 
   1435 /*
   1436  * Our generic cipher entry point.  This takes care of the
   1437  * IV mode and passes off the work to the specific cipher.
   1438  * We implement here the IV method ``encrypted block
   1439  * number''.
   1440  *
   1441  * XXXrcd: for now we rely on our own crypto framework defined
   1442  *         in dev/cgd_crypto.c.  This will change when we
   1443  *         get a generic kernel crypto framework.
   1444  */
   1445 
   1446 static void
   1447 blkno2blkno_buf(char *sbuf, daddr_t blkno)
   1448 {
   1449 	int	i;
   1450 
   1451 	/* Set up the blkno in blkno_buf, here we do not care much
   1452 	 * about the final layout of the information as long as we
   1453 	 * can guarantee that each sector will have a different IV
   1454 	 * and that the endianness of the machine will not affect
   1455 	 * the representation that we have chosen.
   1456 	 *
   1457 	 * We choose this representation, because it does not rely
   1458 	 * on the size of buf (which is the blocksize of the cipher),
   1459 	 * but allows daddr_t to grow without breaking existing
   1460 	 * disks.
   1461 	 *
   1462 	 * Note that blkno2blkno_buf does not take a size as input,
   1463 	 * and hence must be called on a pre-zeroed buffer of length
   1464 	 * greater than or equal to sizeof(daddr_t).
   1465 	 */
   1466 	for (i=0; i < sizeof(daddr_t); i++) {
   1467 		*sbuf++ = blkno & 0xff;
   1468 		blkno >>= 8;
   1469 	}
   1470 }
   1471 
   1472 static struct cpu_info *
   1473 cgd_cpu(struct cgd_softc *sc)
   1474 {
   1475 	struct cgd_worker *cw = sc->sc_worker;
   1476 	struct cpu_info *ci = NULL;
   1477 	u_int cidx, i;
   1478 
   1479 	if (cw->cw_busy == 0) {
   1480 		cw->cw_last = cpu_index(curcpu());
   1481 		return NULL;
   1482 	}
   1483 
   1484 	for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
   1485 		if (cidx >= maxcpus)
   1486 			cidx = 0;
   1487 		ci = cpu_lookup(cidx);
   1488 		if (ci) {
   1489 			cw->cw_last = cidx;
   1490 			break;
   1491 		}
   1492 	}
   1493 
   1494 	return ci;
   1495 }
   1496 
/*
 * cgd_enqueue(sc, cx)
 *
 *	Account for one more outstanding transfer and hand cx to the
 *	workqueue, bound to the CPU chosen by cgd_cpu() (or NULL for
 *	no specific CPU).
 */
static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	/* cw_busy and cw_last (used by cgd_cpu) are under cw_lock. */
	ci = cgd_cpu(sc);
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}
   1510 
/*
 * cgd_process(wk, arg)
 *
 *	Workqueue handler: run the cipher over the transfer, then
 *	continue the I/O on the appropriate path, and finally drop
 *	the busy count taken in cgd_enqueue().
 */
static void
cgd_process(struct work *wk, void *arg)
{
	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
	struct cgd_softc *sc = cx->cx_sc;
	struct cgd_worker *cw = sc->sc_worker;

	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);

	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
		/* Write path: ciphertext ready, issue the disk I/O. */
		cgd_diskstart2(sc, cx);
	} else {
		/* Read path: plaintext recovered, finish the request. */
		cgd_iodone2(sc, cx);
	}

	mutex_enter(&cw->cw_lock);
	if (cw->cw_busy > 0)
		cw->cw_busy--;
	mutex_exit(&cw->cw_lock);
}
   1532 
/*
 * cgd_cipher(sc, dstv, srcv, len, blkno, secsize, dir)
 *
 *	Generic cipher entry point: walk the request one sector at a
 *	time, deriving each sector's IV from its block number, and
 *	hand every sector to the configured cipher in direction dir
 *	(CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT).  len must be a
 *	multiple of the cipher block size.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher	*cipher = sc->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = sc->sc_cdata.cf_blocksize;
	size_t		todo;
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	KASSERTMSG(len % blocksize == 0,
	    "cgd_cipher: len %% blocksize != 0");

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERTMSG(sizeof(daddr_t) <= blocksize,
	    "cgd_cipher: sizeof(daddr_t) > blocksize");

	KASSERTMSG(blocksize <= CGD_MAXBLOCKSIZE,
	    "cgd_cipher: blocksize > CGD_MAXBLOCKSIZE");

	/* NOTE(review): only the first iovec of each pair is used. */
	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	for (; len > 0; len -= todo) {
		/* At most one sector per cipher call. */
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len  = todo;
		srciov[0].iov_len  = todo;

		/*
		 * The IV is the sector's block number, serialized
		 * endian-independently into a zeroed block-sized
		 * buffer.
		 */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, blkno_buf, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
   1586 
   1587 #ifdef DEBUG
/*
 * hexprint(start, buf, len)
 *
 *	Debug helper: print a label followed by len bytes of buf as
 *	lowercase hex, with no trailing newline.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int	i;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
   1598 #endif
   1599 
/*
 * selftest()
 *
 *	Known-answer tests run at module init: for each entry of the
 *	selftests[] table, instantiate the cipher, encrypt the sample
 *	plaintext and compare against the expected ciphertext, then
 *	decrypt and compare against the original plaintext.  Panics
 *	on any mismatch or setup failure.
 */
static void
selftest(void)
{
	struct cgd_softc sc;
	void *buf;

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		aprint_verbose("cgd: self-test %s-%d\n", alg, keylen);

		memset(&sc, 0, sizeof(sc));

		sc.sc_cfuncs = cryptfuncs_find(alg);
		if (sc.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		/* Blocksize is handed to cf_init in bits (cf. /8 below). */
		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		sc.sc_cdata.cf_keylen = keylen;

		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
		    key, &sc.sc_cdata.cf_blocksize);
		if (sc.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);

		sc.sc_cdata.cf_blocksize /= 8;

		/* Round-trip the sample text through the cipher in place. */
		buf = kmem_alloc(txtlen, KM_SLEEP);
		memcpy(buf, selftests[i].ptxt, txtlen);

		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ctxt, txtlen);
			panic("cgd %s encryption is broken [%zu]",
			    selftests[i].alg, i);
		}

		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ptxt, txtlen);
			panic("cgd %s decryption is broken [%zu]",
			    selftests[i].alg, i);
		}

		kmem_free(buf, txtlen);
		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
	}

	aprint_verbose("cgd: self-tests passed\n");
}
   1660 
/*
 * Module glue: cgd requires the listed cipher and disk-support
 * modules.
 */
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Device major numbers; assigned by devsw_attach() at module init. */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
   1668 
   1669 static int
   1670 cgd_modcmd(modcmd_t cmd, void *arg)
   1671 {
   1672 	int error = 0;
   1673 
   1674 	switch (cmd) {
   1675 	case MODULE_CMD_INIT:
   1676 		selftest();
   1677 #ifdef _MODULE
   1678 		mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
   1679 		cv_init(&cgd_spawning_cv, "cgspwn");
   1680 
   1681 		error = config_cfdriver_attach(&cgd_cd);
   1682 		if (error)
   1683 			break;
   1684 
   1685 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1686 	        if (error) {
   1687 			config_cfdriver_detach(&cgd_cd);
   1688 			aprint_error("%s: unable to register cfattach for"
   1689 			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
   1690 			break;
   1691 		}
   1692 		/*
   1693 		 * Attach the {b,c}devsw's
   1694 		 */
   1695 		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1696 		    &cgd_cdevsw, &cgd_cmajor);
   1697 
   1698 		/*
   1699 		 * If devsw_attach fails, remove from autoconf database
   1700 		 */
   1701 		if (error) {
   1702 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1703 			config_cfdriver_detach(&cgd_cd);
   1704 			aprint_error("%s: unable to attach %s devsw, "
   1705 			    "error %d", __func__, cgd_cd.cd_name, error);
   1706 			break;
   1707 		}
   1708 #endif
   1709 		break;
   1710 
   1711 	case MODULE_CMD_FINI:
   1712 #ifdef _MODULE
   1713 		/*
   1714 		 * Remove {b,c}devsw's
   1715 		 */
   1716 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
   1717 
   1718 		/*
   1719 		 * Now remove device from autoconf database
   1720 		 */
   1721 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
   1722 		if (error) {
   1723 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1724 			    &cgd_cdevsw, &cgd_cmajor);
   1725 			aprint_error("%s: failed to detach %s cfattach, "
   1726 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1727  			break;
   1728 		}
   1729 		error = config_cfdriver_detach(&cgd_cd);
   1730 		if (error) {
   1731 			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
   1732 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
   1733 			    &cgd_cdevsw, &cgd_cmajor);
   1734 			aprint_error("%s: failed to detach %s cfdriver, "
   1735 			    "error %d\n", __func__, cgd_cd.cd_name, error);
   1736 			break;
   1737 		}
   1738 
   1739 		cv_destroy(&cgd_spawning_cv);
   1740 		mutex_destroy(&cgd_spawning_mtx);
   1741 #endif
   1742 		break;
   1743 
   1744 	case MODULE_CMD_STAT:
   1745 		error = ENOTTY;
   1746 		break;
   1747 	default:
   1748 		error = ENOTTY;
   1749 		break;
   1750 	}
   1751 
   1752 	return error;
   1753 }
   1754