cgd.c revision 1.135 1 /* $NetBSD: cgd.c,v 1.135 2020/06/17 20:44:45 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.135 2020/06/17 20:44:45 riastradh Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/buf.h>
38 #include <sys/bufq.h>
39 #include <sys/conf.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 #include <sys/disk.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 #include <sys/ioctl.h>
47 #include <sys/kmem.h>
48 #include <sys/module.h>
49 #include <sys/namei.h> /* for pathbuf */
50 #include <sys/pool.h>
51 #include <sys/proc.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
54 #include <sys/vnode.h>
55 #include <sys/workqueue.h>
56
57 #include <dev/cgd_crypto.h>
58 #include <dev/cgdvar.h>
59 #include <dev/dkvar.h>
60
61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
62
63 #include "ioconf.h"
64
/*
 * One known-answer test for a cgd cipher: encrypting `ptxt' with `key'
 * at sector `blkno' must produce `ctxt'.  Consumed by cgd_selftest().
 */
struct selftest_params {
	const char *alg;	/* cipher name, as accepted by CGDIOCSET */
	int encblkno8;		/* nonzero: use the encblkno8 IV method
				 * (presumably; confirm in cgd_selftest()) */
	int blocksize;		/* cipher block size; number of bytes */
	int secsize;		/* sector size in bytes */
	daddr_t blkno;		/* sector number the vector encrypts */
	int keylen;		/* key length; number of bits */
	int txtlen;		/* length of ptxt/ctxt; number of bytes */
	const uint8_t *key;	/* key material (some tables carry a
				 * trailing NUL not counted in keylen) */
	const uint8_t *ptxt;	/* plaintext input */
	const uint8_t *ctxt;	/* expected ciphertext output */
};
77
78 /* Entry Point Functions */
79
80 static dev_type_open(cgdopen);
81 static dev_type_close(cgdclose);
82 static dev_type_read(cgdread);
83 static dev_type_write(cgdwrite);
84 static dev_type_ioctl(cgdioctl);
85 static dev_type_strategy(cgdstrategy);
86 static dev_type_dump(cgddump);
87 static dev_type_size(cgdsize);
88
/* Block device switch: cgd is an MPSAFE disk-class device. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
99
/* Character device switch: raw I/O goes through cgdread/cgdwrite. */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
114
/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 * AES-XTS with a 256-bit (combined) key; plaintext input.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
128
/*
 * Expected AES-XTS-256 ciphertext for the plaintext above.  Sized [64]
 * to match the 64-byte truncated vector; only txtlen (64) bytes are
 * ever compared, so a larger zero-padded array would silently waste
 * space and hide a length mismatch.
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
139
/*
 * 256-bit XTS key (two 128-bit halves) with a trailing NUL byte;
 * keylen in the selftest entry is 256 bits, so the NUL is not part of
 * the key proper -- presumably kept for symmetry with string keys.
 */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
147
/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 * AES-XTS with a 512-bit (combined) key.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

/* Expected ciphertext for the 512-bit-key XTS vector above. */
static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

/* 512-bit XTS key plus a trailing NUL (not counted in keylen). */
static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
184
/*
 * AES-CBC vectors.  The same 32-byte key buffer serves both the
 * 128-bit and 256-bit tests (keylen in the selftest entry selects
 * how much of it is used).
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

/* AES-CBC-128 plaintext (same bytes as the XTS-256 plaintext). */
static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = {	/* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

/* AES-CBC-256 plaintext (the 0x00..0x3f counting pattern). */
static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = {	/* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
235
/* 3DES-CBC vectors: 192-bit key (168 effective + parity bits). */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected 3DES-CBC ciphertext at blkno 1. */
static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
263
/* Blowfish-CBC vectors: maximum 448-bit key. */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected Blowfish-CBC ciphertext at blkno 1. */
static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
295
/*
 * encblkno8 regression test: all-zero key and plaintext (the
 * zero-initialized array doubles as both), checked against a fixed
 * ciphertext at blkno 0.
 */
static const uint8_t selftest_aes_cbc_encblkno8_zero64[64];
static const uint8_t selftest_aes_cbc_encblkno8_ctxt[64] = {
	0xa2, 0x06, 0x26, 0x26, 0xac, 0xdc, 0xe7, 0xcf,
	0x47, 0x68, 0x24, 0x0e, 0xfa, 0x40, 0x44, 0x83,
	0x07, 0xe1, 0xf4, 0x5d, 0x53, 0x47, 0xa0, 0xfe,
	0xc0, 0x6e, 0x4e, 0xf8, 0x9d, 0x98, 0x63, 0xb8,
	0x2c, 0x27, 0xfa, 0x3a, 0xd5, 0x40, 0xda, 0xdb,
	0xe6, 0xc3, 0xe4, 0xfb, 0x85, 0x53, 0xfb, 0x78,
	0x5d, 0xbd, 0x8f, 0x4c, 0x1a, 0x04, 0x9c, 0x88,
	0x85, 0xec, 0x3c, 0x56, 0x46, 0x1a, 0x6e, 0xf5,
};
307
/*
 * Table of known-answer tests run by cgd_selftest() at attach time.
 * NOTE(review): not declared `static' -- consider whether any other
 * compilation unit references it; if not, it could be file-local.
 */
const struct selftest_params selftests[] = {
	{	/* AES-XTS, 256-bit key */
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{	/* AES-XTS, 512-bit key */
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{	/* AES-CBC, 128-bit key */
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{	/* AES-CBC, 256-bit key (same key buffer, longer prefix) */
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{	/* Triple-DES CBC */
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192,		/* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{	/* Blowfish CBC, maximum key length */
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
	{	/* AES-CBC with the encblkno8 IV method */
		.alg = "aes-cbc",
		.encblkno8 = 1,
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_encblkno8_zero64),
		.key = selftest_aes_cbc_encblkno8_zero64,
		.ptxt = selftest_aes_cbc_encblkno8_zero64,
		.ctxt = selftest_aes_cbc_encblkno8_ctxt,
	},
};
388
/* Autoconfiguration glue and instance lifecycle. */
static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static struct cgd_worker *cgd_create_one_worker(void);
static void	cgd_destroy_one_worker(struct cgd_worker *);
static struct cgd_worker *cgd_create_worker(void);
static void	cgd_destroy_worker(struct cgd_worker *);
static int	cgd_destroy(device_t);

/* Internal Functions */

/* I/O path: strategy, crypto workqueue, completion, crash dump. */
static int	cgd_diskstart(device_t, struct buf *);
static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
static void	cgdiodone(struct buf *);
static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_process(struct work *, void *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

/* Configuration ioctls and cipher invocation. */
static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, const void *,
			size_t, daddr_t, size_t, int);

static void	cgd_selftest(void);
418
/* dk(9) driver vector: cgd plugs its strategy/dump hooks into dksubr. */
static const struct dkdriver cgddkdriver = {
	.d_minphys  = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};
429
/* Autoconf attachment; instances may be detached at shutdown. */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
432
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;		/* runtime-settable debug mask */

#define CGDB_FOLLOW	0x1	/* trace entry points */
#define CGDB_IO	0x2	/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* trace crypto operations */

/* Conditionally execute y when debug bit x is set in cgddebug. */
#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif
457
/* Global variables */

/* Serialize cgd_spawn()/cgd_destroy() across all units. */
static kmutex_t cgd_spawning_mtx;
static kcondvar_t cgd_spawning_cv;
static bool cgd_spawning;		/* true while a spawn/destroy runs */
static struct cgd_worker *cgd_worker;	/* shared crypto workqueue state */
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

#define CGDUNIT(x)	DISKUNIT(x)
469
470 /* The code */
471
/*
 * Claim the global spawning token, waiting until no other thread holds
 * it.  If `intr' is true the wait is interruptible and a nonzero error
 * from cv_wait_sig() (e.g. on signal delivery) is returned without
 * claiming the token.  Returns 0 on success.
 *
 * NOTE(review): if cv_wait_sig() fails while cgd_spawning is still
 * set, the loop waits again; the error only propagates once the flag
 * clears.  Presumably intentional -- confirm against upstream intent.
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	/* Only take the token if the (possibly interrupted) wait succeeded. */
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
489
/* Release the spawning token and wake any waiters in cgd_lock(). */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
498
/* Map a dev_t to its cgd softc; NULL if the unit is not attached. */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}
504
/* Pseudo-device: always match. */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
511
/*
 * Attach a cgd instance: initialize its lock, busy-flag condvar, and
 * the dk(9) disk framework state.  The device carries no state that
 * needs saving, so the pmf hooks are trivial.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
526
527
528 static int
529 cgd_detach(device_t self, int flags)
530 {
531 int ret;
532 const int pmask = 1 << RAW_PART;
533 struct cgd_softc *sc = device_private(self);
534 struct dk_softc *dksc = &sc->sc_dksc;
535
536 if (DK_BUSY(dksc, pmask))
537 return EBUSY;
538
539 if (DK_ATTACHED(dksc) &&
540 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
541 return ret;
542
543 disk_destroy(&dksc->sc_dkdev);
544 cv_destroy(&sc->sc_cv);
545 mutex_destroy(&sc->sc_lock);
546
547 return 0;
548 }
549
/*
 * Pseudo-device attach entry, called once at boot (or from the module
 * init path, where the cfattach registration is done by module glue
 * instead -- hence the #ifndef _MODULE).  Always runs the cipher
 * known-answer selftests.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif

	cgd_selftest();
}
567
568 static struct cgd_softc *
569 cgd_spawn(int unit)
570 {
571 cfdata_t cf;
572 struct cgd_worker *cw;
573 struct cgd_softc *sc;
574
575 cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
576 cf->cf_name = cgd_cd.cd_name;
577 cf->cf_atname = cgd_cd.cd_name;
578 cf->cf_unit = unit;
579 cf->cf_fstate = FSTATE_STAR;
580
581 cw = cgd_create_one_worker();
582 if (cw == NULL) {
583 kmem_free(cf, sizeof(*cf));
584 return NULL;
585 }
586
587 sc = device_private(config_attach_pseudo(cf));
588 if (sc == NULL) {
589 cgd_destroy_one_worker(cw);
590 return NULL;
591 }
592
593 sc->sc_worker = cw;
594
595 return sc;
596 }
597
/*
 * Destroy a spawned cgd instance: detach the device, drop the worker
 * reference, and free the cfdata allocated in cgd_spawn().  Caller
 * holds the spawning token.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab cf before detach invalidates the device. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
616
617 static void
618 cgd_busy(struct cgd_softc *sc)
619 {
620
621 mutex_enter(&sc->sc_lock);
622 while (sc->sc_busy)
623 cv_wait(&sc->sc_cv, &sc->sc_lock);
624 sc->sc_busy = true;
625 mutex_exit(&sc->sc_lock);
626 }
627
/* Release the busy flag taken by cgd_busy() and wake all waiters. */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
637
/*
 * Take a reference on the shared crypto worker, creating it on the
 * first reference.  Caller holds the spawning token, which serializes
 * the refcount manipulation.
 */
static struct cgd_worker *
cgd_create_one_worker(void)
{
	KASSERT(cgd_spawning);

	if (cgd_refcnt++ == 0) {
		KASSERT(cgd_worker == NULL);
		cgd_worker = cgd_create_worker();
	}

	/* NOTE(review): if cgd_create_worker() failed, cgd_worker is
	 * NULL here and this assertion fires -- callers do check for a
	 * NULL return, so confirm the intended failure behavior. */
	KASSERT(cgd_worker != NULL);
	return cgd_worker;
}
651
/*
 * Drop a reference on the shared crypto worker, destroying it when the
 * last reference goes away.  Caller holds the spawning token.
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
663
/*
 * Allocate and initialize the shared worker: a per-CPU MPSAFE
 * workqueue running cgd_process() at BIO priority, plus a pool of
 * cgd_xfer records.  Returns NULL if the workqueue cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		/* Unwind the allocations; pool was not yet initialized. */
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
692
/* Tear down the shared worker created by cgd_create_worker(). */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
707
/*
 * Open entry point.  Spawns the unit on first open (under the global
 * spawning token, with an interruptible wait) and then defers to the
 * dk(9) framework.
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
728
/*
 * Close entry point.  After the dk(9) close, if the unit is no longer
 * configured (CGDIOCCLR was issued), destroy the spawned instance.
 * The spawning token is taken uninterruptibly so close cannot fail
 * due to a signal.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Last close of an unconfigured unit: reap the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
764
765 static void
766 cgdstrategy(struct buf *bp)
767 {
768 struct cgd_softc *sc = getcgd_softc(bp->b_dev);
769
770 DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
771 (long)bp->b_bcount));
772
773 /*
774 * Reject unaligned writes.
775 */
776 if (((uintptr_t)bp->b_data & 3) != 0) {
777 bp->b_error = EINVAL;
778 goto bail;
779 }
780
781 dk_strategy(&sc->sc_dksc, bp);
782 return;
783
784 bail:
785 bp->b_resid = bp->b_bcount;
786 biodone(bp);
787 return;
788 }
789
790 static int
791 cgdsize(dev_t dev)
792 {
793 struct cgd_softc *sc = getcgd_softc(dev);
794
795 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
796 if (!sc)
797 return -1;
798 return dk_size(&sc->sc_dksc, dev);
799 }
800
801 /*
802 * cgd_{get,put}data are functions that deal with getting a buffer
803 * for the new encrypted data.
804 * We can no longer have a buffer per device, we need a buffer per
805 * work queue...
806 */
807
808 static void *
809 cgd_getdata(struct cgd_softc *sc, unsigned long size)
810 {
811 void *data = NULL;
812
813 mutex_enter(&sc->sc_lock);
814 if (!sc->sc_data_used) {
815 sc->sc_data_used = true;
816 data = sc->sc_data;
817 }
818 mutex_exit(&sc->sc_lock);
819
820 if (data)
821 return data;
822
823 return kmem_intr_alloc(size, KM_NOSLEEP);
824 }
825
826 static void
827 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
828 {
829
830 if (data == sc->sc_data) {
831 mutex_enter(&sc->sc_lock);
832 sc->sc_data_used = false;
833 mutex_exit(&sc->sc_lock);
834 } else
835 kmem_intr_free(data, size);
836 }
837
/*
 * dk(9) diskstart hook: start one request on the underlying device.
 * Reads go straight down and are decrypted on completion; writes are
 * first encrypted into a staging buffer on the crypto workqueue.
 * Returns EAGAIN when a resource is momentarily unavailable so the
 * framework will retry later.
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct cgd_xfer *cx;
	struct buf *nbp;
	void *	newaddr;
	daddr_t bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		/* Encrypt asynchronously; cgd_diskstart2() runs after. */
		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		cgd_enqueue(sc, cx);
		return 0;
	}

	/* Read: no crypto needed yet; issue the I/O directly. */
	cgd_diskstart2(sc, cx);
	return 0;
}
898
/*
 * Second half of request start (after any write-side encryption):
 * clone the original buf's attributes onto the nested buf and issue it
 * to the underlying vnode.  cgdiodone() fires on completion.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Convert the sector address to DEV_BSIZE units for b_blkno. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for the in-flight output on the vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
929
/*
 * Completion handler for the nested buf.  Reads are handed to the
 * crypto workqueue for decryption; writes (and errors) go straight to
 * cgd_iodone2() for teardown.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct cgd_xfer *cx = nbp->b_private;
	struct buf *obp = cx->cx_obp;
	struct cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	/* Propagate any device error to the original request. */
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Convert b_blkno (DEV_BSIZE units) back to sectors. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp = obp;
		cx->cx_nbp = nbp;
		cx->cx_dstv = obp->b_data;
		cx->cx_srcv = obp->b_data;
		cx->cx_len = obp->b_bcount;
		cx->cx_blkno = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
978
/*
 * Final teardown for a request: release the transfer record, return
 * any write staging buffer, free the nested buf, and complete the
 * original request with the dk(9) framework, then kick the queue.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
1003
/*
 * Crash-dump hook: synchronously encrypt `nblk' sectors starting at
 * `blkno' into a staging buffer and pass them to the underlying
 * device's dump routine.  Runs in dump context, so only non-sleeping
 * allocation is used.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device. */
	return error;
}
1049
1050 /* XXX: we should probably put these into dksubr.c, mostly */
1051 static int
1052 cgdread(dev_t dev, struct uio *uio, int flags)
1053 {
1054 struct cgd_softc *sc;
1055 struct dk_softc *dksc;
1056
1057 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
1058 (unsigned long long)dev, uio, flags));
1059 sc = getcgd_softc(dev);
1060 if (sc == NULL)
1061 return ENXIO;
1062 dksc = &sc->sc_dksc;
1063 if (!DK_ATTACHED(dksc))
1064 return ENXIO;
1065 return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
1066 }
1067
1068 /* XXX: we should probably put these into dksubr.c, mostly */
1069 static int
1070 cgdwrite(dev_t dev, struct uio *uio, int flags)
1071 {
1072 struct cgd_softc *sc;
1073 struct dk_softc *dksc;
1074
1075 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
1076 sc = getcgd_softc(dev);
1077 if (sc == NULL)
1078 return ENXIO;
1079 dksc = &sc->sc_dksc;
1080 if (!DK_ATTACHED(dksc))
1081 return ENXIO;
1082 return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
1083 }
1084
/*
 * Ioctl entry point.  The first switch resolves the softc (CGDIOCGET
 * is special: it must work on unconfigured units, so it is handled
 * before the lookup); the second dispatches the command.  Commands
 * that touch configuration state serialize via cgd_busy().
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		/* Works even when the unit is not configured. */
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configuration changes need the device open for write. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		/* Configure the unit; refuse if already configured. */
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		/* Unconfigure; refuse while any partition is open. */
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment. */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary. */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			/* Shift firstaligned by the partition's phase. */
			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/* Already handled above; reaching here is a bug. */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
1194
1195 static int
1196 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1197 {
1198 struct cgd_softc *sc;
1199
1200 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1201 dev, blkno, va, (unsigned long)size));
1202 sc = getcgd_softc(dev);
1203 if (sc == NULL)
1204 return ENXIO;
1205 return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1206 }
1207
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * Supported IV-generation methods.  Fields:
 *	n  name as supplied by userland (cgdconfig)
 *	v  cipher mode stored in cf_mode
 *	d  divisor later applied to the cipher blocksize, compensating
 *	   for the legacy bits-vs-bytes confusion (see the comment in
 *	   cgd_ioctl_set())
 *
 * "encblkno" is the historical alias for the buggy "encblkno8"
 * behaviour; "encblkno1" is the corrected variant.
 */
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1223
1224 /* ARGSUSED */
1225 static int
1226 cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
1227 {
1228 struct cgd_ioctl *ci = data;
1229 struct vnode *vp;
1230 int ret;
1231 size_t i;
1232 size_t keybytes; /* key length in bytes */
1233 const char *cp;
1234 struct pathbuf *pb;
1235 char *inbuf;
1236 struct dk_softc *dksc = &sc->sc_dksc;
1237
1238 cp = ci->ci_disk;
1239
1240 ret = pathbuf_copyin(ci->ci_disk, &pb);
1241 if (ret != 0) {
1242 return ret;
1243 }
1244 ret = vn_bdev_openpath(pb, &vp, l);
1245 pathbuf_destroy(pb);
1246 if (ret != 0) {
1247 return ret;
1248 }
1249
1250 inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
1251
1252 if ((ret = cgdinit(sc, cp, vp, l)) != 0)
1253 goto bail;
1254
1255 (void)memset(inbuf, 0, MAX_KEYSIZE);
1256 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
1257 if (ret)
1258 goto bail;
1259 sc->sc_cfuncs = cryptfuncs_find(inbuf);
1260 if (!sc->sc_cfuncs) {
1261 ret = EINVAL;
1262 goto bail;
1263 }
1264
1265 (void)memset(inbuf, 0, MAX_KEYSIZE);
1266 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
1267 if (ret)
1268 goto bail;
1269
1270 for (i = 0; i < __arraycount(encblkno); i++)
1271 if (strcmp(encblkno[i].n, inbuf) == 0)
1272 break;
1273
1274 if (i == __arraycount(encblkno)) {
1275 ret = EINVAL;
1276 goto bail;
1277 }
1278
1279 keybytes = ci->ci_keylen / 8 + 1;
1280 if (keybytes > MAX_KEYSIZE) {
1281 ret = EINVAL;
1282 goto bail;
1283 }
1284
1285 (void)memset(inbuf, 0, MAX_KEYSIZE);
1286 ret = copyin(ci->ci_key, inbuf, keybytes);
1287 if (ret)
1288 goto bail;
1289
1290 sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
1291 sc->sc_cdata.cf_mode = encblkno[i].v;
1292
1293 /*
1294 * Print a warning if the user selected the legacy encblkno8
1295 * mistake, and reject it altogether for ciphers that it
1296 * doesn't apply to.
1297 */
1298 if (encblkno[i].v != CGD_CIPHER_CBC_ENCBLKNO1) {
1299 if (strcmp(sc->sc_cfuncs->cf_name, "aes-cbc") &&
1300 strcmp(sc->sc_cfuncs->cf_name, "3des-cbc") &&
1301 strcmp(sc->sc_cfuncs->cf_name, "blowfish-cbc")) {
1302 log(LOG_WARNING, "cgd: %s only makes sense for cbc,"
1303 " not for %s; ignoring\n",
1304 encblkno[i].n, sc->sc_cfuncs->cf_name);
1305 sc->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1306 } else {
1307 log(LOG_WARNING, "cgd: enabling legacy encblkno8\n");
1308 }
1309 }
1310
1311 sc->sc_cdata.cf_keylen = ci->ci_keylen;
1312 sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
1313 &sc->sc_cdata.cf_blocksize);
1314 if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
1315 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
1316 sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
1317 sc->sc_cdata.cf_priv = NULL;
1318 }
1319
1320 /*
1321 * The blocksize is supposed to be in bytes. Unfortunately originally
1322 * it was expressed in bits. For compatibility we maintain encblkno
1323 * and encblkno8.
1324 */
1325 sc->sc_cdata.cf_blocksize /= encblkno[i].d;
1326 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
1327 if (!sc->sc_cdata.cf_priv) {
1328 ret = EINVAL; /* XXX is this the right error? */
1329 goto bail;
1330 }
1331 kmem_free(inbuf, MAX_KEYSIZE);
1332
1333 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
1334
1335 sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
1336 sc->sc_data_used = false;
1337
1338 /* Attach the disk. */
1339 dk_attach(dksc);
1340 disk_attach(&dksc->sc_dkdev);
1341
1342 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1343
1344 /* Discover wedges on this disk. */
1345 dkwedge_discover(&dksc->sc_dkdev);
1346
1347 return 0;
1348
1349 bail:
1350 kmem_free(inbuf, MAX_KEYSIZE);
1351 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
1352 return ret;
1353 }
1354
/* ARGSUSED */
/*
 * Handle CGDIOCCLR: tear down a configured cgd unit.
 *
 * The caller (cgdioctl) has already verified via DK_BUSY that no
 * partition is in use and holds the unit busy, so resources can be
 * released in order: wedges, queued I/O, the backing vnode, cipher
 * state, and finally the disk attachment itself.
 */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Close the backing device and destroy the cipher state. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1381
/*
 * Handle CGDIOCGET: report this unit's configuration (backing device,
 * cipher name, blocksize, mode and key length) to userland.  The key
 * material itself is never copied out.
 */
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	/* XXX, we always return this units data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		/* Unit not configured: report an all-zeroes answer. */
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		/* Snapshot the configuration under the softc lock. */
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}
1430
/*
 * Common initialization for a newly configured unit: record the
 * target vnode and pathname, size the backing device, and install an
 * initial synthetic geometry.
 *
 * On failure the pathname copy (sc_tpath) is freed again; the caller
 * keeps ownership of vp and is responsible for closing it.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Keep a copy of the user-supplied path for later reporting. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	/* Fake geometry: 1 MB worth of sectors per "cylinder". */
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
1482
1483 /*
1484 * Our generic cipher entry point. This takes care of the
1485 * IV mode and passes off the work to the specific cipher.
1486 * We implement here the IV method ``encrypted block
1487 * number''.
1488 *
1489 * XXXrcd: for now we rely on our own crypto framework defined
1490 * in dev/cgd_crypto.c. This will change when we
1491 * get a generic kernel crypto framework.
1492 */
1493
1494 static void
1495 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1496 {
1497 int i;
1498
1499 /* Set up the blkno in blkno_buf, here we do not care much
1500 * about the final layout of the information as long as we
1501 * can guarantee that each sector will have a different IV
1502 * and that the endianness of the machine will not affect
1503 * the representation that we have chosen.
1504 *
1505 * We choose this representation, because it does not rely
1506 * on the size of buf (which is the blocksize of the cipher),
1507 * but allows daddr_t to grow without breaking existing
1508 * disks.
1509 *
1510 * Note that blkno2blkno_buf does not take a size as input,
1511 * and hence must be called on a pre-zeroed buffer of length
1512 * greater than or equal to sizeof(daddr_t).
1513 */
1514 for (i=0; i < sizeof(daddr_t); i++) {
1515 *sbuf++ = blkno & 0xff;
1516 blkno >>= 8;
1517 }
1518 }
1519
1520 static struct cpu_info *
1521 cgd_cpu(struct cgd_softc *sc)
1522 {
1523 struct cgd_worker *cw = sc->sc_worker;
1524 struct cpu_info *ci = NULL;
1525 u_int cidx, i;
1526
1527 if (cw->cw_busy == 0) {
1528 cw->cw_last = cpu_index(curcpu());
1529 return NULL;
1530 }
1531
1532 for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1533 if (cidx >= maxcpus)
1534 cidx = 0;
1535 ci = cpu_lookup(cidx);
1536 if (ci) {
1537 cw->cw_last = cidx;
1538 break;
1539 }
1540 }
1541
1542 return ci;
1543 }
1544
/*
 * Hand a crypto transfer to the worker's workqueue.  The target CPU
 * and the busy counter are chosen/updated together under cw_lock;
 * cgd_cpu() may return NULL, in which case no specific CPU is
 * requested from workqueue_enqueue().
 */
static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	ci = cgd_cpu(sc);
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}
1558
/*
 * Workqueue handler: perform the encryption or decryption for one
 * transfer, then continue the I/O — encrypted data proceeds to the
 * underlying disk via cgd_diskstart2(), decrypted data completes via
 * cgd_iodone2() — and finally drop the worker's busy count.
 */
static void
cgd_process(struct work *wk, void *arg)
{
	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
	struct cgd_softc *sc = cx->cx_sc;
	struct cgd_worker *cw = sc->sc_worker;

	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);

	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
		cgd_diskstart2(sc, cx);
	} else {
		cgd_iodone2(sc, cx);
	}

	mutex_enter(&cw->cw_lock);
	/* Guard against underflow; normally paired with cgd_enqueue(). */
	if (cw->cw_busy > 0)
		cw->cw_busy--;
	mutex_exit(&cw->cw_lock);
}
1580
/*
 * Encrypt or decrypt (per dir) len bytes from srcv into dstv, one
 * disk sector at a time.  Each sector's IV input is its block number
 * serialized into blkno_buf; encrypting that buffer into the actual
 * IV is handled inside the cipher dispatch.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, const void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	const char *src = srcv;
	cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
	size_t blocksize = sc->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	/* encblkno8 mode keeps cf_blocksize scaled by 8 (legacy bits
	 * representation; see cgd_ioctl_set()), so scale it back down. */
	if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8)
		blocksize /= 8;

	KASSERT(len % blocksize == 0);
	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERT(sizeof(daddr_t) <= blocksize);
	KASSERT(blocksize <= CGD_MAXBLOCKSIZE);

	/* Process one sector (at most secsize bytes) per iteration. */
	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Handle bollocksed up encblkno8 mistake.  We used to
		 * compute the encryption of a zero block with blkno as
		 * the CBC IV -- except in an early mistake arising
		 * from bit/byte confusion, we actually computed the
		 * encryption of the last of _eight_ zero blocks under
		 * CBC as the CBC IV.
		 *
		 * Encrypting the block number is handled inside the
		 * cipher dispatch now (even though in practice, both
		 * CBC and XTS will do the same thing), so we have to
		 * simulate the block number that would yield the same
		 * result.  So we encrypt _six_ zero blocks -- the
		 * first one and the last one are handled inside the
		 * cipher dispatch.
		 */
		if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8) {
			static const uint8_t zero[CGD_MAXBLOCKSIZE];
			uint8_t iv[CGD_MAXBLOCKSIZE];

			memcpy(iv, blkno_buf, blocksize);
			cipher(sc->sc_cdata.cf_priv, blkno_buf, zero,
			    6*blocksize, iv, CGD_CIPHER_ENCRYPT);
			memmove(blkno_buf, blkno_buf + 5*blocksize, blocksize);
		}

		cipher(sc->sc_cdata.cf_priv, dst, src, todo, blkno_buf, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1643
#ifdef DEBUG
/*
 * Debug helper: print len bytes of buf as hex, prefixed with the
 * given label and the byte count.  No trailing newline is emitted.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
#endif
1656
/*
 * Boot-time known-answer tests: run every entry of selftests[]
 * through an encrypt/compare and decrypt/compare cycle, panicking on
 * any mismatch — a broken cipher would silently corrupt user data.
 */
static void
cgd_selftest(void)
{
	struct cgd_softc sc;
	void *buf;

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		int encblkno8 = selftests[i].encblkno8;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		aprint_verbose("cgd: self-test %s-%d%s\n", alg, keylen,
		    encblkno8 ? " (encblkno8)" : "");

		/* Build a minimal throwaway softc for cgd_cipher(). */
		memset(&sc, 0, sizeof(sc));

		sc.sc_cfuncs = cryptfuncs_find(alg);
		if (sc.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		/* cf_init historically takes the blocksize in bits. */
		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		sc.sc_cdata.cf_mode = encblkno8 ? CGD_CIPHER_CBC_ENCBLKNO8 :
		    CGD_CIPHER_CBC_ENCBLKNO1;
		sc.sc_cdata.cf_keylen = keylen;

		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
		    key, &sc.sc_cdata.cf_blocksize);
		if (sc.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);

		/* Convert back to bytes, mirroring cgd_ioctl_set(). */
		if (!encblkno8)
			sc.sc_cdata.cf_blocksize /= 8;

		buf = kmem_alloc(txtlen, KM_SLEEP);
		memcpy(buf, selftests[i].ptxt, txtlen);

		/* Encrypt in place; compare with the known ciphertext. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ctxt, txtlen);
			panic("cgd %s encryption is broken [%zu]",
			    selftests[i].alg, i);
		}

		/* Decrypt in place; we must recover the plaintext. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ptxt, txtlen);
			panic("cgd %s decryption is broken [%zu]",
			    selftests[i].alg, i);
		}

		kmem_free(buf, txtlen);
		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
	}

	aprint_verbose("cgd: self-tests passed\n");
}
1721
/* Module glue: cgd depends on the listed crypto and disk helpers. */
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Device majors, assigned by devsw_attach() at module load. */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
1729
1730 static int
1731 cgd_modcmd(modcmd_t cmd, void *arg)
1732 {
1733 int error = 0;
1734
1735 switch (cmd) {
1736 case MODULE_CMD_INIT:
1737 #ifdef _MODULE
1738 mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1739 cv_init(&cgd_spawning_cv, "cgspwn");
1740
1741 error = config_cfdriver_attach(&cgd_cd);
1742 if (error)
1743 break;
1744
1745 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1746 if (error) {
1747 config_cfdriver_detach(&cgd_cd);
1748 aprint_error("%s: unable to register cfattach for"
1749 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1750 break;
1751 }
1752 /*
1753 * Attach the {b,c}devsw's
1754 */
1755 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1756 &cgd_cdevsw, &cgd_cmajor);
1757
1758 /*
1759 * If devsw_attach fails, remove from autoconf database
1760 */
1761 if (error) {
1762 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1763 config_cfdriver_detach(&cgd_cd);
1764 aprint_error("%s: unable to attach %s devsw, "
1765 "error %d", __func__, cgd_cd.cd_name, error);
1766 break;
1767 }
1768 #endif
1769 break;
1770
1771 case MODULE_CMD_FINI:
1772 #ifdef _MODULE
1773 /*
1774 * Remove {b,c}devsw's
1775 */
1776 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1777
1778 /*
1779 * Now remove device from autoconf database
1780 */
1781 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1782 if (error) {
1783 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1784 &cgd_cdevsw, &cgd_cmajor);
1785 aprint_error("%s: failed to detach %s cfattach, "
1786 "error %d\n", __func__, cgd_cd.cd_name, error);
1787 break;
1788 }
1789 error = config_cfdriver_detach(&cgd_cd);
1790 if (error) {
1791 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1792 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1793 &cgd_cdevsw, &cgd_cmajor);
1794 aprint_error("%s: failed to detach %s cfdriver, "
1795 "error %d\n", __func__, cgd_cd.cd_name, error);
1796 break;
1797 }
1798
1799 cv_destroy(&cgd_spawning_cv);
1800 mutex_destroy(&cgd_spawning_mtx);
1801 #endif
1802 break;
1803
1804 case MODULE_CMD_STAT:
1805 error = ENOTTY;
1806 break;
1807 default:
1808 error = ENOTTY;
1809 break;
1810 }
1811
1812 return error;
1813 }
1814