cgd.c revision 1.132 1 /* $NetBSD: cgd.c,v 1.132 2020/06/13 22:15:06 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.132 2020/06/13 22:15:06 riastradh Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/buf.h>
38 #include <sys/bufq.h>
39 #include <sys/conf.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 #include <sys/disk.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 #include <sys/ioctl.h>
47 #include <sys/kmem.h>
48 #include <sys/module.h>
49 #include <sys/namei.h> /* for pathbuf */
50 #include <sys/pool.h>
51 #include <sys/proc.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
54 #include <sys/vnode.h>
55 #include <sys/workqueue.h>
56
57 #include <dev/cgd_crypto.h>
58 #include <dev/cgdvar.h>
59 #include <dev/dkvar.h>
60
61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
62
63 #include "ioconf.h"
64
/*
 * One cipher known-answer test: encrypting `txtlen' bytes of `ptxt'
 * with `key' as disk block `blkno' must produce `ctxt'.  The table of
 * these below is run by cgd_selftest() at attach time.
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int blocksize;		/* number of bytes */
	int secsize;		/* sector size used for the test */
	daddr_t blkno;		/* block number the data is encrypted as */
	int keylen;		/* number of bits */
	int txtlen;		/* number of bytes */
	const uint8_t *key;	/* key material (keylen bits) */
	const uint8_t *ptxt;	/* plaintext input */
	const uint8_t *ctxt;	/* expected ciphertext */
};
76
77 /* Entry Point Functions */
78
79 static dev_type_open(cgdopen);
80 static dev_type_close(cgdclose);
81 static dev_type_read(cgdread);
82 static dev_type_write(cgdwrite);
83 static dev_type_ioctl(cgdioctl);
84 static dev_type_strategy(cgdstrategy);
85 static dev_type_dump(cgddump);
86 static dev_type_size(cgdsize);
87
88 const struct bdevsw cgd_bdevsw = {
89 .d_open = cgdopen,
90 .d_close = cgdclose,
91 .d_strategy = cgdstrategy,
92 .d_ioctl = cgdioctl,
93 .d_dump = cgddump,
94 .d_psize = cgdsize,
95 .d_discard = nodiscard,
96 .d_flag = D_DISK | D_MPSAFE
97 };
98
99 const struct cdevsw cgd_cdevsw = {
100 .d_open = cgdopen,
101 .d_close = cgdclose,
102 .d_read = cgdread,
103 .d_write = cgdwrite,
104 .d_ioctl = cgdioctl,
105 .d_stop = nostop,
106 .d_tty = notty,
107 .d_poll = nopoll,
108 .d_mmap = nommap,
109 .d_kqfilter = nokqfilter,
110 .d_discard = nodiscard,
111 .d_flag = D_DISK | D_MPSAFE
112 };
113
114 /*
115 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
116 */
117 static const uint8_t selftest_aes_xts_256_ptxt[64] = {
118 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
119 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
120 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
121 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
122 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
123 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
124 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
125 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
126 };
127
/*
 * Expected ciphertext for selftest_aes_xts_256_ptxt.  Only 64 bytes
 * are meaningful (the vector is truncated to match the plaintext);
 * this was previously over-declared as [512], silently zero-filling
 * 448 trailing bytes.  Size it to its initializer, consistent with
 * every other ctxt table in this file.
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
138
/*
 * 256-bit key for the vector above: 32 key bytes plus a trailing
 * zero byte.  NOTE(review): the selftest presumably only consumes
 * keylen/8 = 32 bytes; confirm why the extra NUL is carried.
 */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
146
147 /*
148 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
149 */
150 static const uint8_t selftest_aes_xts_512_ptxt[64] = {
151 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
152 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
153 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
154 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
155 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
156 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
157 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
158 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
159 };
160
161 static const uint8_t selftest_aes_xts_512_ctxt[64] = {
162 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
163 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
164 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
165 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
166 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
167 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
168 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
169 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
170 };
171
172 static const uint8_t selftest_aes_xts_512_key[65] = {
173 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
174 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
175 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
176 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
177 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
178 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
179 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
180 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
181 0
182 };
183
/*
 * aes-cbc known-answer vectors.  Both CBC tests share this 32-byte
 * key; the 128-bit entry in selftests[] presumably uses only the
 * leading 16 bytes (keylen = 128) — confirm in cgd_selftest().
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

/* aes-cbc-128 plaintext (same bytes as the first XTS plaintext). */
static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = {	/* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

/* aes-cbc-256 plaintext (sequential bytes 0x00..0x3f). */
static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = {	/* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
234
/* 3des-cbc known-answer vector: 192-bit key (incl. parity bits). */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected 3des-cbc ciphertext for the plaintext above (blkno=1). */
static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
262
/* blowfish-cbc known-answer vector: maximum 448-bit key. */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected blowfish-cbc ciphertext for the plaintext above (blkno=1). */
static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
294
/*
 * Table of known-answer tests run by cgd_selftest() at attach time,
 * one entry per supported cipher/key-size combination.
 * NOTE(review): not declared static, although no external reference
 * is visible from here — confirm before tightening linkage.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192,	/* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key  = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key  = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
};
363
/* Autoconfiguration and worker lifecycle */

static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static struct cgd_worker *cgd_create_one_worker(void);
static void	cgd_destroy_one_worker(struct cgd_worker *);
static struct cgd_worker *cgd_create_worker(void);
static void	cgd_destroy_worker(struct cgd_worker *);
static int	cgd_destroy(device_t);

/* Internal Functions */

static int	cgd_diskstart(device_t, struct buf *);
static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
static void	cgdiodone(struct buf *);
static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_process(struct work *, void *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

/* Configuration (ioctl) helpers and crypto entry */

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			size_t, daddr_t, size_t, int);

static void	cgd_selftest(void);
393
/*
 * dk(4) driver hooks: cgd delegates label/partition handling to the
 * common disk framework and supplies only its start and dump
 * routines.  No iosize or last-close hooks are needed.
 */
static const struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};

/* Autoconf glue; instances may be detached at shutdown. */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
407
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
/* Bitmask of enabled trace categories; patchable at run time. */
int cgddebug = 0;

#define CGDB_FOLLOW	0x1	/* trace entry-point calls */
#define CGDB_IO	0x2	/* trace I/O completion details */
#define CGDB_CRYPTO	0x4	/* crypto tracing (used outside this view) */

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif
432
/* Global variables */

static kmutex_t cgd_spawning_mtx;	/* protects cgd_spawning */
static kcondvar_t cgd_spawning_cv;	/* signalled when cgd_spawning clears */
static bool cgd_spawning;		/* true while a unit is spawned/destroyed */
static struct cgd_worker *cgd_worker;	/* single worker shared by all units */
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

/* The unit number is encoded in the dev_t exactly as for dk(4). */
#define CGDUNIT(x)		DISKUNIT(x)
444
445 /* The code */
446
/*
 * Serialize pseudo-device creation/destruction: only one thread may
 * be inside cgd_spawn()/cgd_destroy() at a time.  With `intr' true
 * the wait is interruptible and a nonzero error from cv_wait_sig()
 * is returned without taking the lock; with `intr' false this always
 * succeeds (returns 0).
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	/* Only claim the lock if the (possibly interrupted) wait succeeded. */
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
464
/*
 * Release the spawning lock taken by cgd_lock() and wake all waiters.
 */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
473
474 static struct cgd_softc *
475 getcgd_softc(dev_t dev)
476 {
477 return device_lookup_private(&cgd_cd, CGDUNIT(dev));
478 }
479
/*
 * Autoconf match: cgd is a pseudo-device, so every configuration
 * attempt matches unconditionally.
 */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
486
/*
 * Autoconf attach: initialize the per-unit lock and condvar and hook
 * the unit into the dk(4) framework.  The unit carries no underlying
 * disk or key until configured through the CGDIOCSET ioctl.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	/* Null hooks: nothing special to do on suspend/resume. */
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
501
502
/*
 * Autoconf detach: refuse while the device is open (raw partition
 * busy check); if still configured, tear the configuration down via
 * cgd_ioctl_clr() first, then destroy the disk and per-unit locks.
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
524
/*
 * Pseudo-device attach hook, called once at boot.  `num' (the
 * configured unit count) is unused: units are created on demand by
 * cgd_spawn().  For modular kernels the cfattach registration is
 * done by the module startup code instead, hence #ifndef _MODULE.
 * The cipher known-answer tests always run.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif

	cgd_selftest();
}
542
543 static struct cgd_softc *
544 cgd_spawn(int unit)
545 {
546 cfdata_t cf;
547 struct cgd_worker *cw;
548 struct cgd_softc *sc;
549
550 cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
551 cf->cf_name = cgd_cd.cd_name;
552 cf->cf_atname = cgd_cd.cd_name;
553 cf->cf_unit = unit;
554 cf->cf_fstate = FSTATE_STAR;
555
556 cw = cgd_create_one_worker();
557 if (cw == NULL) {
558 kmem_free(cf, sizeof(*cf));
559 return NULL;
560 }
561
562 sc = device_private(config_attach_pseudo(cf));
563 if (sc == NULL) {
564 cgd_destroy_one_worker(cw);
565 return NULL;
566 }
567
568 sc->sc_worker = cw;
569
570 return sc;
571 }
572
/*
 * Destroy an instance created by cgd_spawn(): detach the autoconf
 * device, drop our reference on the shared worker, and free the
 * cfdata allocated at spawn time.  Caller holds the spawning lock
 * (asserted inside cgd_destroy_one_worker()).
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab the cfdata before detach invalidates the device. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
591
/*
 * Acquire the per-unit busy flag, sleeping until the current holder
 * calls cgd_unbusy().  Serializes ioctl-level operations on a unit.
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
602
/*
 * Release the per-unit busy flag and wake all threads waiting in
 * cgd_busy().
 */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
612
613 static struct cgd_worker *
614 cgd_create_one_worker(void)
615 {
616 KASSERT(cgd_spawning);
617
618 if (cgd_refcnt++ == 0) {
619 KASSERT(cgd_worker == NULL);
620 cgd_worker = cgd_create_worker();
621 }
622
623 KASSERT(cgd_worker != NULL);
624 return cgd_worker;
625 }
626
/*
 * Drop a reference on the shared worker, tearing it down when the
 * last user goes away.  Must be called with the spawning lock held
 * (asserted).
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
638
/*
 * Allocate the shared worker: a per-CPU, MP-safe workqueue running
 * cgd_process() at IPL_BIO, plus a pool of cgd_xfer structures used
 * to track in-flight requests.  Returns NULL if the workqueue cannot
 * be created (the kmem allocations themselves sleep and cannot fail).
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	/* Pool entries are taken from interrupt-ish context: IPL_BIO. */
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
667
/*
 * Free a worker created by cgd_create_worker(): its lock, transfer
 * pool, and workqueue.  The NULL checks tolerate a partially
 * constructed worker.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
682
/*
 * Open entry point (block and character).  Opening a unit that does
 * not yet exist creates it on the fly via cgd_spawn(), serialized by
 * the spawning lock; the wait for that lock is signal-interruptible
 * here.  The actual open bookkeeping is delegated to dk_open().
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
703
/*
 * Close entry point.  After the last close of an unconfigured unit
 * the pseudo-device itself is destroyed, mirroring the on-demand
 * creation in cgdopen().  The spawning lock is taken uninterruptibly
 * (cgd_lock(false) cannot fail) so close never returns EINTR.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Last close of an unconfigured unit: reap the pseudo-device. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
739
740 static void
741 cgdstrategy(struct buf *bp)
742 {
743 struct cgd_softc *sc = getcgd_softc(bp->b_dev);
744
745 DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
746 (long)bp->b_bcount));
747
748 /*
749 * Reject unaligned writes.
750 */
751 if (((uintptr_t)bp->b_data & 3) != 0) {
752 bp->b_error = EINVAL;
753 goto bail;
754 }
755
756 dk_strategy(&sc->sc_dksc, bp);
757 return;
758
759 bail:
760 bp->b_resid = bp->b_bcount;
761 biodone(bp);
762 return;
763 }
764
765 static int
766 cgdsize(dev_t dev)
767 {
768 struct cgd_softc *sc = getcgd_softc(dev);
769
770 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
771 if (!sc)
772 return -1;
773 return dk_size(&sc->sc_dksc, dev);
774 }
775
776 /*
777 * cgd_{get,put}data are functions that deal with getting a buffer
778 * for the new encrypted data.
779 * We can no longer have a buffer per device, we need a buffer per
780 * work queue...
781 */
782
783 static void *
784 cgd_getdata(struct cgd_softc *sc, unsigned long size)
785 {
786 void *data = NULL;
787
788 mutex_enter(&sc->sc_lock);
789 if (!sc->sc_data_used) {
790 sc->sc_data_used = true;
791 data = sc->sc_data;
792 }
793 mutex_exit(&sc->sc_lock);
794
795 if (data)
796 return data;
797
798 return kmem_intr_alloc(size, KM_NOSLEEP);
799 }
800
801 static void
802 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
803 {
804
805 if (data == sc->sc_data) {
806 mutex_enter(&sc->sc_lock);
807 sc->sc_data_used = false;
808 mutex_exit(&sc->sc_lock);
809 } else
810 kmem_intr_free(data, size);
811 }
812
/*
 * dk(4) start routine: turn the logical request `bp' into a transfer
 * (cgd_xfer) aimed at the underlying vnode.  Writes are first
 * encrypted asynchronously on the worker queue into a scratch
 * buffer; reads are sent straight down and decrypted on completion
 * in cgdiodone().  Returns EAGAIN when a buffer or pool entry is
 * temporarily unavailable, leaving the request queued.
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct cgd_xfer *cx;
	struct buf *nbp;
	void * newaddr;
	daddr_t bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  (cx_len and cx_dir are
	 * only filled in here for writes; the read path sets them in
	 * cgdiodone() once the data has arrived.)
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the worker; it calls cgd_diskstart2() after. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	cgd_diskstart2(sc, cx);
	return 0;
}
873
/*
 * Second half of request startup: clone the relevant fields of the
 * original buffer onto the nested buffer and issue it to the
 * underlying vnode.  Reached directly for reads, or from the worker
 * after encryption for writes.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Translate the sector number into DEV_BSIZE units. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for themselves on the target vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
904
/*
 * Completion handler for the nested buffer.  Propagates errors to
 * the original request; for reads it schedules in-place decryption
 * on the worker queue (finished in cgd_iodone2()), for writes it
 * completes immediately.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct cgd_xfer *cx = nbp->b_private;
	struct buf *obp = cx->cx_obp;
	struct cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	/* I/O only reaches here through a configured unit. */
	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Convert DEV_BSIZE units back to a sector number. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp = obp;
		cx->cx_nbp = nbp;
		cx->cx_dstv = obp->b_data;
		cx->cx_srcv = obp->b_data;
		cx->cx_len = obp->b_bcount;
		cx->cx_blkno = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
953
/*
 * Final completion: release the transfer, the scratch buffer (when
 * one was allocated for a write), and the nested buffer; then report
 * the result to dk(4) and kick the queue for the next request.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
978
/*
 * dk(4) dump callback: encrypt `nblk' sectors starting at `blkno'
 * from `va' into a scratch buffer and hand the ciphertext to the
 * underlying block device's dump routine.  Runs in crash-dump
 * context; cgd_getdata() only performs non-sleeping allocation.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device. */
	return error;
}
1024
1025 /* XXX: we should probably put these into dksubr.c, mostly */
1026 static int
1027 cgdread(dev_t dev, struct uio *uio, int flags)
1028 {
1029 struct cgd_softc *sc;
1030 struct dk_softc *dksc;
1031
1032 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
1033 (unsigned long long)dev, uio, flags));
1034 sc = getcgd_softc(dev);
1035 if (sc == NULL)
1036 return ENXIO;
1037 dksc = &sc->sc_dksc;
1038 if (!DK_ATTACHED(dksc))
1039 return ENXIO;
1040 return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
1041 }
1042
1043 /* XXX: we should probably put these into dksubr.c, mostly */
1044 static int
1045 cgdwrite(dev_t dev, struct uio *uio, int flags)
1046 {
1047 struct cgd_softc *sc;
1048 struct dk_softc *dksc;
1049
1050 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
1051 sc = getcgd_softc(dev);
1052 if (sc == NULL)
1053 return ENXIO;
1054 dksc = &sc->sc_dksc;
1055 if (!DK_ATTACHED(dksc))
1056 return ENXIO;
1057 return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
1058 }
1059
/*
 * Ioctl entry point.  The first switch handles what can (or must) be
 * done without an existing softc: CGDIOCGET works on unconfigured
 * units, and CGDIOCSET/CGDIOCCLR additionally require the descriptor
 * to be open for writing.  Everything else needs a softc and is
 * either handled below or passed down to dk_ioctl()/the underlying
 * disk.  Note the deliberate `case CGDIOCGET' placed after `default'
 * in the second switch: it is unreachable (handled above) and only
 * guards against the first switch being changed.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		/* Configure: attach an underlying disk and key. */
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		/* Unconfigure, but only when no partition is open. */
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment. */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary. */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/* Handled in the first switch; must never get here. */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
1169
1170 static int
1171 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1172 {
1173 struct cgd_softc *sc;
1174
1175 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1176 dev, blkno, va, (unsigned long)size));
1177 sc = getcgd_softc(dev);
1178 if (sc == NULL)
1179 return ENXIO;
1180 return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1181 }
1182
1183 /*
1184 * XXXrcd:
1185 * for now we hardcode the maximum key length.
1186 */
1187 #define MAX_KEYSIZE 1024
1188
/*
 * Table mapping user-visible IV-method names to cipher modes.  The
 * divisor `d' compensates for the historical bits-vs-bytes confusion
 * in the blocksize (see the compatibility comment in cgd_ioctl_set()).
 */
static const struct {
	const char *n;	/* IV method name copied in from userland */
	int v;		/* CGD_CIPHER_CBC_* mode value */
	int d;		/* blocksize divisor for compatibility */
} encblkno[] = {
	{ "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1198
1199 /* ARGSUSED */
1200 static int
1201 cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
1202 {
1203 struct cgd_ioctl *ci = data;
1204 struct vnode *vp;
1205 int ret;
1206 size_t i;
1207 size_t keybytes; /* key length in bytes */
1208 const char *cp;
1209 struct pathbuf *pb;
1210 char *inbuf;
1211 struct dk_softc *dksc = &sc->sc_dksc;
1212
1213 cp = ci->ci_disk;
1214
1215 ret = pathbuf_copyin(ci->ci_disk, &pb);
1216 if (ret != 0) {
1217 return ret;
1218 }
1219 ret = vn_bdev_openpath(pb, &vp, l);
1220 pathbuf_destroy(pb);
1221 if (ret != 0) {
1222 return ret;
1223 }
1224
1225 inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
1226
1227 if ((ret = cgdinit(sc, cp, vp, l)) != 0)
1228 goto bail;
1229
1230 (void)memset(inbuf, 0, MAX_KEYSIZE);
1231 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
1232 if (ret)
1233 goto bail;
1234 sc->sc_cfuncs = cryptfuncs_find(inbuf);
1235 if (!sc->sc_cfuncs) {
1236 ret = EINVAL;
1237 goto bail;
1238 }
1239
1240 (void)memset(inbuf, 0, MAX_KEYSIZE);
1241 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
1242 if (ret)
1243 goto bail;
1244
1245 for (i = 0; i < __arraycount(encblkno); i++)
1246 if (strcmp(encblkno[i].n, inbuf) == 0)
1247 break;
1248
1249 if (i == __arraycount(encblkno)) {
1250 ret = EINVAL;
1251 goto bail;
1252 }
1253
1254 keybytes = ci->ci_keylen / 8 + 1;
1255 if (keybytes > MAX_KEYSIZE) {
1256 ret = EINVAL;
1257 goto bail;
1258 }
1259
1260 (void)memset(inbuf, 0, MAX_KEYSIZE);
1261 ret = copyin(ci->ci_key, inbuf, keybytes);
1262 if (ret)
1263 goto bail;
1264
1265 sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
1266 sc->sc_cdata.cf_mode = encblkno[i].v;
1267 sc->sc_cdata.cf_keylen = ci->ci_keylen;
1268 sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
1269 &sc->sc_cdata.cf_blocksize);
1270 if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
1271 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
1272 sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
1273 sc->sc_cdata.cf_priv = NULL;
1274 }
1275
1276 /*
1277 * The blocksize is supposed to be in bytes. Unfortunately originally
1278 * it was expressed in bits. For compatibility we maintain encblkno
1279 * and encblkno8.
1280 */
1281 sc->sc_cdata.cf_blocksize /= encblkno[i].d;
1282 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
1283 if (!sc->sc_cdata.cf_priv) {
1284 ret = EINVAL; /* XXX is this the right error? */
1285 goto bail;
1286 }
1287 kmem_free(inbuf, MAX_KEYSIZE);
1288
1289 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
1290
1291 sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
1292 sc->sc_data_used = false;
1293
1294 /* Attach the disk. */
1295 dk_attach(dksc);
1296 disk_attach(&dksc->sc_dkdev);
1297
1298 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1299
1300 /* Discover wedges on this disk. */
1301 dkwedge_discover(&dksc->sc_dkdev);
1302
1303 return 0;
1304
1305 bail:
1306 kmem_free(inbuf, MAX_KEYSIZE);
1307 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
1308 return ret;
1309 }
1310
/*
 * Unconfigure a cgd unit (CGDIOCCLR): delete wedges, drain and free
 * the buffer queue, close the underlying vnode, destroy the cipher
 * state, and detach the pseudo-disk.
 *
 * The caller (cgdioctl) holds the unit busy and has already verified
 * with DK_BUSY() that no partition is open.
 */
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Release the backing device, cipher state and buffers. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1337
1338 static int
1339 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
1340 {
1341 struct cgd_softc *sc;
1342 struct cgd_user *cgu;
1343 int unit, error;
1344
1345 unit = CGDUNIT(dev);
1346 cgu = (struct cgd_user *)data;
1347
1348 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1349 dev, unit, data, l));
1350
1351 /* XXX, we always return this units data, so if cgu_unit is
1352 * not -1, that field doesn't match the rest
1353 */
1354 if (cgu->cgu_unit == -1)
1355 cgu->cgu_unit = unit;
1356
1357 if (cgu->cgu_unit < 0)
1358 return EINVAL; /* XXX: should this be ENXIO? */
1359
1360 error = cgd_lock(false);
1361 if (error)
1362 return error;
1363
1364 sc = device_lookup_private(&cgd_cd, unit);
1365 if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
1366 cgu->cgu_dev = 0;
1367 cgu->cgu_alg[0] = '\0';
1368 cgu->cgu_blocksize = 0;
1369 cgu->cgu_mode = 0;
1370 cgu->cgu_keylen = 0;
1371 }
1372 else {
1373 mutex_enter(&sc->sc_lock);
1374 cgu->cgu_dev = sc->sc_tdev;
1375 strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
1376 sizeof(cgu->cgu_alg));
1377 cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
1378 cgu->cgu_mode = sc->sc_cdata.cf_mode;
1379 cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
1380 mutex_exit(&sc->sc_lock);
1381 }
1382
1383 cgd_unlock();
1384 return 0;
1385 }
1386
1387 static int
1388 cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
1389 struct lwp *l)
1390 {
1391 struct disk_geom *dg;
1392 int ret;
1393 char *tmppath;
1394 uint64_t psize;
1395 unsigned secsize;
1396 struct dk_softc *dksc = &sc->sc_dksc;
1397
1398 sc->sc_tvn = vp;
1399 sc->sc_tpath = NULL;
1400
1401 tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1402 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
1403 if (ret)
1404 goto bail;
1405 sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
1406 memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);
1407
1408 sc->sc_tdev = vp->v_rdev;
1409
1410 if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1411 goto bail;
1412
1413 if (psize == 0) {
1414 ret = ENODEV;
1415 goto bail;
1416 }
1417
1418 /*
1419 * XXX here we should probe the underlying device. If we
1420 * are accessing a partition of type RAW_PART, then
1421 * we should populate our initial geometry with the
1422 * geometry that we discover from the device.
1423 */
1424 dg = &dksc->sc_dkdev.dk_geom;
1425 memset(dg, 0, sizeof(*dg));
1426 dg->dg_secperunit = psize;
1427 dg->dg_secsize = secsize;
1428 dg->dg_ntracks = 1;
1429 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1430 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
1431
1432 bail:
1433 kmem_free(tmppath, MAXPATHLEN);
1434 if (ret && sc->sc_tpath)
1435 kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1436 return ret;
1437 }
1438
1439 /*
1440 * Our generic cipher entry point. This takes care of the
1441 * IV mode and passes off the work to the specific cipher.
1442 * We implement here the IV method ``encrypted block
1443 * number''.
1444 *
1445 * XXXrcd: for now we rely on our own crypto framework defined
1446 * in dev/cgd_crypto.c. This will change when we
1447 * get a generic kernel crypto framework.
1448 */
1449
1450 static void
1451 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1452 {
1453 int i;
1454
1455 /* Set up the blkno in blkno_buf, here we do not care much
1456 * about the final layout of the information as long as we
1457 * can guarantee that each sector will have a different IV
1458 * and that the endianness of the machine will not affect
1459 * the representation that we have chosen.
1460 *
1461 * We choose this representation, because it does not rely
1462 * on the size of buf (which is the blocksize of the cipher),
1463 * but allows daddr_t to grow without breaking existing
1464 * disks.
1465 *
1466 * Note that blkno2blkno_buf does not take a size as input,
1467 * and hence must be called on a pre-zeroed buffer of length
1468 * greater than or equal to sizeof(daddr_t).
1469 */
1470 for (i=0; i < sizeof(daddr_t); i++) {
1471 *sbuf++ = blkno & 0xff;
1472 blkno >>= 8;
1473 }
1474 }
1475
1476 static struct cpu_info *
1477 cgd_cpu(struct cgd_softc *sc)
1478 {
1479 struct cgd_worker *cw = sc->sc_worker;
1480 struct cpu_info *ci = NULL;
1481 u_int cidx, i;
1482
1483 if (cw->cw_busy == 0) {
1484 cw->cw_last = cpu_index(curcpu());
1485 return NULL;
1486 }
1487
1488 for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1489 if (cidx >= maxcpus)
1490 cidx = 0;
1491 ci = cpu_lookup(cidx);
1492 if (ci) {
1493 cw->cw_last = cidx;
1494 break;
1495 }
1496 }
1497
1498 return ci;
1499 }
1500
/*
 * Queue a crypto transfer on the worker's workqueue, targeting the
 * CPU chosen by cgd_cpu() (NULL lets the workqueue pick freely).
 * cw_busy counts transfers in flight; cgd_process() decrements it.
 */
static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	ci = cgd_cpu(sc);	/* CPU choice must happen under cw_lock */
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}
1514
/*
 * Workqueue callback: run the cipher over the transfer, then hand the
 * transfer back to the I/O path -- cgd_diskstart2() after encrypting,
 * cgd_iodone2() after decrypting -- and drop the in-flight count
 * taken by cgd_enqueue().
 */
static void
cgd_process(struct work *wk, void *arg)
{
	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
	struct cgd_softc *sc = cx->cx_sc;
	struct cgd_worker *cw = sc->sc_worker;

	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);

	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
		cgd_diskstart2(sc, cx);
	} else {
		cgd_iodone2(sc, cx);
	}

	/* Balance the cw_busy increment from cgd_enqueue(). */
	mutex_enter(&cw->cw_lock);
	if (cw->cw_busy > 0)
		cw->cw_busy--;
	mutex_exit(&cw->cw_lock);
}
1536
/*
 * Encrypt or decrypt `len' bytes from srcv into dstv, one disk sector
 * at a time.  Each sector's IV seed is its block number serialized
 * LSB-first into blkno_buf (the "encblkno" IV method); the cipher
 * derives the actual IV from it.
 *
 * `len' must be a multiple of the cipher blocksize, and the blocksize
 * must accommodate sizeof(daddr_t) and fit in CGD_MAXBLOCKSIZE (all
 * asserted below).  `dir' is CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
	size_t blocksize = sc->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	KASSERT(len % blocksize == 0);
	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERT(sizeof(daddr_t) <= blocksize);
	KASSERT(blocksize <= CGD_MAXBLOCKSIZE);

	for (; len > 0; len -= todo) {
		/* Process at most one sector per iteration. */
		todo = MIN(len, secsize);

		/* Build this sector's IV seed from its block number. */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		cipher(sc->sc_cdata.cf_priv, dst, src, todo, blkno_buf, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1570
1571 #ifdef DEBUG
/*
 * Debug helper: print `len' bytes of buf as hex, prefixed by `start'.
 * No trailing newline is emitted; callers compose the rest of the line.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
1582 #endif
1583
1584 static void
1585 cgd_selftest(void)
1586 {
1587 struct cgd_softc sc;
1588 void *buf;
1589
1590 for (size_t i = 0; i < __arraycount(selftests); i++) {
1591 const char *alg = selftests[i].alg;
1592 const uint8_t *key = selftests[i].key;
1593 int keylen = selftests[i].keylen;
1594 int txtlen = selftests[i].txtlen;
1595
1596 aprint_verbose("cgd: self-test %s-%d\n", alg, keylen);
1597
1598 memset(&sc, 0, sizeof(sc));
1599
1600 sc.sc_cfuncs = cryptfuncs_find(alg);
1601 if (sc.sc_cfuncs == NULL)
1602 panic("%s not implemented", alg);
1603
1604 sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1605 sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1606 sc.sc_cdata.cf_keylen = keylen;
1607
1608 sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
1609 key, &sc.sc_cdata.cf_blocksize);
1610 if (sc.sc_cdata.cf_priv == NULL)
1611 panic("cf_priv is NULL");
1612 if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1613 panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
1614
1615 sc.sc_cdata.cf_blocksize /= 8;
1616
1617 buf = kmem_alloc(txtlen, KM_SLEEP);
1618 memcpy(buf, selftests[i].ptxt, txtlen);
1619
1620 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1621 selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1622 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) {
1623 hexdump(printf, "was", buf, txtlen);
1624 hexdump(printf, "exp", selftests[i].ctxt, txtlen);
1625 panic("cgd %s encryption is broken [%zu]",
1626 selftests[i].alg, i);
1627 }
1628
1629 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1630 selftests[i].secsize, CGD_CIPHER_DECRYPT);
1631 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) {
1632 hexdump(printf, "was", buf, txtlen);
1633 hexdump(printf, "exp", selftests[i].ptxt, txtlen);
1634 panic("cgd %s decryption is broken [%zu]",
1635 selftests[i].alg, i);
1636 }
1637
1638 kmem_free(buf, txtlen);
1639 sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
1640 }
1641
1642 aprint_verbose("cgd: self-tests passed\n");
1643 }
1644
/*
 * Loadable module glue: cgd depends on the cipher modules and the
 * dk(4)/bufq support used at attach time.
 */
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Device major numbers, filled in by devsw_attach() in cgd_modcmd(). */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
1652
1653 static int
1654 cgd_modcmd(modcmd_t cmd, void *arg)
1655 {
1656 int error = 0;
1657
1658 switch (cmd) {
1659 case MODULE_CMD_INIT:
1660 #ifdef _MODULE
1661 mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1662 cv_init(&cgd_spawning_cv, "cgspwn");
1663
1664 error = config_cfdriver_attach(&cgd_cd);
1665 if (error)
1666 break;
1667
1668 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1669 if (error) {
1670 config_cfdriver_detach(&cgd_cd);
1671 aprint_error("%s: unable to register cfattach for"
1672 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1673 break;
1674 }
1675 /*
1676 * Attach the {b,c}devsw's
1677 */
1678 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1679 &cgd_cdevsw, &cgd_cmajor);
1680
1681 /*
1682 * If devsw_attach fails, remove from autoconf database
1683 */
1684 if (error) {
1685 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1686 config_cfdriver_detach(&cgd_cd);
1687 aprint_error("%s: unable to attach %s devsw, "
1688 "error %d", __func__, cgd_cd.cd_name, error);
1689 break;
1690 }
1691 #endif
1692 break;
1693
1694 case MODULE_CMD_FINI:
1695 #ifdef _MODULE
1696 /*
1697 * Remove {b,c}devsw's
1698 */
1699 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1700
1701 /*
1702 * Now remove device from autoconf database
1703 */
1704 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1705 if (error) {
1706 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1707 &cgd_cdevsw, &cgd_cmajor);
1708 aprint_error("%s: failed to detach %s cfattach, "
1709 "error %d\n", __func__, cgd_cd.cd_name, error);
1710 break;
1711 }
1712 error = config_cfdriver_detach(&cgd_cd);
1713 if (error) {
1714 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1715 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1716 &cgd_cdevsw, &cgd_cmajor);
1717 aprint_error("%s: failed to detach %s cfdriver, "
1718 "error %d\n", __func__, cgd_cd.cd_name, error);
1719 break;
1720 }
1721
1722 cv_destroy(&cgd_spawning_cv);
1723 mutex_destroy(&cgd_spawning_mtx);
1724 #endif
1725 break;
1726
1727 case MODULE_CMD_STAT:
1728 error = ENOTTY;
1729 break;
1730 default:
1731 error = ENOTTY;
1732 break;
1733 }
1734
1735 return error;
1736 }
1737