cgd.c revision 1.126 1 /* $NetBSD: cgd.c,v 1.126 2020/06/04 19:54:53 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.126 2020/06/04 19:54:53 riastradh Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/kmem.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 #include <sys/workqueue.h>
55 #include <sys/cpu.h>
56
57 #include <dev/dkvar.h>
58 #include <dev/cgdvar.h>
59
60 #include <miscfs/specfs/specdev.h> /* for v_rdev */
61
62 #include "ioconf.h"
63
/*
 * One encryption self-test case: a cipher configuration plus a known
 * plaintext/ciphertext pair to verify against.
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int blocksize;		/* cipher block size, number of bytes */
	int secsize;		/* sector size used for the test, bytes */
	daddr_t blkno;		/* block number fed to the IV generator */
	int keylen;		/* number of bits */
	int txtlen;		/* length of ptxt/ctxt, number of bytes */
	const uint8_t *key;	/* key material (keylen bits) */
	const uint8_t *ptxt;	/* plaintext input */
	const uint8_t *ctxt;	/* expected ciphertext */
};
75
76 /* Entry Point Functions */
77
78 static dev_type_open(cgdopen);
79 static dev_type_close(cgdclose);
80 static dev_type_read(cgdread);
81 static dev_type_write(cgdwrite);
82 static dev_type_ioctl(cgdioctl);
83 static dev_type_strategy(cgdstrategy);
84 static dev_type_dump(cgddump);
85 static dev_type_size(cgdsize);
86
/* Block-device switch: cgd presents itself as an MP-safe disk. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
97
/* Character-device switch: raw I/O goes through physio (see cgdread/cgdwrite). */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
112
/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 * (Plaintext for the AES-XTS-256 self-test below.)
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
126
127 static const uint8_t selftest_aes_xts_256_ctxt[512] = {
128 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
129 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
130 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
131 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
132 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
133 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
134 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
135 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
136 };
137
/* 256-bit (32-byte) AES-XTS key; the trailing NUL is not key material. */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
145
/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

/* 512-bit (64-byte) AES-XTS key; the trailing NUL is not key material. */
static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
182
/*
 * AES-CBC self-test vectors.  One 32-byte key buffer serves both the
 * 128-bit and 256-bit tests (presumably the cipher reads only the
 * first keylen bits -- verify against the aes-cbc key handling).
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = { /* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = { /* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
233
/* 3DES-CBC self-test vectors; 192-bit key (168 key bits + parity). */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
261
/* Blowfish-CBC self-test vectors; 448-bit (56-byte) key. */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
293
/*
 * Table of self-test cases.  Each entry pairs a cipher configuration
 * with one of the known-answer vectors above (the consumer of this
 * table is not visible in this chunk -- presumably a selftest routine
 * run at attach/modload time).
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192, /* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
};
362
363 static int cgd_match(device_t, cfdata_t, void *);
364 static void cgd_attach(device_t, device_t, void *);
365 static int cgd_detach(device_t, int);
366 static struct cgd_softc *cgd_spawn(int);
367 static struct cgd_worker *cgd_create_one_worker(void);
368 static void cgd_destroy_one_worker(struct cgd_worker *);
369 static struct cgd_worker *cgd_create_worker(void);
370 static void cgd_destroy_worker(struct cgd_worker *);
371 static int cgd_destroy(device_t);
372
373 /* Internal Functions */
374
375 static int cgd_diskstart(device_t, struct buf *);
376 static void cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
377 static void cgdiodone(struct buf *);
378 static void cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
379 static void cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
380 static void cgd_process(struct work *, void *);
381 static int cgd_dumpblocks(device_t, void *, daddr_t, int);
382
383 static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
384 static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
385 static int cgd_ioctl_get(dev_t, void *, struct lwp *);
386 static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
387 struct lwp *);
388 static void cgd_cipher(struct cgd_softc *, void *, void *,
389 size_t, daddr_t, size_t, int);
390
/* Glue for the common dk(9) disk framework. */
static const struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};
401
402 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
403 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
404
405 /* DIAGNOSTIC and DEBUG definitions */
406
407 #if defined(CGDDEBUG) && !defined(DEBUG)
408 #define DEBUG
409 #endif
410
411 #ifdef DEBUG
412 int cgddebug = 0;
413
414 #define CGDB_FOLLOW 0x1
415 #define CGDB_IO 0x2
416 #define CGDB_CRYPTO 0x4
417
418 #define IFDEBUG(x,y) if (cgddebug & (x)) y
419 #define DPRINTF(x,y) IFDEBUG(x, printf y)
420 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
421
422 static void hexprint(const char *, void *, int);
423
424 #else
425 #define IFDEBUG(x,y)
426 #define DPRINTF(x,y)
427 #define DPRINTF_FOLLOW(y)
428 #endif
429
/* Global variables */

static kmutex_t cgd_spawning_mtx;	/* protects cgd_spawning */
static kcondvar_t cgd_spawning_cv;	/* signalled when cgd_spawning clears */
static bool cgd_spawning;		/* pseudo-lock flag; see cgd_lock() */
static struct cgd_worker *cgd_worker;	/* crypto worker shared by all units */
static u_int cgd_refcnt; /* number of users of cgd_worker */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
441
442 /* The code */
443
/*
 * Take the global "spawning" pseudo-lock, which serializes creation
 * and destruction of cgd instances.  If `intr' is true the wait is
 * interruptible; on a signal the lock is NOT taken and the error
 * from cv_wait_sig() is returned.
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	/* Only claim the lock if the final wait completed cleanly. */
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
461
/* Release the spawning pseudo-lock and wake all waiters. */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
470
/* Map a device number to its softc; NULL if the unit is not attached. */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}
476
/* cgd is a pseudo-device: every config match succeeds. */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
483
484 static void
485 cgd_attach(device_t parent, device_t self, void *aux)
486 {
487 struct cgd_softc *sc = device_private(self);
488
489 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
490 cv_init(&sc->sc_cv, "cgdcv");
491 dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
492 disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
493
494 if (!pmf_device_register(self, NULL, NULL))
495 aprint_error_dev(self,
496 "unable to register power management hooks\n");
497 }
498
499
500 static int
501 cgd_detach(device_t self, int flags)
502 {
503 int ret;
504 const int pmask = 1 << RAW_PART;
505 struct cgd_softc *sc = device_private(self);
506 struct dk_softc *dksc = &sc->sc_dksc;
507
508 if (DK_BUSY(dksc, pmask))
509 return EBUSY;
510
511 if (DK_ATTACHED(dksc) &&
512 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
513 return ret;
514
515 disk_destroy(&dksc->sc_dkdev);
516 cv_destroy(&sc->sc_cv);
517 mutex_destroy(&sc->sc_lock);
518
519 return 0;
520 }
521
522 void
523 cgdattach(int num)
524 {
525 #ifndef _MODULE
526 int error;
527
528 mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
529 cv_init(&cgd_spawning_cv, "cgspwn");
530
531 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
532 if (error != 0)
533 aprint_error("%s: unable to register cfattach\n",
534 cgd_cd.cd_name);
535 #endif
536 }
537
538 static struct cgd_softc *
539 cgd_spawn(int unit)
540 {
541 cfdata_t cf;
542 struct cgd_worker *cw;
543 struct cgd_softc *sc;
544
545 cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
546 cf->cf_name = cgd_cd.cd_name;
547 cf->cf_atname = cgd_cd.cd_name;
548 cf->cf_unit = unit;
549 cf->cf_fstate = FSTATE_STAR;
550
551 cw = cgd_create_one_worker();
552 if (cw == NULL) {
553 kmem_free(cf, sizeof(*cf));
554 return NULL;
555 }
556
557 sc = device_private(config_attach_pseudo(cf));
558 if (sc == NULL) {
559 cgd_destroy_one_worker(cw);
560 return NULL;
561 }
562
563 sc->sc_worker = cw;
564
565 return sc;
566 }
567
/*
 * Undo cgd_spawn(): detach the pseudo-device, drop our reference on
 * the shared worker, and free the cfdata allocated at spawn time.
 * Caller holds the spawning pseudo-lock.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Fetch the cfdata before config_detach() invalidates `dev'. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
586
/*
 * Mark the unit busy, sleeping until any current holder releases it.
 * Used to serialize configuration-level operations (see cgdioctl()).
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
597
/* Release the busy marker taken by cgd_busy() and wake all waiters. */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
607
608 static struct cgd_worker *
609 cgd_create_one_worker(void)
610 {
611 KASSERT(cgd_spawning);
612
613 if (cgd_refcnt++ == 0) {
614 KASSERT(cgd_worker == NULL);
615 cgd_worker = cgd_create_worker();
616 }
617
618 KASSERT(cgd_worker != NULL);
619 return cgd_worker;
620 }
621
622 static void
623 cgd_destroy_one_worker(struct cgd_worker *cw)
624 {
625 KASSERT(cgd_spawning);
626 KASSERT(cw == cgd_worker);
627
628 if (--cgd_refcnt == 0) {
629 cgd_destroy_worker(cgd_worker);
630 cgd_worker = NULL;
631 }
632 }
633
634 static struct cgd_worker *
635 cgd_create_worker(void)
636 {
637 struct cgd_worker *cw;
638 struct workqueue *wq;
639 struct pool *cp;
640 int error;
641
642 cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
643 cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);
644
645 error = workqueue_create(&wq, "cgd", cgd_process, NULL,
646 PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
647 if (error) {
648 kmem_free(cp, sizeof(struct pool));
649 kmem_free(cw, sizeof(struct cgd_worker));
650 return NULL;
651 }
652
653 cw->cw_cpool = cp;
654 cw->cw_wq = wq;
655 pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
656 0, 0, "cgdcpl", NULL, IPL_BIO);
657
658 mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);
659
660 return cw;
661 }
662
/*
 * Tear down the crypto worker created by cgd_create_worker().
 * Called only when the last reference is dropped, so no I/O can be
 * in flight.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	/* NULL checks guard against a partially constructed worker. */
	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
677
678 static int
679 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
680 {
681 struct cgd_softc *sc;
682 int error;
683
684 DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
685
686 error = cgd_lock(true);
687 if (error)
688 return error;
689 sc = getcgd_softc(dev);
690 if (sc == NULL)
691 sc = cgd_spawn(CGDUNIT(dev));
692 cgd_unlock();
693 if (sc == NULL)
694 return ENXIO;
695
696 return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
697 }
698
/*
 * Close entry point.  If the last close leaves the unit unconfigured
 * (no cipher attached), the pseudo-device instance spawned by
 * cgdopen() is destroyed as well.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	/* Non-interruptible: close must not fail with EINTR. */
	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Unconfigured after the close: reap the pseudo-device too. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
734
/*
 * Block I/O entry point: sanity-check the buffer and feed the request
 * to the dk(9) queue, which eventually calls cgd_diskstart().
 */
static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *sc = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject buffers that are not 32-bit aligned.  Note this check
	 * applies to reads as well as writes.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&sc->sc_dksc, bp);
	return;

bail:
	/* Fail the request without touching the queue. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}
759
760 static int
761 cgdsize(dev_t dev)
762 {
763 struct cgd_softc *sc = getcgd_softc(dev);
764
765 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
766 if (!sc)
767 return -1;
768 return dk_size(&sc->sc_dksc, dev);
769 }
770
771 /*
772 * cgd_{get,put}data are functions that deal with getting a buffer
773 * for the new encrypted data.
774 * We can no longer have a buffer per device, we need a buffer per
775 * work queue...
776 */
777
778 static void *
779 cgd_getdata(struct cgd_softc *sc, unsigned long size)
780 {
781 void *data = NULL;
782
783 mutex_enter(&sc->sc_lock);
784 if (!sc->sc_data_used) {
785 sc->sc_data_used = true;
786 data = sc->sc_data;
787 }
788 mutex_exit(&sc->sc_lock);
789
790 if (data)
791 return data;
792
793 return kmem_intr_alloc(size, KM_NOSLEEP);
794 }
795
796 static void
797 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
798 {
799
800 if (data == sc->sc_data) {
801 mutex_enter(&sc->sc_lock);
802 sc->sc_data_used = false;
803 mutex_exit(&sc->sc_lock);
804 } else
805 kmem_intr_free(data, size);
806 }
807
/*
 * dk(9) start routine: take one request and run it through the
 * crypto pipeline.  Writes are first encrypted (asynchronously, via
 * the worker) into a scratch buffer; reads go straight down to the
 * backing device and are decrypted in cgdiodone().  Returns EAGAIN
 * (leaving the buf queued) when a resource is momentarily
 * unavailable.
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct cgd_xfer *cx;
	struct buf *nbp;
	void * newaddr;
	daddr_t bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	/* Describe the transfer; src == dst means in place (read path). */
	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the worker; cgd_diskstart2() runs afterwards. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	cgd_diskstart2(sc, cx);
	return 0;
}
868
/*
 * Second half of cgd_diskstart(): fill in the shadow buf and push it
 * down to the backing vnode.  For writes this runs after the data has
 * been encrypted into cx_dstv.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Convert the sector number to DEV_BSIZE units for the backing dev. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must be counted in the vnode's pending-output tally. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
899
/*
 * Completion callback for the shadow buf, invoked when the backing
 * device finishes the transfer.  Reads still need decryption, which
 * is handed to the worker; everything else completes via
 * cgd_iodone2().
 */
static void
cgdiodone(struct buf *nbp)
{
	struct cgd_xfer *cx = nbp->b_private;
	struct buf *obp = cx->cx_obp;
	struct cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	/* Propagate any I/O error to the original request. */
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Convert DEV_BSIZE units back into this disk's sectors. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp = obp;
		cx->cx_nbp = nbp;
		cx->cx_dstv = obp->b_data;
		cx->cx_srcv = obp->b_data;
		cx->cx_len = obp->b_bcount;
		cx->cx_blkno = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir = CGD_CIPHER_DECRYPT;

		/* Decrypt on the worker; cgd_iodone2() finishes up. */
		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
948
/*
 * Final completion: release the transfer descriptor, return the
 * scratch buffer (if the write path allocated one), free the shadow
 * buf, and complete the original request, then kick the queue for
 * more work.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
973
/*
 * dk(9) dumpblocks routine (kernel crash-dump path): encrypt `nblk'
 * sectors starting at `blkno' from `va' into a scratch buffer and
 * hand them to the backing device's dump entry point.  Uses only
 * non-sleeping allocation, as required in the dump path.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device. */
	return error;
}
1019
1020 /* XXX: we should probably put these into dksubr.c, mostly */
1021 static int
1022 cgdread(dev_t dev, struct uio *uio, int flags)
1023 {
1024 struct cgd_softc *sc;
1025 struct dk_softc *dksc;
1026
1027 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
1028 (unsigned long long)dev, uio, flags));
1029 sc = getcgd_softc(dev);
1030 if (sc == NULL)
1031 return ENXIO;
1032 dksc = &sc->sc_dksc;
1033 if (!DK_ATTACHED(dksc))
1034 return ENXIO;
1035 return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
1036 }
1037
1038 /* XXX: we should probably put these into dksubr.c, mostly */
1039 static int
1040 cgdwrite(dev_t dev, struct uio *uio, int flags)
1041 {
1042 struct cgd_softc *sc;
1043 struct dk_softc *dksc;
1044
1045 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
1046 sc = getcgd_softc(dev);
1047 if (sc == NULL)
1048 return ENXIO;
1049 dksc = &sc->sc_dksc;
1050 if (!DK_ATTACHED(dksc))
1051 return ENXIO;
1052 return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
1053 }
1054
/*
 * Ioctl entry point.
 *
 * The first switch decides whether a softc is needed at all:
 * CGDIOCGET is answered without one (it may query unconfigured
 * units), and the configuration ioctls additionally require write
 * permission.  The second switch does the real dispatch.
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		/* Configure a cipher on a not-yet-configured unit. */
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		/* Tear the cipher down, provided no partition is open. */
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment. */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary. */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/*
		 * Handled before the softc lookup above; placing the
		 * case after `default' keeps it out of the default path.
		 */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
1164
1165 static int
1166 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1167 {
1168 struct cgd_softc *sc;
1169
1170 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1171 dev, blkno, va, (unsigned long)size));
1172 sc = getcgd_softc(dev);
1173 if (sc == NULL)
1174 return ENXIO;
1175 return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1176 }
1177
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * Table of supported IV methods, as named in the CGDIOCSET ioctl.
 * "encblkno" is a historical alias for "encblkno8".
 */
static const struct {
	const char *n;	/* user-visible IV-method name */
	int v;		/* corresponding CGD_CIPHER_CBC_* mode value */
	int d;		/* divisor applied to cf_blocksize after cf_init();
			 * compat for blocksize historically given in bits
			 * (see cgd_ioctl_set()) */
} encblkno[] = {
	{ "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1193
1194 /* ARGSUSED */
1195 static int
1196 cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
1197 {
1198 struct cgd_ioctl *ci = data;
1199 struct vnode *vp;
1200 int ret;
1201 size_t i;
1202 size_t keybytes; /* key length in bytes */
1203 const char *cp;
1204 struct pathbuf *pb;
1205 char *inbuf;
1206 struct dk_softc *dksc = &sc->sc_dksc;
1207
1208 cp = ci->ci_disk;
1209
1210 ret = pathbuf_copyin(ci->ci_disk, &pb);
1211 if (ret != 0) {
1212 return ret;
1213 }
1214 ret = vn_bdev_openpath(pb, &vp, l);
1215 pathbuf_destroy(pb);
1216 if (ret != 0) {
1217 return ret;
1218 }
1219
1220 inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
1221
1222 if ((ret = cgdinit(sc, cp, vp, l)) != 0)
1223 goto bail;
1224
1225 (void)memset(inbuf, 0, MAX_KEYSIZE);
1226 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
1227 if (ret)
1228 goto bail;
1229 sc->sc_cfuncs = cryptfuncs_find(inbuf);
1230 if (!sc->sc_cfuncs) {
1231 ret = EINVAL;
1232 goto bail;
1233 }
1234
1235 (void)memset(inbuf, 0, MAX_KEYSIZE);
1236 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
1237 if (ret)
1238 goto bail;
1239
1240 for (i = 0; i < __arraycount(encblkno); i++)
1241 if (strcmp(encblkno[i].n, inbuf) == 0)
1242 break;
1243
1244 if (i == __arraycount(encblkno)) {
1245 ret = EINVAL;
1246 goto bail;
1247 }
1248
1249 keybytes = ci->ci_keylen / 8 + 1;
1250 if (keybytes > MAX_KEYSIZE) {
1251 ret = EINVAL;
1252 goto bail;
1253 }
1254
1255 (void)memset(inbuf, 0, MAX_KEYSIZE);
1256 ret = copyin(ci->ci_key, inbuf, keybytes);
1257 if (ret)
1258 goto bail;
1259
1260 sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
1261 sc->sc_cdata.cf_mode = encblkno[i].v;
1262 sc->sc_cdata.cf_keylen = ci->ci_keylen;
1263 sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
1264 &sc->sc_cdata.cf_blocksize);
1265 if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
1266 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
1267 sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
1268 sc->sc_cdata.cf_priv = NULL;
1269 }
1270
1271 /*
1272 * The blocksize is supposed to be in bytes. Unfortunately originally
1273 * it was expressed in bits. For compatibility we maintain encblkno
1274 * and encblkno8.
1275 */
1276 sc->sc_cdata.cf_blocksize /= encblkno[i].d;
1277 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
1278 if (!sc->sc_cdata.cf_priv) {
1279 ret = EINVAL; /* XXX is this the right error? */
1280 goto bail;
1281 }
1282 kmem_free(inbuf, MAX_KEYSIZE);
1283
1284 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
1285
1286 sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
1287 sc->sc_data_used = false;
1288
1289 /* Attach the disk. */
1290 dk_attach(dksc);
1291 disk_attach(&dksc->sc_dkdev);
1292
1293 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1294
1295 /* Discover wedges on this disk. */
1296 dkwedge_discover(&dksc->sc_dkdev);
1297
1298 return 0;
1299
1300 bail:
1301 kmem_free(inbuf, MAX_KEYSIZE);
1302 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
1303 return ret;
1304 }
1305
/*
 * cgd_ioctl_clr: handle CGDIOCCLR — unconfigure this cgd unit.
 *
 * Tears down in the reverse order of cgd_ioctl_set(): wedges first,
 * then queued I/O, then the backing vnode, cipher context, path copy
 * and data buffer, and finally the disk attachment itself.  The caller
 * (cgdioctl) has already verified that no partition is busy and holds
 * the unit busy reference.
 */
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Release the backing store and the cipher state. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1332
1333 static int
1334 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
1335 {
1336 struct cgd_softc *sc;
1337 struct cgd_user *cgu;
1338 int unit, error;
1339
1340 unit = CGDUNIT(dev);
1341 cgu = (struct cgd_user *)data;
1342
1343 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1344 dev, unit, data, l));
1345
1346 /* XXX, we always return this units data, so if cgu_unit is
1347 * not -1, that field doesn't match the rest
1348 */
1349 if (cgu->cgu_unit == -1)
1350 cgu->cgu_unit = unit;
1351
1352 if (cgu->cgu_unit < 0)
1353 return EINVAL; /* XXX: should this be ENXIO? */
1354
1355 error = cgd_lock(false);
1356 if (error)
1357 return error;
1358
1359 sc = device_lookup_private(&cgd_cd, unit);
1360 if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
1361 cgu->cgu_dev = 0;
1362 cgu->cgu_alg[0] = '\0';
1363 cgu->cgu_blocksize = 0;
1364 cgu->cgu_mode = 0;
1365 cgu->cgu_keylen = 0;
1366 }
1367 else {
1368 mutex_enter(&sc->sc_lock);
1369 cgu->cgu_dev = sc->sc_tdev;
1370 strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
1371 sizeof(cgu->cgu_alg));
1372 cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
1373 cgu->cgu_mode = sc->sc_cdata.cf_mode;
1374 cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
1375 mutex_exit(&sc->sc_lock);
1376 }
1377
1378 cgd_unlock();
1379 return 0;
1380 }
1381
/*
 * cgdinit: record the target device (vnode, path, dev_t) in the softc
 * and set up an initial fake geometry from the device's size.
 *
 * On success, sc_tpath is a kmem-allocated copy of the user path that
 * the caller must eventually free (cgd_ioctl_clr does).  On failure,
 * sc_tpath is freed here; note it is left dangling, not NULLed.
 * The success path deliberately falls through the "bail:" label, which
 * only frees the temporary path buffer when ret == 0.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Copy the target path in and keep a right-sized private copy. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	/* Fake 1MB-per-track geometry; cylinders follow from that. */
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
1433
1434 /*
1435 * Our generic cipher entry point. This takes care of the
1436 * IV mode and passes off the work to the specific cipher.
1437 * We implement here the IV method ``encrypted block
1438 * number''.
1439 *
1440 * XXXrcd: for now we rely on our own crypto framework defined
1441 * in dev/cgd_crypto.c. This will change when we
1442 * get a generic kernel crypto framework.
1443 */
1444
1445 static void
1446 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1447 {
1448 int i;
1449
1450 /* Set up the blkno in blkno_buf, here we do not care much
1451 * about the final layout of the information as long as we
1452 * can guarantee that each sector will have a different IV
1453 * and that the endianness of the machine will not affect
1454 * the representation that we have chosen.
1455 *
1456 * We choose this representation, because it does not rely
1457 * on the size of buf (which is the blocksize of the cipher),
1458 * but allows daddr_t to grow without breaking existing
1459 * disks.
1460 *
1461 * Note that blkno2blkno_buf does not take a size as input,
1462 * and hence must be called on a pre-zeroed buffer of length
1463 * greater than or equal to sizeof(daddr_t).
1464 */
1465 for (i=0; i < sizeof(daddr_t); i++) {
1466 *sbuf++ = blkno & 0xff;
1467 blkno >>= 8;
1468 }
1469 }
1470
1471 static struct cpu_info *
1472 cgd_cpu(struct cgd_softc *sc)
1473 {
1474 struct cgd_worker *cw = sc->sc_worker;
1475 struct cpu_info *ci = NULL;
1476 u_int cidx, i;
1477
1478 if (cw->cw_busy == 0) {
1479 cw->cw_last = cpu_index(curcpu());
1480 return NULL;
1481 }
1482
1483 for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1484 if (cidx >= maxcpus)
1485 cidx = 0;
1486 ci = cpu_lookup(cidx);
1487 if (ci) {
1488 cw->cw_last = cidx;
1489 break;
1490 }
1491 }
1492
1493 return ci;
1494 }
1495
1496 static void
1497 cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
1498 {
1499 struct cgd_worker *cw = sc->sc_worker;
1500 struct cpu_info *ci;
1501
1502 mutex_enter(&cw->cw_lock);
1503 ci = cgd_cpu(sc);
1504 cw->cw_busy++;
1505 mutex_exit(&cw->cw_lock);
1506
1507 workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
1508 }
1509
1510 static void
1511 cgd_process(struct work *wk, void *arg)
1512 {
1513 struct cgd_xfer *cx = (struct cgd_xfer *)wk;
1514 struct cgd_softc *sc = cx->cx_sc;
1515 struct cgd_worker *cw = sc->sc_worker;
1516
1517 cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
1518 cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
1519
1520 if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
1521 cgd_diskstart2(sc, cx);
1522 } else {
1523 cgd_iodone2(sc, cx);
1524 }
1525
1526 mutex_enter(&cw->cw_lock);
1527 if (cw->cw_busy > 0)
1528 cw->cw_busy--;
1529 mutex_exit(&cw->cw_lock);
1530 }
1531
/*
 * cgd_cipher: en/decrypt len bytes from srcv into dstv (may alias),
 * one disk sector (secsize bytes) at a time, deriving a fresh IV for
 * each sector from its block number ("encrypted blkno" IV method).
 *
 * dir is CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT.  len must be a
 * multiple of the cipher blocksize; the per-sector IV scheme requires
 * sizeof(daddr_t) <= blocksize <= CGD_MAXBLOCKSIZE (all asserted).
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher_prep *ciprep = sc->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = sc->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	KASSERTMSG(len % blocksize == 0,
	    "cgd_cipher: len %% blocksize != 0");

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERTMSG(sizeof(daddr_t) <= blocksize,
	    "cgd_cipher: sizeof(daddr_t) > blocksize");

	KASSERTMSG(blocksize <= CGD_MAXBLOCKSIZE,
	    "cgd_cipher: blocksize > CGD_MAXBLOCKSIZE");

	/* Single-segment uios; the iov bases/lengths change per sector. */
	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	for (; len > 0; len -= todo) {
		/* Process at most one sector per iteration. */
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len = todo;
		srciov[0].iov_len = todo;

		/* Zero-pad the block number to a full cipher block. */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV. All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1594
1595 #ifdef DEBUG
/*
 * Debug helper: print a label followed by len bytes of buf in hex.
 * No trailing newline is emitted.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
1606 #endif
1607
1608 static void
1609 selftest(void)
1610 {
1611 struct cgd_softc sc;
1612 void *buf;
1613
1614 printf("running cgd selftest ");
1615
1616 for (size_t i = 0; i < __arraycount(selftests); i++) {
1617 const char *alg = selftests[i].alg;
1618 const uint8_t *key = selftests[i].key;
1619 int keylen = selftests[i].keylen;
1620 int txtlen = selftests[i].txtlen;
1621
1622 printf("%s-%d ", alg, keylen);
1623
1624 memset(&sc, 0, sizeof(sc));
1625
1626 sc.sc_cfuncs = cryptfuncs_find(alg);
1627 if (sc.sc_cfuncs == NULL)
1628 panic("%s not implemented", alg);
1629
1630 sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1631 sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1632 sc.sc_cdata.cf_keylen = keylen;
1633
1634 sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
1635 key, &sc.sc_cdata.cf_blocksize);
1636 if (sc.sc_cdata.cf_priv == NULL)
1637 panic("cf_priv is NULL");
1638 if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1639 panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
1640
1641 sc.sc_cdata.cf_blocksize /= 8;
1642
1643 buf = kmem_alloc(txtlen, KM_SLEEP);
1644 memcpy(buf, selftests[i].ptxt, txtlen);
1645
1646 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1647 selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1648 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1649 panic("encryption is broken");
1650
1651 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1652 selftests[i].secsize, CGD_CIPHER_DECRYPT);
1653 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1654 panic("decryption is broken");
1655
1656 kmem_free(buf, txtlen);
1657 sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
1658 }
1659
1660 printf("done\n");
1661 }
1662
/* Module declaration: cgd depends on the listed cipher/disk modules. */
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
/* When built as a module, declare the autoconf driver ourselves. */
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Device major numbers; -1 lets devsw_attach() pick them. */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
1670
1671 static int
1672 cgd_modcmd(modcmd_t cmd, void *arg)
1673 {
1674 int error = 0;
1675
1676 switch (cmd) {
1677 case MODULE_CMD_INIT:
1678 selftest();
1679 #ifdef _MODULE
1680 mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1681 cv_init(&cgd_spawning_cv, "cgspwn");
1682
1683 error = config_cfdriver_attach(&cgd_cd);
1684 if (error)
1685 break;
1686
1687 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1688 if (error) {
1689 config_cfdriver_detach(&cgd_cd);
1690 aprint_error("%s: unable to register cfattach for"
1691 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1692 break;
1693 }
1694 /*
1695 * Attach the {b,c}devsw's
1696 */
1697 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1698 &cgd_cdevsw, &cgd_cmajor);
1699
1700 /*
1701 * If devsw_attach fails, remove from autoconf database
1702 */
1703 if (error) {
1704 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1705 config_cfdriver_detach(&cgd_cd);
1706 aprint_error("%s: unable to attach %s devsw, "
1707 "error %d", __func__, cgd_cd.cd_name, error);
1708 break;
1709 }
1710 #endif
1711 break;
1712
1713 case MODULE_CMD_FINI:
1714 #ifdef _MODULE
1715 /*
1716 * Remove {b,c}devsw's
1717 */
1718 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1719
1720 /*
1721 * Now remove device from autoconf database
1722 */
1723 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1724 if (error) {
1725 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1726 &cgd_cdevsw, &cgd_cmajor);
1727 aprint_error("%s: failed to detach %s cfattach, "
1728 "error %d\n", __func__, cgd_cd.cd_name, error);
1729 break;
1730 }
1731 error = config_cfdriver_detach(&cgd_cd);
1732 if (error) {
1733 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1734 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1735 &cgd_cdevsw, &cgd_cmajor);
1736 aprint_error("%s: failed to detach %s cfdriver, "
1737 "error %d\n", __func__, cgd_cd.cd_name, error);
1738 break;
1739 }
1740
1741 cv_destroy(&cgd_spawning_cv);
1742 mutex_destroy(&cgd_spawning_mtx);
1743 #endif
1744 break;
1745
1746 case MODULE_CMD_STAT:
1747 error = ENOTTY;
1748 break;
1749 default:
1750 error = ENOTTY;
1751 break;
1752 }
1753
1754 return error;
1755 }
1756