1 1.116.10.5 martin /* $NetBSD: cgd.c,v 1.116.10.5 2021/12/30 12:38:22 martin Exp $ */
2 1.1 elric
3 1.1 elric /*-
4 1.1 elric * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 1.1 elric * All rights reserved.
6 1.1 elric *
7 1.1 elric * This code is derived from software contributed to The NetBSD Foundation
8 1.1 elric * by Roland C. Dowdeswell.
9 1.1 elric *
10 1.1 elric * Redistribution and use in source and binary forms, with or without
11 1.1 elric * modification, are permitted provided that the following conditions
12 1.1 elric * are met:
13 1.1 elric * 1. Redistributions of source code must retain the above copyright
14 1.1 elric * notice, this list of conditions and the following disclaimer.
15 1.1 elric * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 elric * notice, this list of conditions and the following disclaimer in the
17 1.1 elric * documentation and/or other materials provided with the distribution.
18 1.1 elric *
19 1.1 elric * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 elric * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 elric * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 elric * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 elric * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 elric * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 elric * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 elric * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 elric * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 elric * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 elric * POSSIBILITY OF SUCH DAMAGE.
30 1.1 elric */
31 1.1 elric
32 1.1 elric #include <sys/cdefs.h>
33 1.116.10.5 martin __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.116.10.5 2021/12/30 12:38:22 martin Exp $");
34 1.1 elric
35 1.1 elric #include <sys/types.h>
36 1.1 elric #include <sys/param.h>
37 1.1 elric #include <sys/systm.h>
38 1.1 elric #include <sys/proc.h>
39 1.1 elric #include <sys/errno.h>
40 1.1 elric #include <sys/buf.h>
41 1.21 yamt #include <sys/bufq.h>
42 1.116.10.3 martin #include <sys/kmem.h>
43 1.74 jruoho #include <sys/module.h>
44 1.1 elric #include <sys/pool.h>
45 1.1 elric #include <sys/ioctl.h>
46 1.1 elric #include <sys/device.h>
47 1.1 elric #include <sys/disk.h>
48 1.1 elric #include <sys/disklabel.h>
49 1.1 elric #include <sys/fcntl.h>
50 1.71 dholland #include <sys/namei.h> /* for pathbuf */
51 1.1 elric #include <sys/vnode.h>
52 1.1 elric #include <sys/conf.h>
53 1.62 christos #include <sys/syslog.h>
54 1.116.10.3 martin #include <sys/workqueue.h>
55 1.116.10.3 martin #include <sys/cpu.h>
56 1.1 elric
57 1.1 elric #include <dev/dkvar.h>
58 1.1 elric #include <dev/cgdvar.h>
59 1.1 elric
60 1.88 hannken #include <miscfs/specfs/specdev.h> /* for v_rdev */
61 1.88 hannken
62 1.102 christos #include "ioconf.h"
63 1.102 christos
64 1.112 alnsn struct selftest_params {
65 1.112 alnsn const char *alg;
66 1.112 alnsn int blocksize; /* number of bytes */
67 1.112 alnsn int secsize;
68 1.112 alnsn daddr_t blkno;
69 1.112 alnsn int keylen; /* number of bits */
70 1.112 alnsn int txtlen; /* number of bytes */
71 1.112 alnsn const uint8_t *key;
72 1.112 alnsn const uint8_t *ptxt;
73 1.112 alnsn const uint8_t *ctxt;
74 1.112 alnsn };
75 1.112 alnsn
76 1.1 elric /* Entry Point Functions */
77 1.1 elric
78 1.18 thorpej static dev_type_open(cgdopen);
79 1.18 thorpej static dev_type_close(cgdclose);
80 1.18 thorpej static dev_type_read(cgdread);
81 1.18 thorpej static dev_type_write(cgdwrite);
82 1.18 thorpej static dev_type_ioctl(cgdioctl);
83 1.18 thorpej static dev_type_strategy(cgdstrategy);
84 1.18 thorpej static dev_type_dump(cgddump);
85 1.18 thorpej static dev_type_size(cgdsize);
86 1.1 elric
87 1.1 elric const struct bdevsw cgd_bdevsw = {
88 1.84 dholland .d_open = cgdopen,
89 1.84 dholland .d_close = cgdclose,
90 1.84 dholland .d_strategy = cgdstrategy,
91 1.84 dholland .d_ioctl = cgdioctl,
92 1.84 dholland .d_dump = cgddump,
93 1.84 dholland .d_psize = cgdsize,
94 1.89 dholland .d_discard = nodiscard,
95 1.116.10.3 martin .d_flag = D_DISK | D_MPSAFE
96 1.1 elric };
97 1.1 elric
98 1.1 elric const struct cdevsw cgd_cdevsw = {
99 1.84 dholland .d_open = cgdopen,
100 1.84 dholland .d_close = cgdclose,
101 1.84 dholland .d_read = cgdread,
102 1.84 dholland .d_write = cgdwrite,
103 1.84 dholland .d_ioctl = cgdioctl,
104 1.84 dholland .d_stop = nostop,
105 1.84 dholland .d_tty = notty,
106 1.84 dholland .d_poll = nopoll,
107 1.84 dholland .d_mmap = nommap,
108 1.84 dholland .d_kqfilter = nokqfilter,
109 1.90 dholland .d_discard = nodiscard,
110 1.116.10.3 martin .d_flag = D_DISK | D_MPSAFE
111 1.1 elric };
112 1.1 elric
113 1.112 alnsn /*
114 1.112 alnsn * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
115 1.112 alnsn */
116 1.112 alnsn static const uint8_t selftest_aes_xts_256_ptxt[64] = {
117 1.112 alnsn 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
118 1.112 alnsn 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
119 1.112 alnsn 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
120 1.112 alnsn 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
121 1.112 alnsn 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
122 1.112 alnsn 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
123 1.112 alnsn 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
124 1.112 alnsn 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
125 1.112 alnsn };
126 1.112 alnsn
127 1.112 alnsn static const uint8_t selftest_aes_xts_256_ctxt[64] = {
128 1.112 alnsn 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
129 1.112 alnsn 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
130 1.112 alnsn 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
131 1.112 alnsn 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
132 1.112 alnsn 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
133 1.112 alnsn 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
134 1.112 alnsn 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
135 1.112 alnsn 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
136 1.112 alnsn };
137 1.112 alnsn
138 1.112 alnsn static const uint8_t selftest_aes_xts_256_key[33] = {
139 1.112 alnsn 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
140 1.112 alnsn 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
141 1.112 alnsn 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
142 1.112 alnsn 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
143 1.112 alnsn 0
144 1.112 alnsn };
145 1.112 alnsn
146 1.112 alnsn /*
147 1.112 alnsn * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
148 1.112 alnsn */
149 1.112 alnsn static const uint8_t selftest_aes_xts_512_ptxt[64] = {
150 1.112 alnsn 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
151 1.112 alnsn 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
152 1.112 alnsn 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
153 1.112 alnsn 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
154 1.112 alnsn 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
155 1.112 alnsn 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
156 1.112 alnsn 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
157 1.112 alnsn 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
158 1.112 alnsn };
159 1.112 alnsn
160 1.112 alnsn static const uint8_t selftest_aes_xts_512_ctxt[64] = {
161 1.112 alnsn 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
162 1.112 alnsn 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
163 1.112 alnsn 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
164 1.112 alnsn 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
165 1.112 alnsn 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
166 1.112 alnsn 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
167 1.112 alnsn 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
168 1.112 alnsn 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
169 1.112 alnsn };
170 1.112 alnsn
171 1.112 alnsn static const uint8_t selftest_aes_xts_512_key[65] = {
172 1.112 alnsn 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
173 1.112 alnsn 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
174 1.112 alnsn 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
175 1.112 alnsn 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
176 1.112 alnsn 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
177 1.112 alnsn 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
178 1.112 alnsn 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
179 1.112 alnsn 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
180 1.112 alnsn 0
181 1.112 alnsn };
182 1.112 alnsn
183 1.112 alnsn const struct selftest_params selftests[] = {
184 1.112 alnsn {
185 1.112 alnsn .alg = "aes-xts",
186 1.112 alnsn .blocksize = 16,
187 1.112 alnsn .secsize = 512,
188 1.112 alnsn .blkno = 1,
189 1.112 alnsn .keylen = 256,
190 1.112 alnsn .txtlen = sizeof(selftest_aes_xts_256_ptxt),
191 1.112 alnsn .key = selftest_aes_xts_256_key,
192 1.112 alnsn .ptxt = selftest_aes_xts_256_ptxt,
193 1.112 alnsn .ctxt = selftest_aes_xts_256_ctxt
194 1.112 alnsn },
195 1.112 alnsn {
196 1.112 alnsn .alg = "aes-xts",
197 1.112 alnsn .blocksize = 16,
198 1.112 alnsn .secsize = 512,
199 1.112 alnsn .blkno = 0xffff,
200 1.112 alnsn .keylen = 512,
201 1.112 alnsn .txtlen = sizeof(selftest_aes_xts_512_ptxt),
202 1.112 alnsn .key = selftest_aes_xts_512_key,
203 1.112 alnsn .ptxt = selftest_aes_xts_512_ptxt,
204 1.112 alnsn .ctxt = selftest_aes_xts_512_ctxt
205 1.112 alnsn }
206 1.112 alnsn };
207 1.112 alnsn
208 1.65 dyoung static int cgd_match(device_t, cfdata_t, void *);
209 1.65 dyoung static void cgd_attach(device_t, device_t, void *);
210 1.65 dyoung static int cgd_detach(device_t, int);
211 1.65 dyoung static struct cgd_softc *cgd_spawn(int);
212 1.116.10.3 martin static struct cgd_worker *cgd_create_one_worker(void);
213 1.116.10.3 martin static void cgd_destroy_one_worker(struct cgd_worker *);
214 1.116.10.3 martin static struct cgd_worker *cgd_create_worker(void);
215 1.116.10.3 martin static void cgd_destroy_worker(struct cgd_worker *);
216 1.65 dyoung static int cgd_destroy(device_t);
217 1.65 dyoung
218 1.1 elric /* Internal Functions */
219 1.1 elric
220 1.99 mlelstv static int cgd_diskstart(device_t, struct buf *);
221 1.116.10.3 martin static void cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
222 1.1 elric static void cgdiodone(struct buf *);
223 1.116.10.3 martin static void cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
224 1.116.10.3 martin static void cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
225 1.116.10.3 martin static void cgd_process(struct work *, void *);
226 1.108 riastrad static int cgd_dumpblocks(device_t, void *, daddr_t, int);
227 1.1 elric
228 1.32 christos static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
229 1.65 dyoung static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
230 1.78 christos static int cgd_ioctl_get(dev_t, void *, struct lwp *);
231 1.27 drochner static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
232 1.32 christos struct lwp *);
233 1.44 christos static void cgd_cipher(struct cgd_softc *, void *, void *,
234 1.1 elric size_t, daddr_t, size_t, int);
235 1.1 elric
236 1.29 yamt static struct dkdriver cgddkdriver = {
237 1.98 mlelstv .d_minphys = minphys,
238 1.98 mlelstv .d_open = cgdopen,
239 1.98 mlelstv .d_close = cgdclose,
240 1.98 mlelstv .d_strategy = cgdstrategy,
241 1.98 mlelstv .d_iosize = NULL,
242 1.99 mlelstv .d_diskstart = cgd_diskstart,
243 1.108 riastrad .d_dumpblocks = cgd_dumpblocks,
244 1.98 mlelstv .d_lastclose = NULL
245 1.29 yamt };
246 1.29 yamt
247 1.65 dyoung CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
248 1.65 dyoung cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
249 1.65 dyoung
250 1.1 elric /* DIAGNOSTIC and DEBUG definitions */
251 1.1 elric
252 1.1 elric #if defined(CGDDEBUG) && !defined(DEBUG)
253 1.1 elric #define DEBUG
254 1.1 elric #endif
255 1.1 elric
256 1.1 elric #ifdef DEBUG
257 1.1 elric int cgddebug = 0;
258 1.1 elric
259 1.1 elric #define CGDB_FOLLOW 0x1
260 1.1 elric #define CGDB_IO 0x2
261 1.1 elric #define CGDB_CRYPTO 0x4
262 1.1 elric
263 1.1 elric #define IFDEBUG(x,y) if (cgddebug & (x)) y
264 1.1 elric #define DPRINTF(x,y) IFDEBUG(x, printf y)
265 1.1 elric #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
266 1.1 elric
267 1.26 drochner static void hexprint(const char *, void *, int);
268 1.1 elric
269 1.1 elric #else
270 1.1 elric #define IFDEBUG(x,y)
271 1.1 elric #define DPRINTF(x,y)
272 1.1 elric #define DPRINTF_FOLLOW(y)
273 1.1 elric #endif
274 1.1 elric
275 1.1 elric #ifdef DIAGNOSTIC
276 1.22 perry #define DIAGPANIC(x) panic x
277 1.1 elric #define DIAGCONDPANIC(x,y) if (x) panic y
278 1.1 elric #else
279 1.1 elric #define DIAGPANIC(x)
280 1.1 elric #define DIAGCONDPANIC(x,y)
281 1.1 elric #endif
282 1.1 elric
283 1.1 elric /* Global variables */
284 1.1 elric
285 1.116.10.3 martin static kmutex_t cgd_spawning_mtx;
286 1.116.10.3 martin static kcondvar_t cgd_spawning_cv;
287 1.116.10.3 martin static bool cgd_spawning;
288 1.116.10.3 martin static struct cgd_worker *cgd_worker;
289 1.116.10.3 martin static u_int cgd_refcnt; /* number of users of cgd_worker */
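/*
 * Note: a single cgd_worker (with its workqueue and transfer pool) is
 * shared by all cgd instances: the first cgd_spawn() creates it via
 * cgd_create_one_worker() and the last cgd_destroy() releases it via
 * cgd_destroy_one_worker(); cgd_refcnt counts the attached units.
 */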
290 1.116.10.3 martin
291 1.1 elric /* Utility Functions */
292 1.1 elric
293 1.1 elric #define CGDUNIT(x) DISKUNIT(x)
294 1.1 elric
295 1.65 dyoung /* The code */
296 1.65 dyoung
297 1.116.10.3 martin static int
298 1.116.10.3 martin cgd_lock(bool intr)
299 1.1 elric {
300 1.116.10.3 martin int error = 0;
301 1.1 elric
302 1.116.10.3 martin mutex_enter(&cgd_spawning_mtx);
303 1.116.10.3 martin while (cgd_spawning) {
304 1.116.10.3 martin if (intr)
305 1.116.10.3 martin error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
306 1.116.10.3 martin else
307 1.116.10.3 martin cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
308 1.116.10.3 martin }
309 1.116.10.3 martin if (error == 0)
310 1.116.10.3 martin cgd_spawning = true;
311 1.116.10.3 martin mutex_exit(&cgd_spawning_mtx);
312 1.116.10.3 martin return error;
313 1.116.10.3 martin }
314 1.65 dyoung
315 1.116.10.3 martin static void
316 1.116.10.3 martin cgd_unlock(void)
317 1.116.10.3 martin {
318 1.116.10.3 martin mutex_enter(&cgd_spawning_mtx);
319 1.116.10.3 martin cgd_spawning = false;
320 1.116.10.3 martin cv_broadcast(&cgd_spawning_cv);
321 1.116.10.3 martin mutex_exit(&cgd_spawning_mtx);
322 1.116.10.3 martin }
323 1.116.10.3 martin
324 1.116.10.3 martin static struct cgd_softc *
325 1.116.10.3 martin getcgd_softc(dev_t dev)
326 1.116.10.3 martin {
327 1.116.10.3 martin return device_lookup_private(&cgd_cd, CGDUNIT(dev));
328 1.1 elric }
329 1.1 elric
330 1.65 dyoung static int
331 1.65 dyoung cgd_match(device_t self, cfdata_t cfdata, void *aux)
332 1.65 dyoung {
333 1.65 dyoung
334 1.65 dyoung return 1;
335 1.65 dyoung }
336 1.1 elric
337 1.1 elric static void
338 1.65 dyoung cgd_attach(device_t parent, device_t self, void *aux)
339 1.1 elric {
340 1.65 dyoung struct cgd_softc *sc = device_private(self);
341 1.1 elric
342 1.85 skrll mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
343 1.116.10.3 martin cv_init(&sc->sc_cv, "cgdcv");
344 1.98 mlelstv dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
345 1.65 dyoung disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
346 1.70 joerg
347 1.98 mlelstv if (!pmf_device_register(self, NULL, NULL))
348 1.107 msaitoh aprint_error_dev(self,
349 1.107 msaitoh "unable to register power management hooks\n");
350 1.65 dyoung }
351 1.65 dyoung
352 1.65 dyoung
353 1.65 dyoung static int
354 1.65 dyoung cgd_detach(device_t self, int flags)
355 1.65 dyoung {
356 1.67 dyoung int ret;
357 1.65 dyoung struct cgd_softc *sc = device_private(self);
358 1.67 dyoung struct dk_softc *dksc = &sc->sc_dksc;
359 1.67 dyoung
360 1.116.10.5 martin if (DK_BUSY(dksc, 0))
361 1.67 dyoung return EBUSY;
362 1.65 dyoung
363 1.98 mlelstv if (DK_ATTACHED(dksc) &&
364 1.67 dyoung (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
365 1.67 dyoung return ret;
366 1.65 dyoung
367 1.67 dyoung disk_destroy(&dksc->sc_dkdev);
368 1.116.10.3 martin cv_destroy(&sc->sc_cv);
369 1.86 christos mutex_destroy(&sc->sc_lock);
370 1.65 dyoung
371 1.67 dyoung return 0;
372 1.1 elric }
373 1.1 elric
374 1.1 elric void
375 1.1 elric cgdattach(int num)
376 1.1 elric {
377 1.116.10.3 martin #ifndef _MODULE
378 1.65 dyoung int error;
379 1.65 dyoung
380 1.116.10.3 martin mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
381 1.116.10.3 martin cv_init(&cgd_spawning_cv, "cgspwn");
382 1.116.10.3 martin
383 1.65 dyoung error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
384 1.65 dyoung if (error != 0)
385 1.65 dyoung aprint_error("%s: unable to register cfattach\n",
386 1.65 dyoung cgd_cd.cd_name);
387 1.116.10.3 martin #endif
388 1.65 dyoung }
389 1.65 dyoung
390 1.65 dyoung static struct cgd_softc *
391 1.65 dyoung cgd_spawn(int unit)
392 1.65 dyoung {
393 1.65 dyoung cfdata_t cf;
394 1.116.10.3 martin struct cgd_worker *cw;
395 1.116.10.3 martin struct cgd_softc *sc;
396 1.65 dyoung
397 1.116.10.3 martin cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
398 1.65 dyoung cf->cf_name = cgd_cd.cd_name;
399 1.65 dyoung cf->cf_atname = cgd_cd.cd_name;
400 1.65 dyoung cf->cf_unit = unit;
401 1.65 dyoung cf->cf_fstate = FSTATE_STAR;
402 1.65 dyoung
403 1.116.10.3 martin cw = cgd_create_one_worker();
404 1.116.10.3 martin if (cw == NULL) {
405 1.116.10.3 martin kmem_free(cf, sizeof(*cf));
406 1.116.10.3 martin return NULL;
407 1.116.10.3 martin }
408 1.116.10.3 martin
409 1.116.10.3 martin sc = device_private(config_attach_pseudo(cf));
410 1.116.10.3 martin if (sc == NULL) {
411 1.116.10.3 martin cgd_destroy_one_worker(cw);
412 1.116.10.3 martin return NULL;
413 1.116.10.3 martin }
414 1.116.10.3 martin
415 1.116.10.3 martin sc->sc_worker = cw;
416 1.116.10.3 martin
417 1.116.10.3 martin return sc;
418 1.65 dyoung }
419 1.65 dyoung
420 1.65 dyoung static int
421 1.65 dyoung cgd_destroy(device_t dev)
422 1.65 dyoung {
423 1.116.10.3 martin struct cgd_softc *sc = device_private(dev);
424 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
425 1.65 dyoung cfdata_t cf;
426 1.116.10.3 martin int error;
427 1.1 elric
428 1.65 dyoung cf = device_cfdata(dev);
429 1.65 dyoung error = config_detach(dev, DETACH_QUIET);
430 1.65 dyoung if (error)
431 1.65 dyoung return error;
432 1.116.10.3 martin
433 1.116.10.3 martin cgd_destroy_one_worker(cw);
434 1.116.10.3 martin
435 1.116.10.3 martin kmem_free(cf, sizeof(*cf));
436 1.65 dyoung return 0;
437 1.1 elric }
438 1.1 elric
439 1.116.10.3 martin static void
440 1.116.10.3 martin cgd_busy(struct cgd_softc *sc)
441 1.116.10.3 martin {
442 1.116.10.3 martin
443 1.116.10.3 martin mutex_enter(&sc->sc_lock);
444 1.116.10.3 martin while (sc->sc_busy)
445 1.116.10.3 martin cv_wait(&sc->sc_cv, &sc->sc_lock);
446 1.116.10.3 martin sc->sc_busy = true;
447 1.116.10.3 martin mutex_exit(&sc->sc_lock);
448 1.116.10.3 martin }
449 1.116.10.3 martin
450 1.116.10.3 martin static void
451 1.116.10.3 martin cgd_unbusy(struct cgd_softc *sc)
452 1.116.10.3 martin {
453 1.116.10.3 martin
454 1.116.10.3 martin mutex_enter(&sc->sc_lock);
455 1.116.10.3 martin sc->sc_busy = false;
456 1.116.10.3 martin cv_broadcast(&sc->sc_cv);
457 1.116.10.3 martin mutex_exit(&sc->sc_lock);
458 1.116.10.3 martin }
459 1.116.10.3 martin
460 1.116.10.3 martin static struct cgd_worker *
461 1.116.10.3 martin cgd_create_one_worker(void)
462 1.116.10.3 martin {
463 1.116.10.3 martin KASSERT(cgd_spawning);
464 1.116.10.3 martin
465 1.116.10.3 martin if (cgd_refcnt++ == 0) {
466 1.116.10.3 martin KASSERT(cgd_worker == NULL);
467 1.116.10.3 martin cgd_worker = cgd_create_worker();
468 1.116.10.3 martin }
469 1.116.10.3 martin
470 1.116.10.3 martin KASSERT(cgd_worker != NULL);
471 1.116.10.3 martin return cgd_worker;
472 1.116.10.3 martin }
473 1.116.10.3 martin
474 1.116.10.3 martin static void
475 1.116.10.3 martin cgd_destroy_one_worker(struct cgd_worker *cw)
476 1.116.10.3 martin {
477 1.116.10.3 martin KASSERT(cgd_spawning);
478 1.116.10.3 martin KASSERT(cw == cgd_worker);
479 1.116.10.3 martin
480 1.116.10.3 martin if (--cgd_refcnt == 0) {
481 1.116.10.3 martin cgd_destroy_worker(cgd_worker);
482 1.116.10.3 martin cgd_worker = NULL;
483 1.116.10.3 martin }
484 1.116.10.3 martin }
485 1.116.10.3 martin
486 1.116.10.3 martin static struct cgd_worker *
487 1.116.10.3 martin cgd_create_worker(void)
488 1.116.10.3 martin {
489 1.116.10.3 martin struct cgd_worker *cw;
490 1.116.10.3 martin struct workqueue *wq;
491 1.116.10.3 martin struct pool *cp;
492 1.116.10.3 martin int error;
493 1.116.10.3 martin
494 1.116.10.3 martin cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
495 1.116.10.3 martin cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);
496 1.116.10.3 martin
497 1.116.10.3 martin error = workqueue_create(&wq, "cgd", cgd_process, NULL,
498 1.116.10.3 martin PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
499 1.116.10.3 martin if (error) {
500 1.116.10.3 martin kmem_free(cp, sizeof(struct pool));
501 1.116.10.3 martin kmem_free(cw, sizeof(struct cgd_worker));
502 1.116.10.3 martin return NULL;
503 1.116.10.3 martin }
504 1.116.10.3 martin
505 1.116.10.3 martin cw->cw_cpool = cp;
506 1.116.10.3 martin cw->cw_wq = wq;
507 1.116.10.3 martin pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
508 1.116.10.3 martin 0, 0, "cgdcpl", NULL, IPL_BIO);
509 1.116.10.3 martin
510 1.116.10.3 martin mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);
511 1.116.10.3 martin
512 1.116.10.3 martin return cw;
513 1.116.10.3 martin }
514 1.116.10.3 martin
515 1.116.10.3 martin static void
516 1.116.10.3 martin cgd_destroy_worker(struct cgd_worker *cw)
517 1.116.10.3 martin {
518 1.116.10.4 martin
519 1.116.10.4 martin /*
520 1.116.10.4 martin * Wait for all worker threads to complete before destroying
521 1.116.10.4 martin * the rest of the cgd_worker.
522 1.116.10.4 martin */
523 1.116.10.4 martin if (cw->cw_wq)
524 1.116.10.4 martin workqueue_destroy(cw->cw_wq);
525 1.116.10.4 martin
526 1.116.10.3 martin mutex_destroy(&cw->cw_lock);
527 1.116.10.3 martin
528 1.116.10.3 martin if (cw->cw_cpool) {
529 1.116.10.3 martin pool_destroy(cw->cw_cpool);
530 1.116.10.3 martin kmem_free(cw->cw_cpool, sizeof(struct pool));
531 1.116.10.3 martin }
532 1.116.10.3 martin
533 1.116.10.3 martin kmem_free(cw, sizeof(struct cgd_worker));
534 1.116.10.3 martin }
535 1.116.10.3 martin
536 1.18 thorpej static int
537 1.32 christos cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
538 1.1 elric {
539 1.116.10.3 martin struct cgd_softc *sc;
540 1.116.10.3 martin int error;
541 1.1 elric
542 1.56 cegger DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
543 1.116.10.3 martin
544 1.116.10.3 martin error = cgd_lock(true);
545 1.116.10.3 martin if (error)
546 1.116.10.3 martin return error;
547 1.116.10.3 martin sc = getcgd_softc(dev);
548 1.116.10.3 martin if (sc == NULL)
549 1.116.10.3 martin sc = cgd_spawn(CGDUNIT(dev));
550 1.116.10.3 martin cgd_unlock();
551 1.116.10.3 martin if (sc == NULL)
552 1.116.10.3 martin return ENXIO;
553 1.116.10.3 martin
554 1.116.10.3 martin return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
555 1.1 elric }
556 1.1 elric
557 1.18 thorpej static int
558 1.32 christos cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
559 1.1 elric {
560 1.116.10.3 martin struct cgd_softc *sc;
561 1.65 dyoung struct dk_softc *dksc;
562 1.116.10.3 martin int error;
563 1.1 elric
564 1.56 cegger DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
565 1.116.10.3 martin
566 1.116.10.3 martin error = cgd_lock(false);
567 1.116.10.3 martin if (error)
568 1.65 dyoung return error;
569 1.116.10.3 martin sc = getcgd_softc(dev);
570 1.116.10.3 martin if (sc == NULL) {
571 1.116.10.3 martin error = ENXIO;
572 1.116.10.3 martin goto done;
573 1.116.10.3 martin }
574 1.116.10.3 martin
575 1.116.10.3 martin dksc = &sc->sc_dksc;
576 1.116.10.3 martin if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
577 1.116.10.3 martin goto done;
578 1.65 dyoung
579 1.98 mlelstv if (!DK_ATTACHED(dksc)) {
580 1.116.10.3 martin if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
581 1.116.10.3 martin device_printf(dksc->sc_dev,
582 1.65 dyoung "unable to detach instance\n");
583 1.116.10.3 martin goto done;
584 1.65 dyoung }
585 1.65 dyoung }
586 1.116.10.3 martin
587 1.116.10.3 martin done:
588 1.116.10.3 martin cgd_unlock();
589 1.116.10.3 martin
590 1.116.10.3 martin return error;
591 1.1 elric }
592 1.1 elric
593 1.18 thorpej static void
594 1.1 elric cgdstrategy(struct buf *bp)
595 1.1 elric {
596 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(bp->b_dev);
597 1.1 elric
598 1.1 elric DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
599 1.1 elric (long)bp->b_bcount));
600 1.72 riastrad
601 1.72 riastrad /*
602 1.111 mlelstv * Reject requests whose data buffer is not 32-bit aligned.
603 1.72 riastrad */
604 1.111 mlelstv if (((uintptr_t)bp->b_data & 3) != 0) {
605 1.72 riastrad bp->b_error = EINVAL;
606 1.111 mlelstv goto bail;
607 1.72 riastrad }
608 1.72 riastrad
609 1.116.10.3 martin dk_strategy(&sc->sc_dksc, bp);
610 1.1 elric return;
611 1.111 mlelstv
612 1.111 mlelstv bail:
613 1.111 mlelstv bp->b_resid = bp->b_bcount;
614 1.111 mlelstv biodone(bp);
615 1.111 mlelstv return;
616 1.1 elric }
617 1.1 elric
618 1.18 thorpej static int
619 1.1 elric cgdsize(dev_t dev)
620 1.1 elric {
621 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(dev);
622 1.1 elric
623 1.56 cegger DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
624 1.116.10.3 martin if (!sc)
625 1.1 elric return -1;
626 1.116.10.3 martin return dk_size(&sc->sc_dksc, dev);
627 1.1 elric }
628 1.1 elric
629 1.16 elric /*
630 1.16 elric * cgd_{get,put}data are helpers that obtain and release a scratch
631 1.116.10.3 martin * buffer for the newly encrypted data.
632 1.116.10.3 martin * A single buffer per device is no longer enough; we want a buffer
633 1.116.10.3 martin * per work queue...
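 *
 * Usage sketch: a caller obtains a scratch buffer with cgd_getdata(sc,
 * size) and must release it with cgd_putdata(sc, data, size), passing
 * the same size.  The helper hands out either the per-device buffer
 * sc->sc_data (MAXPHYS bytes, allocated in cgd_ioctl_set()) or a fresh
 * kmem_intr_alloc() allocation; it may return NULL, so callers must be
 * prepared to back off (cf. the EAGAIN path in cgd_diskstart()).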
634 1.16 elric */
635 1.16 elric
636 1.16 elric static void *
637 1.116.10.3 martin cgd_getdata(struct cgd_softc *sc, unsigned long size)
638 1.16 elric {
639 1.116.10.3 martin void *data = NULL;
640 1.16 elric
641 1.116.10.3 martin mutex_enter(&sc->sc_lock);
642 1.116.10.3 martin if (!sc->sc_data_used) {
643 1.116.10.3 martin sc->sc_data_used = true;
644 1.116.10.3 martin data = sc->sc_data;
645 1.16 elric }
646 1.116.10.3 martin mutex_exit(&sc->sc_lock);
647 1.16 elric
648 1.16 elric if (data)
649 1.16 elric return data;
650 1.16 elric
651 1.116.10.3 martin return kmem_intr_alloc(size, KM_NOSLEEP);
652 1.16 elric }
653 1.16 elric
654 1.1 elric static void
655 1.116.10.3 martin cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
656 1.16 elric {
657 1.16 elric
658 1.116.10.3 martin if (data == sc->sc_data) {
659 1.116.10.3 martin mutex_enter(&sc->sc_lock);
660 1.116.10.3 martin sc->sc_data_used = false;
661 1.116.10.3 martin mutex_exit(&sc->sc_lock);
662 1.116.10.3 martin } else
663 1.116.10.3 martin kmem_intr_free(data, size);
664 1.16 elric }
665 1.16 elric
666 1.99 mlelstv static int
667 1.99 mlelstv cgd_diskstart(device_t dev, struct buf *bp)
668 1.1 elric {
669 1.116.10.3 martin struct cgd_softc *sc = device_private(dev);
670 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
671 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
672 1.105 mlelstv struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
673 1.116.10.3 martin struct cgd_xfer *cx;
674 1.99 mlelstv struct buf *nbp;
675 1.44 christos void * newaddr;
676 1.1 elric daddr_t bn;
677 1.1 elric
678 1.99 mlelstv DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
679 1.1 elric
680 1.99 mlelstv bn = bp->b_rawblkno;
681 1.22 perry
682 1.99 mlelstv /*
683 1.99 mlelstv * We attempt to allocate all of our resources up front, so that
684 1.99 mlelstv * we can fail quickly if they are unavailable.
685 1.99 mlelstv */
686 1.116.10.3 martin nbp = getiobuf(sc->sc_tvn, false);
687 1.99 mlelstv if (nbp == NULL)
688 1.99 mlelstv return EAGAIN;
689 1.16 elric
690 1.116.10.3 martin cx = pool_get(cw->cw_cpool, PR_NOWAIT);
691 1.116.10.3 martin if (cx == NULL) {
692 1.116.10.3 martin putiobuf(nbp);
693 1.116.10.3 martin return EAGAIN;
694 1.116.10.3 martin }
695 1.116.10.3 martin
696 1.116.10.3 martin cx->cx_sc = sc;
697 1.116.10.3 martin cx->cx_obp = bp;
698 1.116.10.3 martin cx->cx_nbp = nbp;
699 1.116.10.3 martin cx->cx_srcv = cx->cx_dstv = bp->b_data;
700 1.116.10.3 martin cx->cx_blkno = bn;
701 1.116.10.3 martin cx->cx_secsize = dg->dg_secsize;
702 1.116.10.3 martin
703 1.99 mlelstv /*
704 1.99 mlelstv * If we are writing, then we need to encrypt the outgoing
705 1.99 mlelstv * block into a new block of memory.
706 1.99 mlelstv */
707 1.99 mlelstv if ((bp->b_flags & B_READ) == 0) {
708 1.116.10.3 martin newaddr = cgd_getdata(sc, bp->b_bcount);
709 1.99 mlelstv if (!newaddr) {
710 1.116.10.3 martin pool_put(cw->cw_cpool, cx);
711 1.99 mlelstv putiobuf(nbp);
712 1.99 mlelstv return EAGAIN;
713 1.16 elric }
714 1.116.10.3 martin
715 1.116.10.3 martin cx->cx_dstv = newaddr;
716 1.116.10.3 martin cx->cx_len = bp->b_bcount;
717 1.116.10.3 martin cx->cx_dir = CGD_CIPHER_ENCRYPT;
718 1.116.10.3 martin
719 1.116.10.3 martin cgd_enqueue(sc, cx);
720 1.116.10.3 martin return 0;
721 1.99 mlelstv }
722 1.1 elric
723 1.116.10.3 martin cgd_diskstart2(sc, cx);
724 1.116.10.3 martin return 0;
725 1.116.10.3 martin }
726 1.116.10.3 martin
727 1.116.10.3 martin static void
728 1.116.10.3 martin cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
729 1.116.10.3 martin {
730 1.116.10.3 martin struct vnode *vp;
731 1.116.10.3 martin struct buf *bp;
732 1.116.10.3 martin struct buf *nbp;
733 1.116.10.3 martin
734 1.116.10.3 martin bp = cx->cx_obp;
735 1.116.10.3 martin nbp = cx->cx_nbp;
736 1.116.10.3 martin
737 1.116.10.3 martin nbp->b_data = cx->cx_dstv;
738 1.99 mlelstv nbp->b_flags = bp->b_flags;
739 1.99 mlelstv nbp->b_oflags = bp->b_oflags;
740 1.99 mlelstv nbp->b_cflags = bp->b_cflags;
741 1.99 mlelstv nbp->b_iodone = cgdiodone;
742 1.99 mlelstv nbp->b_proc = bp->b_proc;
743 1.116.10.3 martin nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
744 1.99 mlelstv nbp->b_bcount = bp->b_bcount;
745 1.116.10.3 martin nbp->b_private = cx;
746 1.99 mlelstv
747 1.99 mlelstv BIO_COPYPRIO(nbp, bp);
748 1.99 mlelstv
749 1.99 mlelstv if ((nbp->b_flags & B_READ) == 0) {
750 1.99 mlelstv vp = nbp->b_vp;
751 1.99 mlelstv mutex_enter(vp->v_interlock);
752 1.99 mlelstv vp->v_numoutput++;
753 1.99 mlelstv mutex_exit(vp->v_interlock);
754 1.17 dbj }
755 1.116.10.3 martin VOP_STRATEGY(sc->sc_tvn, nbp);
756 1.1 elric }
757 1.1 elric
758 1.18 thorpej static void
759 1.17 dbj cgdiodone(struct buf *nbp)
760 1.1 elric {
761 1.116.10.3 martin struct cgd_xfer *cx = nbp->b_private;
762 1.116.10.3 martin struct buf *obp = cx->cx_obp;
763 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(obp->b_dev);
764 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
765 1.105 mlelstv struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
766 1.105 mlelstv daddr_t bn;
767 1.22 perry
768 1.116.10.3 martin KDASSERT(sc);
769 1.1 elric
770 1.17 dbj DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
771 1.20 yamt DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
772 1.1 elric obp, obp->b_bcount, obp->b_resid));
773 1.107 msaitoh DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
774 1.107 msaitoh " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
775 1.107 msaitoh nbp->b_bcount));
776 1.46 ad if (nbp->b_error != 0) {
777 1.46 ad obp->b_error = nbp->b_error;
778 1.62 christos DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
779 1.62 christos obp->b_error));
780 1.1 elric }
781 1.1 elric
782 1.16 elric /* Perform the decryption if we are reading.
783 1.1 elric *
784 1.1 elric * Note: use the blocknumber from nbp, since it is what
785 1.1 elric * we used to encrypt the blocks.
786 1.1 elric */
787 1.1 elric
788 1.105 mlelstv if (nbp->b_flags & B_READ) {
789 1.105 mlelstv bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
790 1.116.10.3 martin
791 1.116.10.3 martin cx->cx_obp = obp;
792 1.116.10.3 martin cx->cx_nbp = nbp;
793 1.116.10.3 martin cx->cx_dstv = obp->b_data;
794 1.116.10.3 martin cx->cx_srcv = obp->b_data;
795 1.116.10.3 martin cx->cx_len = obp->b_bcount;
796 1.116.10.3 martin cx->cx_blkno = bn;
797 1.116.10.3 martin cx->cx_secsize = dg->dg_secsize;
798 1.116.10.3 martin cx->cx_dir = CGD_CIPHER_DECRYPT;
799 1.116.10.3 martin
800 1.116.10.3 martin cgd_enqueue(sc, cx);
801 1.116.10.3 martin return;
802 1.105 mlelstv }
803 1.1 elric
804 1.116.10.3 martin cgd_iodone2(sc, cx);
805 1.116.10.3 martin }
806 1.116.10.3 martin
807 1.116.10.3 martin static void
808 1.116.10.3 martin cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
809 1.116.10.3 martin {
810 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
811 1.116.10.3 martin struct buf *obp = cx->cx_obp;
812 1.116.10.3 martin struct buf *nbp = cx->cx_nbp;
813 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
814 1.116.10.3 martin
815 1.116.10.3 martin pool_put(cw->cw_cpool, cx);
816 1.116.10.3 martin
817 1.16 elric /* If we allocated memory, free it now... */
818 1.1 elric if (nbp->b_data != obp->b_data)
819 1.116.10.3 martin cgd_putdata(sc, nbp->b_data, nbp->b_bcount);
820 1.1 elric
821 1.33 yamt putiobuf(nbp);
822 1.1 elric
823 1.100 mlelstv /* Request is complete for whatever reason */
824 1.100 mlelstv obp->b_resid = 0;
825 1.100 mlelstv if (obp->b_error != 0)
826 1.100 mlelstv obp->b_resid = obp->b_bcount;
827 1.100 mlelstv
828 1.99 mlelstv dk_done(dksc, obp);
829 1.101 mlelstv dk_start(dksc, NULL);
830 1.1 elric }
831 1.1 elric
832 1.108 riastrad static int
833 1.108 riastrad cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
834 1.108 riastrad {
835 1.108 riastrad struct cgd_softc *sc = device_private(dev);
836 1.108 riastrad struct dk_softc *dksc = &sc->sc_dksc;
837 1.108 riastrad struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
838 1.108 riastrad size_t nbytes, blksize;
839 1.108 riastrad void *buf;
840 1.108 riastrad int error;
841 1.108 riastrad
842 1.108 riastrad /*
843 1.108 riastrad * dk_dump gives us units of disklabel sectors. Everything
844 1.108 riastrad * else in cgd uses units of diskgeom sectors. These had
845 1.108 riastrad * better agree; otherwise we need to figure out how to convert
846 1.108 riastrad * between them.
847 1.108 riastrad */
848 1.108 riastrad KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
849 1.108 riastrad "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
850 1.108 riastrad dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
851 1.108 riastrad blksize = dg->dg_secsize;
852 1.108 riastrad
853 1.108 riastrad /*
854 1.108 riastrad * Compute the number of bytes in this request, which dk_dump
855 1.108 riastrad * has `helpfully' converted to a number of blocks for us.
856 1.108 riastrad */
857 1.108 riastrad nbytes = nblk*blksize;
858 1.108 riastrad
859 1.108 riastrad /* Try to acquire a buffer to store the ciphertext. */
860 1.116.10.3 martin buf = cgd_getdata(sc, nbytes);
861 1.108 riastrad if (buf == NULL)
862 1.108 riastrad /* Out of memory: give up. */
863 1.108 riastrad return ENOMEM;
864 1.108 riastrad
865 1.108 riastrad /* Encrypt the caller's data into the temporary buffer. */
866 1.108 riastrad cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
867 1.108 riastrad
868 1.108 riastrad /* Pass it on to the underlying disk device. */
869 1.108 riastrad error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
870 1.108 riastrad
871 1.108 riastrad /* Release the buffer. */
872 1.116.10.3 martin cgd_putdata(sc, buf, nbytes);
873 1.108 riastrad
874 1.108 riastrad /* Return any error from the underlying disk device. */
875 1.108 riastrad return error;
876 1.108 riastrad }
877 1.108 riastrad
878 1.1 elric /* XXX: we should probably put these into dksubr.c, mostly */
879 1.18 thorpej static int
880 1.40 christos cgdread(dev_t dev, struct uio *uio, int flags)
881 1.1 elric {
882 1.116.10.3 martin struct cgd_softc *sc;
883 1.1 elric struct dk_softc *dksc;
884 1.1 elric
885 1.56 cegger DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
886 1.56 cegger (unsigned long long)dev, uio, flags));
887 1.116.10.3 martin sc = getcgd_softc(dev);
888 1.116.10.3 martin if (sc == NULL)
889 1.116.10.3 martin return ENXIO;
890 1.116.10.3 martin dksc = &sc->sc_dksc;
891 1.98 mlelstv if (!DK_ATTACHED(dksc))
892 1.1 elric return ENXIO;
893 1.1 elric return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
894 1.1 elric }
895 1.1 elric
896 1.1 elric /* XXX: we should probably put these into dksubr.c, mostly */
897 1.18 thorpej static int
898 1.40 christos cgdwrite(dev_t dev, struct uio *uio, int flags)
899 1.1 elric {
900 1.116.10.3 martin struct cgd_softc *sc;
901 1.1 elric struct dk_softc *dksc;
902 1.1 elric
903 1.56 cegger DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
904 1.116.10.3 martin sc = getcgd_softc(dev);
905 1.116.10.3 martin if (sc == NULL)
906 1.116.10.3 martin return ENXIO;
907 1.116.10.3 martin dksc = &sc->sc_dksc;
908 1.98 mlelstv if (!DK_ATTACHED(dksc))
909 1.1 elric return ENXIO;
910 1.1 elric return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
911 1.1 elric }
912 1.1 elric
913 1.18 thorpej static int
914 1.44 christos cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
915 1.1 elric {
916 1.116.10.3 martin struct cgd_softc *sc;
917 1.1 elric struct dk_softc *dksc;
918 1.1 elric int part = DISKPART(dev);
919 1.1 elric int pmask = 1 << part;
920 1.116.10.3 martin int error;
921 1.1 elric
922 1.56 cegger DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
923 1.32 christos dev, cmd, data, flag, l));
924 1.78 christos
925 1.1 elric switch (cmd) {
926 1.93 christos case CGDIOCGET:
927 1.93 christos return cgd_ioctl_get(dev, data, l);
928 1.1 elric case CGDIOCSET:
929 1.1 elric case CGDIOCCLR:
930 1.1 elric if ((flag & FWRITE) == 0)
931 1.1 elric return EBADF;
932 1.78 christos /* FALLTHROUGH */
933 1.78 christos default:
934 1.116.10.3 martin sc = getcgd_softc(dev);
935 1.116.10.3 martin if (sc == NULL)
936 1.116.10.3 martin return ENXIO;
937 1.116.10.3 martin dksc = &sc->sc_dksc;
938 1.78 christos break;
939 1.1 elric }
940 1.1 elric
941 1.1 elric switch (cmd) {
942 1.1 elric case CGDIOCSET:
943 1.116.10.3 martin cgd_busy(sc);
944 1.98 mlelstv if (DK_ATTACHED(dksc))
945 1.116.10.3 martin error = EBUSY;
946 1.116.10.3 martin else
947 1.116.10.3 martin error = cgd_ioctl_set(sc, data, l);
948 1.116.10.3 martin cgd_unbusy(sc);
949 1.116.10.3 martin break;
950 1.1 elric case CGDIOCCLR:
951 1.116.10.3 martin cgd_busy(sc);
952 1.116.10.3 martin if (DK_BUSY(&sc->sc_dksc, pmask))
953 1.116.10.3 martin error = EBUSY;
954 1.116.10.3 martin else
955 1.116.10.3 martin error = cgd_ioctl_clr(sc, l);
956 1.116.10.3 martin cgd_unbusy(sc);
957 1.116.10.3 martin break;
958 1.114 jdolecek case DIOCGCACHE:
959 1.57 apb case DIOCCACHESYNC:
960 1.116.10.3 martin cgd_busy(sc);
961 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
962 1.116.10.3 martin cgd_unbusy(sc);
963 1.116.10.3 martin error = ENOENT;
964 1.116.10.3 martin break;
965 1.116.10.3 martin }
966 1.57 apb /*
967 1.57 apb * We pass this call down to the underlying disk.
968 1.57 apb */
969 1.116.10.3 martin error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
970 1.116.10.3 martin cgd_unbusy(sc);
971 1.116.10.3 martin break;
972 1.116.10.1 martin case DIOCGSECTORALIGN: {
973 1.116.10.1 martin struct disk_sectoralign *dsa = data;
974 1.116.10.1 martin
975 1.116.10.3 martin cgd_busy(sc);
976 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
977 1.116.10.3 martin cgd_unbusy(sc);
978 1.116.10.3 martin error = ENOENT;
979 1.116.10.3 martin break;
980 1.116.10.3 martin }
981 1.116.10.1 martin
982 1.116.10.1 martin /* Get the underlying disk's sector alignment. */
983 1.116.10.3 martin error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
984 1.116.10.3 martin if (error) {
985 1.116.10.3 martin cgd_unbusy(sc);
986 1.116.10.3 martin break;
987 1.116.10.3 martin }
988 1.116.10.1 martin
989 1.116.10.1 martin /* Adjust for the disklabel partition if necessary. */
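/*
 * Worked example (illustrative numbers): if the underlying disk
 * reports dsa_alignment = 8 with dsa_firstaligned = 0 and this
 * partition starts at absolute sector 3, then r = 3 and
 * dsa_firstaligned becomes (0 + 8) - 3 = 5; partition-relative
 * sector 5 is absolute sector 8, which is aligned again.
 */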
990 1.116.10.1 martin if (part != RAW_PART) {
991 1.116.10.1 martin struct disklabel *lp = dksc->sc_dkdev.dk_label;
992 1.116.10.1 martin daddr_t offset = lp->d_partitions[part].p_offset;
993 1.116.10.1 martin uint32_t r = offset % dsa->dsa_alignment;
994 1.116.10.1 martin
995 1.116.10.1 martin if (r < dsa->dsa_firstaligned)
996 1.116.10.1 martin dsa->dsa_firstaligned = dsa->dsa_firstaligned
997 1.116.10.1 martin - r;
998 1.116.10.1 martin else
999 1.116.10.1 martin dsa->dsa_firstaligned = (dsa->dsa_firstaligned
1000 1.116.10.1 martin + dsa->dsa_alignment) - r;
1001 1.116.10.1 martin }
1002 1.116.10.3 martin cgd_unbusy(sc);
1003 1.116.10.3 martin break;
1004 1.116.10.1 martin }
1005 1.103 christos case DIOCGSTRATEGY:
1006 1.103 christos case DIOCSSTRATEGY:
1007 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
1008 1.116.10.3 martin error = ENOENT;
1009 1.116.10.3 martin break;
1010 1.116.10.3 martin }
1011 1.103 christos /*FALLTHROUGH*/
1012 1.1 elric default:
1013 1.116.10.3 martin error = dk_ioctl(dksc, dev, cmd, data, flag, l);
1014 1.116.10.3 martin break;
1015 1.93 christos case CGDIOCGET:
1016 1.93 christos KASSERT(0);
1017 1.116.10.3 martin error = EINVAL;
1018 1.1 elric }
1019 1.116.10.3 martin
1020 1.116.10.3 martin return error;
1021 1.1 elric }
1022 1.1 elric
1023 1.18 thorpej static int
1024 1.44 christos cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1025 1.1 elric {
1026 1.116.10.3 martin struct cgd_softc *sc;
1027 1.1 elric
1028 1.56 cegger DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1029 1.56 cegger dev, blkno, va, (unsigned long)size));
1030 1.116.10.3 martin sc = getcgd_softc(dev);
1031 1.116.10.3 martin if (sc == NULL)
1032 1.116.10.3 martin return ENXIO;
1033 1.116.10.3 martin return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1034 1.1 elric }
1035 1.1 elric
1036 1.1 elric /*
1037 1.1 elric * XXXrcd:
1038 1.1 elric * for now we hardcode the maximum key length.
1039 1.1 elric */
1040 1.1 elric #define MAX_KEYSIZE 1024
1041 1.1 elric
1042 1.53 christos static const struct {
1043 1.53 christos const char *n;
1044 1.53 christos int v;
1045 1.53 christos int d;
1046 1.53 christos } encblkno[] = {
1047 1.53 christos { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
1048 1.53 christos { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
1049 1.53 christos { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
1050 1.53 christos };
1051 1.53 christos
1052 1.1 elric /* ARGSUSED */
1053 1.1 elric static int
1054 1.116.10.3 martin cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
1055 1.1 elric {
1056 1.1 elric struct cgd_ioctl *ci = data;
1057 1.1 elric struct vnode *vp;
1058 1.1 elric int ret;
1059 1.53 christos size_t i;
1060 1.43 cbiere size_t keybytes; /* key length in bytes */
1061 1.27 drochner const char *cp;
1062 1.71 dholland struct pathbuf *pb;
1063 1.36 christos char *inbuf;
1064 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1065 1.1 elric
1066 1.1 elric cp = ci->ci_disk;
1067 1.71 dholland
1068 1.71 dholland ret = pathbuf_copyin(ci->ci_disk, &pb);
1069 1.71 dholland if (ret != 0) {
1070 1.71 dholland return ret;
1071 1.71 dholland }
1072 1.71 dholland ret = dk_lookup(pb, l, &vp);
1073 1.71 dholland pathbuf_destroy(pb);
1074 1.71 dholland if (ret != 0) {
1075 1.1 elric return ret;
1076 1.71 dholland }
1077 1.1 elric
1078 1.116.10.3 martin inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
1079 1.36 christos
1080 1.116.10.3 martin if ((ret = cgdinit(sc, cp, vp, l)) != 0)
1081 1.1 elric goto bail;
1082 1.1 elric
1083 1.36 christos (void)memset(inbuf, 0, MAX_KEYSIZE);
1084 1.1 elric ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
1085 1.1 elric if (ret)
1086 1.1 elric goto bail;
1087 1.116.10.3 martin sc->sc_cfuncs = cryptfuncs_find(inbuf);
1088 1.116.10.3 martin if (!sc->sc_cfuncs) {
1089 1.1 elric ret = EINVAL;
1090 1.1 elric goto bail;
1091 1.1 elric }
1092 1.1 elric
1093 1.43 cbiere (void)memset(inbuf, 0, MAX_KEYSIZE);
1094 1.36 christos ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
1095 1.1 elric if (ret)
1096 1.1 elric goto bail;
1097 1.53 christos
1098 1.53 christos for (i = 0; i < __arraycount(encblkno); i++)
1099 1.53 christos if (strcmp(encblkno[i].n, inbuf) == 0)
1100 1.53 christos break;
1101 1.53 christos
1102 1.53 christos if (i == __arraycount(encblkno)) {
1103 1.1 elric ret = EINVAL;
1104 1.1 elric goto bail;
1105 1.1 elric }
1106 1.1 elric
1107 1.15 dan keybytes = ci->ci_keylen / 8 + 1;
1108 1.15 dan if (keybytes > MAX_KEYSIZE) {
1109 1.1 elric ret = EINVAL;
1110 1.1 elric goto bail;
1111 1.1 elric }
1112 1.53 christos
1113 1.36 christos (void)memset(inbuf, 0, MAX_KEYSIZE);
1114 1.15 dan ret = copyin(ci->ci_key, inbuf, keybytes);
1115 1.1 elric if (ret)
1116 1.1 elric goto bail;
1117 1.1 elric
1118 1.116.10.3 martin sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
1119 1.116.10.3 martin sc->sc_cdata.cf_mode = encblkno[i].v;
1120 1.116.10.3 martin sc->sc_cdata.cf_keylen = ci->ci_keylen;
1121 1.116.10.3 martin sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
1122 1.116.10.3 martin &sc->sc_cdata.cf_blocksize);
1123 1.116.10.3 martin if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
1124 1.62 christos log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
1125 1.116.10.3 martin sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
1126 1.116.10.3 martin sc->sc_cdata.cf_priv = NULL;
1127 1.62 christos }
1128 1.78 christos
1129 1.53 christos /*
1130 1.53 christos * The blocksize is supposed to be in bytes. Unfortunately originally
1131 1.53 christos * it was expressed in bits. For compatibility we maintain encblkno
1132 1.53 christos * and encblkno8.
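 * E.g. a cipher configured with a 128 (bit) block size ends up with
 * cf_blocksize 16 (bytes) under "encblkno1", while the legacy
 * "encblkno"/"encblkno8" names keep the value expressed in bits.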
1133 1.53 christos */
1134 1.116.10.3 martin sc->sc_cdata.cf_blocksize /= encblkno[i].d;
1135 1.97 riastrad (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
1136 1.116.10.3 martin if (!sc->sc_cdata.cf_priv) {
1137 1.1 elric ret = EINVAL; /* XXX is this the right error? */
1138 1.1 elric goto bail;
1139 1.1 elric }
1140 1.116.10.3 martin kmem_free(inbuf, MAX_KEYSIZE);
1141 1.1 elric
1142 1.80 christos bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
1143 1.16 elric
1144 1.116.10.3 martin sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
1145 1.116.10.3 martin sc->sc_data_used = false;
1146 1.16 elric
1147 1.98 mlelstv /* Attach the disk. */
1148 1.98 mlelstv dk_attach(dksc);
1149 1.98 mlelstv disk_attach(&dksc->sc_dkdev);
1150 1.1 elric
1151 1.80 christos disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1152 1.77 elric
1153 1.29 yamt /* Discover wedges on this disk. */
1154 1.80 christos dkwedge_discover(&dksc->sc_dkdev);
1155 1.29 yamt
1156 1.1 elric return 0;
1157 1.1 elric
1158 1.1 elric bail:
1159 1.116.10.3 martin kmem_free(inbuf, MAX_KEYSIZE);
1160 1.51 ad (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
1161 1.1 elric return ret;
1162 1.1 elric }
1163 1.1 elric
1164 1.1 elric /* ARGSUSED */
1165 1.1 elric static int
1166 1.116.10.3 martin cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
1167 1.1 elric {
1168 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1169 1.65 dyoung
1170 1.98 mlelstv if (!DK_ATTACHED(dksc))
1171 1.65 dyoung return ENXIO;
1172 1.16 elric
1173 1.29 yamt /* Delete all of our wedges. */
1174 1.80 christos dkwedge_delall(&dksc->sc_dkdev);
1175 1.29 yamt
1176 1.16 elric /* Kill off any queued buffers. */
1177 1.104 mlelstv dk_drain(dksc);
1178 1.80 christos bufq_free(dksc->sc_bufq);
1179 1.1 elric
1180 1.116.10.3 martin (void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
1181 1.116.10.3 martin sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
1182 1.116.10.3 martin kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1183 1.116.10.3 martin kmem_free(sc->sc_data, MAXPHYS);
1184 1.116.10.3 martin sc->sc_data_used = false;
1185 1.98 mlelstv dk_detach(dksc);
1186 1.80 christos disk_detach(&dksc->sc_dkdev);
1187 1.1 elric
1188 1.1 elric return 0;
1189 1.1 elric }
1190 1.1 elric
1191 1.1 elric static int
1192 1.78 christos cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
1193 1.78 christos {
1194 1.116.10.3 martin struct cgd_softc *sc;
1195 1.78 christos struct cgd_user *cgu;
1196 1.116.10.3 martin int unit, error;
1197 1.78 christos
1198 1.78 christos unit = CGDUNIT(dev);
1199 1.78 christos cgu = (struct cgd_user *)data;
1200 1.78 christos
1201 1.78 christos DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1202 1.78 christos dev, unit, data, l));
1203 1.78 christos
1204 1.116.10.3 martin /* XXX: we always return this unit's data, so if cgu_unit is
1205 1.116.10.3 martin * not -1, that field doesn't match the rest
1206 1.116.10.3 martin */
1207 1.78 christos if (cgu->cgu_unit == -1)
1208 1.78 christos cgu->cgu_unit = unit;
1209 1.78 christos
1210 1.78 christos if (cgu->cgu_unit < 0)
1211 1.78 christos return EINVAL; /* XXX: should this be ENXIO? */
1212 1.78 christos
1213 1.116.10.3 martin error = cgd_lock(false);
1214 1.116.10.3 martin if (error)
1215 1.116.10.3 martin return error;
1216 1.116.10.3 martin
1217 1.116.10.3 martin sc = device_lookup_private(&cgd_cd, unit);
1218 1.116.10.3 martin if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
1219 1.78 christos cgu->cgu_dev = 0;
1220 1.78 christos cgu->cgu_alg[0] = '\0';
1221 1.78 christos cgu->cgu_blocksize = 0;
1222 1.78 christos cgu->cgu_mode = 0;
1223 1.78 christos cgu->cgu_keylen = 0;
1224 1.78 christos }
1225 1.78 christos else {
1226 1.116.10.3 martin mutex_enter(&sc->sc_lock);
1227 1.116.10.3 martin cgu->cgu_dev = sc->sc_tdev;
1228 1.116.10.3 martin strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
1229 1.78 christos sizeof(cgu->cgu_alg));
1230 1.116.10.3 martin cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
1231 1.116.10.3 martin cgu->cgu_mode = sc->sc_cdata.cf_mode;
1232 1.116.10.3 martin cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
1233 1.116.10.3 martin mutex_exit(&sc->sc_lock);
1234 1.78 christos }
1235 1.116.10.3 martin
1236 1.116.10.3 martin cgd_unlock();
1237 1.78 christos return 0;
1238 1.78 christos }
1239 1.78 christos
1240 1.78 christos static int
1241 1.116.10.3 martin cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
1242 1.32 christos struct lwp *l)
1243 1.1 elric {
1244 1.80 christos struct disk_geom *dg;
1245 1.1 elric int ret;
1246 1.36 christos char *tmppath;
1247 1.76 christos uint64_t psize;
1248 1.76 christos unsigned secsize;
1249 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1250 1.1 elric
1251 1.116.10.3 martin sc->sc_tvn = vp;
1252 1.116.10.3 martin sc->sc_tpath = NULL;
1253 1.1 elric
1254 1.116.10.3 martin tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1255 1.116.10.3 martin ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
1256 1.1 elric if (ret)
1257 1.1 elric goto bail;
1258 1.116.10.3 martin sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
1259 1.116.10.3 martin memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);
1260 1.1 elric
1261 1.116.10.3 martin sc->sc_tdev = vp->v_rdev;
1262 1.1 elric
1263 1.76 christos if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1264 1.1 elric goto bail;
1265 1.1 elric
1266 1.76 christos if (psize == 0) {
1267 1.1 elric ret = ENODEV;
1268 1.1 elric goto bail;
1269 1.1 elric }
1270 1.1 elric
1271 1.1 elric /*
1272 1.1 elric * XXX here we should probe the underlying device. If we
1273 1.1 elric * are accessing a partition of type RAW_PART, then
1274 1.1 elric * we should populate our initial geometry with the
1275 1.1 elric * geometry that we discover from the device.
1276 1.1 elric */
1277 1.80 christos dg = &dksc->sc_dkdev.dk_geom;
1278 1.80 christos memset(dg, 0, sizeof(*dg));
1279 1.80 christos dg->dg_secperunit = psize;
1280 1.105 mlelstv dg->dg_secsize = secsize;
1281 1.80 christos dg->dg_ntracks = 1;
1282 1.105 mlelstv dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1283 1.80 christos dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
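/*
 * E.g. with 512-byte sectors this synthesizes 2048 sectors per track
 * and one track per cylinder; the CHS values are placeholders, and
 * only the sector size and total sector count come from the real
 * device.
 */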
1284 1.1 elric
1285 1.1 elric bail:
1286 1.116.10.3 martin kmem_free(tmppath, MAXPATHLEN);
1287 1.116.10.3 martin if (ret && sc->sc_tpath)
1288 1.116.10.3 martin kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1289 1.1 elric return ret;
1290 1.1 elric }
1291 1.1 elric
1292 1.1 elric /*
1293 1.1 elric * Our generic cipher entry point. This takes care of the
1294 1.1 elric * IV mode and passes off the work to the specific cipher.
1295 1.1 elric * We implement here the IV method ``encrypted block
1296 1.1 elric * number''.
1297 1.22 perry *
1298 1.1 elric * XXXrcd: for now we rely on our own crypto framework defined
1299 1.1 elric * in dev/cgd_crypto.c. This will change when we
1300 1.1 elric * get a generic kernel crypto framework.
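 *
 * Illustrative outline of the per-sector work done in cgd_cipher()
 * below (the ``encrypted block number'' IV method):
 *
 *	memset(blkno_buf, 0, blocksize);
 *	blkno2blkno_buf(blkno_buf, blkno);	(encode blkno, LSB first)
 *	ciprep(privdata, iv, blkno_buf, ...);	(IV = cipher of blkno_buf)
 *	cipher(privdata, &dstuio, &srcuio, iv, dir);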
1301 1.1 elric */
1302 1.1 elric
1303 1.1 elric static void
1304 1.25 xtraeme blkno2blkno_buf(char *sbuf, daddr_t blkno)
1305 1.1 elric {
1306 1.1 elric int i;
1307 1.1 elric
1308 1.1 elric /* Set up the blkno in blkno_buf; here we do not care much
1309 1.1 elric * about the final layout of the information as long as we
1310 1.1 elric * can guarantee that each sector will have a different IV
1311 1.1 elric * and that the endianness of the machine will not affect
1312 1.1 elric * the representation that we have chosen.
1313 1.1 elric *
1314 1.1 elric * We choose this representation, because it does not rely
1315 1.1 elric * on the size of buf (which is the blocksize of the cipher),
1316 1.1 elric * but allows daddr_t to grow without breaking existing
1317 1.1 elric * disks.
1318 1.1 elric *
1319 1.1 elric * Note that blkno2blkno_buf does not take a size as input,
1320 1.1 elric * and hence must be called on a pre-zeroed buffer of length
1321 1.1 elric * greater than or equal to sizeof(daddr_t).
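 *
 * For example, blkno 0x1122 with an 8-byte daddr_t yields the bytes
 * 22 11 00 00 00 00 00 00 at the start of the pre-zeroed buffer,
 * regardless of host endianness.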
1322 1.1 elric */
1323 1.1 elric for (i=0; i < sizeof(daddr_t); i++) {
1324 1.25 xtraeme *sbuf++ = blkno & 0xff;
1325 1.1 elric blkno >>= 8;
1326 1.1 elric }
1327 1.1 elric }
1328 1.1 elric
1329 1.116.10.3 martin static struct cpu_info *
1330 1.116.10.3 martin cgd_cpu(struct cgd_softc *sc)
1331 1.116.10.3 martin {
1332 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1333 1.116.10.3 martin struct cpu_info *ci = NULL;
1334 1.116.10.3 martin u_int cidx, i;
1335 1.116.10.3 martin
1336 1.116.10.3 martin if (cw->cw_busy == 0) {
1337 1.116.10.3 martin cw->cw_last = cpu_index(curcpu());
1338 1.116.10.3 martin return NULL;
1339 1.116.10.3 martin }
1340 1.116.10.3 martin
1341 1.116.10.3 martin for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1342 1.116.10.3 martin if (cidx >= maxcpus)
1343 1.116.10.3 martin cidx = 0;
1344 1.116.10.3 martin ci = cpu_lookup(cidx);
1345 1.116.10.3 martin if (ci) {
1346 1.116.10.3 martin cw->cw_last = cidx;
1347 1.116.10.3 martin break;
1348 1.116.10.3 martin }
1349 1.116.10.3 martin }
1350 1.116.10.3 martin
1351 1.116.10.3 martin return ci;
1352 1.116.10.3 martin }
1353 1.116.10.3 martin
1354 1.116.10.3 martin static void
1355 1.116.10.3 martin cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
1356 1.116.10.3 martin {
1357 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1358 1.116.10.3 martin struct cpu_info *ci;
1359 1.116.10.3 martin
1360 1.116.10.3 martin mutex_enter(&cw->cw_lock);
1361 1.116.10.3 martin ci = cgd_cpu(sc);
1362 1.116.10.3 martin cw->cw_busy++;
1363 1.116.10.3 martin mutex_exit(&cw->cw_lock);
1364 1.116.10.3 martin
1365 1.116.10.3 martin workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
1366 1.116.10.3 martin }
1367 1.116.10.3 martin
1368 1.116.10.3 martin static void
1369 1.116.10.3 martin cgd_process(struct work *wk, void *arg)
1370 1.116.10.3 martin {
1371 1.116.10.3 martin struct cgd_xfer *cx = (struct cgd_xfer *)wk;
1372 1.116.10.3 martin struct cgd_softc *sc = cx->cx_sc;
1373 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1374 1.116.10.3 martin
1375 1.116.10.3 martin cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
1376 1.116.10.3 martin cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
1377 1.116.10.3 martin
1378 1.116.10.3 martin if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
1379 1.116.10.3 martin cgd_diskstart2(sc, cx);
1380 1.116.10.3 martin } else {
1381 1.116.10.3 martin cgd_iodone2(sc, cx);
1382 1.116.10.3 martin }
1383 1.116.10.3 martin
1384 1.116.10.3 martin mutex_enter(&cw->cw_lock);
1385 1.116.10.3 martin if (cw->cw_busy > 0)
1386 1.116.10.3 martin cw->cw_busy--;
1387 1.116.10.3 martin mutex_exit(&cw->cw_lock);
1388 1.116.10.3 martin }
1389 1.116.10.3 martin
1390 1.1 elric static void
1391 1.116.10.3 martin cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
1392 1.44 christos size_t len, daddr_t blkno, size_t secsize, int dir)
1393 1.1 elric {
1394 1.44 christos char *dst = dstv;
1395 1.112 alnsn char *src = srcv;
1396 1.116.10.3 martin cfunc_cipher_prep *ciprep = sc->sc_cfuncs->cf_cipher_prep;
1397 1.116.10.3 martin cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
1398 1.1 elric struct uio dstuio;
1399 1.1 elric struct uio srcuio;
1400 1.1 elric struct iovec dstiov[2];
1401 1.1 elric struct iovec srciov[2];
1402 1.116.10.3 martin size_t blocksize = sc->sc_cdata.cf_blocksize;
1403 1.105 mlelstv size_t todo;
1404 1.112 alnsn char blkno_buf[CGD_MAXBLOCKSIZE], *iv;
1405 1.1 elric
1406 1.1 elric DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
1407 1.1 elric
1408 1.22 perry DIAGCONDPANIC(len % blocksize != 0,
1409 1.1 elric ("cgd_cipher: len %% blocksize != 0"));
1410 1.1 elric
1411 1.1 elric /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
1412 1.1 elric DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
1413 1.1 elric ("cgd_cipher: sizeof(daddr_t) > blocksize"));
1414 1.1 elric
1415 1.112 alnsn DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
1416 1.112 alnsn ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));
1417 1.1 elric
1418 1.1 elric dstuio.uio_iov = dstiov;
1419 1.112 alnsn dstuio.uio_iovcnt = 1;
1420 1.1 elric
1421 1.1 elric srcuio.uio_iov = srciov;
1422 1.112 alnsn srcuio.uio_iovcnt = 1;
1423 1.1 elric
1424 1.105 mlelstv for (; len > 0; len -= todo) {
1425 1.105 mlelstv todo = MIN(len, secsize);
1426 1.105 mlelstv
1427 1.112 alnsn dstiov[0].iov_base = dst;
1428 1.112 alnsn srciov[0].iov_base = src;
1429 1.112 alnsn dstiov[0].iov_len = todo;
1430 1.112 alnsn srciov[0].iov_len = todo;
1431 1.1 elric
1432 1.64 christos memset(blkno_buf, 0x0, blocksize);
1433 1.1 elric blkno2blkno_buf(blkno_buf, blkno);
1434 1.1 elric IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
1435 1.64 christos blkno_buf, blocksize));
1436 1.112 alnsn
1437 1.112 alnsn /*
1438 1.112 alnsn * Compute an initial IV. All ciphers
1439 1.112 alnsn * can convert blkno_buf in-place.
1440 1.112 alnsn */
1441 1.112 alnsn iv = blkno_buf;
1442 1.116.10.3 martin ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
1443 1.112 alnsn IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));
1444 1.112 alnsn
1445 1.116.10.3 martin cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);
1446 1.1 elric
1447 1.105 mlelstv dst += todo;
1448 1.105 mlelstv src += todo;
1449 1.1 elric blkno++;
1450 1.1 elric }
1451 1.1 elric }
1452 1.1 elric
1453 1.1 elric #ifdef DEBUG
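/* Debug helper: print len bytes of buf in hex, prefixed with start. */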
1454 1.1 elric static void
1455 1.26 drochner hexprint(const char *start, void *buf, int len)
1456 1.1 elric {
1457 1.1 elric char *c = buf;
1458 1.1 elric
1459 1.1 elric DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
1460 1.1 elric printf("%s: len=%06d 0x", start, len);
1461 1.1 elric while (len--)
1462 1.43 cbiere printf("%02x", (unsigned char) *c++);
1463 1.1 elric }
1464 1.1 elric #endif
1465 1.58 haad
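/*
 * Known-answer self-test, run at module init: for every entry in
 * selftests[] build a temporary softc, encrypt the sample plaintext in
 * place and compare it with the expected ciphertext, then decrypt and
 * compare with the original plaintext, panicking on any mismatch.
 */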
1466 1.112 alnsn static void
1467 1.112 alnsn selftest(void)
1468 1.112 alnsn {
1469 1.116.10.3 martin struct cgd_softc sc;
1470 1.112 alnsn void *buf;
1471 1.112 alnsn
1472 1.112 alnsn printf("running cgd selftest ");
1473 1.112 alnsn
1474 1.112 alnsn for (size_t i = 0; i < __arraycount(selftests); i++) {
1475 1.112 alnsn const char *alg = selftests[i].alg;
1476 1.112 alnsn const uint8_t *key = selftests[i].key;
1477 1.112 alnsn int keylen = selftests[i].keylen;
1478 1.112 alnsn int txtlen = selftests[i].txtlen;
1479 1.112 alnsn
1480 1.112 alnsn printf("%s-%d ", alg, keylen);
1481 1.112 alnsn
1482 1.116.10.3 martin memset(&sc, 0, sizeof(sc));
1483 1.112 alnsn
1484 1.116.10.3 martin sc.sc_cfuncs = cryptfuncs_find(alg);
1485 1.116.10.3 martin if (sc.sc_cfuncs == NULL)
1486 1.112 alnsn panic("%s not implemented", alg);
1487 1.112 alnsn
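/*
 * cf_blocksize is expressed in bits when handed to cf_init();
 * convert from the table's byte count here and back to bytes
 * below before running cgd_cipher().
 */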
1488 1.116.10.3 martin sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1489 1.116.10.3 martin sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1490 1.116.10.3 martin sc.sc_cdata.cf_keylen = keylen;
1491 1.116.10.3 martin
1492 1.116.10.3 martin sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
1493 1.116.10.3 martin key, &sc.sc_cdata.cf_blocksize);
1494 1.116.10.3 martin if (sc.sc_cdata.cf_priv == NULL)
1495 1.112 alnsn panic("cf_priv is NULL");
1496 1.116.10.3 martin if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1497 1.116.10.3 martin panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
1498 1.112 alnsn
1499 1.116.10.3 martin sc.sc_cdata.cf_blocksize /= 8;
1500 1.112 alnsn
1501 1.116.10.3 martin buf = kmem_alloc(txtlen, KM_SLEEP);
1502 1.112 alnsn memcpy(buf, selftests[i].ptxt, txtlen);
1503 1.112 alnsn
1504 1.116.10.3 martin cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1505 1.112 alnsn selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1506 1.112 alnsn if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1507 1.112 alnsn panic("encryption is broken");
1508 1.112 alnsn
1509 1.116.10.3 martin cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1510 1.112 alnsn selftests[i].secsize, CGD_CIPHER_DECRYPT);
1511 1.112 alnsn if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1512 1.112 alnsn panic("decryption is broken");
1513 1.112 alnsn
1514 1.116.10.3 martin kmem_free(buf, txtlen);
1515 1.116.10.3 martin sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
1516 1.112 alnsn }
1517 1.112 alnsn
1518 1.112 alnsn printf("done\n");
1519 1.112 alnsn }
1520 1.112 alnsn
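/* Module declaration; the third argument names the modules cgd requires. */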
1521 1.116 pgoyette MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");
1522 1.74 jruoho
1523 1.58 haad #ifdef _MODULE
1524 1.66 dyoung CFDRIVER_DECL(cgd, DV_DISK, NULL);
1525 1.109 pgoyette
1526 1.109 pgoyette devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
1527 1.74 jruoho #endif
1528 1.58 haad
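/*
 * module(9) command handler.  MODULE_CMD_INIT runs the selftest and,
 * when built as a loadable module, registers the cfdriver, cfattach
 * and {b,c}devsw entries, unwinding on partial failure; MODULE_CMD_FINI
 * detaches them again in reverse order.
 */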
1529 1.58 haad static int
1530 1.58 haad cgd_modcmd(modcmd_t cmd, void *arg)
1531 1.58 haad {
1532 1.82 martin int error = 0;
1533 1.74 jruoho
1534 1.58 haad switch (cmd) {
1535 1.58 haad case MODULE_CMD_INIT:
1536 1.112 alnsn selftest();
1537 1.74 jruoho #ifdef _MODULE
1538 1.116.10.3 martin mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1539 1.116.10.3 martin cv_init(&cgd_spawning_cv, "cgspwn");
1540 1.116.10.3 martin
1541 1.66 dyoung error = config_cfdriver_attach(&cgd_cd);
1542 1.66 dyoung if (error)
1543 1.66 dyoung break;
1544 1.66 dyoung
1545 1.66 dyoung error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1546 1.66 dyoung if (error) {
1547 1.66 dyoung config_cfdriver_detach(&cgd_cd);
1548 1.109 pgoyette aprint_error("%s: unable to register cfattach for "
1549 1.109 pgoyette "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1550 1.66 dyoung break;
1551 1.66 dyoung }
1552 1.109 pgoyette /*
1553 1.109 pgoyette * Attach the {b,c}devsw's
1554 1.109 pgoyette */
1555 1.109 pgoyette error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1556 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1557 1.74 jruoho
1558 1.109 pgoyette /*
1559 1.109 pgoyette * If devsw_attach fails, remove from autoconf database
1560 1.109 pgoyette */
1561 1.66 dyoung if (error) {
1562 1.66 dyoung config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1563 1.66 dyoung config_cfdriver_detach(&cgd_cd);
1564 1.109 pgoyette aprint_error("%s: unable to attach %s devsw, "
1565 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1566 1.66 dyoung break;
1567 1.66 dyoung }
1568 1.74 jruoho #endif
1569 1.58 haad break;
1570 1.58 haad
1571 1.58 haad case MODULE_CMD_FINI:
1572 1.74 jruoho #ifdef _MODULE
1573 1.109 pgoyette /*
1574 1.109 pgoyette * Remove {b,c}devsw's
1575 1.109 pgoyette */
1576 1.109 pgoyette devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1577 1.109 pgoyette
1578 1.109 pgoyette /*
1579 1.109 pgoyette * Now remove device from autoconf database
1580 1.109 pgoyette */
1581 1.66 dyoung error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1582 1.109 pgoyette if (error) {
1583 1.110 pgoyette (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1584 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1585 1.109 pgoyette aprint_error("%s: failed to detach %s cfattach, "
1586 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1587 1.109 pgoyette break;
1588 1.109 pgoyette }
1589 1.109 pgoyette error = config_cfdriver_detach(&cgd_cd);
1590 1.109 pgoyette if (error) {
1591 1.110 pgoyette (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1592 1.110 pgoyette (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1593 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1594 1.109 pgoyette aprint_error("%s: failed to detach %s cfdriver, "
1595 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1596 1.66 dyoung break;
1597 1.109 pgoyette }
1598 1.116.10.3 martin
1599 1.116.10.3 martin cv_destroy(&cgd_spawning_cv);
1600 1.116.10.3 martin mutex_destroy(&cgd_spawning_mtx);
1601 1.74 jruoho #endif
1602 1.58 haad break;
1603 1.58 haad
1604 1.58 haad case MODULE_CMD_STAT:
1605 1.109 pgoyette error = ENOTTY;
1606 1.109 pgoyette break;
1607 1.58 haad default:
1608 1.109 pgoyette error = ENOTTY;
1609 1.109 pgoyette break;
1610 1.58 haad }
1611 1.58 haad
1612 1.58 haad return error;
1613 1.58 haad }