cgd.c revision 1.116.10.3

1 1.116.10.3 martin /* $NetBSD: cgd.c,v 1.116.10.3 2020/04/06 14:57:42 martin Exp $ */
2 1.1 elric
3 1.1 elric /*-
4 1.1 elric * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 1.1 elric * All rights reserved.
6 1.1 elric *
7 1.1 elric * This code is derived from software contributed to The NetBSD Foundation
8 1.1 elric * by Roland C. Dowdeswell.
9 1.1 elric *
10 1.1 elric * Redistribution and use in source and binary forms, with or without
11 1.1 elric * modification, are permitted provided that the following conditions
12 1.1 elric * are met:
13 1.1 elric * 1. Redistributions of source code must retain the above copyright
14 1.1 elric * notice, this list of conditions and the following disclaimer.
15 1.1 elric * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 elric * notice, this list of conditions and the following disclaimer in the
17 1.1 elric * documentation and/or other materials provided with the distribution.
18 1.1 elric *
19 1.1 elric * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 elric * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 elric * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 elric * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 elric * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 elric * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 elric * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 elric * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 elric * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 elric * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 elric * POSSIBILITY OF SUCH DAMAGE.
30 1.1 elric */
31 1.1 elric
32 1.1 elric #include <sys/cdefs.h>
33 1.116.10.3 martin __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.116.10.3 2020/04/06 14:57:42 martin Exp $");
34 1.1 elric
35 1.1 elric #include <sys/types.h>
36 1.1 elric #include <sys/param.h>
37 1.1 elric #include <sys/systm.h>
38 1.1 elric #include <sys/proc.h>
39 1.1 elric #include <sys/errno.h>
40 1.1 elric #include <sys/buf.h>
41 1.21 yamt #include <sys/bufq.h>
42 1.116.10.3 martin #include <sys/kmem.h>
43 1.74 jruoho #include <sys/module.h>
44 1.1 elric #include <sys/pool.h>
45 1.1 elric #include <sys/ioctl.h>
46 1.1 elric #include <sys/device.h>
47 1.1 elric #include <sys/disk.h>
48 1.1 elric #include <sys/disklabel.h>
49 1.1 elric #include <sys/fcntl.h>
50 1.71 dholland #include <sys/namei.h> /* for pathbuf */
51 1.1 elric #include <sys/vnode.h>
52 1.1 elric #include <sys/conf.h>
53 1.62 christos #include <sys/syslog.h>
54 1.116.10.3 martin #include <sys/workqueue.h>
55 1.116.10.3 martin #include <sys/cpu.h>
56 1.1 elric
57 1.1 elric #include <dev/dkvar.h>
58 1.1 elric #include <dev/cgdvar.h>
59 1.1 elric
60 1.88 hannken #include <miscfs/specfs/specdev.h> /* for v_rdev */
61 1.88 hannken
62 1.102 christos #include "ioconf.h"
63 1.102 christos
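/*
 * Cipher self-test vectors: each entry names a cipher and supplies a key
 * together with a plaintext/ciphertext pair that the cipher is expected
 * to reproduce for the given block number and sector size (see
 * selftest() below).
 */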
64 1.112 alnsn struct selftest_params {
65 1.112 alnsn const char *alg;
66 1.112 alnsn int blocksize; /* number of bytes */
67 1.112 alnsn int secsize;
68 1.112 alnsn daddr_t blkno;
69 1.112 alnsn int keylen; /* number of bits */
70 1.112 alnsn int txtlen; /* number of bytes */
71 1.112 alnsn const uint8_t *key;
72 1.112 alnsn const uint8_t *ptxt;
73 1.112 alnsn const uint8_t *ctxt;
74 1.112 alnsn };
75 1.112 alnsn
76 1.1 elric /* Entry Point Functions */
77 1.1 elric
78 1.18 thorpej static dev_type_open(cgdopen);
79 1.18 thorpej static dev_type_close(cgdclose);
80 1.18 thorpej static dev_type_read(cgdread);
81 1.18 thorpej static dev_type_write(cgdwrite);
82 1.18 thorpej static dev_type_ioctl(cgdioctl);
83 1.18 thorpej static dev_type_strategy(cgdstrategy);
84 1.18 thorpej static dev_type_dump(cgddump);
85 1.18 thorpej static dev_type_size(cgdsize);
86 1.1 elric
87 1.1 elric const struct bdevsw cgd_bdevsw = {
88 1.84 dholland .d_open = cgdopen,
89 1.84 dholland .d_close = cgdclose,
90 1.84 dholland .d_strategy = cgdstrategy,
91 1.84 dholland .d_ioctl = cgdioctl,
92 1.84 dholland .d_dump = cgddump,
93 1.84 dholland .d_psize = cgdsize,
94 1.89 dholland .d_discard = nodiscard,
95 1.116.10.3 martin .d_flag = D_DISK | D_MPSAFE
96 1.1 elric };
97 1.1 elric
98 1.1 elric const struct cdevsw cgd_cdevsw = {
99 1.84 dholland .d_open = cgdopen,
100 1.84 dholland .d_close = cgdclose,
101 1.84 dholland .d_read = cgdread,
102 1.84 dholland .d_write = cgdwrite,
103 1.84 dholland .d_ioctl = cgdioctl,
104 1.84 dholland .d_stop = nostop,
105 1.84 dholland .d_tty = notty,
106 1.84 dholland .d_poll = nopoll,
107 1.84 dholland .d_mmap = nommap,
108 1.84 dholland .d_kqfilter = nokqfilter,
109 1.90 dholland .d_discard = nodiscard,
110 1.116.10.3 martin .d_flag = D_DISK | D_MPSAFE
111 1.1 elric };
112 1.1 elric
113 1.112 alnsn /*
114 1.112 alnsn * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
115 1.112 alnsn */
116 1.112 alnsn static const uint8_t selftest_aes_xts_256_ptxt[64] = {
117 1.112 alnsn 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
118 1.112 alnsn 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
119 1.112 alnsn 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
120 1.112 alnsn 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
121 1.112 alnsn 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
122 1.112 alnsn 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
123 1.112 alnsn 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
124 1.112 alnsn 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
125 1.112 alnsn };
126 1.112 alnsn
127 1.112 alnsn static const uint8_t selftest_aes_xts_256_ctxt[64] = {
128 1.112 alnsn 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
129 1.112 alnsn 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
130 1.112 alnsn 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
131 1.112 alnsn 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
132 1.112 alnsn 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
133 1.112 alnsn 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
134 1.112 alnsn 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
135 1.112 alnsn 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
136 1.112 alnsn };
137 1.112 alnsn
138 1.112 alnsn static const uint8_t selftest_aes_xts_256_key[33] = {
139 1.112 alnsn 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
140 1.112 alnsn 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
141 1.112 alnsn 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
142 1.112 alnsn 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
143 1.112 alnsn 0
144 1.112 alnsn };
145 1.112 alnsn
146 1.112 alnsn /*
147 1.112 alnsn * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
148 1.112 alnsn */
149 1.112 alnsn static const uint8_t selftest_aes_xts_512_ptxt[64] = {
150 1.112 alnsn 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
151 1.112 alnsn 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
152 1.112 alnsn 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
153 1.112 alnsn 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
154 1.112 alnsn 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
155 1.112 alnsn 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
156 1.112 alnsn 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
157 1.112 alnsn 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
158 1.112 alnsn };
159 1.112 alnsn
160 1.112 alnsn static const uint8_t selftest_aes_xts_512_ctxt[64] = {
161 1.112 alnsn 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
162 1.112 alnsn 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
163 1.112 alnsn 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
164 1.112 alnsn 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
165 1.112 alnsn 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
166 1.112 alnsn 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
167 1.112 alnsn 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
168 1.112 alnsn 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
169 1.112 alnsn };
170 1.112 alnsn
171 1.112 alnsn static const uint8_t selftest_aes_xts_512_key[65] = {
172 1.112 alnsn 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
173 1.112 alnsn 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
174 1.112 alnsn 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
175 1.112 alnsn 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
176 1.112 alnsn 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
177 1.112 alnsn 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
178 1.112 alnsn 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
179 1.112 alnsn 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
180 1.112 alnsn 0
181 1.112 alnsn };
182 1.112 alnsn
183 1.112 alnsn const struct selftest_params selftests[] = {
184 1.112 alnsn {
185 1.112 alnsn .alg = "aes-xts",
186 1.112 alnsn .blocksize = 16,
187 1.112 alnsn .secsize = 512,
188 1.112 alnsn .blkno = 1,
189 1.112 alnsn .keylen = 256,
190 1.112 alnsn .txtlen = sizeof(selftest_aes_xts_256_ptxt),
191 1.112 alnsn .key = selftest_aes_xts_256_key,
192 1.112 alnsn .ptxt = selftest_aes_xts_256_ptxt,
193 1.112 alnsn .ctxt = selftest_aes_xts_256_ctxt
194 1.112 alnsn },
195 1.112 alnsn {
196 1.112 alnsn .alg = "aes-xts",
197 1.112 alnsn .blocksize = 16,
198 1.112 alnsn .secsize = 512,
199 1.112 alnsn .blkno = 0xffff,
200 1.112 alnsn .keylen = 512,
201 1.112 alnsn .txtlen = sizeof(selftest_aes_xts_512_ptxt),
202 1.112 alnsn .key = selftest_aes_xts_512_key,
203 1.112 alnsn .ptxt = selftest_aes_xts_512_ptxt,
204 1.112 alnsn .ctxt = selftest_aes_xts_512_ctxt
205 1.112 alnsn }
206 1.112 alnsn };
207 1.112 alnsn
208 1.65 dyoung static int cgd_match(device_t, cfdata_t, void *);
209 1.65 dyoung static void cgd_attach(device_t, device_t, void *);
210 1.65 dyoung static int cgd_detach(device_t, int);
211 1.65 dyoung static struct cgd_softc *cgd_spawn(int);
212 1.116.10.3 martin static struct cgd_worker *cgd_create_one_worker(void);
213 1.116.10.3 martin static void cgd_destroy_one_worker(struct cgd_worker *);
214 1.116.10.3 martin static struct cgd_worker *cgd_create_worker(void);
215 1.116.10.3 martin static void cgd_destroy_worker(struct cgd_worker *);
216 1.65 dyoung static int cgd_destroy(device_t);
217 1.65 dyoung
218 1.1 elric /* Internal Functions */
219 1.1 elric
220 1.99 mlelstv static int cgd_diskstart(device_t, struct buf *);
221 1.116.10.3 martin static void cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
222 1.1 elric static void cgdiodone(struct buf *);
223 1.116.10.3 martin static void cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
224 1.116.10.3 martin static void cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
225 1.116.10.3 martin static void cgd_process(struct work *, void *);
226 1.108 riastrad static int cgd_dumpblocks(device_t, void *, daddr_t, int);
227 1.1 elric
228 1.32 christos static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
229 1.65 dyoung static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
230 1.78 christos static int cgd_ioctl_get(dev_t, void *, struct lwp *);
231 1.27 drochner static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
232 1.32 christos struct lwp *);
233 1.44 christos static void cgd_cipher(struct cgd_softc *, void *, void *,
234 1.1 elric size_t, daddr_t, size_t, int);
235 1.1 elric
236 1.29 yamt static struct dkdriver cgddkdriver = {
237 1.98 mlelstv .d_minphys = minphys,
238 1.98 mlelstv .d_open = cgdopen,
239 1.98 mlelstv .d_close = cgdclose,
240 1.98 mlelstv .d_strategy = cgdstrategy,
241 1.98 mlelstv .d_iosize = NULL,
242 1.99 mlelstv .d_diskstart = cgd_diskstart,
243 1.108 riastrad .d_dumpblocks = cgd_dumpblocks,
244 1.98 mlelstv .d_lastclose = NULL
245 1.29 yamt };
246 1.29 yamt
247 1.65 dyoung CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
248 1.65 dyoung cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
249 1.65 dyoung
250 1.1 elric /* DIAGNOSTIC and DEBUG definitions */
251 1.1 elric
252 1.1 elric #if defined(CGDDEBUG) && !defined(DEBUG)
253 1.1 elric #define DEBUG
254 1.1 elric #endif
255 1.1 elric
256 1.1 elric #ifdef DEBUG
257 1.1 elric int cgddebug = 0;
258 1.1 elric
259 1.1 elric #define CGDB_FOLLOW 0x1
260 1.1 elric #define CGDB_IO 0x2
261 1.1 elric #define CGDB_CRYPTO 0x4
262 1.1 elric
263 1.1 elric #define IFDEBUG(x,y) if (cgddebug & (x)) y
264 1.1 elric #define DPRINTF(x,y) IFDEBUG(x, printf y)
265 1.1 elric #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
266 1.1 elric
267 1.26 drochner static void hexprint(const char *, void *, int);
268 1.1 elric
269 1.1 elric #else
270 1.1 elric #define IFDEBUG(x,y)
271 1.1 elric #define DPRINTF(x,y)
272 1.1 elric #define DPRINTF_FOLLOW(y)
273 1.1 elric #endif
274 1.1 elric
275 1.1 elric #ifdef DIAGNOSTIC
276 1.22 perry #define DIAGPANIC(x) panic x
277 1.1 elric #define DIAGCONDPANIC(x,y) if (x) panic y
278 1.1 elric #else
279 1.1 elric #define DIAGPANIC(x)
280 1.1 elric #define DIAGCONDPANIC(x,y)
281 1.1 elric #endif
282 1.1 elric
283 1.1 elric /* Global variables */
284 1.1 elric
285 1.116.10.3 martin static kmutex_t cgd_spawning_mtx;
286 1.116.10.3 martin static kcondvar_t cgd_spawning_cv;
287 1.116.10.3 martin static bool cgd_spawning;
288 1.116.10.3 martin static struct cgd_worker *cgd_worker;
289 1.116.10.3 martin static u_int cgd_refcnt; /* number of users of cgd_worker */
290 1.116.10.3 martin
291 1.1 elric /* Utility Functions */
292 1.1 elric
293 1.1 elric #define CGDUNIT(x) DISKUNIT(x)
294 1.1 elric
295 1.65 dyoung /* The code */
296 1.65 dyoung
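/*
 * cgd_lock()/cgd_unlock() serialize creation and destruction of cgd
 * pseudo-devices and of the shared worker: only one thread may be in
 * such a section at a time.  With intr set the wait is interruptible
 * by signals; a non-zero return means the lock was not taken.
 */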
297 1.116.10.3 martin static int
298 1.116.10.3 martin cgd_lock(bool intr)
299 1.1 elric {
300 1.116.10.3 martin int error = 0;
301 1.1 elric
302 1.116.10.3 martin mutex_enter(&cgd_spawning_mtx);
303 1.116.10.3 martin while (cgd_spawning) {
304 1.116.10.3 martin if (intr)
305 1.116.10.3 martin error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
306 1.116.10.3 martin else
307 1.116.10.3 martin cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
308 1.116.10.3 martin }
309 1.116.10.3 martin if (error == 0)
310 1.116.10.3 martin cgd_spawning = true;
311 1.116.10.3 martin mutex_exit(&cgd_spawning_mtx);
312 1.116.10.3 martin return error;
313 1.116.10.3 martin }
314 1.65 dyoung
315 1.116.10.3 martin static void
316 1.116.10.3 martin cgd_unlock(void)
317 1.116.10.3 martin {
318 1.116.10.3 martin mutex_enter(&cgd_spawning_mtx);
319 1.116.10.3 martin cgd_spawning = false;
320 1.116.10.3 martin cv_broadcast(&cgd_spawning_cv);
321 1.116.10.3 martin mutex_exit(&cgd_spawning_mtx);
322 1.116.10.3 martin }
323 1.116.10.3 martin
324 1.116.10.3 martin static struct cgd_softc *
325 1.116.10.3 martin getcgd_softc(dev_t dev)
326 1.116.10.3 martin {
327 1.116.10.3 martin return device_lookup_private(&cgd_cd, CGDUNIT(dev));
328 1.1 elric }
329 1.1 elric
330 1.65 dyoung static int
331 1.65 dyoung cgd_match(device_t self, cfdata_t cfdata, void *aux)
332 1.65 dyoung {
333 1.65 dyoung
334 1.65 dyoung return 1;
335 1.65 dyoung }
336 1.1 elric
337 1.1 elric static void
338 1.65 dyoung cgd_attach(device_t parent, device_t self, void *aux)
339 1.1 elric {
340 1.65 dyoung struct cgd_softc *sc = device_private(self);
341 1.1 elric
342 1.85 skrll mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
343 1.116.10.3 martin cv_init(&sc->sc_cv, "cgdcv");
344 1.98 mlelstv dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
345 1.65 dyoung disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
346 1.70 joerg
347 1.98 mlelstv if (!pmf_device_register(self, NULL, NULL))
348 1.107 msaitoh aprint_error_dev(self,
349 1.107 msaitoh "unable to register power management hooks\n");
350 1.65 dyoung }
351 1.65 dyoung
352 1.65 dyoung
353 1.65 dyoung static int
354 1.65 dyoung cgd_detach(device_t self, int flags)
355 1.65 dyoung {
356 1.67 dyoung int ret;
357 1.67 dyoung const int pmask = 1 << RAW_PART;
358 1.65 dyoung struct cgd_softc *sc = device_private(self);
359 1.67 dyoung struct dk_softc *dksc = &sc->sc_dksc;
360 1.67 dyoung
361 1.67 dyoung if (DK_BUSY(dksc, pmask))
362 1.67 dyoung return EBUSY;
363 1.65 dyoung
364 1.98 mlelstv if (DK_ATTACHED(dksc) &&
365 1.67 dyoung (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
366 1.67 dyoung return ret;
367 1.65 dyoung
368 1.67 dyoung disk_destroy(&dksc->sc_dkdev);
369 1.116.10.3 martin cv_destroy(&sc->sc_cv);
370 1.86 christos mutex_destroy(&sc->sc_lock);
371 1.65 dyoung
372 1.67 dyoung return 0;
373 1.1 elric }
374 1.1 elric
375 1.1 elric void
376 1.1 elric cgdattach(int num)
377 1.1 elric {
378 1.116.10.3 martin #ifndef _MODULE
379 1.65 dyoung int error;
380 1.65 dyoung
381 1.116.10.3 martin mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
382 1.116.10.3 martin cv_init(&cgd_spawning_cv, "cgspwn");
383 1.116.10.3 martin
384 1.65 dyoung error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
385 1.65 dyoung if (error != 0)
386 1.65 dyoung aprint_error("%s: unable to register cfattach\n",
387 1.65 dyoung cgd_cd.cd_name);
388 1.116.10.3 martin #endif
389 1.65 dyoung }
390 1.65 dyoung
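/*
 * Auto-configure a unit on first open: allocate a cfdata record, take a
 * reference on the shared worker and attach the pseudo-device.  Called
 * with the spawn lock held.
 */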
391 1.65 dyoung static struct cgd_softc *
392 1.65 dyoung cgd_spawn(int unit)
393 1.65 dyoung {
394 1.65 dyoung cfdata_t cf;
395 1.116.10.3 martin struct cgd_worker *cw;
396 1.116.10.3 martin struct cgd_softc *sc;
397 1.65 dyoung
398 1.116.10.3 martin cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
399 1.65 dyoung cf->cf_name = cgd_cd.cd_name;
400 1.65 dyoung cf->cf_atname = cgd_cd.cd_name;
401 1.65 dyoung cf->cf_unit = unit;
402 1.65 dyoung cf->cf_fstate = FSTATE_STAR;
403 1.65 dyoung
404 1.116.10.3 martin cw = cgd_create_one_worker();
405 1.116.10.3 martin if (cw == NULL) {
406 1.116.10.3 martin kmem_free(cf, sizeof(*cf));
407 1.116.10.3 martin return NULL;
408 1.116.10.3 martin }
409 1.116.10.3 martin
410 1.116.10.3 martin sc = device_private(config_attach_pseudo(cf));
411 1.116.10.3 martin if (sc == NULL) {
412 1.116.10.3 martin cgd_destroy_one_worker(cw);
413 1.116.10.3 martin return NULL;
414 1.116.10.3 martin }
415 1.116.10.3 martin
416 1.116.10.3 martin sc->sc_worker = cw;
417 1.116.10.3 martin
418 1.116.10.3 martin return sc;
419 1.65 dyoung }
420 1.65 dyoung
421 1.65 dyoung static int
422 1.65 dyoung cgd_destroy(device_t dev)
423 1.65 dyoung {
424 1.116.10.3 martin struct cgd_softc *sc = device_private(dev);
425 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
426 1.65 dyoung cfdata_t cf;
427 1.116.10.3 martin int error;
428 1.1 elric
429 1.65 dyoung cf = device_cfdata(dev);
430 1.65 dyoung error = config_detach(dev, DETACH_QUIET);
431 1.65 dyoung if (error)
432 1.65 dyoung return error;
433 1.116.10.3 martin
434 1.116.10.3 martin cgd_destroy_one_worker(cw);
435 1.116.10.3 martin
436 1.116.10.3 martin kmem_free(cf, sizeof(*cf));
437 1.65 dyoung return 0;
438 1.1 elric }
439 1.1 elric
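/*
 * cgd_busy()/cgd_unbusy() implement a per-instance busy flag, used by
 * cgdioctl() to serialize configuration requests against one another.
 */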
440 1.116.10.3 martin static void
441 1.116.10.3 martin cgd_busy(struct cgd_softc *sc)
442 1.116.10.3 martin {
443 1.116.10.3 martin
444 1.116.10.3 martin mutex_enter(&sc->sc_lock);
445 1.116.10.3 martin while (sc->sc_busy)
446 1.116.10.3 martin cv_wait(&sc->sc_cv, &sc->sc_lock);
447 1.116.10.3 martin sc->sc_busy = true;
448 1.116.10.3 martin mutex_exit(&sc->sc_lock);
449 1.116.10.3 martin }
450 1.116.10.3 martin
451 1.116.10.3 martin static void
452 1.116.10.3 martin cgd_unbusy(struct cgd_softc *sc)
453 1.116.10.3 martin {
454 1.116.10.3 martin
455 1.116.10.3 martin mutex_enter(&sc->sc_lock);
456 1.116.10.3 martin sc->sc_busy = false;
457 1.116.10.3 martin cv_broadcast(&sc->sc_cv);
458 1.116.10.3 martin mutex_exit(&sc->sc_lock);
459 1.116.10.3 martin }
460 1.116.10.3 martin
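/*
 * A single cgd_worker -- a per-CPU work queue plus a pool of transfer
 * contexts -- is shared by all cgd instances.  It is reference counted
 * here: created when the first instance appears and destroyed when the
 * last one goes away.
 */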
461 1.116.10.3 martin static struct cgd_worker *
462 1.116.10.3 martin cgd_create_one_worker(void)
463 1.116.10.3 martin {
464 1.116.10.3 martin KASSERT(cgd_spawning);
465 1.116.10.3 martin
466 1.116.10.3 martin if (cgd_refcnt++ == 0) {
467 1.116.10.3 martin KASSERT(cgd_worker == NULL);
468 1.116.10.3 martin cgd_worker = cgd_create_worker();
469 1.116.10.3 martin }
470 1.116.10.3 martin
471 1.116.10.3 martin KASSERT(cgd_worker != NULL);
472 1.116.10.3 martin return cgd_worker;
473 1.116.10.3 martin }
474 1.116.10.3 martin
475 1.116.10.3 martin static void
476 1.116.10.3 martin cgd_destroy_one_worker(struct cgd_worker *cw)
477 1.116.10.3 martin {
478 1.116.10.3 martin KASSERT(cgd_spawning);
479 1.116.10.3 martin KASSERT(cw == cgd_worker);
480 1.116.10.3 martin
481 1.116.10.3 martin if (--cgd_refcnt == 0) {
482 1.116.10.3 martin cgd_destroy_worker(cgd_worker);
483 1.116.10.3 martin cgd_worker = NULL;
484 1.116.10.3 martin }
485 1.116.10.3 martin }
486 1.116.10.3 martin
487 1.116.10.3 martin static struct cgd_worker *
488 1.116.10.3 martin cgd_create_worker(void)
489 1.116.10.3 martin {
490 1.116.10.3 martin struct cgd_worker *cw;
491 1.116.10.3 martin struct workqueue *wq;
492 1.116.10.3 martin struct pool *cp;
493 1.116.10.3 martin int error;
494 1.116.10.3 martin
495 1.116.10.3 martin cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
496 1.116.10.3 martin cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);
497 1.116.10.3 martin
498 1.116.10.3 martin error = workqueue_create(&wq, "cgd", cgd_process, NULL,
499 1.116.10.3 martin PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
500 1.116.10.3 martin if (error) {
501 1.116.10.3 martin kmem_free(cp, sizeof(struct pool));
502 1.116.10.3 martin kmem_free(cw, sizeof(struct cgd_worker));
503 1.116.10.3 martin return NULL;
504 1.116.10.3 martin }
505 1.116.10.3 martin
506 1.116.10.3 martin cw->cw_cpool = cp;
507 1.116.10.3 martin cw->cw_wq = wq;
508 1.116.10.3 martin pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
509 1.116.10.3 martin 0, 0, "cgdcpl", NULL, IPL_BIO);
510 1.116.10.3 martin
511 1.116.10.3 martin mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);
512 1.116.10.3 martin
513 1.116.10.3 martin return cw;
514 1.116.10.3 martin }
515 1.116.10.3 martin
516 1.116.10.3 martin static void
517 1.116.10.3 martin cgd_destroy_worker(struct cgd_worker *cw)
518 1.116.10.3 martin {
519 1.116.10.3 martin mutex_destroy(&cw->cw_lock);
520 1.116.10.3 martin
521 1.116.10.3 martin if (cw->cw_cpool) {
522 1.116.10.3 martin pool_destroy(cw->cw_cpool);
523 1.116.10.3 martin kmem_free(cw->cw_cpool, sizeof(struct pool));
524 1.116.10.3 martin }
525 1.116.10.3 martin if (cw->cw_wq)
526 1.116.10.3 martin workqueue_destroy(cw->cw_wq);
527 1.116.10.3 martin
528 1.116.10.3 martin kmem_free(cw, sizeof(struct cgd_worker));
529 1.116.10.3 martin }
530 1.116.10.3 martin
531 1.18 thorpej static int
532 1.32 christos cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
533 1.1 elric {
534 1.116.10.3 martin struct cgd_softc *sc;
535 1.116.10.3 martin int error;
536 1.1 elric
537 1.56 cegger DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
538 1.116.10.3 martin
539 1.116.10.3 martin error = cgd_lock(true);
540 1.116.10.3 martin if (error)
541 1.116.10.3 martin return error;
542 1.116.10.3 martin sc = getcgd_softc(dev);
543 1.116.10.3 martin if (sc == NULL)
544 1.116.10.3 martin sc = cgd_spawn(CGDUNIT(dev));
545 1.116.10.3 martin cgd_unlock();
546 1.116.10.3 martin if (sc == NULL)
547 1.116.10.3 martin return ENXIO;
548 1.116.10.3 martin
549 1.116.10.3 martin return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
550 1.1 elric }
551 1.1 elric
552 1.18 thorpej static int
553 1.32 christos cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
554 1.1 elric {
555 1.116.10.3 martin struct cgd_softc *sc;
556 1.65 dyoung struct dk_softc *dksc;
557 1.116.10.3 martin int error;
558 1.1 elric
559 1.56 cegger DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
560 1.116.10.3 martin
561 1.116.10.3 martin error = cgd_lock(false);
562 1.116.10.3 martin if (error)
563 1.65 dyoung return error;
564 1.116.10.3 martin sc = getcgd_softc(dev);
565 1.116.10.3 martin if (sc == NULL) {
566 1.116.10.3 martin error = ENXIO;
567 1.116.10.3 martin goto done;
568 1.116.10.3 martin }
569 1.116.10.3 martin
570 1.116.10.3 martin dksc = &sc->sc_dksc;
571 1.116.10.3 martin if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
572 1.116.10.3 martin goto done;
573 1.65 dyoung
574 1.98 mlelstv if (!DK_ATTACHED(dksc)) {
575 1.116.10.3 martin if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
576 1.116.10.3 martin device_printf(dksc->sc_dev,
577 1.65 dyoung "unable to detach instance\n");
578 1.116.10.3 martin goto done;
579 1.65 dyoung }
580 1.65 dyoung }
581 1.116.10.3 martin
582 1.116.10.3 martin done:
583 1.116.10.3 martin cgd_unlock();
584 1.116.10.3 martin
585 1.116.10.3 martin return error;
586 1.1 elric }
587 1.1 elric
588 1.18 thorpej static void
589 1.1 elric cgdstrategy(struct buf *bp)
590 1.1 elric {
591 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(bp->b_dev);
592 1.1 elric
593 1.1 elric DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
594 1.1 elric (long)bp->b_bcount));
595 1.72 riastrad
596 1.72 riastrad /*
597 1.111 mlelstv * Reject requests whose buffer is not 32-bit aligned.
598 1.72 riastrad */
599 1.111 mlelstv if (((uintptr_t)bp->b_data & 3) != 0) {
600 1.72 riastrad bp->b_error = EINVAL;
601 1.111 mlelstv goto bail;
602 1.72 riastrad }
603 1.72 riastrad
604 1.116.10.3 martin dk_strategy(&sc->sc_dksc, bp);
605 1.1 elric return;
606 1.111 mlelstv
607 1.111 mlelstv bail:
608 1.111 mlelstv bp->b_resid = bp->b_bcount;
609 1.111 mlelstv biodone(bp);
610 1.111 mlelstv return;
611 1.1 elric }
612 1.1 elric
613 1.18 thorpej static int
614 1.1 elric cgdsize(dev_t dev)
615 1.1 elric {
616 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(dev);
617 1.1 elric
618 1.56 cegger DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
619 1.116.10.3 martin if (!sc)
620 1.1 elric return -1;
621 1.116.10.3 martin return dk_size(&sc->sc_dksc, dev);
622 1.1 elric }
623 1.1 elric
624 1.16 elric /*
625 1.16 elric * cgd_{get,put}data are functions that deal with getting a buffer
626 1.116.10.3 martin * for the new encrypted data.
627 1.116.10.3 martin * We can no longer have a buffer per device, we need a buffer per
628 1.116.10.3 martin * work queue...
629 1.16 elric */
630 1.16 elric
631 1.16 elric static void *
632 1.116.10.3 martin cgd_getdata(struct cgd_softc *sc, unsigned long size)
633 1.16 elric {
634 1.116.10.3 martin void *data = NULL;
635 1.16 elric
636 1.116.10.3 martin mutex_enter(&sc->sc_lock);
637 1.116.10.3 martin if (!sc->sc_data_used) {
638 1.116.10.3 martin sc->sc_data_used = true;
639 1.116.10.3 martin data = sc->sc_data;
640 1.16 elric }
641 1.116.10.3 martin mutex_exit(&sc->sc_lock);
642 1.16 elric
643 1.16 elric if (data)
644 1.16 elric return data;
645 1.16 elric
646 1.116.10.3 martin return kmem_intr_alloc(size, KM_NOSLEEP);
647 1.16 elric }
648 1.16 elric
649 1.1 elric static void
650 1.116.10.3 martin cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
651 1.16 elric {
652 1.16 elric
653 1.116.10.3 martin if (data == sc->sc_data) {
654 1.116.10.3 martin mutex_enter(&sc->sc_lock);
655 1.116.10.3 martin sc->sc_data_used = false;
656 1.116.10.3 martin mutex_exit(&sc->sc_lock);
657 1.116.10.3 martin } else
658 1.116.10.3 martin kmem_intr_free(data, size);
659 1.16 elric }
660 1.16 elric
661 1.99 mlelstv static int
662 1.99 mlelstv cgd_diskstart(device_t dev, struct buf *bp)
663 1.1 elric {
664 1.116.10.3 martin struct cgd_softc *sc = device_private(dev);
665 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
666 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
667 1.105 mlelstv struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
668 1.116.10.3 martin struct cgd_xfer *cx;
669 1.99 mlelstv struct buf *nbp;
670 1.44 christos void * newaddr;
671 1.1 elric daddr_t bn;
672 1.1 elric
673 1.99 mlelstv DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
674 1.1 elric
675 1.99 mlelstv bn = bp->b_rawblkno;
676 1.22 perry
677 1.99 mlelstv /*
678 1.99 mlelstv * We attempt to allocate all of our resources up front, so that
679 1.99 mlelstv * we can fail quickly if they are unavailable.
680 1.99 mlelstv */
681 1.116.10.3 martin nbp = getiobuf(sc->sc_tvn, false);
682 1.99 mlelstv if (nbp == NULL)
683 1.99 mlelstv return EAGAIN;
684 1.16 elric
685 1.116.10.3 martin cx = pool_get(cw->cw_cpool, PR_NOWAIT);
686 1.116.10.3 martin if (cx == NULL) {
687 1.116.10.3 martin putiobuf(nbp);
688 1.116.10.3 martin return EAGAIN;
689 1.116.10.3 martin }
690 1.116.10.3 martin
691 1.116.10.3 martin cx->cx_sc = sc;
692 1.116.10.3 martin cx->cx_obp = bp;
693 1.116.10.3 martin cx->cx_nbp = nbp;
694 1.116.10.3 martin cx->cx_srcv = cx->cx_dstv = bp->b_data;
695 1.116.10.3 martin cx->cx_blkno = bn;
696 1.116.10.3 martin cx->cx_secsize = dg->dg_secsize;
697 1.116.10.3 martin
698 1.99 mlelstv /*
699 1.99 mlelstv * If we are writing, then we need to encrypt the outgoing
700 1.99 mlelstv * block into a new block of memory.
701 1.99 mlelstv */
702 1.99 mlelstv if ((bp->b_flags & B_READ) == 0) {
703 1.116.10.3 martin newaddr = cgd_getdata(sc, bp->b_bcount);
704 1.99 mlelstv if (!newaddr) {
705 1.116.10.3 martin pool_put(cw->cw_cpool, cx);
706 1.99 mlelstv putiobuf(nbp);
707 1.99 mlelstv return EAGAIN;
708 1.16 elric }
709 1.116.10.3 martin
710 1.116.10.3 martin cx->cx_dstv = newaddr;
711 1.116.10.3 martin cx->cx_len = bp->b_bcount;
712 1.116.10.3 martin cx->cx_dir = CGD_CIPHER_ENCRYPT;
713 1.116.10.3 martin
714 1.116.10.3 martin cgd_enqueue(sc, cx);
715 1.116.10.3 martin return 0;
716 1.99 mlelstv }
717 1.1 elric
718 1.116.10.3 martin cgd_diskstart2(sc, cx);
719 1.116.10.3 martin return 0;
720 1.116.10.3 martin }
721 1.116.10.3 martin
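/*
 * Second stage of starting a transfer: point the shadow buffer at the
 * (possibly freshly encrypted) data and issue it to the backing device.
 * For writes this runs on the worker once encryption is done; for reads
 * it is called directly from cgd_diskstart().
 */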
722 1.116.10.3 martin static void
723 1.116.10.3 martin cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
724 1.116.10.3 martin {
725 1.116.10.3 martin struct vnode *vp;
726 1.116.10.3 martin struct buf *bp;
727 1.116.10.3 martin struct buf *nbp;
728 1.116.10.3 martin
729 1.116.10.3 martin bp = cx->cx_obp;
730 1.116.10.3 martin nbp = cx->cx_nbp;
731 1.116.10.3 martin
732 1.116.10.3 martin nbp->b_data = cx->cx_dstv;
733 1.99 mlelstv nbp->b_flags = bp->b_flags;
734 1.99 mlelstv nbp->b_oflags = bp->b_oflags;
735 1.99 mlelstv nbp->b_cflags = bp->b_cflags;
736 1.99 mlelstv nbp->b_iodone = cgdiodone;
737 1.99 mlelstv nbp->b_proc = bp->b_proc;
738 1.116.10.3 martin nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
739 1.99 mlelstv nbp->b_bcount = bp->b_bcount;
740 1.116.10.3 martin nbp->b_private = cx;
741 1.99 mlelstv
742 1.99 mlelstv BIO_COPYPRIO(nbp, bp);
743 1.99 mlelstv
744 1.99 mlelstv if ((nbp->b_flags & B_READ) == 0) {
745 1.99 mlelstv vp = nbp->b_vp;
746 1.99 mlelstv mutex_enter(vp->v_interlock);
747 1.99 mlelstv vp->v_numoutput++;
748 1.99 mlelstv mutex_exit(vp->v_interlock);
749 1.17 dbj }
750 1.116.10.3 martin VOP_STRATEGY(sc->sc_tvn, nbp);
751 1.1 elric }
752 1.1 elric
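/*
 * Completion handler for the shadow buffer.  Reads are handed to the
 * worker for decryption into the original buffer; writes go straight to
 * cgd_iodone2() to finish the original request.
 */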
753 1.18 thorpej static void
754 1.17 dbj cgdiodone(struct buf *nbp)
755 1.1 elric {
756 1.116.10.3 martin struct cgd_xfer *cx = nbp->b_private;
757 1.116.10.3 martin struct buf *obp = cx->cx_obp;
758 1.116.10.3 martin struct cgd_softc *sc = getcgd_softc(obp->b_dev);
759 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
760 1.105 mlelstv struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
761 1.105 mlelstv daddr_t bn;
762 1.22 perry
763 1.116.10.3 martin KDASSERT(sc);
764 1.1 elric
765 1.17 dbj DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
766 1.20 yamt DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
767 1.1 elric obp, obp->b_bcount, obp->b_resid));
768 1.107 msaitoh DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
769 1.107 msaitoh " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
770 1.107 msaitoh nbp->b_bcount));
771 1.46 ad if (nbp->b_error != 0) {
772 1.46 ad obp->b_error = nbp->b_error;
773 1.62 christos DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
774 1.62 christos obp->b_error));
775 1.1 elric }
776 1.1 elric
777 1.16 elric /* Perform the decryption if we are reading.
778 1.1 elric *
779 1.1 elric * Note: use the blocknumber from nbp, since it is what
780 1.1 elric * we used to encrypt the blocks.
781 1.1 elric */
782 1.1 elric
783 1.105 mlelstv if (nbp->b_flags & B_READ) {
784 1.105 mlelstv bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
785 1.116.10.3 martin
786 1.116.10.3 martin cx->cx_obp = obp;
787 1.116.10.3 martin cx->cx_nbp = nbp;
788 1.116.10.3 martin cx->cx_dstv = obp->b_data;
789 1.116.10.3 martin cx->cx_srcv = obp->b_data;
790 1.116.10.3 martin cx->cx_len = obp->b_bcount;
791 1.116.10.3 martin cx->cx_blkno = bn;
792 1.116.10.3 martin cx->cx_secsize = dg->dg_secsize;
793 1.116.10.3 martin cx->cx_dir = CGD_CIPHER_DECRYPT;
794 1.116.10.3 martin
795 1.116.10.3 martin cgd_enqueue(sc, cx);
796 1.116.10.3 martin return;
797 1.105 mlelstv }
798 1.1 elric
799 1.116.10.3 martin cgd_iodone2(sc, cx);
800 1.116.10.3 martin }
801 1.116.10.3 martin
802 1.116.10.3 martin static void
803 1.116.10.3 martin cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
804 1.116.10.3 martin {
805 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
806 1.116.10.3 martin struct buf *obp = cx->cx_obp;
807 1.116.10.3 martin struct buf *nbp = cx->cx_nbp;
808 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
809 1.116.10.3 martin
810 1.116.10.3 martin pool_put(cw->cw_cpool, cx);
811 1.116.10.3 martin
812 1.16 elric /* If we allocated memory, free it now... */
813 1.1 elric if (nbp->b_data != obp->b_data)
814 1.116.10.3 martin cgd_putdata(sc, nbp->b_data, nbp->b_bcount);
815 1.1 elric
816 1.33 yamt putiobuf(nbp);
817 1.1 elric
818 1.100 mlelstv /* Request is complete for whatever reason */
819 1.100 mlelstv obp->b_resid = 0;
820 1.100 mlelstv if (obp->b_error != 0)
821 1.100 mlelstv obp->b_resid = obp->b_bcount;
822 1.100 mlelstv
823 1.99 mlelstv dk_done(dksc, obp);
824 1.101 mlelstv dk_start(dksc, NULL);
825 1.1 elric }
826 1.1 elric
827 1.108 riastrad static int
828 1.108 riastrad cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
829 1.108 riastrad {
830 1.108 riastrad struct cgd_softc *sc = device_private(dev);
831 1.108 riastrad struct dk_softc *dksc = &sc->sc_dksc;
832 1.108 riastrad struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
833 1.108 riastrad size_t nbytes, blksize;
834 1.108 riastrad void *buf;
835 1.108 riastrad int error;
836 1.108 riastrad
837 1.108 riastrad /*
838 1.108 riastrad * dk_dump gives us units of disklabel sectors. Everything
839 1.108 riastrad * else in cgd uses units of diskgeom sectors. These had
840 1.108 riastrad * better agree; otherwise we need to figure out how to convert
841 1.108 riastrad * between them.
842 1.108 riastrad */
843 1.108 riastrad KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
844 1.108 riastrad "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
845 1.108 riastrad dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
846 1.108 riastrad blksize = dg->dg_secsize;
847 1.108 riastrad
848 1.108 riastrad /*
849 1.108 riastrad * Compute the number of bytes in this request, which dk_dump
850 1.108 riastrad * has `helpfully' converted to a number of blocks for us.
851 1.108 riastrad */
852 1.108 riastrad nbytes = nblk*blksize;
853 1.108 riastrad
854 1.108 riastrad /* Try to acquire a buffer to store the ciphertext. */
855 1.116.10.3 martin buf = cgd_getdata(sc, nbytes);
856 1.108 riastrad if (buf == NULL)
857 1.108 riastrad /* Out of memory: give up. */
858 1.108 riastrad return ENOMEM;
859 1.108 riastrad
860 1.108 riastrad /* Encrypt the caller's data into the temporary buffer. */
861 1.108 riastrad cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
862 1.108 riastrad
863 1.108 riastrad /* Pass it on to the underlying disk device. */
864 1.108 riastrad error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
865 1.108 riastrad
866 1.108 riastrad /* Release the buffer. */
867 1.116.10.3 martin cgd_putdata(sc, buf, nbytes);
868 1.108 riastrad
869 1.108 riastrad /* Return any error from the underlying disk device. */
870 1.108 riastrad return error;
871 1.108 riastrad }
872 1.108 riastrad
873 1.1 elric /* XXX: we should probably put these into dksubr.c, mostly */
874 1.18 thorpej static int
875 1.40 christos cgdread(dev_t dev, struct uio *uio, int flags)
876 1.1 elric {
877 1.116.10.3 martin struct cgd_softc *sc;
878 1.1 elric struct dk_softc *dksc;
879 1.1 elric
880 1.56 cegger DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
881 1.56 cegger (unsigned long long)dev, uio, flags));
882 1.116.10.3 martin sc = getcgd_softc(dev);
883 1.116.10.3 martin if (sc == NULL)
884 1.116.10.3 martin return ENXIO;
885 1.116.10.3 martin dksc = &sc->sc_dksc;
886 1.98 mlelstv if (!DK_ATTACHED(dksc))
887 1.1 elric return ENXIO;
888 1.1 elric return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
889 1.1 elric }
890 1.1 elric
891 1.1 elric /* XXX: we should probably put these into dksubr.c, mostly */
892 1.18 thorpej static int
893 1.40 christos cgdwrite(dev_t dev, struct uio *uio, int flags)
894 1.1 elric {
895 1.116.10.3 martin struct cgd_softc *sc;
896 1.1 elric struct dk_softc *dksc;
897 1.1 elric
898 1.56 cegger DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
899 1.116.10.3 martin sc = getcgd_softc(dev);
900 1.116.10.3 martin if (sc == NULL)
901 1.116.10.3 martin return ENXIO;
902 1.116.10.3 martin dksc = &sc->sc_dksc;
903 1.98 mlelstv if (!DK_ATTACHED(dksc))
904 1.1 elric return ENXIO;
905 1.1 elric return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
906 1.1 elric }
907 1.1 elric
908 1.18 thorpej static int
909 1.44 christos cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
910 1.1 elric {
911 1.116.10.3 martin struct cgd_softc *sc;
912 1.1 elric struct dk_softc *dksc;
913 1.1 elric int part = DISKPART(dev);
914 1.1 elric int pmask = 1 << part;
915 1.116.10.3 martin int error;
916 1.1 elric
917 1.56 cegger DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
918 1.32 christos dev, cmd, data, flag, l));
919 1.78 christos
920 1.1 elric switch (cmd) {
921 1.93 christos case CGDIOCGET:
922 1.93 christos return cgd_ioctl_get(dev, data, l);
923 1.1 elric case CGDIOCSET:
924 1.1 elric case CGDIOCCLR:
925 1.1 elric if ((flag & FWRITE) == 0)
926 1.1 elric return EBADF;
927 1.78 christos /* FALLTHROUGH */
928 1.78 christos default:
929 1.116.10.3 martin sc = getcgd_softc(dev);
930 1.116.10.3 martin if (sc == NULL)
931 1.116.10.3 martin return ENXIO;
932 1.116.10.3 martin dksc = &sc->sc_dksc;
933 1.78 christos break;
934 1.1 elric }
935 1.1 elric
936 1.1 elric switch (cmd) {
937 1.1 elric case CGDIOCSET:
938 1.116.10.3 martin cgd_busy(sc);
939 1.98 mlelstv if (DK_ATTACHED(dksc))
940 1.116.10.3 martin error = EBUSY;
941 1.116.10.3 martin else
942 1.116.10.3 martin error = cgd_ioctl_set(sc, data, l);
943 1.116.10.3 martin cgd_unbusy(sc);
944 1.116.10.3 martin break;
945 1.1 elric case CGDIOCCLR:
946 1.116.10.3 martin cgd_busy(sc);
947 1.116.10.3 martin if (DK_BUSY(&sc->sc_dksc, pmask))
948 1.116.10.3 martin error = EBUSY;
949 1.116.10.3 martin else
950 1.116.10.3 martin error = cgd_ioctl_clr(sc, l);
951 1.116.10.3 martin cgd_unbusy(sc);
952 1.116.10.3 martin break;
953 1.114 jdolecek case DIOCGCACHE:
954 1.57 apb case DIOCCACHESYNC:
955 1.116.10.3 martin cgd_busy(sc);
956 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
957 1.116.10.3 martin cgd_unbusy(sc);
958 1.116.10.3 martin error = ENOENT;
959 1.116.10.3 martin break;
960 1.116.10.3 martin }
961 1.57 apb /*
962 1.57 apb * We pass this call down to the underlying disk.
963 1.57 apb */
964 1.116.10.3 martin error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
965 1.116.10.3 martin cgd_unbusy(sc);
966 1.116.10.3 martin break;
967 1.116.10.1 martin case DIOCGSECTORALIGN: {
968 1.116.10.1 martin struct disk_sectoralign *dsa = data;
969 1.116.10.1 martin
970 1.116.10.3 martin cgd_busy(sc);
971 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
972 1.116.10.3 martin cgd_unbusy(sc);
973 1.116.10.3 martin error = ENOENT;
974 1.116.10.3 martin break;
975 1.116.10.3 martin }
976 1.116.10.1 martin
977 1.116.10.1 martin /* Get the underlying disk's sector alignment. */
978 1.116.10.3 martin error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
979 1.116.10.3 martin if (error) {
980 1.116.10.3 martin cgd_unbusy(sc);
981 1.116.10.3 martin break;
982 1.116.10.3 martin }
983 1.116.10.1 martin
984 1.116.10.1 martin /* Adjust for the disklabel partition if necessary. */
985 1.116.10.1 martin if (part != RAW_PART) {
986 1.116.10.1 martin struct disklabel *lp = dksc->sc_dkdev.dk_label;
987 1.116.10.1 martin daddr_t offset = lp->d_partitions[part].p_offset;
988 1.116.10.1 martin uint32_t r = offset % dsa->dsa_alignment;
989 1.116.10.1 martin
990 1.116.10.1 martin if (r < dsa->dsa_firstaligned)
991 1.116.10.1 martin dsa->dsa_firstaligned = dsa->dsa_firstaligned
992 1.116.10.1 martin - r;
993 1.116.10.1 martin else
994 1.116.10.1 martin dsa->dsa_firstaligned = (dsa->dsa_firstaligned
995 1.116.10.1 martin + dsa->dsa_alignment) - r;
996 1.116.10.1 martin }
997 1.116.10.3 martin cgd_unbusy(sc);
998 1.116.10.3 martin break;
999 1.116.10.1 martin }
1000 1.103 christos case DIOCGSTRATEGY:
1001 1.103 christos case DIOCSSTRATEGY:
1002 1.116.10.3 martin if (!DK_ATTACHED(dksc)) {
1003 1.116.10.3 martin error = ENOENT;
1004 1.116.10.3 martin break;
1005 1.116.10.3 martin }
1006 1.103 christos /*FALLTHROUGH*/
1007 1.1 elric default:
1008 1.116.10.3 martin error = dk_ioctl(dksc, dev, cmd, data, flag, l);
1009 1.116.10.3 martin break;
1010 1.93 christos case CGDIOCGET:
1011 1.93 christos KASSERT(0);
1012 1.116.10.3 martin error = EINVAL;
1013 1.1 elric }
1014 1.116.10.3 martin
1015 1.116.10.3 martin return error;
1016 1.1 elric }
1017 1.1 elric
1018 1.18 thorpej static int
1019 1.44 christos cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1020 1.1 elric {
1021 1.116.10.3 martin struct cgd_softc *sc;
1022 1.1 elric
1023 1.56 cegger DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1024 1.56 cegger dev, blkno, va, (unsigned long)size));
1025 1.116.10.3 martin sc = getcgd_softc(dev);
1026 1.116.10.3 martin if (sc == NULL)
1027 1.116.10.3 martin return ENXIO;
1028 1.116.10.3 martin return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1029 1.1 elric }
1030 1.1 elric
1031 1.1 elric /*
1032 1.1 elric * XXXrcd:
1033 1.1 elric * for now we hardcode the maximum key length.
1034 1.1 elric */
1035 1.1 elric #define MAX_KEYSIZE 1024
1036 1.1 elric
1037 1.53 christos static const struct {
1038 1.53 christos const char *n;
1039 1.53 christos int v;
1040 1.53 christos int d;
1041 1.53 christos } encblkno[] = {
1042 1.53 christos { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
1043 1.53 christos { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
1044 1.53 christos { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
1045 1.53 christos };
1046 1.53 christos
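/*
 * CGDIOCSET: configure an instance.  Open the backing device, copy in
 * the cipher name, IV method and key, initialize the cipher state and
 * attach the disk.
 */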
1047 1.1 elric /* ARGSUSED */
1048 1.1 elric static int
1049 1.116.10.3 martin cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
1050 1.1 elric {
1051 1.1 elric struct cgd_ioctl *ci = data;
1052 1.1 elric struct vnode *vp;
1053 1.1 elric int ret;
1054 1.53 christos size_t i;
1055 1.43 cbiere size_t keybytes; /* key length in bytes */
1056 1.27 drochner const char *cp;
1057 1.71 dholland struct pathbuf *pb;
1058 1.36 christos char *inbuf;
1059 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1060 1.1 elric
1061 1.1 elric cp = ci->ci_disk;
1062 1.71 dholland
1063 1.71 dholland ret = pathbuf_copyin(ci->ci_disk, &pb);
1064 1.71 dholland if (ret != 0) {
1065 1.71 dholland return ret;
1066 1.71 dholland }
1067 1.71 dholland ret = dk_lookup(pb, l, &vp);
1068 1.71 dholland pathbuf_destroy(pb);
1069 1.71 dholland if (ret != 0) {
1070 1.1 elric return ret;
1071 1.71 dholland }
1072 1.1 elric
1073 1.116.10.3 martin inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);
1074 1.36 christos
1075 1.116.10.3 martin if ((ret = cgdinit(sc, cp, vp, l)) != 0)
1076 1.1 elric goto bail;
1077 1.1 elric
1078 1.36 christos (void)memset(inbuf, 0, MAX_KEYSIZE);
1079 1.1 elric ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
1080 1.1 elric if (ret)
1081 1.1 elric goto bail;
1082 1.116.10.3 martin sc->sc_cfuncs = cryptfuncs_find(inbuf);
1083 1.116.10.3 martin if (!sc->sc_cfuncs) {
1084 1.1 elric ret = EINVAL;
1085 1.1 elric goto bail;
1086 1.1 elric }
1087 1.1 elric
1088 1.43 cbiere (void)memset(inbuf, 0, MAX_KEYSIZE);
1089 1.36 christos ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
1090 1.1 elric if (ret)
1091 1.1 elric goto bail;
1092 1.53 christos
1093 1.53 christos for (i = 0; i < __arraycount(encblkno); i++)
1094 1.53 christos if (strcmp(encblkno[i].n, inbuf) == 0)
1095 1.53 christos break;
1096 1.53 christos
1097 1.53 christos if (i == __arraycount(encblkno)) {
1098 1.1 elric ret = EINVAL;
1099 1.1 elric goto bail;
1100 1.1 elric }
1101 1.1 elric
1102 1.15 dan keybytes = ci->ci_keylen / 8 + 1;
1103 1.15 dan if (keybytes > MAX_KEYSIZE) {
1104 1.1 elric ret = EINVAL;
1105 1.1 elric goto bail;
1106 1.1 elric }
1107 1.53 christos
1108 1.36 christos (void)memset(inbuf, 0, MAX_KEYSIZE);
1109 1.15 dan ret = copyin(ci->ci_key, inbuf, keybytes);
1110 1.1 elric if (ret)
1111 1.1 elric goto bail;
1112 1.1 elric
1113 1.116.10.3 martin sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
1114 1.116.10.3 martin sc->sc_cdata.cf_mode = encblkno[i].v;
1115 1.116.10.3 martin sc->sc_cdata.cf_keylen = ci->ci_keylen;
1116 1.116.10.3 martin sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
1117 1.116.10.3 martin &sc->sc_cdata.cf_blocksize);
1118 1.116.10.3 martin if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
1119 1.62 christos log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
1120 1.116.10.3 martin sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
1121 1.116.10.3 martin sc->sc_cdata.cf_priv = NULL;
1122 1.62 christos }
1123 1.78 christos
1124 1.53 christos /*
1125 1.53 christos * The blocksize is supposed to be in bytes. Unfortunately originally
1126 1.53 christos * it was expressed in bits. For compatibility we maintain encblkno
1127 1.53 christos * and encblkno8.
1128 1.53 christos */
1129 1.116.10.3 martin sc->sc_cdata.cf_blocksize /= encblkno[i].d;
1130 1.97 riastrad (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
1131 1.116.10.3 martin if (!sc->sc_cdata.cf_priv) {
1132 1.1 elric ret = EINVAL; /* XXX is this the right error? */
1133 1.1 elric goto bail;
1134 1.1 elric }
1135 1.116.10.3 martin kmem_free(inbuf, MAX_KEYSIZE);
1136 1.1 elric
1137 1.80 christos bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
1138 1.16 elric
1139 1.116.10.3 martin sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
1140 1.116.10.3 martin sc->sc_data_used = false;
1141 1.16 elric
1142 1.98 mlelstv /* Attach the disk. */
1143 1.98 mlelstv dk_attach(dksc);
1144 1.98 mlelstv disk_attach(&dksc->sc_dkdev);
1145 1.1 elric
1146 1.80 christos disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
1147 1.77 elric
1148 1.29 yamt /* Discover wedges on this disk. */
1149 1.80 christos dkwedge_discover(&dksc->sc_dkdev);
1150 1.29 yamt
1151 1.1 elric return 0;
1152 1.1 elric
1153 1.1 elric bail:
1154 1.116.10.3 martin kmem_free(inbuf, MAX_KEYSIZE);
1155 1.51 ad (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
1156 1.1 elric return ret;
1157 1.1 elric }
1158 1.1 elric
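/*
 * CGDIOCCLR: unconfigure an instance.  Delete wedges, drain queued
 * buffers, close the backing vnode, destroy the cipher state and detach
 * the disk.
 */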
1159 1.1 elric /* ARGSUSED */
1160 1.1 elric static int
1161 1.116.10.3 martin cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
1162 1.1 elric {
1163 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1164 1.65 dyoung
1165 1.98 mlelstv if (!DK_ATTACHED(dksc))
1166 1.65 dyoung return ENXIO;
1167 1.16 elric
1168 1.29 yamt /* Delete all of our wedges. */
1169 1.80 christos dkwedge_delall(&dksc->sc_dkdev);
1170 1.29 yamt
1171 1.16 elric /* Kill off any queued buffers. */
1172 1.104 mlelstv dk_drain(dksc);
1173 1.80 christos bufq_free(dksc->sc_bufq);
1174 1.1 elric
1175 1.116.10.3 martin (void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
1176 1.116.10.3 martin sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
1177 1.116.10.3 martin kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1178 1.116.10.3 martin kmem_free(sc->sc_data, MAXPHYS);
1179 1.116.10.3 martin sc->sc_data_used = false;
1180 1.98 mlelstv dk_detach(dksc);
1181 1.80 christos disk_detach(&dksc->sc_dkdev);
1182 1.1 elric
1183 1.1 elric return 0;
1184 1.1 elric }
1185 1.1 elric
1186 1.1 elric static int
1187 1.78 christos cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
1188 1.78 christos {
1189 1.116.10.3 martin struct cgd_softc *sc;
1190 1.78 christos struct cgd_user *cgu;
1191 1.116.10.3 martin int unit, error;
1192 1.78 christos
1193 1.78 christos unit = CGDUNIT(dev);
1194 1.78 christos cgu = (struct cgd_user *)data;
1195 1.78 christos
1196 1.78 christos DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1197 1.78 christos dev, unit, data, l));
1198 1.78 christos
1199 1.116.10.3 martin /* XXX: we always return this unit's data, so if cgu_unit is
1200 1.116.10.3 martin * not -1, that field doesn't match the rest
1201 1.116.10.3 martin */
1202 1.78 christos if (cgu->cgu_unit == -1)
1203 1.78 christos cgu->cgu_unit = unit;
1204 1.78 christos
1205 1.78 christos if (cgu->cgu_unit < 0)
1206 1.78 christos return EINVAL; /* XXX: should this be ENXIO? */
1207 1.78 christos
1208 1.116.10.3 martin error = cgd_lock(false);
1209 1.116.10.3 martin if (error)
1210 1.116.10.3 martin return error;
1211 1.116.10.3 martin
1212 1.116.10.3 martin sc = device_lookup_private(&cgd_cd, unit);
1213 1.116.10.3 martin if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
1214 1.78 christos cgu->cgu_dev = 0;
1215 1.78 christos cgu->cgu_alg[0] = '\0';
1216 1.78 christos cgu->cgu_blocksize = 0;
1217 1.78 christos cgu->cgu_mode = 0;
1218 1.78 christos cgu->cgu_keylen = 0;
1219 1.78 christos }
1220 1.78 christos else {
1221 1.116.10.3 martin mutex_enter(&sc->sc_lock);
1222 1.116.10.3 martin cgu->cgu_dev = sc->sc_tdev;
1223 1.116.10.3 martin strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
1224 1.78 christos sizeof(cgu->cgu_alg));
1225 1.116.10.3 martin cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
1226 1.116.10.3 martin cgu->cgu_mode = sc->sc_cdata.cf_mode;
1227 1.116.10.3 martin cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
1228 1.116.10.3 martin mutex_exit(&sc->sc_lock);
1229 1.78 christos }
1230 1.116.10.3 martin
1231 1.116.10.3 martin cgd_unlock();
1232 1.78 christos return 0;
1233 1.78 christos }
1234 1.78 christos
1235 1.78 christos static int
1236 1.116.10.3 martin cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
1237 1.32 christos struct lwp *l)
1238 1.1 elric {
1239 1.80 christos struct disk_geom *dg;
1240 1.1 elric int ret;
1241 1.36 christos char *tmppath;
1242 1.76 christos uint64_t psize;
1243 1.76 christos unsigned secsize;
1244 1.116.10.3 martin struct dk_softc *dksc = &sc->sc_dksc;
1245 1.1 elric
1246 1.116.10.3 martin sc->sc_tvn = vp;
1247 1.116.10.3 martin sc->sc_tpath = NULL;
1248 1.1 elric
1249 1.116.10.3 martin tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1250 1.116.10.3 martin ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
1251 1.1 elric if (ret)
1252 1.1 elric goto bail;
1253 1.116.10.3 martin sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
1254 1.116.10.3 martin memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);
1255 1.1 elric
1256 1.116.10.3 martin sc->sc_tdev = vp->v_rdev;
1257 1.1 elric
1258 1.76 christos if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1259 1.1 elric goto bail;
1260 1.1 elric
1261 1.76 christos if (psize == 0) {
1262 1.1 elric ret = ENODEV;
1263 1.1 elric goto bail;
1264 1.1 elric }
1265 1.1 elric
1266 1.1 elric /*
1267 1.1 elric * XXX here we should probe the underlying device. If we
1268 1.1 elric * are accessing a partition of type RAW_PART, then
1269 1.1 elric * we should populate our initial geometry with the
1270 1.1 elric * geometry that we discover from the device.
1271 1.1 elric */
1272 1.80 christos dg = &dksc->sc_dkdev.dk_geom;
1273 1.80 christos memset(dg, 0, sizeof(*dg));
1274 1.80 christos dg->dg_secperunit = psize;
1275 1.105 mlelstv dg->dg_secsize = secsize;
1276 1.80 christos dg->dg_ntracks = 1;
1277 1.105 mlelstv dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1278 1.80 christos dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
1279 1.1 elric
1280 1.1 elric bail:
1281 1.116.10.3 martin kmem_free(tmppath, MAXPATHLEN);
1282 1.116.10.3 martin if (ret && sc->sc_tpath)
1283 1.116.10.3 martin kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1284 1.1 elric return ret;
1285 1.1 elric }
1286 1.1 elric
1287 1.1 elric /*
1288 1.1 elric * Our generic cipher entry point. This takes care of the
1289 1.1 elric * IV mode and passes off the work to the specific cipher.
1290 1.1 elric * We implement here the IV method ``encrypted block
1291 1.1 elric * number''.
1292 1.22 perry *
1293 1.1 elric * XXXrcd: for now we rely on our own crypto framework defined
1294 1.1 elric * in dev/cgd_crypto.c. This will change when we
1295 1.1 elric * get a generic kernel crypto framework.
1296 1.1 elric */
1297 1.1 elric
1298 1.1 elric static void
1299 1.25 xtraeme blkno2blkno_buf(char *sbuf, daddr_t blkno)
1300 1.1 elric {
1301 1.1 elric int i;
1302 1.1 elric
1303 1.1 elric /* Set up the blkno in blkno_buf, here we do not care much
1304 1.1 elric * about the final layout of the information as long as we
1305 1.1 elric * can guarantee that each sector will have a different IV
1306 1.1 elric * and that the endianness of the machine will not affect
1307 1.1 elric * the representation that we have chosen.
1308 1.1 elric *
1309 1.1 elric * We choose this representation, because it does not rely
1310 1.1 elric * on the size of buf (which is the blocksize of the cipher),
1311 1.1 elric * but allows daddr_t to grow without breaking existing
1312 1.1 elric * disks.
1313 1.1 elric *
1314 1.1 elric * Note that blkno2blkno_buf does not take a size as input,
1315 1.1 elric * and hence must be called on a pre-zeroed buffer of length
1316 1.1 elric * greater than or equal to sizeof(daddr_t).
1317 1.1 elric */
1318 1.1 elric for (i=0; i < sizeof(daddr_t); i++) {
1319 1.25 xtraeme *sbuf++ = blkno & 0xff;
1320 1.1 elric blkno >>= 8;
1321 1.1 elric }
1322 1.1 elric }
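/*
 * Illustration (not a test vector): assuming an 8-byte daddr_t and a
 * pre-zeroed 16-byte blkno_buf, blkno 0x0102 is serialized as
 *
 *	02 01 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * i.e. least-significant byte first, regardless of host endianness.
 */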
1323 1.1 elric
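/*
 * Pick a CPU for the next transfer.  If the worker is idle, remember
 * the current CPU and return NULL, leaving the choice to
 * workqueue_enqueue(); otherwise step round-robin through the CPUs,
 * starting after the last one used, so that concurrent requests spread
 * across the per-CPU queues.
 */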
1324 1.116.10.3 martin static struct cpu_info *
1325 1.116.10.3 martin cgd_cpu(struct cgd_softc *sc)
1326 1.116.10.3 martin {
1327 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1328 1.116.10.3 martin struct cpu_info *ci = NULL;
1329 1.116.10.3 martin u_int cidx, i;
1330 1.116.10.3 martin
1331 1.116.10.3 martin if (cw->cw_busy == 0) {
1332 1.116.10.3 martin cw->cw_last = cpu_index(curcpu());
1333 1.116.10.3 martin return NULL;
1334 1.116.10.3 martin }
1335 1.116.10.3 martin
1336 1.116.10.3 martin for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1337 1.116.10.3 martin if (cidx >= maxcpus)
1338 1.116.10.3 martin cidx = 0;
1339 1.116.10.3 martin ci = cpu_lookup(cidx);
1340 1.116.10.3 martin if (ci) {
1341 1.116.10.3 martin cw->cw_last = cidx;
1342 1.116.10.3 martin break;
1343 1.116.10.3 martin }
1344 1.116.10.3 martin }
1345 1.116.10.3 martin
1346 1.116.10.3 martin return ci;
1347 1.116.10.3 martin }
1348 1.116.10.3 martin
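/*
 * cgd_enqueue() hands a transfer to the work queue; cgd_process() runs
 * on the worker, performs the cipher operation and then continues the
 * I/O: issuing the encrypted write, or completing the decrypted read.
 */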
1349 1.116.10.3 martin static void
1350 1.116.10.3 martin cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
1351 1.116.10.3 martin {
1352 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1353 1.116.10.3 martin struct cpu_info *ci;
1354 1.116.10.3 martin
1355 1.116.10.3 martin mutex_enter(&cw->cw_lock);
1356 1.116.10.3 martin ci = cgd_cpu(sc);
1357 1.116.10.3 martin cw->cw_busy++;
1358 1.116.10.3 martin mutex_exit(&cw->cw_lock);
1359 1.116.10.3 martin
1360 1.116.10.3 martin workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
1361 1.116.10.3 martin }
1362 1.116.10.3 martin
1363 1.116.10.3 martin static void
1364 1.116.10.3 martin cgd_process(struct work *wk, void *arg)
1365 1.116.10.3 martin {
1366 1.116.10.3 martin struct cgd_xfer *cx = (struct cgd_xfer *)wk;
1367 1.116.10.3 martin struct cgd_softc *sc = cx->cx_sc;
1368 1.116.10.3 martin struct cgd_worker *cw = sc->sc_worker;
1369 1.116.10.3 martin
1370 1.116.10.3 martin cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
1371 1.116.10.3 martin cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
1372 1.116.10.3 martin
1373 1.116.10.3 martin if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
1374 1.116.10.3 martin cgd_diskstart2(sc, cx);
1375 1.116.10.3 martin } else {
1376 1.116.10.3 martin cgd_iodone2(sc, cx);
1377 1.116.10.3 martin }
1378 1.116.10.3 martin
1379 1.116.10.3 martin mutex_enter(&cw->cw_lock);
1380 1.116.10.3 martin if (cw->cw_busy > 0)
1381 1.116.10.3 martin cw->cw_busy--;
1382 1.116.10.3 martin mutex_exit(&cw->cw_lock);
1383 1.116.10.3 martin }
1384 1.116.10.3 martin
1385 1.1 elric static void
1386 1.116.10.3 martin cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
1387 1.44 christos size_t len, daddr_t blkno, size_t secsize, int dir)
1388 1.1 elric {
1389 1.44 christos char *dst = dstv;
1390 1.112 alnsn char *src = srcv;
1391 1.116.10.3 martin cfunc_cipher_prep *ciprep = sc->sc_cfuncs->cf_cipher_prep;
1392 1.116.10.3 martin cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
1393 1.1 elric struct uio dstuio;
1394 1.1 elric struct uio srcuio;
1395 1.1 elric struct iovec dstiov[2];
1396 1.1 elric struct iovec srciov[2];
1397 1.116.10.3 martin size_t blocksize = sc->sc_cdata.cf_blocksize;
1398 1.105 mlelstv size_t todo;
1399 1.112 alnsn char blkno_buf[CGD_MAXBLOCKSIZE], *iv;
1400 1.1 elric
1401 1.1 elric DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
1402 1.1 elric
1403 1.22 perry DIAGCONDPANIC(len % blocksize != 0,
1404 1.1 elric ("cgd_cipher: len %% blocksize != 0"));
1405 1.1 elric
1406 1.1 elric /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
1407 1.1 elric DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
1408 1.1 elric ("cgd_cipher: sizeof(daddr_t) > blocksize"));
1409 1.1 elric
1410 1.112 alnsn DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
1411 1.112 alnsn ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));
1412 1.1 elric
1413 1.1 elric dstuio.uio_iov = dstiov;
1414 1.112 alnsn dstuio.uio_iovcnt = 1;
1415 1.1 elric
1416 1.1 elric srcuio.uio_iov = srciov;
1417 1.112 alnsn srcuio.uio_iovcnt = 1;
1418 1.1 elric
1419 1.105 mlelstv for (; len > 0; len -= todo) {
1420 1.105 mlelstv todo = MIN(len, secsize);
1421 1.105 mlelstv
1422 1.112 alnsn dstiov[0].iov_base = dst;
1423 1.112 alnsn srciov[0].iov_base = src;
1424 1.112 alnsn dstiov[0].iov_len = todo;
1425 1.112 alnsn srciov[0].iov_len = todo;
1426 1.1 elric
1427 1.64 christos memset(blkno_buf, 0x0, blocksize);
1428 1.1 elric blkno2blkno_buf(blkno_buf, blkno);
1429 1.1 elric IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
1430 1.64 christos blkno_buf, blocksize));
1431 1.112 alnsn
1432 1.112 alnsn /*
1433 1.112 alnsn * Compute an initial IV. All ciphers
1434 1.112 alnsn * can convert blkno_buf in-place.
1435 1.112 alnsn */
1436 1.112 alnsn iv = blkno_buf;
1437 1.116.10.3 martin ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
1438 1.112 alnsn IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));
1439 1.112 alnsn
1440 1.116.10.3 martin cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);
1441 1.1 elric
1442 1.105 mlelstv dst += todo;
1443 1.105 mlelstv src += todo;
1444 1.1 elric blkno++;
1445 1.1 elric }
1446 1.1 elric }
1447 1.1 elric
1448 1.1 elric #ifdef DEBUG
1449 1.1 elric static void
1450 1.26 drochner hexprint(const char *start, void *buf, int len)
1451 1.1 elric {
1452 1.1 elric char *c = buf;
1453 1.1 elric
1454 1.1 elric DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
1455 1.1 elric printf("%s: len=%06d 0x", start, len);
1456 1.1 elric while (len--)
1457 1.43 cbiere printf("%02x", (unsigned char) *c++);
1458 1.1 elric }
1459 1.1 elric #endif
1460 1.58 haad
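/*
 * Known-answer self test: for every entry in selftests[], encrypt the
 * reference plaintext and decrypt it again through cgd_cipher(),
 * panicking if either result fails to match the expected vector.
 */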
1461 1.112 alnsn static void
1462 1.112 alnsn selftest(void)
1463 1.112 alnsn {
1464 1.116.10.3 martin struct cgd_softc sc;
1465 1.112 alnsn void *buf;
1466 1.112 alnsn
1467 1.112 alnsn printf("running cgd selftest ");
1468 1.112 alnsn
1469 1.112 alnsn for (size_t i = 0; i < __arraycount(selftests); i++) {
1470 1.112 alnsn const char *alg = selftests[i].alg;
1471 1.112 alnsn const uint8_t *key = selftests[i].key;
1472 1.112 alnsn int keylen = selftests[i].keylen;
1473 1.112 alnsn int txtlen = selftests[i].txtlen;
1474 1.112 alnsn
1475 1.112 alnsn printf("%s-%d ", alg, keylen);
1476 1.112 alnsn
1477 1.116.10.3 martin memset(&sc, 0, sizeof(sc));
1478 1.112 alnsn
1479 1.116.10.3 martin sc.sc_cfuncs = cryptfuncs_find(alg);
1480 1.116.10.3 martin if (sc.sc_cfuncs == NULL)
1481 1.112 alnsn panic("%s not implemented", alg);
1482 1.112 alnsn
1483 1.116.10.3 martin sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1484 1.116.10.3 martin sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1485 1.116.10.3 martin sc.sc_cdata.cf_keylen = keylen;
1486 1.116.10.3 martin
1487 1.116.10.3 martin sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
1488 1.116.10.3 martin key, &sc.sc_cdata.cf_blocksize);
1489 1.116.10.3 martin if (sc.sc_cdata.cf_priv == NULL)
1490 1.112 alnsn panic("cf_priv is NULL");
1491 1.116.10.3 martin if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1492 1.116.10.3 martin panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
1493 1.112 alnsn
1494 1.116.10.3 martin sc.sc_cdata.cf_blocksize /= 8;
1495 1.112 alnsn
1496 1.116.10.3 martin buf = kmem_alloc(txtlen, KM_SLEEP);
1497 1.112 alnsn memcpy(buf, selftests[i].ptxt, txtlen);
1498 1.112 alnsn
1499 1.116.10.3 martin cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1500 1.112 alnsn selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1501 1.112 alnsn if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1502 1.112 alnsn panic("encryption is broken");
1503 1.112 alnsn
1504 1.116.10.3 martin cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1505 1.112 alnsn selftests[i].secsize, CGD_CIPHER_DECRYPT);
1506 1.112 alnsn if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1507 1.112 alnsn panic("decryption is broken");
1508 1.112 alnsn
1509 1.116.10.3 martin kmem_free(buf, txtlen);
1510 1.116.10.3 martin sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
1511 1.112 alnsn }
1512 1.112 alnsn
1513 1.112 alnsn printf("done\n");
1514 1.112 alnsn }
1515 1.112 alnsn
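/*
 * The third argument to MODULE() lists the modules cgd requires to be
 * loaded first: the cipher implementations and the disk/bufq helpers.
 */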
1516 1.116 pgoyette MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");
1517 1.74 jruoho
1518 1.58 haad #ifdef _MODULE
1519 1.66 dyoung CFDRIVER_DECL(cgd, DV_DISK, NULL);
1520 1.109 pgoyette
1521 1.109 pgoyette devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
1522 1.74 jruoho #endif
1523 1.58 haad
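/*
 * Module control entry point.  MODULE_CMD_INIT runs the cipher self
 * test and, when built as a loadable module, sets up the spawning
 * lock and condvar and registers the cfdriver, cfattach and
 * {b,c}devsw entries; MODULE_CMD_FINI reverses those registrations,
 * re-attaching what it can if a detach step fails part way through.
 */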
1524 1.58 haad static int
1525 1.58 haad cgd_modcmd(modcmd_t cmd, void *arg)
1526 1.58 haad {
1527 1.82 martin int error = 0;
1528 1.74 jruoho
1529 1.58 haad switch (cmd) {
1530 1.58 haad case MODULE_CMD_INIT:
1531 1.112 alnsn selftest();
1532 1.74 jruoho #ifdef _MODULE
1533 1.116.10.3 martin mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1534 1.116.10.3 martin cv_init(&cgd_spawning_cv, "cgspwn");
1535 1.116.10.3 martin
1536 1.66 dyoung error = config_cfdriver_attach(&cgd_cd);
1537 1.66 dyoung if (error)
1538 1.66 dyoung break;
1539 1.66 dyoung
1540 1.66 dyoung error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1541 1.66 dyoung if (error) {
1542 1.66 dyoung config_cfdriver_detach(&cgd_cd);
1543 1.109 pgoyette aprint_error("%s: unable to register cfattach for "
1544 1.109 pgoyette "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1545 1.66 dyoung break;
1546 1.66 dyoung }
1547 1.109 pgoyette /*
1548 1.109 pgoyette * Attach the {b,c}devsw's
1549 1.109 pgoyette */
1550 1.109 pgoyette error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1551 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1552 1.74 jruoho
1553 1.109 pgoyette /*
1554 1.109 pgoyette * If devsw_attach fails, remove from autoconf database
1555 1.109 pgoyette */
1556 1.66 dyoung if (error) {
1557 1.66 dyoung config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1558 1.66 dyoung config_cfdriver_detach(&cgd_cd);
1559 1.109 pgoyette aprint_error("%s: unable to attach %s devsw, "
1560 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1561 1.66 dyoung break;
1562 1.66 dyoung }
1563 1.74 jruoho #endif
1564 1.58 haad break;
1565 1.58 haad
1566 1.58 haad case MODULE_CMD_FINI:
1567 1.74 jruoho #ifdef _MODULE
1568 1.109 pgoyette /*
1569 1.109 pgoyette * Remove {b,c}devsw's
1570 1.109 pgoyette */
1571 1.109 pgoyette devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1572 1.109 pgoyette
1573 1.109 pgoyette /*
1574 1.109 pgoyette * Now remove device from autoconf database
1575 1.109 pgoyette */
1576 1.66 dyoung error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1577 1.109 pgoyette if (error) {
1578 1.110 pgoyette (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1579 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1580 1.109 pgoyette aprint_error("%s: failed to detach %s cfattach, "
1581 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1582 1.109 pgoyette break;
1583 1.109 pgoyette }
1584 1.109 pgoyette error = config_cfdriver_detach(&cgd_cd);
1585 1.109 pgoyette if (error) {
1586 1.110 pgoyette (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1587 1.110 pgoyette (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1588 1.109 pgoyette &cgd_cdevsw, &cgd_cmajor);
1589 1.109 pgoyette aprint_error("%s: failed to detach %s cfdriver, "
1590 1.109 pgoyette "error %d\n", __func__, cgd_cd.cd_name, error);
1591 1.66 dyoung break;
1592 1.109 pgoyette }
1593 1.116.10.3 martin
1594 1.116.10.3 martin cv_destroy(&cgd_spawning_cv);
1595 1.116.10.3 martin mutex_destroy(&cgd_spawning_mtx);
1596 1.74 jruoho #endif
1597 1.58 haad break;
1598 1.58 haad
1599 1.58 haad case MODULE_CMD_STAT:
1600 1.109 pgoyette error = ENOTTY;
1601 1.109 pgoyette break;
1602 1.58 haad default:
1603 1.109 pgoyette error = ENOTTY;
1604 1.109 pgoyette break;
1605 1.58 haad }
1606 1.58 haad
1607 1.58 haad return error;
1608 1.58 haad }
1609