/*	$NetBSD: cgd.c,v 1.124.2.1 2020/04/20 11:29:02 bouyer Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.124.2.1 2020/04/20 11:29:02 bouyer Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>		/* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/workqueue.h>
#include <sys/cpu.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h>	/* for v_rdev */

#include "ioconf.h"

struct selftest_params {
	const char *alg;
	int blocksize;		/* number of bytes */
	int secsize;
	daddr_t blkno;
	int keylen;		/* number of bits */
	int txtlen;		/* number of bytes */
	const uint8_t *key;
	const uint8_t *ptxt;
	const uint8_t *ctxt;
};

/* Entry Point Functions */

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};

const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};

/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};

static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};

/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};

const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	}
};

static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static struct cgd_worker *cgd_create_one_worker(void);
static void	cgd_destroy_one_worker(struct cgd_worker *);
static struct cgd_worker *cgd_create_worker(void);
static void	cgd_destroy_worker(struct cgd_worker *);
static int	cgd_destroy(device_t);

/* Internal Functions */

static int	cgd_diskstart(device_t, struct buf *);
static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
static void	cgdiodone(struct buf *);
static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
static void	cgd_process(struct work *, void *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
		    struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
		    size_t, daddr_t, size_t, int);

static const struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

/* Global variables */

static kmutex_t cgd_spawning_mtx;
static kcondvar_t cgd_spawning_cv;
static bool cgd_spawning;
static struct cgd_worker *cgd_worker;
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)

/* The code */

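/*
 * cgd_lock() serializes attach/detach ("spawning") against open and
 * close: whoever sets cgd_spawning owns the autoconf transition, and
 * everyone else waits on cgd_spawning_cv.  If `intr' is true the wait
 * can be interrupted by a signal.
 */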
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}

static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}

static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}

static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif
}

static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;
	struct cgd_worker *cw;
	struct cgd_softc *sc;

	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	cw = cgd_create_one_worker();
	if (cw == NULL) {
		kmem_free(cf, sizeof(*cf));
		return NULL;
	}

	sc = device_private(config_attach_pseudo(cf));
	if (sc == NULL) {
		cgd_destroy_one_worker(cw);
		return NULL;
	}

	sc->sc_worker = cw;

	return sc;
}

static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}

static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}

static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}

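/*
 * All cgd units share one global cgd_worker; cgd_create_one_worker()
 * and cgd_destroy_one_worker() refcount it, creating the workqueue
 * and transfer pool on first use and tearing them down on last use.
 */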
static struct cgd_worker *
cgd_create_one_worker(void)
{
	KASSERT(cgd_spawning);

	if (cgd_refcnt++ == 0) {
		KASSERT(cgd_worker == NULL);
		cgd_worker = cgd_create_worker();
	}

	KASSERT(cgd_worker != NULL);
	return cgd_worker;
}

static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}

static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}

static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}

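/*
 * Opening a nonexistent unit attaches it on the fly via cgd_spawn(),
 * so userland (e.g. cgdconfig(8)) can configure units that have not
 * been attached yet; cgdclose() destroys an unconfigured unit again.
 */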
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *sc = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned buffers.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&sc->sc_dksc, bp);
	return;

bail:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *sc = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!sc)
		return -1;
	return dk_size(&sc->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We can no longer have a buffer per
 * device; we need a buffer per work queue...
 */
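
/*
 * For now there is a single preallocated MAXPHYS-sized buffer
 * (sc_data) per device; when it is busy, cgd_getdata() falls back
 * to a KM_NOSLEEP allocation, and cgd_putdata() releases whichever
 * buffer it is handed.
 */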

static void *
cgd_getdata(struct cgd_softc *sc, unsigned long size)
{
	void *data = NULL;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_data_used) {
		sc->sc_data_used = true;
		data = sc->sc_data;
	}
	mutex_exit(&sc->sc_lock);

	if (data)
		return data;

	return kmem_intr_alloc(size, KM_NOSLEEP);
}

static void
cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
{

	if (data == sc->sc_data) {
		mutex_enter(&sc->sc_lock);
		sc->sc_data_used = false;
		mutex_exit(&sc->sc_lock);
	} else
		kmem_intr_free(data, size);
}

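/*
 * I/O flow, for reference:
 *
 *	write:	cgd_diskstart() -> cgd_enqueue() -> cgd_process()
 *		(encrypt) -> cgd_diskstart2() -> VOP_STRATEGY()
 *		-> cgdiodone() -> cgd_iodone2()
 *
 *	read:	cgd_diskstart() -> cgd_diskstart2() -> VOP_STRATEGY()
 *		-> cgdiodone() -> cgd_enqueue() -> cgd_process()
 *		(decrypt) -> cgd_iodone2()
 */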
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct cgd_xfer *cx;
	struct buf *nbp;
	void *newaddr;
	daddr_t bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		cgd_enqueue(sc, cx);
		return 0;
	}

	cgd_diskstart2(sc, cx);
	return 0;
}

static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}

static void
cgdiodone(struct buf *nbp)
{
	struct cgd_xfer *cx = nbp->b_private;
	struct buf *obp = cx->cx_obp;
	struct cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the block number from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp = obp;
		cx->cx_nbp = nbp;
		cx->cx_dstv = obp->b_data;
		cx->cx_srcv = obp->b_data;
		cx->cx_len = obp->b_bcount;
		cx->cx_blkno = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}

static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}

static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device. */
	return error;
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	sc = getcgd_softc(dev);
	if (sc == NULL)
		return ENXIO;
	dksc = &sc->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	sc = getcgd_softc(dev);
	if (sc == NULL)
		return ENXIO;
	dksc = &sc->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment. */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary. */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	sc = getcgd_softc(dev);
	if (sc == NULL)
		return ENXIO;
	return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

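/*
 * Table of recognized IV method names: `n' is the name passed to
 * CGDIOCSET, `v' the cipher mode implementing it, and `d' the divisor
 * applied to the cipher's blocksize for the bits-vs-bytes
 * compatibility dance in cgd_ioctl_set() below.
 */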
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

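/*
 * For reference, a rough sketch of how userland drives CGDIOCSET
 * (illustrative only -- cgdconfig(8) is the real consumer, and the
 * values below are made up):
 *
 *	struct cgd_ioctl ci;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/wd0e";	// backing device to encrypt
 *	ci.ci_alg = "aes-xts";		// cipher, see cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno1";	// IV method from the table above
 *	ci.ci_key = key;		// pointer to the raw key bits
 *	ci.ci_keylen = 256;		// key length, in bits
 *	ci.ci_blocksize = 128;		// cipher blocksize, in bits
 *	error = ioctl(fd, CGDIOCSET, &ci);
 */
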
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;		/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &sc->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = vn_bdev_openpath(pb, &vp, l);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);

	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	sc->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!sc->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
	sc->sc_cdata.cf_mode = encblkno[i].v;
	sc->sc_cdata.cf_keylen = ci->ci_keylen;
	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &sc->sc_cdata.cf_blocksize);
	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		sc->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!sc->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	kmem_free(inbuf, MAX_KEYSIZE);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
	sc->sc_data_used = false;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	kmem_free(inbuf, MAX_KEYSIZE);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	/* XXX, we always return this unit's data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}

static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
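	 *
	 * For example, blkno 0x0102 is laid out as the bytes
	 * 02 01 00 ... in the first sizeof(daddr_t) positions of
	 * the buffer, regardless of the host's endianness.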
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

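/*
 * Pick a CPU for the next transfer: round-robin over the CPUs backing
 * the per-CPU workqueue, or NULL (i.e. the current CPU) when the
 * worker is otherwise idle.
 */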
static struct cpu_info *
cgd_cpu(struct cgd_softc *sc)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci = NULL;
	u_int cidx, i;

	if (cw->cw_busy == 0) {
		cw->cw_last = cpu_index(curcpu());
		return NULL;
	}

	for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
		if (cidx >= maxcpus)
			cidx = 0;
		ci = cpu_lookup(cidx);
		if (ci) {
			cw->cw_last = cidx;
			break;
		}
	}

	return ci;
}

static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	ci = cgd_cpu(sc);
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}

static void
cgd_process(struct work *wk, void *arg)
{
	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
	struct cgd_softc *sc = cx->cx_sc;
	struct cgd_worker *cw = sc->sc_worker;

	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);

	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
		cgd_diskstart2(sc, cx);
	} else {
		cgd_iodone2(sc, cx);
	}

	mutex_enter(&cw->cw_lock);
	if (cw->cw_busy > 0)
		cw->cw_busy--;
	mutex_exit(&cw->cw_lock);
}

static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher_prep *ciprep = sc->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = sc->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	KASSERTMSG(len % blocksize == 0,
	    "cgd_cipher: len %% blocksize != 0");

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERTMSG(sizeof(daddr_t) <= blocksize,
	    "cgd_cipher: sizeof(daddr_t) > blocksize");

	KASSERTMSG(blocksize <= CGD_MAXBLOCKSIZE,
	    "cgd_cipher: blocksize > CGD_MAXBLOCKSIZE");

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len = todo;
		srciov[0].iov_len = todo;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV.  All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

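/*
 * Known-answer self-tests, run at module initialization: each vector
 * in selftests[] is encrypted and decrypted in place, and the kernel
 * panics on any mismatch.
 */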
static void
selftest(void)
{
	struct cgd_softc sc;
	void *buf;

	printf("running cgd selftest ");

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		printf("%s-%d ", alg, keylen);

		memset(&sc, 0, sizeof(sc));

		sc.sc_cfuncs = cryptfuncs_find(alg);
		if (sc.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		sc.sc_cdata.cf_keylen = keylen;

		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
		    key, &sc.sc_cdata.cf_blocksize);
		if (sc.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);

		sc.sc_cdata.cf_blocksize /= 8;

		buf = kmem_alloc(txtlen, KM_SLEEP);
		memcpy(buf, selftests[i].ptxt, txtlen);

		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
			panic("encryption is broken");

		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
			panic("decryption is broken");

		kmem_free(buf, txtlen);
		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
	}

	printf("done\n");
}

MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif

static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
		selftest();
#ifdef _MODULE
		mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&cgd_spawning_cv, "cgspwn");

		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach for "
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		/*
		 * Attach the {b,c}devsw's
		 */
		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
		    &cgd_cdevsw, &cgd_cmajor);

		/*
		 * If devsw_attach fails, remove from autoconf database
		 */
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to attach %s devsw, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		/*
		 * Remove {b,c}devsw's
		 */
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);

		/*
		 * Now remove device from autoconf database
		 */
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfattach, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		error = config_cfdriver_detach(&cgd_cd);
		if (error) {
			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfdriver, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}

		cv_destroy(&cgd_spawning_cv);
		mutex_destroy(&cgd_spawning_mtx);
#endif
		break;

	case MODULE_CMD_STAT:
		error = ENOTTY;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return error;
}