cgd.c revision 1.122 1 /* $NetBSD: cgd.c,v 1.122 2020/03/09 08:33:15 mlelstv Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.122 2020/03/09 08:33:15 mlelstv Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/kmem.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
45 #include <sys/pool.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/disk.h>
49 #include <sys/disklabel.h>
50 #include <sys/fcntl.h>
51 #include <sys/namei.h> /* for pathbuf */
52 #include <sys/vnode.h>
53 #include <sys/conf.h>
54 #include <sys/syslog.h>
55 #include <sys/workqueue.h>
56 #include <sys/cpu.h>
57
58 #include <dev/dkvar.h>
59 #include <dev/cgdvar.h>
60
61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
62
63 #include "ioconf.h"
64
/*
 * One cipher self-test vector: encrypting txtlen bytes of ptxt at
 * disk block blkno with key must yield ctxt.
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int blocksize;		/* number of bytes */
	int secsize;		/* sector size used for the test */
	daddr_t blkno;		/* block number fed to the IV generator */
	int keylen;		/* number of bits */
	int txtlen;		/* number of bytes */
	const uint8_t *key;	/* raw key material */
	const uint8_t *ptxt;	/* plaintext input */
	const uint8_t *ctxt;	/* expected ciphertext output */
};
76
77 /* Entry Point Functions */
78
/* Prototype the block/character device entry points (devsw tables below). */
static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);
87
/* Block device switch: cgd is a disk and all entry points are MP-safe. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
98
/* Character device switch: raw I/O via physio, everything else a no-op. */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
113
114 /*
115 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
116 */
/* 64-byte plaintext for the 256-bit AES-XTS self-test. */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
127
/*
 * Expected 64-byte ciphertext matching selftest_aes_xts_256_ptxt.
 * Only txtlen (= sizeof(ptxt) = 64) bytes are ever compared, so the
 * array is sized [64] to match its plaintext counterpart; it was
 * previously over-declared as [512], silently zero-padding 448
 * unused bytes.
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
138
/* 256-bit (32-byte) key; trailing 0 keeps the buffer NUL-terminated. */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
146
147 /*
148 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
149 */
/* 64-byte plaintext for the 512-bit AES-XTS self-test. */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};
160
/* Expected 64-byte ciphertext matching selftest_aes_xts_512_ptxt. */
static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};
171
/* 512-bit (64-byte) key; trailing 0 keeps the buffer NUL-terminated. */
static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
183
/*
 * Table of cipher self-test vectors run at configuration time.
 * Both entries exercise aes-xts, with 256- and 512-bit keys.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	}
};
208
/* Autoconf glue and pseudo-device/worker lifecycle. */
static int cgd_match(device_t, cfdata_t, void *);
static void cgd_attach(device_t, device_t, void *);
static int cgd_detach(device_t, int);
static struct cgd_softc *cgd_spawn(int);
static struct cgd_worker *cgd_create_one_worker(void);
static void cgd_destroy_one_worker(struct cgd_worker *);
static struct cgd_worker *cgd_create_worker(void);
static void cgd_destroy_worker(struct cgd_worker *);
static int cgd_destroy(device_t);

/* Internal Functions */

/* I/O path: queueing, encryption/decryption, completion, crash dump. */
static int cgd_diskstart(device_t, struct buf *);
static void cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
static void cgdiodone(struct buf *);
static void cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
static void cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
static void cgd_process(struct work *, void *);
static int cgd_dumpblocks(device_t, void *, daddr_t, int);

/* Configuration (ioctl backends) and cipher dispatch. */
static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int cgd_ioctl_get(dev_t, void *, struct lwp *);
static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
	struct lwp *);
static void cgd_cipher(struct cgd_softc *, void *, void *,
	size_t, daddr_t, size_t, int);
236
/* dk(9) driver hooks: I/O is started via cgd_diskstart. */
static struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};
247
248 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
249 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
250
251 /* DIAGNOSTIC and DEBUG definitions */
252
253 #if defined(CGDDEBUG) && !defined(DEBUG)
254 #define DEBUG
255 #endif
256
257 #ifdef DEBUG
258 int cgddebug = 0;
259
260 #define CGDB_FOLLOW 0x1
261 #define CGDB_IO 0x2
262 #define CGDB_CRYPTO 0x4
263
264 #define IFDEBUG(x,y) if (cgddebug & (x)) y
265 #define DPRINTF(x,y) IFDEBUG(x, printf y)
266 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
267
268 static void hexprint(const char *, void *, int);
269
270 #else
271 #define IFDEBUG(x,y)
272 #define DPRINTF(x,y)
273 #define DPRINTF_FOLLOW(y)
274 #endif
275
276 /* Global variables */
277
278 static kmutex_t cgd_spawning_mtx;
279 static kcondvar_t cgd_spawning_cv;
280 static bool cgd_spawning;
281 static struct cgd_worker *cgd_worker;
282 static u_int cgd_refcnt; /* number of users of cgd_worker */
283
284 /* Utility Functions */
285
286 #define CGDUNIT(x) DISKUNIT(x)
287
288 /* The code */
289
/*
 * Take the global "spawning" token that serializes pseudo-device
 * creation and destruction.  If intr is true the wait is
 * interruptible (cv_wait_sig) and a caught signal aborts with the
 * cv error; otherwise the wait is uninterruptible.  Returns 0 with
 * the token held, or the error without it.
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	/* Claim the token only if no interrupted wait reported an error. */
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
307
/*
 * Release the "spawning" token taken by cgd_lock() and wake all
 * waiters.
 */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
316
317 static struct cgd_softc *
318 getcgd_softc(dev_t dev)
319 {
320 return device_lookup_private(&cgd_cd, CGDUNIT(dev));
321 }
322
/*
 * Autoconf match hook: cgd is a pseudo-device, so every candidate
 * configuration matches unconditionally.
 */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
329
/*
 * Autoconf attach hook: initialize the per-instance lock, condvar,
 * dk(9)/disk(9) state, and register power-management hooks.  The
 * cipher configuration happens later via CGDIOCSET.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	/* Registration failure is tolerated: the device still works. */
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
344
345
346 static int
347 cgd_detach(device_t self, int flags)
348 {
349 int ret;
350 const int pmask = 1 << RAW_PART;
351 struct cgd_softc *sc = device_private(self);
352 struct dk_softc *dksc = &sc->sc_dksc;
353
354 if (DK_BUSY(dksc, pmask))
355 return EBUSY;
356
357 if (DK_ATTACHED(dksc) &&
358 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
359 return ret;
360
361 disk_destroy(&dksc->sc_dkdev);
362 cv_destroy(&sc->sc_cv);
363 mutex_destroy(&sc->sc_lock);
364
365 return 0;
366 }
367
/*
 * Pseudo-device attach entry (called once at boot for the built-in
 * case).  For the modular case (_MODULE) initialization is done by
 * the module ctor instead, so this body is compiled out.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif
}
383
384 static struct cgd_softc *
385 cgd_spawn(int unit)
386 {
387 cfdata_t cf;
388 struct cgd_worker *cw;
389 struct cgd_softc *sc;
390
391 cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
392 cf->cf_name = cgd_cd.cd_name;
393 cf->cf_atname = cgd_cd.cd_name;
394 cf->cf_unit = unit;
395 cf->cf_fstate = FSTATE_STAR;
396
397 cw = cgd_create_one_worker();
398 if (cw == NULL) {
399 kmem_free(cf, sizeof(*cf));
400 return NULL;
401 }
402
403 sc = device_private(config_attach_pseudo(cf));
404 if (sc == NULL) {
405 cgd_destroy_one_worker(cw);
406 return NULL;
407 }
408
409 sc->sc_worker = cw;
410
411 return sc;
412 }
413
/*
 * Tear down a pseudo-device instance created by cgd_spawn(): detach
 * the device, drop the worker reference, and free the cfdata record
 * allocated at spawn time.  Caller must hold the spawning token.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab cf before detach frees the softc behind it. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
432
/*
 * Mark this instance busy, sleeping until any current holder of the
 * busy flag releases it via cgd_unbusy().
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
443
/*
 * Clear the busy flag set by cgd_busy() and wake all waiters.
 */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
453
/*
 * Take a reference on the shared worker, creating it on the first
 * reference.  Caller must hold the spawning token, which serializes
 * access to cgd_refcnt/cgd_worker.
 */
static struct cgd_worker *
cgd_create_one_worker(void)
{
	KASSERT(cgd_spawning);

	if (cgd_refcnt++ == 0) {
		KASSERT(cgd_worker == NULL);
		cgd_worker = cgd_create_worker();
	}

	KASSERT(cgd_worker != NULL);
	return cgd_worker;
}
467
/*
 * Drop a reference on the shared worker, destroying it when the
 * last reference goes away.  Caller must hold the spawning token.
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
479
/*
 * Allocate the shared worker: a per-CPU MP-safe workqueue running
 * cgd_process() at BIO priority, plus a pool of cgd_xfer contexts.
 * Returns NULL if the workqueue cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_MPSAFE | WQ_PERCPU);
	if (error) {
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);

	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
508
/*
 * Free a worker created by cgd_create_worker(): destroy the lock,
 * the xfer pool and the workqueue, then the worker itself.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
523
/*
 * Open entry point: spawn the pseudo-device instance on first open
 * of a unit (under the spawning token; the wait is interruptible),
 * then hand off to dk_open().
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
544
/*
 * Close entry point: hand off to dk_close(), and if this was the
 * last close of an unconfigured unit, destroy the pseudo-device
 * instance again (under the spawning token; uninterruptible).
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Fully closed and no cipher configured: reap the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
580
/*
 * Strategy entry point: validate alignment and queue the buffer via
 * dk_strategy().
 *
 * NOTE(review): sc is not NULL-checked here; presumably strategy is
 * only reachable on an open (hence attached) device — confirm.
 */
static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *sc = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&sc->sc_dksc, bp);
	return;

bail:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}
605
606 static int
607 cgdsize(dev_t dev)
608 {
609 struct cgd_softc *sc = getcgd_softc(dev);
610
611 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
612 if (!sc)
613 return -1;
614 return dk_size(&sc->sc_dksc, dev);
615 }
616
617 /*
618 * cgd_{get,put}data are functions that deal with getting a buffer
619 * for the new encrypted data.
620 * We can no longer have a buffer per device, we need a buffer per
621 * work queue...
622 */
623
624 static void *
625 cgd_getdata(struct cgd_softc *sc, unsigned long size)
626 {
627 void *data = NULL;
628
629 mutex_enter(&sc->sc_lock);
630 if (!sc->sc_data_used) {
631 sc->sc_data_used = true;
632 data = sc->sc_data;
633 }
634 mutex_exit(&sc->sc_lock);
635
636 if (data)
637 return data;
638
639 return malloc(size, M_DEVBUF, M_WAITOK);
640 }
641
642 static void
643 cgd_putdata(struct cgd_softc *sc, void *data)
644 {
645
646 if (data == sc->sc_data) {
647 mutex_enter(&sc->sc_lock);
648 sc->sc_data_used = false;
649 mutex_exit(&sc->sc_lock);
650 } else
651 free(data, M_DEVBUF);
652 }
653
/*
 * dk(9) diskstart hook: allocate the nested buf and xfer context
 * for one request.  Writes are queued to the worker for encryption
 * into a bounce buffer; reads go straight to the underlying device
 * via cgd_diskstart2() and are decrypted in cgdiodone().
 *
 * Returns EAGAIN when any resource is unavailable so dk(9) can
 * retry the request later.
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct cgd_xfer *cx;
	struct buf *nbp;
	void * newaddr;
	daddr_t bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the worker; it calls cgd_diskstart2() after. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	cgd_diskstart2(sc, cx);
	return 0;
}
714
/*
 * Second half of request start: clone the original buf's state into
 * the nested buf (pointing at the possibly-encrypted data) and send
 * it to the underlying vnode.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Translate sector number to the underlying device's block units. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for the pending output on the vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
745
/*
 * Completion handler for the nested buf: propagate errors, then for
 * reads queue the decryption to the worker (which finishes in
 * cgd_iodone2()); writes go straight to cgd_iodone2().
 */
static void
cgdiodone(struct buf *nbp)
{
	struct cgd_xfer *cx = nbp->b_private;
	struct buf *obp = cx->cx_obp;
	struct cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		cx->cx_obp = obp;
		cx->cx_nbp = nbp;
		cx->cx_dstv = obp->b_data;
		cx->cx_srcv = obp->b_data;
		cx->cx_len = obp->b_bcount;
		cx->cx_blkno = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
794
/*
 * Final request completion: release the xfer context, bounce buffer
 * and nested buf, report the result to dk(9), and kick the queue to
 * start any deferred requests.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
819
/*
 * dk(9) dumpblocks hook (crash-dump path): encrypt nblk blocks of
 * caller data into a bounce buffer and pass them to the underlying
 * device's d_dump.  Must not sleep more than necessary — runs at
 * dump time.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors. Everything
	 * else in cgd uses units of diskgeom sectors. These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(sc, buf);

	/* Return any error from the underlying disk device. */
	return error;
}
865
866 /* XXX: we should probably put these into dksubr.c, mostly */
867 static int
868 cgdread(dev_t dev, struct uio *uio, int flags)
869 {
870 struct cgd_softc *sc;
871 struct dk_softc *dksc;
872
873 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
874 (unsigned long long)dev, uio, flags));
875 sc = getcgd_softc(dev);
876 if (sc == NULL)
877 return ENXIO;
878 dksc = &sc->sc_dksc;
879 if (!DK_ATTACHED(dksc))
880 return ENXIO;
881 return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
882 }
883
884 /* XXX: we should probably put these into dksubr.c, mostly */
885 static int
886 cgdwrite(dev_t dev, struct uio *uio, int flags)
887 {
888 struct cgd_softc *sc;
889 struct dk_softc *dksc;
890
891 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
892 sc = getcgd_softc(dev);
893 if (sc == NULL)
894 return ENXIO;
895 dksc = &sc->sc_dksc;
896 if (!DK_ATTACHED(dksc))
897 return ENXIO;
898 return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
899 }
900
/*
 * Ioctl entry point.  The first switch resolves the softc (and
 * permission checks); CGDIOCGET is special because it must work on
 * unconfigured units and so never reaches the second switch.  The
 * second switch dispatches the actual operation, serialized against
 * concurrent configuration by cgd_busy()/cgd_unbusy().
 */
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *sc;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		/* Works without a softc; returns early. */
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configuration changes require a writable open. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment. */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary. */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/* Already handled above; reaching here is a logic error. */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
1010
1011 static int
1012 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1013 {
1014 struct cgd_softc *sc;
1015
1016 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1017 dev, blkno, va, (unsigned long)size));
1018 sc = getcgd_softc(dev);
1019 if (sc == NULL)
1020 return ENXIO;
1021 return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1022 }
1023
1024 /*
1025 * XXXrcd:
1026 * for now we hardcode the maximum key length.
1027 */
1028 #define MAX_KEYSIZE 1024
1029
/*
 * IV-method table: n is the user-visible name, v the cipher mode,
 * and d the divisor applied to the blocksize for backward
 * compatibility (historically "encblkno" expressed it in bits).
 */
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1039
1040 /* ARGSUSED */
/*
 * CGDIOCSET backend: open the underlying device, copy in and
 * validate the algorithm, IV method and key, initialize the cipher
 * state, and attach the dk(9)/disk(9) device.  On any failure all
 * acquired resources are released and the key buffer is zeroized.
 *
 * Caller holds the instance busy (cgd_busy()).
 */
static int
cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &sc->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = vn_bdev_openpath(pb, &vp, l);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);

	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
		goto bail;

	/* Resolve the cipher by name. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	sc->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!sc->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* Resolve the IV method against the encblkno table. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/* +1 for the NUL terminator expected by cf_init. */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
	sc->sc_cdata.cf_mode = encblkno[i].v;
	sc->sc_cdata.cf_keylen = ci->ci_keylen;
	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &sc->sc_cdata.cf_blocksize);
	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    sc->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
	/* Zeroize the key copy; explicit_memset resists optimization. */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!sc->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	kmem_free(inbuf, MAX_KEYSIZE);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	/* Shared bounce buffer used by cgd_getdata(). */
	sc->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	sc->sc_data_used = false;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	kmem_free(inbuf, MAX_KEYSIZE);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
1151
1152 /* ARGSUSED */
/*
 * CGDIOCCLR backend: undo cgd_ioctl_set() — delete wedges, drain
 * and free the buffer queue, close the underlying vnode, destroy
 * the cipher state, and detach the disk.
 *
 * Caller holds the instance busy (cgd_busy()).
 */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	free(sc->sc_data, M_DEVBUF);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1178
/*
 * CGDIOCGET backend: report the configuration of a unit.  Works for
 * unconfigured or nonexistent units (all fields zeroed) so userland
 * can probe.  Serialized against spawn/destroy by the spawning
 * token.
 */
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	/* XXX, we always return this units data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		/* Unit missing or unconfigured: report empty state. */
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		/* Snapshot the configuration under the instance lock. */
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}
1227
1228 static int
1229 cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
1230 struct lwp *l)
1231 {
1232 struct disk_geom *dg;
1233 int ret;
1234 char *tmppath;
1235 uint64_t psize;
1236 unsigned secsize;
1237 struct dk_softc *dksc = &sc->sc_dksc;
1238
1239 sc->sc_tvn = vp;
1240 sc->sc_tpath = NULL;
1241
1242 tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1243 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
1244 if (ret)
1245 goto bail;
1246 sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
1247 memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);
1248
1249 sc->sc_tdev = vp->v_rdev;
1250
1251 if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1252 goto bail;
1253
1254 if (psize == 0) {
1255 ret = ENODEV;
1256 goto bail;
1257 }
1258
1259 /*
1260 * XXX here we should probe the underlying device. If we
1261 * are accessing a partition of type RAW_PART, then
1262 * we should populate our initial geometry with the
1263 * geometry that we discover from the device.
1264 */
1265 dg = &dksc->sc_dkdev.dk_geom;
1266 memset(dg, 0, sizeof(*dg));
1267 dg->dg_secperunit = psize;
1268 dg->dg_secsize = secsize;
1269 dg->dg_ntracks = 1;
1270 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1271 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
1272
1273 bail:
1274 kmem_free(tmppath, MAXPATHLEN);
1275 if (ret && sc->sc_tpath)
1276 kmem_free(sc->sc_tpath, sc->sc_tpathlen);
1277 return ret;
1278 }
1279
1280 /*
1281 * Our generic cipher entry point. This takes care of the
1282 * IV mode and passes off the work to the specific cipher.
1283 * We implement here the IV method ``encrypted block
1284 * number''.
1285 *
1286 * XXXrcd: for now we rely on our own crypto framework defined
1287 * in dev/cgd_crypto.c. This will change when we
1288 * get a generic kernel crypto framework.
1289 */
1290
/*
 * Serialize blkno into sbuf, least-significant byte first, so the
 * per-sector IV representation is independent of host endianness
 * and of sizeof(daddr_t) growth.
 *
 * Note that blkno2blkno_buf does not take a size as input, and
 * hence must be called on a pre-zeroed buffer of length greater
 * than or equal to sizeof(daddr_t).
 */
static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	size_t i;	/* size_t: avoids signed/unsigned comparison with sizeof */

	for (i = 0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}
1316
1317 static struct cpu_info *
1318 cgd_cpu(struct cgd_softc *sc)
1319 {
1320 struct cgd_worker *cw = sc->sc_worker;
1321 struct cpu_info *ci = NULL;
1322 u_int cidx, i;
1323
1324 if (cw->cw_busy == 0) {
1325 cw->cw_last = cpu_index(curcpu());
1326 return NULL;
1327 }
1328
1329 for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1330 if (cidx >= maxcpus)
1331 cidx = 0;
1332 ci = cpu_lookup(cidx);
1333 if (ci) {
1334 cw->cw_last = cidx;
1335 break;
1336 }
1337 }
1338
1339 return ci;
1340 }
1341
/*
 * Queue a crypto transfer on the softc's worker.  A target CPU is
 * chosen under cw_lock (NULL lets the workqueue decide), and the
 * busy count is raised; cgd_process() drops it again.
 */
static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	ci = cgd_cpu(sc);
	/* One more transfer in flight; decremented in cgd_process(). */
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}
1355
1356 static void
1357 cgd_process(struct work *wk, void *arg)
1358 {
1359 struct cgd_xfer *cx = (struct cgd_xfer *)wk;
1360 struct cgd_softc *sc = cx->cx_sc;
1361 struct cgd_worker *cw = sc->sc_worker;
1362
1363 cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
1364 cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
1365
1366 if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
1367 cgd_diskstart2(sc, cx);
1368 } else {
1369 cgd_iodone2(sc, cx);
1370 }
1371
1372 mutex_enter(&cw->cw_lock);
1373 if (cw->cw_busy > 0)
1374 cw->cw_busy--;
1375 mutex_exit(&cw->cw_lock);
1376 }
1377
/*
 * Apply the configured cipher to len bytes, one sector at a time.
 * For each sector the IV is derived from the sector's block number
 * ("encrypted blkno" IV method): the blkno is serialized into a
 * zeroed cipher-block-sized buffer and handed to cf_cipher_prep,
 * which may transform it in place.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher_prep *ciprep = sc->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher *cipher = sc->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = sc->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	KASSERTMSG(len % blocksize == 0,
	    "cgd_cipher: len %% blocksize != 0");

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERTMSG(sizeof(daddr_t) <= blocksize,
	    "cgd_cipher: sizeof(daddr_t) > blocksize");

	KASSERTMSG(blocksize <= CGD_MAXBLOCKSIZE,
	    "cgd_cipher: blocksize > CGD_MAXBLOCKSIZE");

	/* Single-segment uios; the iovecs are refilled per sector below. */
	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

	for (; len > 0; len -= todo) {
		/* At most one sector per cipher call. */
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len = todo;
		srciov[0].iov_len = todo;

		/* Build the per-sector IV seed from the block number. */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV.  All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(sc->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(sc->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1440
#ifdef DEBUG
/* Print len bytes of buf as lowercase hex, prefixed by a label. */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
#endif
1453
1454 static void
1455 selftest(void)
1456 {
1457 struct cgd_softc sc;
1458 void *buf;
1459
1460 printf("running cgd selftest ");
1461
1462 for (size_t i = 0; i < __arraycount(selftests); i++) {
1463 const char *alg = selftests[i].alg;
1464 const uint8_t *key = selftests[i].key;
1465 int keylen = selftests[i].keylen;
1466 int txtlen = selftests[i].txtlen;
1467
1468 printf("%s-%d ", alg, keylen);
1469
1470 memset(&sc, 0, sizeof(sc));
1471
1472 sc.sc_cfuncs = cryptfuncs_find(alg);
1473 if (sc.sc_cfuncs == NULL)
1474 panic("%s not implemented", alg);
1475
1476 sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1477 sc.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1478 sc.sc_cdata.cf_keylen = keylen;
1479
1480 sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
1481 key, &sc.sc_cdata.cf_blocksize);
1482 if (sc.sc_cdata.cf_priv == NULL)
1483 panic("cf_priv is NULL");
1484 if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1485 panic("bad block size %zu", sc.sc_cdata.cf_blocksize);
1486
1487 sc.sc_cdata.cf_blocksize /= 8;
1488
1489 buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
1490 memcpy(buf, selftests[i].ptxt, txtlen);
1491
1492 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1493 selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1494 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1495 panic("encryption is broken");
1496
1497 cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
1498 selftests[i].secsize, CGD_CIPHER_DECRYPT);
1499 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1500 panic("decryption is broken");
1501
1502 free(buf, M_DEVBUF);
1503 sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
1504 }
1505
1506 printf("done\n");
1507 }
1508
/* Module glue: cgd requires the listed cipher and disk-support modules. */
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Block/character device majors, filled in by devsw_attach(). */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
1516
1517 static int
1518 cgd_modcmd(modcmd_t cmd, void *arg)
1519 {
1520 int error = 0;
1521
1522 switch (cmd) {
1523 case MODULE_CMD_INIT:
1524 selftest();
1525 #ifdef _MODULE
1526 mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1527 cv_init(&cgd_spawning_cv, "cgspwn");
1528
1529 error = config_cfdriver_attach(&cgd_cd);
1530 if (error)
1531 break;
1532
1533 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1534 if (error) {
1535 config_cfdriver_detach(&cgd_cd);
1536 aprint_error("%s: unable to register cfattach for"
1537 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1538 break;
1539 }
1540 /*
1541 * Attach the {b,c}devsw's
1542 */
1543 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1544 &cgd_cdevsw, &cgd_cmajor);
1545
1546 /*
1547 * If devsw_attach fails, remove from autoconf database
1548 */
1549 if (error) {
1550 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1551 config_cfdriver_detach(&cgd_cd);
1552 aprint_error("%s: unable to attach %s devsw, "
1553 "error %d", __func__, cgd_cd.cd_name, error);
1554 break;
1555 }
1556 #endif
1557 break;
1558
1559 case MODULE_CMD_FINI:
1560 #ifdef _MODULE
1561 /*
1562 * Remove {b,c}devsw's
1563 */
1564 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1565
1566 /*
1567 * Now remove device from autoconf database
1568 */
1569 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1570 if (error) {
1571 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1572 &cgd_cdevsw, &cgd_cmajor);
1573 aprint_error("%s: failed to detach %s cfattach, "
1574 "error %d\n", __func__, cgd_cd.cd_name, error);
1575 break;
1576 }
1577 error = config_cfdriver_detach(&cgd_cd);
1578 if (error) {
1579 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1580 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1581 &cgd_cdevsw, &cgd_cmajor);
1582 aprint_error("%s: failed to detach %s cfdriver, "
1583 "error %d\n", __func__, cgd_cd.cd_name, error);
1584 break;
1585 }
1586
1587 cv_destroy(&cgd_spawning_cv);
1588 mutex_destroy(&cgd_spawning_mtx);
1589 #endif
1590 break;
1591
1592 case MODULE_CMD_STAT:
1593 error = ENOTTY;
1594 break;
1595 default:
1596 error = ENOTTY;
1597 break;
1598 }
1599
1600 return error;
1601 }
1602