1 /* $NetBSD: cgd.c,v 1.114.4.4 2017/04/28 06:00:33 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.114.4.4 2017/04/28 06:00:33 pgoyette Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 #include <sys/localcount.h>
55
56 #include <dev/dkvar.h>
57 #include <dev/cgdvar.h>
58
59 #include <miscfs/specfs/specdev.h> /* for v_rdev */
60
61 #include "ioconf.h"
62
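/*
 * Parameters for one encrypt/decrypt known-answer self-test case,
 * run by selftest() at module initialization time.
 */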
63 struct selftest_params {
64 const char *alg;
65 int blocksize; /* number of bytes */
66 int secsize;
67 daddr_t blkno;
68 int keylen; /* number of bits */
69 int txtlen; /* number of bytes */
70 const uint8_t *key;
71 const uint8_t *ptxt;
72 const uint8_t *ctxt;
73 };
74
75 /* Entry Point Functions */
76
77 static dev_type_open(cgdopen);
78 static dev_type_close(cgdclose);
79 static dev_type_read(cgdread);
80 static dev_type_write(cgdwrite);
81 static dev_type_ioctl(cgdioctl);
82 static dev_type_strategy(cgdstrategy);
83 static dev_type_dump(cgddump);
84 static dev_type_size(cgdsize);
85
86 const struct bdevsw cgd_bdevsw = {
87 DEVSW_MODULE_INIT
88 .d_open = cgdopen,
89 .d_close = cgdclose,
90 .d_strategy = cgdstrategy,
91 .d_ioctl = cgdioctl,
92 .d_dump = cgddump,
93 .d_psize = cgdsize,
94 .d_discard = nodiscard,
95 .d_flag = D_DISK
96 };
97
98 const struct cdevsw cgd_cdevsw = {
99 DEVSW_MODULE_INIT
100 .d_open = cgdopen,
101 .d_close = cgdclose,
102 .d_read = cgdread,
103 .d_write = cgdwrite,
104 .d_ioctl = cgdioctl,
105 .d_stop = nostop,
106 .d_tty = notty,
107 .d_poll = nopoll,
108 .d_mmap = nommap,
109 .d_kqfilter = nokqfilter,
110 .d_discard = nodiscard,
111 .d_flag = D_DISK
112 };
113
114 /*
115 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
116 */
117 static const uint8_t selftest_aes_xts_256_ptxt[64] = {
118 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
119 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
120 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
121 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
122 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
123 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
124 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
125 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
126 };
127
128 static const uint8_t selftest_aes_xts_256_ctxt[64] = {
129 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
130 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
131 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
132 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
133 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
134 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
135 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
136 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
137 };
138
139 static const uint8_t selftest_aes_xts_256_key[33] = {
140 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
141 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
142 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
143 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
144 0
145 };
146
147 /*
148 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
149 */
150 static const uint8_t selftest_aes_xts_512_ptxt[64] = {
151 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
152 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
153 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
154 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
155 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
156 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
157 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
158 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
159 };
160
161 static const uint8_t selftest_aes_xts_512_ctxt[64] = {
162 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
163 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
164 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
165 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
166 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
167 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
168 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
169 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
170 };
171
172 static const uint8_t selftest_aes_xts_512_key[65] = {
173 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
174 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
175 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
176 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
177 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
178 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
179 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
180 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
181 0
182 };
183
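/*
 * Known-answer test cases built from the IEEE 1619/D16 XTS-AES
 * vectors above, one with a 256-bit key and one with a 512-bit key.
 */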
184 const struct selftest_params selftests[] = {
185 {
186 .alg = "aes-xts",
187 .blocksize = 16,
188 .secsize = 512,
189 .blkno = 1,
190 .keylen = 256,
191 .txtlen = sizeof(selftest_aes_xts_256_ptxt),
192 .key = selftest_aes_xts_256_key,
193 .ptxt = selftest_aes_xts_256_ptxt,
194 .ctxt = selftest_aes_xts_256_ctxt
195 },
196 {
197 .alg = "aes-xts",
198 .blocksize = 16,
199 .secsize = 512,
200 .blkno = 0xffff,
201 .keylen = 512,
202 .txtlen = sizeof(selftest_aes_xts_512_ptxt),
203 .key = selftest_aes_xts_512_key,
204 .ptxt = selftest_aes_xts_512_ptxt,
205 .ctxt = selftest_aes_xts_512_ctxt
206 }
207 };
208
209 static int cgd_match(device_t, cfdata_t, void *);
210 static void cgd_attach(device_t, device_t, void *);
211 static int cgd_detach(device_t, int);
212 static struct cgd_softc *cgd_spawn(int, device_t *);
213 static int cgd_destroy(device_t);
214
215 /* Internal Functions */
216
217 static int cgd_diskstart(device_t, struct buf *);
218 static void cgdiodone(struct buf *);
219 static int cgd_dumpblocks(device_t, void *, daddr_t, int);
220
221 static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
222 static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
223 static int cgd_ioctl_get(dev_t, void *, struct lwp *);
224 static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
225 struct lwp *);
226 static void cgd_cipher(struct cgd_softc *, void *, void *,
227 size_t, daddr_t, size_t, int);
228
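/* Disk driver callbacks handed to the dk/dksubr framework. */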
229 static struct dkdriver cgddkdriver = {
230 .d_minphys = minphys,
231 .d_open = cgdopen,
232 .d_close = cgdclose,
233 .d_strategy = cgdstrategy,
234 .d_iosize = NULL,
235 .d_diskstart = cgd_diskstart,
236 .d_dumpblocks = cgd_dumpblocks,
237 .d_lastclose = NULL
238 };
239
240 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
241 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
242 extern struct cfdriver cgd_cd;
243
244 /* DIAGNOSTIC and DEBUG definitions */
245
246 #if defined(CGDDEBUG) && !defined(DEBUG)
247 #define DEBUG
248 #endif
249
250 #ifdef DEBUG
251 int cgddebug = 0;
252
253 #define CGDB_FOLLOW 0x1
254 #define CGDB_IO 0x2
255 #define CGDB_CRYPTO 0x4
256
257 #define IFDEBUG(x,y) if (cgddebug & (x)) y
258 #define DPRINTF(x,y) IFDEBUG(x, printf y)
259 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
260
261 static void hexprint(const char *, void *, int);
262
263 #else
264 #define IFDEBUG(x,y)
265 #define DPRINTF(x,y)
266 #define DPRINTF_FOLLOW(y)
267 #endif
268
269 #ifdef DIAGNOSTIC
270 #define DIAGPANIC(x) panic x
271 #define DIAGCONDPANIC(x,y) if (x) panic y
272 #else
273 #define DIAGPANIC(x)
274 #define DIAGCONDPANIC(x,y)
275 #endif
276
277 /* Global variables */
278
279 /* Utility Functions */
280
281 #define CGDUNIT(x) DISKUNIT(x)
282 #define GETCGD_SOFTC(_cs, x, _dv) \
283 if (((_cs) = getcgd_softc(x, &_dv)) == NULL) { \
284 return ENXIO; \
285 }
286
287 /* The code */
288
289 /*
290 * Look up the device and return its softc. If the device doesn't
291 * exist, spawn it.
292 *
293 * In either case, the device is "acquired", and must be "released"
294 * by the caller after it is finished with the softc.
295 */
296 static struct cgd_softc *
297 getcgd_softc(dev_t dev, device_t *self)
298 {
299 int unit = CGDUNIT(dev);
300 struct cgd_softc *sc;
301
302 DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
303
304 *self = device_lookup_acquire(&cgd_cd, unit);
305
306 if (*self == NULL) {
307 sc = cgd_spawn(unit, self);
308 } else {
309 sc = device_private(*self);
310 }
311
312 return sc;
313 }
314
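/*
 * Autoconf glue. cgd instances are pseudo-devices created on demand
 * (see cgd_spawn), so matching always succeeds.
 */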
315 static int
316 cgd_match(device_t self, cfdata_t cfdata, void *aux)
317 {
318
319 return 1;
320 }
321
322 static void
323 cgd_attach(device_t parent, device_t self, void *aux)
324 {
325 struct cgd_softc *sc;
326
327 sc = device_private(self);
328
329 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
330 dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
331 disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
332
333 if (!pmf_device_register(self, NULL, NULL))
334 aprint_error_dev(self,
335 "unable to register power management hooks\n");
336 }
337
338
339 /*
340 * The caller must hold a reference to the device's localcount.
341 */
342 static int
343 cgd_detach(device_t self, int flags)
344 {
345 int ret;
346 const int pmask = 1 << RAW_PART;
347 struct cgd_softc *sc = device_private(self);
348 struct dk_softc *dksc = &sc->sc_dksc;
349
350 if (DK_BUSY(dksc, pmask))
351 return EBUSY;
352
353 if (DK_ATTACHED(dksc) &&
354 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
355 return ret;
356
357 disk_destroy(&dksc->sc_dkdev);
358 mutex_destroy(&sc->sc_lock);
359
360 return 0;
361 }
362
363 void
364 cgdattach(int num)
365 {
366 int error;
367
368 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
369 if (error != 0)
370 aprint_error("%s: unable to register cfattach\n",
371 cgd_cd.cd_name);
372 }
373
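/*
 * Create a pseudo-device instance for the given unit on demand.
 * On success the new device is returned acquired, exactly as
 * device_lookup_acquire() would have returned it.
 */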
374 static struct cgd_softc *
375 cgd_spawn(int unit, device_t *self)
376 {
377 cfdata_t cf;
378 struct cgd_softc *sc;
379
380 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
381 cf->cf_name = cgd_cd.cd_name;
382 cf->cf_atname = cgd_cd.cd_name;
383 cf->cf_unit = unit;
384 cf->cf_fstate = FSTATE_STAR;
385
386 if (config_attach_pseudo(cf) == NULL)
387 return NULL;
388
389 if ((*self = device_lookup_acquire(&cgd_cd, unit)) == NULL)
390 return NULL;
391 else {
392 /*
393 * Note that we return while still holding a reference
394 * to the device!
395 */
396 sc = device_private(*self);
397 return sc;
398 }
399 }
400
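/*
 * Detach the pseudo-device and free the cfdata that cgd_spawn()
 * allocated for it.
 */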
401 static int
402 cgd_destroy(device_t dev)
403 {
404 int error;
405 cfdata_t cf;
406
407 cf = device_cfdata(dev);
408 error = config_detach_release(dev, DETACH_QUIET);
409 if (error == 0)
410 free(cf, M_DEVBUF);
411
412 return error;
413 }
414
415 static int
416 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
417 {
418 device_t self;
419 int error;
420 struct cgd_softc *cs;
421
422 DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
423 GETCGD_SOFTC(cs, dev, self);
424 error = dk_open(&cs->sc_dksc, dev, flags, fmt, l);
425 device_release(self);
426 return error;
427 }
428
429 static int
430 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
431 {
432 int error;
433 device_t self;
434 struct cgd_softc *cs;
435 struct dk_softc *dksc;
436
437 DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
438 GETCGD_SOFTC(cs, dev, self);
439 dksc = &cs->sc_dksc;
440 if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0) {
441 device_release(self);
442 return error;
443 }
444
445 if (!DK_ATTACHED(dksc)) {
446 if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
447 aprint_error_dev(dksc->sc_dev,
448 "unable to detach instance\n");
449 }
450 return error;
451 }
452 device_release(self);
453 return 0;
454 }
455
456 static void
457 cgdstrategy(struct buf *bp)
458 {
459 device_t self;
460 struct cgd_softc *cs = getcgd_softc(bp->b_dev, &self);
461
462 DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
463 (long)bp->b_bcount));
464
465 if (!cs) {
466 bp->b_error = ENXIO;
467 goto bail;
468 }
469
470 /*
471 * Reject transfers with misaligned buffers (reads and writes alike).
472 */
473 if (((uintptr_t)bp->b_data & 3) != 0) {
474 bp->b_error = EINVAL;
475 goto bail;
476 }
477
478 dk_strategy(&cs->sc_dksc, bp);
479 device_release(self);
480 return;
481
482 bail:
483 bp->b_resid = bp->b_bcount;
484 biodone(bp);
485 if (self)
486 device_release(self);
487 return;
488 }
489
490 static int
491 cgdsize(dev_t dev)
492 {
493 int retval;
494 device_t self;
495 struct cgd_softc *cs = getcgd_softc(dev, &self);
496
497 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
498 if (!cs)
499 retval = -1;
500 else
501 retval = dk_size(&cs->sc_dksc, dev);
502
503 if (self != NULL)
device_release(self);
504 return retval;
505 }
506
507 /*
508 * cgd_{get,put}data manage the buffer used to hold the newly
509 * encrypted data. Each device keeps one preallocated buffer so
510 * that at least one transaction can always be in flight; we use
511 * that buffer first so that there is one less piece of malloc'ed
512 * data outstanding at any given point.
513 */
514
515 static void *
516 cgd_getdata(struct dk_softc *dksc, unsigned long size)
517 {
518 struct cgd_softc *cs = (struct cgd_softc *)dksc;
519 void * data = NULL;
520
521 mutex_enter(&cs->sc_lock);
522 if (cs->sc_data_used == 0) {
523 cs->sc_data_used = 1;
524 data = cs->sc_data;
525 }
526 mutex_exit(&cs->sc_lock);
527
528 if (data)
529 return data;
530
531 return malloc(size, M_DEVBUF, M_NOWAIT);
532 }
533
534 static void
535 cgd_putdata(struct dk_softc *dksc, void *data)
536 {
537 struct cgd_softc *cs = (struct cgd_softc *)dksc;
538
539 if (data == cs->sc_data) {
540 mutex_enter(&cs->sc_lock);
541 cs->sc_data_used = 0;
542 mutex_exit(&cs->sc_lock);
543 } else {
544 free(data, M_DEVBUF);
545 }
546 }
547
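/*
 * Start one transfer on behalf of the dk framework: writes are first
 * encrypted into a private buffer, then the (possibly new) buffer is
 * handed to the underlying device via VOP_STRATEGY.
 */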
548 static int
549 cgd_diskstart(device_t dev, struct buf *bp)
550 {
551 struct cgd_softc *cs = device_private(dev);
552 struct dk_softc *dksc = &cs->sc_dksc;
553 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
554 struct buf *nbp;
555 void * addr;
556 void * newaddr;
557 daddr_t bn;
558 struct vnode *vp;
559
560 DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
561
562 bn = bp->b_rawblkno;
563
564 /*
565 * We attempt to allocate all of our resources up front, so that
566 * we can fail quickly if they are unavailable.
567 */
568 nbp = getiobuf(cs->sc_tvn, false);
569 if (nbp == NULL)
570 return EAGAIN;
571
572 /*
573 * If we are writing, then we need to encrypt the outgoing
574 * block into a new block of memory.
575 */
576 newaddr = addr = bp->b_data;
577 if ((bp->b_flags & B_READ) == 0) {
578 newaddr = cgd_getdata(dksc, bp->b_bcount);
579 if (!newaddr) {
580 putiobuf(nbp);
581 return EAGAIN;
582 }
583 cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
584 dg->dg_secsize, CGD_CIPHER_ENCRYPT);
585 }
586
587 nbp->b_data = newaddr;
588 nbp->b_flags = bp->b_flags;
589 nbp->b_oflags = bp->b_oflags;
590 nbp->b_cflags = bp->b_cflags;
591 nbp->b_iodone = cgdiodone;
592 nbp->b_proc = bp->b_proc;
593 nbp->b_blkno = btodb(bn * dg->dg_secsize);
594 nbp->b_bcount = bp->b_bcount;
595 nbp->b_private = bp;
596
597 BIO_COPYPRIO(nbp, bp);
598
599 if ((nbp->b_flags & B_READ) == 0) {
600 vp = nbp->b_vp;
601 mutex_enter(vp->v_interlock);
602 vp->v_numoutput++;
603 mutex_exit(vp->v_interlock);
604 }
605 VOP_STRATEGY(cs->sc_tvn, nbp);
606
607 return 0;
608 }
609
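/*
 * I/O completion handler for transfers on the underlying device:
 * decrypt reads in place, release any temporary write buffer, and
 * complete the original request.
 */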
610 static void
611 cgdiodone(struct buf *nbp)
612 {
613 device_t self;
614 struct buf *obp = nbp->b_private;
615 struct cgd_softc *cs = getcgd_softc(obp->b_dev, &self);
616 struct dk_softc *dksc = &cs->sc_dksc;
617 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
618 daddr_t bn;
619
620 KDASSERT(cs);
621
622 DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
623 DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
624 obp, obp->b_bcount, obp->b_resid));
625 DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
626 " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
627 nbp->b_bcount));
628 if (nbp->b_error != 0) {
629 obp->b_error = nbp->b_error;
630 DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
631 obp->b_error));
632 }
633
634 /* Perform the decryption if we are reading.
635 *
636 * Note: use the block number from nbp, since it is what
637 * we used to encrypt the blocks.
638 */
639
640 if (nbp->b_flags & B_READ) {
641 bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
642 cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
643 bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
644 }
645
646 /* If we allocated memory, free it now... */
647 if (nbp->b_data != obp->b_data)
648 cgd_putdata(dksc, nbp->b_data);
649
650 putiobuf(nbp);
651
652 /* Request is complete for whatever reason */
653 obp->b_resid = 0;
654 if (obp->b_error != 0)
655 obp->b_resid = obp->b_bcount;
656
657 dk_done(dksc, obp);
658 device_release(self);
659
660 dk_start(dksc, NULL);
661 }
662
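/*
 * Crash-dump entry point: encrypt the caller's data into a temporary
 * buffer and pass it to the underlying device's dump routine.
 */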
663 static int
664 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
665 {
666 struct cgd_softc *sc = device_private(dev);
667 struct dk_softc *dksc = &sc->sc_dksc;
668 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
669 size_t nbytes, blksize;
670 void *buf;
671 int error;
672
673 /*
674 * dk_dump gives us units of disklabel sectors. Everything
675 * else in cgd uses units of diskgeom sectors. These had
676 * better agree; otherwise we need to figure out how to convert
677 * between them.
678 */
679 KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
680 "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
681 dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
682 blksize = dg->dg_secsize;
683
684 /*
685 * Compute the number of bytes in this request, which dk_dump
686 * has `helpfully' converted to a number of blocks for us.
687 */
688 nbytes = nblk*blksize;
689
690 /* Try to acquire a buffer to store the ciphertext. */
691 buf = cgd_getdata(dksc, nbytes);
692 if (buf == NULL)
693 /* Out of memory: give up. */
694 return ENOMEM;
695
696 /* Encrypt the caller's data into the temporary buffer. */
697 cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
698
699 /* Pass it on to the underlying disk device. */
700 error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
701
702 /* Release the buffer. */
703 cgd_putdata(dksc, buf);
704
705 /* Return any error from the underlying disk device. */
706 return error;
707 }
708
709 /* XXX: we should probably put these into dksubr.c, mostly */
710 static int
711 cgdread(dev_t dev, struct uio *uio, int flags)
712 {
713 device_t self;
714 int error;
715 struct cgd_softc *cs;
716 struct dk_softc *dksc;
717
718 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
719 (unsigned long long)dev, uio, flags));
720 GETCGD_SOFTC(cs, dev, self);
721 dksc = &cs->sc_dksc;
722 if (!DK_ATTACHED(dksc)) {
723 device_release(self);
724 return ENXIO;
725 }
726 error = physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
727 device_release(self);
728 return error;
729 }
730
731 /* XXX: we should probably put these into dksubr.c, mostly */
732 static int
733 cgdwrite(dev_t dev, struct uio *uio, int flags)
734 {
735 device_t self;
736 int error;
737 struct cgd_softc *cs;
738 struct dk_softc *dksc;
739
740 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
741 GETCGD_SOFTC(cs, dev, self);
742 dksc = &cs->sc_dksc;
743 if (!DK_ATTACHED(dksc)) {
744 device_release(self);
745 return ENXIO;
746 }
747 error = physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
748 device_release(self);
749 return error;
750 }
751
752 static int
753 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
754 {
755 device_t self;
756 struct cgd_softc *cs;
757 struct dk_softc *dksc;
758 int part = DISKPART(dev);
759 int pmask = 1 << part;
760 int error = 0;
761
762 DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
763 dev, cmd, data, flag, l));
764
765 switch (cmd) {
766 case CGDIOCGET:
767 return cgd_ioctl_get(dev, data, l);
768 case CGDIOCSET:
769 case CGDIOCCLR:
770 if ((flag & FWRITE) == 0)
771 return EBADF;
772 /* FALLTHROUGH */
773 default:
774 GETCGD_SOFTC(cs, dev, self);
775 dksc = &cs->sc_dksc;
776 break;
777 }
778
779 switch (cmd) {
780 case CGDIOCSET:
781 if (DK_ATTACHED(dksc))
782 error = EBUSY;
783 else
784 error = cgd_ioctl_set(cs, data, l);
785 break;
786 case CGDIOCCLR:
787 if (DK_BUSY(&cs->sc_dksc, pmask))
788 error = EBUSY;
789 else
790 error = cgd_ioctl_clr(cs, l);
791 break;
792 case DIOCGCACHE:
793 case DIOCCACHESYNC:
794 if (!DK_ATTACHED(dksc)) {
795 error = ENOENT;
796 break;
797 }
798 /*
799 * We pass this call down to the underlying disk.
800 */
801 error = VOP_IOCTL(cs->sc_tvn, cmd, data, flag,
802 l->l_cred);
break;
803 case DIOCGSTRATEGY:
804 case DIOCSSTRATEGY:
805 if (!DK_ATTACHED(dksc)) {
806 error = ENOENT;
807 break;
808 }
809 /*FALLTHROUGH*/
810 default:
811 error = dk_ioctl(dksc, dev, cmd, data, flag, l);
812 break;
813 case CGDIOCGET:
814 KASSERT(0);
815 error = EINVAL;
816 break;
817 }
818 device_release(self);
819 return error;
820 }
821
822 static int
823 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
824 {
825 device_t self;
826 int error;
827 struct cgd_softc *cs;
828
829 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
830 dev, blkno, va, (unsigned long)size));
831 GETCGD_SOFTC(cs, dev, self);
832 error = dk_dump(&cs->sc_dksc, dev, blkno, va, size);
833 device_release(self);
834 return error;
835 }
836
837 /*
838 * XXXrcd:
839 * for now we hardcode the maximum key length.
840 */
841 #define MAX_KEYSIZE 1024
842
843 static const struct {
844 const char *n;
845 int v;
846 int d;
847 } encblkno[] = {
848 { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
849 { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
850 { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
851 };
852
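/*
 * Handle CGDIOCSET: look up the underlying device, copy in the
 * algorithm name, IV method and key from userland, initialize the
 * cipher, and attach the resulting disk.
 */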
853 /* ARGSUSED */
854 static int
855 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
856 {
857 struct cgd_ioctl *ci = data;
858 struct vnode *vp;
859 int ret;
860 size_t i;
861 size_t keybytes; /* key length in bytes */
862 const char *cp;
863 struct pathbuf *pb;
864 char *inbuf;
865 struct dk_softc *dksc = &cs->sc_dksc;
866
867 cp = ci->ci_disk;
868
869 ret = pathbuf_copyin(ci->ci_disk, &pb);
870 if (ret != 0) {
871 return ret;
872 }
873 ret = dk_lookup(pb, l, &vp);
874 pathbuf_destroy(pb);
875 if (ret != 0) {
876 return ret;
877 }
878
879 inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
880
881 if ((ret = cgdinit(cs, cp, vp, l)) != 0)
882 goto bail;
883
884 (void)memset(inbuf, 0, MAX_KEYSIZE);
885 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
886 if (ret)
887 goto bail;
888 cs->sc_cfuncs = cryptfuncs_find(inbuf);
889 if (!cs->sc_cfuncs) {
890 ret = EINVAL;
891 goto bail;
892 }
893
894 (void)memset(inbuf, 0, MAX_KEYSIZE);
895 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
896 if (ret)
897 goto bail;
898
899 for (i = 0; i < __arraycount(encblkno); i++)
900 if (strcmp(encblkno[i].n, inbuf) == 0)
901 break;
902
903 if (i == __arraycount(encblkno)) {
904 ret = EINVAL;
905 goto bail;
906 }
907
908 keybytes = ci->ci_keylen / 8 + 1;
909 if (keybytes > MAX_KEYSIZE) {
910 ret = EINVAL;
911 goto bail;
912 }
913
914 (void)memset(inbuf, 0, MAX_KEYSIZE);
915 ret = copyin(ci->ci_key, inbuf, keybytes);
916 if (ret)
917 goto bail;
918
919 cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
920 cs->sc_cdata.cf_mode = encblkno[i].v;
921 cs->sc_cdata.cf_keylen = ci->ci_keylen;
922 cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
923 &cs->sc_cdata.cf_blocksize);
924 if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
925 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
926 cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
927 cs->sc_cdata.cf_priv = NULL;
928 }
929
930 /*
931 * The blocksize is supposed to be in bytes. Unfortunately, it was
932 * originally expressed in bits. For compatibility we maintain encblkno
933 * and encblkno8.
934 */
935 cs->sc_cdata.cf_blocksize /= encblkno[i].d;
936 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
937 if (!cs->sc_cdata.cf_priv) {
938 ret = EINVAL; /* XXX is this the right error? */
939 goto bail;
940 }
941 free(inbuf, M_TEMP);
942
943 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
944
945 cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
946 cs->sc_data_used = 0;
947
948 /* Attach the disk. */
949 dk_attach(dksc);
950 disk_attach(&dksc->sc_dkdev);
951
952 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
953
954 /* Discover wedges on this disk. */
955 dkwedge_discover(&dksc->sc_dkdev);
956
957 return 0;
958
959 bail:
960 free(inbuf, M_TEMP);
961 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
962 return ret;
963 }
964
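/*
 * Handle CGDIOCCLR: tear down the configuration, closing the
 * underlying device and destroying the cipher state.
 */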
965 /* ARGSUSED */
966 static int
967 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
968 {
969 struct dk_softc *dksc = &cs->sc_dksc;
970
971 if (!DK_ATTACHED(dksc))
972 return ENXIO;
973
974 /* Delete all of our wedges. */
975 dkwedge_delall(&dksc->sc_dkdev);
976
977 /* Kill off any queued buffers. */
978 dk_drain(dksc);
979 bufq_free(dksc->sc_bufq);
980
981 (void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
982 cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
983 free(cs->sc_tpath, M_DEVBUF);
984 free(cs->sc_data, M_DEVBUF);
985 cs->sc_data_used = 0;
986 dk_detach(dksc);
987 disk_detach(&dksc->sc_dkdev);
988
989 return 0;
990 }
991
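/*
 * Handle CGDIOCGET: report the current configuration of a unit back
 * to userland.
 */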
992 static int
993 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
994 {
995 device_t self;
996 struct cgd_softc *cs = getcgd_softc(dev, &self);
997 struct cgd_user *cgu;
998 int unit;
999 struct dk_softc *dksc = &cs->sc_dksc;
1000
1001 unit = CGDUNIT(dev);
1002 cgu = (struct cgd_user *)data;

if (cs == NULL)
return ENXIO;
1003
1004 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1005 dev, unit, data, l));
1006
1007 if (cgu->cgu_unit == -1)
1008 cgu->cgu_unit = unit;
1009
1010 if (cgu->cgu_unit < 0) {
1011 device_release(self);
1012 return EINVAL; /* XXX: should this be ENXIO? */
1013 }
1014
1015 /*
1016 * XXX This appears to be redundant, given the initialization
1017 * XXX when it was declared. Leave it for now, but don't
1018 * XXX take an extra reference to the device!
1019 */
1020 cs = device_lookup_private(&cgd_cd, unit);
1021 if (cs == NULL || !DK_ATTACHED(dksc)) {
1022 cgu->cgu_dev = 0;
1023 cgu->cgu_alg[0] = '\0';
1024 cgu->cgu_blocksize = 0;
1025 cgu->cgu_mode = 0;
1026 cgu->cgu_keylen = 0;
1027 } else {
1029 cgu->cgu_dev = cs->sc_tdev;
1030 strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
1031 sizeof(cgu->cgu_alg));
1032 cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
1033 cgu->cgu_mode = cs->sc_cdata.cf_mode;
1034 cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
1035 }
1036 device_release(self);
1037 return 0;
1038 }
1039
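/*
 * Record the path and dev_t of the underlying device, fetch its size,
 * and construct a synthetic geometry for the new disk.
 */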
1040 static int
1041 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
1042 struct lwp *l)
1043 {
1044 struct disk_geom *dg;
1045 int ret;
1046 char *tmppath;
1047 uint64_t psize;
1048 unsigned secsize;
1049 struct dk_softc *dksc = &cs->sc_dksc;
1050
1051 cs->sc_tvn = vp;
1052 cs->sc_tpath = NULL;
1053
1054 tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1055 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
1056 if (ret)
1057 goto bail;
1058 cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
1059 memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
1060
1061 cs->sc_tdev = vp->v_rdev;
1062
1063 if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1064 goto bail;
1065
1066 if (psize == 0) {
1067 ret = ENODEV;
1068 goto bail;
1069 }
1070
1071 /*
1072 * XXX here we should probe the underlying device. If we
1073 * are accessing a partition of type RAW_PART, then
1074 * we should populate our initial geometry with the
1075 * geometry that we discover from the device.
1076 */
1077 dg = &dksc->sc_dkdev.dk_geom;
1078 memset(dg, 0, sizeof(*dg));
1079 dg->dg_secperunit = psize;
1080 dg->dg_secsize = secsize;
1081 dg->dg_ntracks = 1;
1082 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1083 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
1084
1085 bail:
1086 free(tmppath, M_TEMP);
1087 if (ret && cs->sc_tpath)
1088 free(cs->sc_tpath, M_DEVBUF);
1089 return ret;
1090 }
1091
1092 /*
1093 * Our generic cipher entry point. This takes care of the
1094 * IV mode and passes off the work to the specific cipher.
1095 * We implement here the IV method ``encrypted block
1096 * number''.
1097 *
1098 * XXXrcd: for now we rely on our own crypto framework defined
1099 * in dev/cgd_crypto.c. This will change when we
1100 * get a generic kernel crypto framework.
1101 */
1102
1103 static void
1104 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1105 {
1106 int i;
1107
1108 /* Set up the blkno in blkno_buf. Here we do not care much
1109 * about the final layout of the information as long as we
1110 * can guarantee that each sector will have a different IV
1111 * and that the endianness of the machine will not affect
1112 * the representation that we have chosen.
1113 *
1114 * We choose this representation, because it does not rely
1115 * on the size of buf (which is the blocksize of the cipher),
1116 * but allows daddr_t to grow without breaking existing
1117 * disks.
1118 *
1119 * Note that blkno2blkno_buf does not take a size as input,
1120 * and hence must be called on a pre-zeroed buffer of length
1121 * greater than or equal to sizeof(daddr_t).
1122 */
1123 for (i=0; i < sizeof(daddr_t); i++) {
1124 *sbuf++ = blkno & 0xff;
1125 blkno >>= 8;
1126 }
1127 }
1128
1129 static void
1130 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
1131 size_t len, daddr_t blkno, size_t secsize, int dir)
1132 {
1133 char *dst = dstv;
1134 char *src = srcv;
1135 cfunc_cipher_prep *ciprep = cs->sc_cfuncs->cf_cipher_prep;
1136 cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
1137 struct uio dstuio;
1138 struct uio srcuio;
1139 struct iovec dstiov[2];
1140 struct iovec srciov[2];
1141 size_t blocksize = cs->sc_cdata.cf_blocksize;
1142 size_t todo;
1143 char blkno_buf[CGD_MAXBLOCKSIZE], *iv;
1144
1145 DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
1146
1147 DIAGCONDPANIC(len % blocksize != 0,
1148 ("cgd_cipher: len %% blocksize != 0"));
1149
1150 /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
1151 DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
1152 ("cgd_cipher: sizeof(daddr_t) > blocksize"));
1153
1154 DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
1155 ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));
1156
1157 dstuio.uio_iov = dstiov;
1158 dstuio.uio_iovcnt = 1;
1159
1160 srcuio.uio_iov = srciov;
1161 srcuio.uio_iovcnt = 1;
1162
1163 for (; len > 0; len -= todo) {
1164 todo = MIN(len, secsize);
1165
1166 dstiov[0].iov_base = dst;
1167 srciov[0].iov_base = src;
1168 dstiov[0].iov_len = todo;
1169 srciov[0].iov_len = todo;
1170
1171 memset(blkno_buf, 0x0, blocksize);
1172 blkno2blkno_buf(blkno_buf, blkno);
1173 IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
1174 blkno_buf, blocksize));
1175
1176 /*
1177 * Compute an initial IV. All ciphers
1178 * can convert blkno_buf in-place.
1179 */
1180 iv = blkno_buf;
1181 ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
1182 IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));
1183
1184 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);
1185
1186 dst += todo;
1187 src += todo;
1188 blkno++;
1189 }
1190 }
1191
1192 #ifdef DEBUG
1193 static void
1194 hexprint(const char *start, void *buf, int len)
1195 {
1196 char *c = buf;
1197
1198 DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
1199 printf("%s: len=%06d 0x", start, len);
1200 while (len--)
1201 printf("%02x", (unsigned char) *c++);
1202 }
1203 #endif
1204
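/*
 * Run the known-answer tests from selftests[] and panic if the cipher
 * implementations do not reproduce the expected ciphertext/plaintext.
 */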
1205 static void
1206 selftest(void)
1207 {
1208 struct cgd_softc cs;
1209 void *buf;
1210
1211 printf("running cgd selftest ");
1212
1213 for (size_t i = 0; i < __arraycount(selftests); i++) {
1214 const char *alg = selftests[i].alg;
1215 const uint8_t *key = selftests[i].key;
1216 int keylen = selftests[i].keylen;
1217 int txtlen = selftests[i].txtlen;
1218
1219 printf("%s-%d ", alg, keylen);
1220
1221 memset(&cs, 0, sizeof(cs));
1222
1223 cs.sc_cfuncs = cryptfuncs_find(alg);
1224 if (cs.sc_cfuncs == NULL)
1225 panic("%s not implemented", alg);
1226
1227 cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1228 cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1229 cs.sc_cdata.cf_keylen = keylen;
1230
1231 cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen,
1232 key, &cs.sc_cdata.cf_blocksize);
1233 if (cs.sc_cdata.cf_priv == NULL)
1234 panic("cf_priv is NULL");
1235 if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1236 panic("bad block size %zu", cs.sc_cdata.cf_blocksize);
1237
1238 cs.sc_cdata.cf_blocksize /= 8;
1239
1240 buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
1241 memcpy(buf, selftests[i].ptxt, txtlen);
1242
1243 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
1244 selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1245 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1246 panic("encryption is broken");
1247
1248 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
1249 selftests[i].secsize, CGD_CIPHER_DECRYPT);
1250 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1251 panic("decryption is broken");
1252
1253 free(buf, M_DEVBUF);
1254 cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv);
1255 }
1256
1257 printf("done\n");
1258 }
1259
1260 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr");
1261
1262 #ifdef _MODULE
1263 CFDRIVER_DECL(cgd, DV_DISK, NULL);
1264
1265 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
1266 #endif
1267
1268 static int
1269 cgd_modcmd(modcmd_t cmd, void *arg)
1270 {
1271 int error = 0;
1272
1273 switch (cmd) {
1274 case MODULE_CMD_INIT:
1275 selftest();
1276 #ifdef _MODULE
1277 error = config_cfdriver_attach(&cgd_cd);
1278 if (error)
1279 break;
1280
1281 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1282 if (error) {
1283 config_cfdriver_detach(&cgd_cd);
1284 aprint_error("%s: unable to register cfattach for "
1285 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1286 break;
1287 }
1288 /*
1289 * Attach the {b,c}devsw's
1290 */
1291 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1292 &cgd_cdevsw, &cgd_cmajor);
1293
1294 /*
1295 * If devsw_attach fails, remove from autoconf database
1296 */
1297 if (error) {
1298 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1299 config_cfdriver_detach(&cgd_cd);
1300 aprint_error("%s: unable to attach %s devsw, "
1301 "error %d", __func__, cgd_cd.cd_name, error);
1302 break;
1303 }
1304 #endif
1305 break;
1306
1307 case MODULE_CMD_FINI:
1308 #ifdef _MODULE
1309 /*
1310 * Remove {b,c}devsw's
1311 */
1312 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1313
1314 /*
1315 * Now remove device from autoconf database
1316 */
1317 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1318 if (error) {
1319 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1320 &cgd_cdevsw, &cgd_cmajor);
1321 aprint_error("%s: failed to detach %s cfattach, "
1322 "error %d\n", __func__, cgd_cd.cd_name, error);
1323 break;
1324 }
1325 error = config_cfdriver_detach(&cgd_cd);
1326 if (error) {
1327 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1328 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1329 &cgd_cdevsw, &cgd_cmajor);
1330 aprint_error("%s: failed to detach %s cfdriver, "
1331 "error %d\n", __func__, cgd_cd.cd_name, error);
1332 break;
1333 }
1334 #endif
1335 break;
1336
1337 case MODULE_CMD_STAT:
1338 error = ENOTTY;
1339 break;
1340 default:
1341 error = ENOTTY;
1342 break;
1343 }
1344
1345 return error;
1346 }
1347