/* $NetBSD: cgd.c,v 1.114.4.1 2017/04/27 05:36:35 pgoyette Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.114.4.1 2017/04/27 05:36:35 pgoyette Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>		/* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/localcount.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h>	/* for v_rdev */

#include "ioconf.h"

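/*
 * Parameters for one known-answer self-test: cipher name, key, and the
 * expected plaintext/ciphertext for a single disk block.  Used by
 * selftest() below at module initialization time.
 */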
struct selftest_params {
	const char *alg;
	int blocksize;	/* number of bytes */
	int secsize;
	daddr_t blkno;
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;
	const uint8_t *ptxt;
	const uint8_t *ctxt;
};

/* Entry Point Functions */

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	DEVSW_MODULE_INIT
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw cgd_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};

static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};

/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};

const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	}
};

static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int, device_t *);
static int	cgd_destroy(device_t);

/* Internal Functions */

static int	cgd_diskstart(device_t, struct buf *);
static void	cgdiodone(struct buf *);
static int	cgd_dumpblocks(device_t, void *, daddr_t, int);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			size_t, daddr_t, size_t, int);

static struct dkdriver cgddkdriver = {
	.d_minphys = minphys,
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_iosize = NULL,
	.d_diskstart = cgd_diskstart,
	.d_dumpblocks = cgd_dumpblocks,
	.d_lastclose = NULL
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
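/*
 * Look up the softc for a minor device, acquiring a reference to the
 * autoconf device in the process; the caller must drop that reference
 * with device_release() when done.  On failure the enclosing function
 * returns ENXIO.
 */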
#define GETCGD_SOFTC(_cs, x, _dv)				\
	if (((_cs) = getcgd_softc(x, &_dv)) == NULL) {		\
		return ENXIO;					\
	}

/* The code */

/*
 * Look up the device and return its softc.  If the device doesn't
 * exist, spawn it.
 *
 * In either case, the device is "acquired", and must be "released"
 * by the caller after it is finished with the softc.
 */
static struct cgd_softc *
getcgd_softc(dev_t dev, device_t *self)
{
	int unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	*self = device_lookup_acquire(&cgd_cd, unit);

	if (*self == NULL) {
		sc = cgd_spawn(unit, self);
	} else {
		sc = device_private(*self);
	}

	return sc;
}

static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc;

	sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}

/*
 * The caller must hold a reference to the device's localcount; that
 * reference is released here only if the detach succeeds.
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	mutex_destroy(&sc->sc_lock);

	device_release(self);
	return 0;
}

void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}

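/*
 * Create a pseudo-device instance for the given unit on demand (for
 * example, the first time /dev/cgdN is opened) and return its softc.
 * On success a reference to the new device is held on behalf of the
 * caller.
 */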
static struct cgd_softc *
cgd_spawn(int unit, device_t *self)
{
	cfdata_t cf;
	struct cgd_softc *sc;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	if (config_attach_pseudo(cf) == NULL)
		return NULL;

	*self = device_lookup_acquire(&cgd_cd, unit);
	if (*self == NULL)
		return NULL;
	else {
		/*
		 * Note that we return while still holding a reference
		 * to the device!
		 */
		sc = device_private(*self);
		return sc;
	}
}

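/*
 * Tear down a pseudo-device instance created by cgd_spawn(): detach it
 * from autoconf and free the cfdata that cgd_spawn() allocated.
 */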
static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error == 0)
		free(cf, M_DEVBUF);

	return error;
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	device_t self;
	int error;
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev, self);
	error = dk_open(&cs->sc_dksc, dev, flags, fmt, l);
	device_release(self);
	return error;
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	device_t self;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev, self);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0) {
		device_release(self);
		return error;
	}

	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	} else
		device_release(self);
	return 0;
}

static void
cgdstrategy(struct buf *bp)
{
	device_t self;
	struct cgd_softc *cs = getcgd_softc(bp->b_dev, &self);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	if (!cs) {
		bp->b_error = ENXIO;
		goto bail;
	}

	/*
	 * Reject unaligned buffers; b_data must be 32-bit aligned.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&cs->sc_dksc, bp);
	device_release(self);
	return;

bail:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	if (self)
		device_release(self);
	return;
}

static int
cgdsize(dev_t dev)
{
	int retval;
	device_t self;
	struct cgd_softc *cs = getcgd_softc(dev, &self);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		retval = -1;
	else
		retval = dk_size(&cs->sc_dksc, dev);

	if (self)
		device_release(self);
	return retval;
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can always have at least one transaction in flight.  We use
 * this buffer first so that we have one less piece of malloc'ed
 * data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	void *data = NULL;

	mutex_enter(&cs->sc_lock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	mutex_exit(&cs->sc_lock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;

	if (data == cs->sc_data) {
		mutex_enter(&cs->sc_lock);
		cs->sc_data_used = 0;
		mutex_exit(&cs->sc_lock);
	} else {
		free(data, M_DEVBUF);
	}
}

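/*
 * Start routine, called via the dkdriver d_diskstart hook with the
 * next buffer to process: for writes the data is first encrypted into
 * a separate buffer, then the request is passed to the underlying
 * device.
 */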
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *cs = device_private(dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct buf *nbp;
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			return EAGAIN;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    dg->dg_secsize, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
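	/* b_rawblkno counts device sectors; b_blkno is in DEV_BSIZE units. */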
	nbp->b_blkno = btodb(bn * dg->dg_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);

	return 0;
}

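/*
 * Completion handler for the I/O issued in cgd_diskstart(): decrypt
 * the data in place on reads, release the temporary buffer used for
 * writes, and finish the original request.
 */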
static void
cgdiodone(struct buf *nbp)
{
	device_t self;
	struct buf *obp = nbp->b_private;
	struct cgd_softc *cs = getcgd_softc(obp->b_dev, &self);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/*
	 * Perform the decryption if we are reading.
	 *
	 * Note: use the block number from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
	}

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	device_release(self);

	dk_start(dksc, NULL);
}

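/*
 * Crash-dump handler, reached via cgddump()/dk_dump(): encrypt the
 * dump pages into the per-device buffer and hand them to the
 * underlying disk's dump routine.
 */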
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext. */
	buf = cgd_getdata(dksc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up. */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer. */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device. */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer. */
	cgd_putdata(dksc, buf);

	/* Return any error from the underlying disk device. */
	return error;
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	device_t self;
	int error;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev, self);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc)) {
		device_release(self);
		return ENXIO;
	}
	error = physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
	device_release(self);
	return error;
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	device_t self;
	int error;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev, self);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc)) {
		device_release(self);
		return ENXIO;
	}
	error = physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
	device_release(self);
	return error;
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	device_t self;
	struct cgd_softc *cs;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error = 0;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev, self);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(cs, data, l);
		break;
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		if (!DK_ATTACHED(dksc))
			return ENOENT;
		/*
		 * We pass this call down to the underlying disk.
		 */
		else
			error = VOP_IOCTL(cs->sc_tvn, cmd, data, flag,
			    l->l_cred);
		break;
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		KASSERT(0);
		error = EINVAL;
		break;
	}
	device_release(self);
	return error;
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	device_t self;
	int error;
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev, self);
	error = dk_dump(&cs->sc_dksc, dev, blkno, va, size);
	device_release(self);
	return error;
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

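/*
 * Table of supported IV methods: "n" is the name passed in from
 * userland, "v" the corresponding cipher mode, and "d" a divisor
 * applied to the blocksize for backwards compatibility (see the
 * comment in cgd_ioctl_set() about blocksizes in bits vs. bytes).
 */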
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;		/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	struct dk_softc *dksc = &cs->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	device_t self;
	struct cgd_softc *cs = getcgd_softc(dev, &self);
	struct cgd_user *cgu;
	int unit;
	struct dk_softc *dksc;

	if (cs == NULL)
		return ENXIO;
	dksc = &cs->sc_dksc;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0) {
		device_release(self);
		return EINVAL;	/* XXX: should this be ENXIO? */
	}

	/*
	 * XXX This appears to be redundant, given the initialization
	 * XXX when it was declared.  Leave it for now, but don't
	 * XXX take an extra reference to the device!
	 */
	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || !DK_ATTACHED(dksc)) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	} else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	device_release(self);
	return 0;
}

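/*
 * Initialize the softc for a newly configured instance: record the
 * target vnode and path, and construct an initial geometry from the
 * size of the underlying device.
 */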
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	cs->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/*
	 * Set up the blkno in blkno_buf; here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
	size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher_prep *ciprep = cs->sc_cfuncs->cf_cipher_prep;
	cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = cs->sc_cdata.cf_blocksize;
	size_t todo;
	char blkno_buf[CGD_MAXBLOCKSIZE], *iv;

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
	    ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 1;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 1;

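	/*
	 * Process the request one disk sector at a time; each sector
	 * gets an IV derived from its own block number, so blkno is
	 * advanced per sector.
	 */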
	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[0].iov_base = dst;
		srciov[0].iov_base = src;
		dstiov[0].iov_len = todo;
		srciov[0].iov_len = todo;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Compute an initial IV. All ciphers
		 * can convert blkno_buf in-place.
		 */
		iv = blkno_buf;
		ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));

		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

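/*
 * Encrypt and decrypt the known-answer vectors in selftests[] and
 * panic if the results do not match; called from module init.
 */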
static void
selftest(void)
{
	struct cgd_softc cs;
	void *buf;

	printf("running cgd selftest ");

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		printf("%s-%d ", alg, keylen);

		memset(&cs, 0, sizeof(cs));

		cs.sc_cfuncs = cryptfuncs_find(alg);
		if (cs.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		cs.sc_cdata.cf_keylen = keylen;

		cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen,
		    key, &cs.sc_cdata.cf_blocksize);
		if (cs.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", cs.sc_cdata.cf_blocksize);

		cs.sc_cdata.cf_blocksize /= 8;

		buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
		memcpy(buf, selftests[i].ptxt, txtlen);

		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
			panic("encryption is broken");

		cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
		    selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
			panic("decryption is broken");

		free(buf, M_DEVBUF);
		cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv);
	}

	printf("done\n");
}

MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif

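/*
 * Module control: on init run the selftest and (for a loadable module)
 * register the cfdriver, cfattach and devsw entries; on fini undo the
 * registrations in reverse order.
 */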
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
		selftest();
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach for "
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		/*
		 * Attach the {b,c}devsw's
		 */
		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
		    &cgd_cdevsw, &cgd_cmajor);

		/*
		 * If devsw_attach fails, remove from autoconf database
		 */
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to attach %s devsw, "
			    "error %d", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		/*
		 * Remove {b,c}devsw's
		 */
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);

		/*
		 * Now remove device from autoconf database
		 */
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfattach, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
		error = config_cfdriver_detach(&cgd_cd);
		if (error) {
			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
			    &cgd_cdevsw, &cgd_cmajor);
			aprint_error("%s: failed to detach %s cfdriver, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_STAT:
		error = ENOTTY;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return error;
}