/*	$NetBSD: cgd.c,v 1.108.2.20 2017/01/07 08:56:31 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.108.2.20 2017/01/07 08:56:31 pgoyette Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 #include <sys/localcount.h>
55
56 #include <dev/dkvar.h>
57 #include <dev/cgdvar.h>
58
59 #include <miscfs/specfs/specdev.h> /* for v_rdev */
60
61 #include "ioconf.h"
62
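/*
 * Each self-test entry describes one known-answer test: encrypting
 * txtlen bytes of ptxt with key at block number blkno must produce ctxt.
 */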
63 struct selftest_params {
64 const char *alg;
65 int blocksize; /* number of bytes */
66 int secsize;
67 daddr_t blkno;
68 int keylen; /* number of bits */
69 int txtlen; /* number of bytes */
70 const uint8_t *key;
71 const uint8_t *ptxt;
72 const uint8_t *ctxt;
73 };
74
75 /* Entry Point Functions */
76
77 static dev_type_open(cgdopen);
78 static dev_type_close(cgdclose);
79 static dev_type_read(cgdread);
80 static dev_type_write(cgdwrite);
81 static dev_type_ioctl(cgdioctl);
82 static dev_type_strategy(cgdstrategy);
83 static dev_type_dump(cgddump);
84 static dev_type_size(cgdsize);
85
86 const struct bdevsw cgd_bdevsw = {
87 DEVSW_MODULE_INIT
88 .d_open = cgdopen,
89 .d_close = cgdclose,
90 .d_strategy = cgdstrategy,
91 .d_ioctl = cgdioctl,
92 .d_dump = cgddump,
93 .d_psize = cgdsize,
94 .d_discard = nodiscard,
95 .d_flag = D_DISK
96 };
97
98 const struct cdevsw cgd_cdevsw = {
99 DEVSW_MODULE_INIT
100 .d_open = cgdopen,
101 .d_close = cgdclose,
102 .d_read = cgdread,
103 .d_write = cgdwrite,
104 .d_ioctl = cgdioctl,
105 .d_stop = nostop,
106 .d_tty = notty,
107 .d_poll = nopoll,
108 .d_mmap = nommap,
109 .d_kqfilter = nokqfilter,
110 .d_discard = nodiscard,
111 .d_flag = D_DISK
112 };
113
114 /*
115 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
116 */
117 static const uint8_t selftest_aes_xts_256_ptxt[64] = {
118 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
119 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
120 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
121 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
122 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
123 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
124 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
125 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
126 };
127
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
129 0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
130 0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
131 0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
132 0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
133 0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
134 0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
135 0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
136 0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
137 };
138
139 static const uint8_t selftest_aes_xts_256_key[33] = {
140 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
141 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
142 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
143 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
144 0
145 };
146
147 /*
148 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
149 */
150 static const uint8_t selftest_aes_xts_512_ptxt[64] = {
151 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
152 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
153 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
154 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
155 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
156 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
157 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
158 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
159 };
160
161 static const uint8_t selftest_aes_xts_512_ctxt[64] = {
162 0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
163 0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
164 0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
165 0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
166 0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
167 0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
168 0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
169 0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
170 };
171
172 static const uint8_t selftest_aes_xts_512_key[65] = {
173 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
174 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
175 0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
176 0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
177 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
178 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
179 0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
180 0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
181 0
182 };
183
184 const struct selftest_params selftests[] = {
185 {
186 .alg = "aes-xts",
187 .blocksize = 16,
188 .secsize = 512,
189 .blkno = 1,
190 .keylen = 256,
191 .txtlen = sizeof(selftest_aes_xts_256_ptxt),
192 .key = selftest_aes_xts_256_key,
193 .ptxt = selftest_aes_xts_256_ptxt,
194 .ctxt = selftest_aes_xts_256_ctxt
195 },
196 {
197 .alg = "aes-xts",
198 .blocksize = 16,
199 .secsize = 512,
200 .blkno = 0xffff,
201 .keylen = 512,
202 .txtlen = sizeof(selftest_aes_xts_512_ptxt),
203 .key = selftest_aes_xts_512_key,
204 .ptxt = selftest_aes_xts_512_ptxt,
205 .ctxt = selftest_aes_xts_512_ctxt
206 }
207 };
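/*
 * selftest() below runs these vectors at module initialization time and
 * panics if either the encryption or the decryption result is wrong.
 */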
208
209 static int cgd_match(device_t, cfdata_t, void *);
210 static void cgd_attach(device_t, device_t, void *);
211 static int cgd_detach(device_t, int);
212 static struct cgd_softc *cgd_spawn(int, device_t *);
213 static int cgd_destroy(device_t);
214
215 /* Internal Functions */
216
217 static int cgd_diskstart(device_t, struct buf *);
218 static void cgdiodone(struct buf *);
219 static int cgd_dumpblocks(device_t, void *, daddr_t, int);
220
221 static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
222 static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
223 static int cgd_ioctl_get(dev_t, void *, struct lwp *);
224 static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
225 struct lwp *);
226 static void cgd_cipher(struct cgd_softc *, void *, void *,
227 size_t, daddr_t, size_t, int);
228
229 static struct dkdriver cgddkdriver = {
230 .d_minphys = minphys,
231 .d_open = cgdopen,
232 .d_close = cgdclose,
233 .d_strategy = cgdstrategy,
234 .d_iosize = NULL,
235 .d_diskstart = cgd_diskstart,
236 .d_dumpblocks = cgd_dumpblocks,
237 .d_lastclose = NULL
238 };
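/*
 * This driver vector is passed to disk_init() in cgd_attach(); the common
 * dk code calls back into cgd_diskstart() for I/O and cgd_dumpblocks()
 * for crash dumps.
 */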
239
240 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
241 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
242 extern struct cfdriver cgd_cd;
243
244 /* DIAGNOSTIC and DEBUG definitions */
245
246 #if defined(CGDDEBUG) && !defined(DEBUG)
247 #define DEBUG
248 #endif
249
250 #ifdef DEBUG
251 int cgddebug = 0;
252
253 #define CGDB_FOLLOW 0x1
254 #define CGDB_IO 0x2
255 #define CGDB_CRYPTO 0x4
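/*
 * Set cgddebug to a bitwise OR of the CGDB_* flags above to enable the
 * corresponding DPRINTF output.
 */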
256
257 #define IFDEBUG(x,y) if (cgddebug & (x)) y
258 #define DPRINTF(x,y) IFDEBUG(x, printf y)
259 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
260
261 static void hexprint(const char *, void *, int);
262
263 #else
264 #define IFDEBUG(x,y)
265 #define DPRINTF(x,y)
266 #define DPRINTF_FOLLOW(y)
267 #endif
268
269 #ifdef DIAGNOSTIC
270 #define DIAGPANIC(x) panic x
271 #define DIAGCONDPANIC(x,y) if (x) panic y
272 #else
273 #define DIAGPANIC(x)
274 #define DIAGCONDPANIC(x,y)
275 #endif
276
277 /* Global variables */
278
279 /* Utility Functions */
280
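/*
 * GETCGD_SOFTC() looks up (and, if necessary, spawns) the unit for `x'
 * and acquires a reference on its device_t; the caller must drop that
 * reference with device_release() when done with the softc.
 */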
281 #define CGDUNIT(x) DISKUNIT(x)
282 #define GETCGD_SOFTC(_cs, x, _dv) \
283 if (((_cs) = getcgd_softc(x, &_dv)) == NULL) { \
284 return ENXIO; \
285 }
286
287 /* The code */
288
289 /*
 * Look up the device and return its softc.  If the device doesn't
291 * exist, spawn it.
292 *
293 * In either case, the device is "acquired", and must be "released"
294 * by the caller after it is finished with the softc.
295 */
296 static struct cgd_softc *
297 getcgd_softc(dev_t dev, device_t *self)
298 {
299 int unit = CGDUNIT(dev);
300 struct cgd_softc *sc;
301
302 DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
303
304 *self = device_lookup_acquire(&cgd_cd, unit);
305
306 if (*self == NULL) {
307 sc = cgd_spawn(unit, self);
308 } else {
309 sc = device_private(*self);
310 }
311
312 return sc;
313 }
314
315 static int
316 cgd_match(device_t self, cfdata_t cfdata, void *aux)
317 {
318
319 return 1;
320 }
321
322 static void
323 cgd_attach(device_t parent, device_t self, void *aux)
324 {
325 struct cgd_softc *sc;
326
327 sc = device_private(self);
328
329 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
330 dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
331 disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
332
333 if (!pmf_device_register(self, NULL, NULL))
334 aprint_error_dev(self,
335 "unable to register power management hooks\n");
336 }
337
338
339 /*
 * The caller must hold a reference to the device's localcount.  The
 * reference is released only if the device is successfully detached.
342 */
343 static int
344 cgd_detach(device_t self, int flags)
345 {
346 int ret;
347 const int pmask = 1 << RAW_PART;
348 struct cgd_softc *sc = device_private(self);
349 struct dk_softc *dksc = &sc->sc_dksc;
350
351 if (DK_BUSY(dksc, pmask))
352 return EBUSY;
353
354 if (DK_ATTACHED(dksc) &&
355 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
356 return ret;
357
358 disk_destroy(&dksc->sc_dkdev);
359 mutex_destroy(&sc->sc_lock);
360
361 device_release(self);
362 return 0;
363 }
364
365 void
366 cgdattach(int num)
367 {
368 int error;
369
370 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
371 if (error != 0)
372 aprint_error("%s: unable to register cfattach\n",
373 cgd_cd.cd_name);
374 }
375
376 static struct cgd_softc *
377 cgd_spawn(int unit, device_t *self)
378 {
379 cfdata_t cf;
380 struct cgd_softc *sc;
381
382 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
383 cf->cf_name = cgd_cd.cd_name;
384 cf->cf_atname = cgd_cd.cd_name;
385 cf->cf_unit = unit;
386 cf->cf_fstate = FSTATE_STAR;
387
388 if (config_attach_pseudo(cf) == NULL)
389 return NULL;
390
391 *self = device_lookup_acquire(&cgd_cd, unit);
	if (*self == NULL)
393 return NULL;
394 else {
395 /*
396 * Note that we return while still holding a reference
397 * to the device!
398 */
399 sc = device_private(*self);
400 return sc;
401 }
402 }
403
404 static int
405 cgd_destroy(device_t dev)
406 {
407 int error;
408 cfdata_t cf;
409
410 cf = device_cfdata(dev);
411 error = config_detach(dev, DETACH_QUIET);
412 if (error == 0)
413 free(cf, M_DEVBUF);
414
415 return error;
416 }
417
418 static int
419 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
420 {
421 device_t self;
422 int error;
423 struct cgd_softc *cs;
424
425 DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
426 GETCGD_SOFTC(cs, dev, self);
427 error = dk_open(&cs->sc_dksc, dev, flags, fmt, l);
428 device_release(self);
429 return error;
430 }
431
432 static int
433 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
434 {
435 int error;
436 device_t self;
437 struct cgd_softc *cs;
438 struct dk_softc *dksc;
439
440 DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
441 GETCGD_SOFTC(cs, dev, self);
442 dksc = &cs->sc_dksc;
443 if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0) {
444 device_release(self);
445 return error;
446 }
447
448 if (!DK_ATTACHED(dksc)) {
449 if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
450 aprint_error_dev(dksc->sc_dev,
451 "unable to detach instance\n");
452 return error;
453 }
454 } else
455 device_release(self);
456 return 0;
457 }
458
459 static void
460 cgdstrategy(struct buf *bp)
461 {
	device_t self;
	struct cgd_softc *cs = getcgd_softc(bp->b_dev, &self);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	if (cs == NULL) {
		bp->b_error = ENXIO;
		goto bail;
	}
475
476 /*
477 * Reject unaligned writes.
478 */
479 if (((uintptr_t)bp->b_data & 3) != 0) {
480 bp->b_error = EINVAL;
481 bp->b_resid = bp->b_bcount;
482 biodone(bp);
483 device_release(self);
484 return;
485 }
486
487 dk_strategy(&cs->sc_dksc, bp);
488 device_release(self);
489 return;
490
491 bail:
492 bp->b_resid = bp->b_bcount;
493 biodone(bp);
494 return;
495 }
496
497 static int
498 cgdsize(dev_t dev)
499 {
500 int retval;
501 device_t self;
502 struct cgd_softc *cs = getcgd_softc(dev, &self);
503
504 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (cs == NULL)
		return -1;

	retval = dk_size(&cs->sc_dksc, dev);
	device_release(self);
	return retval;
512 }
513
514 /*
515 * cgd_{get,put}data are functions that deal with getting a buffer
516 * for the new encrypted data. We have a buffer per device so that
 * we can always keep at least one transaction in flight.
518 * We use this buffer first so that we have one less piece of
519 * malloc'ed data at any given point.
520 */
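/*
 * Note that cgd_getdata() can still fail: once the per-device buffer is
 * in use it falls back to malloc(M_NOWAIT), so callers must handle a
 * NULL return (cgd_diskstart returns EAGAIN, cgd_dumpblocks ENOMEM).
 */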
521
522 static void *
523 cgd_getdata(struct dk_softc *dksc, unsigned long size)
524 {
525 struct cgd_softc *cs = (struct cgd_softc *)dksc;
526 void * data = NULL;
527
528 mutex_enter(&cs->sc_lock);
529 if (cs->sc_data_used == 0) {
530 cs->sc_data_used = 1;
531 data = cs->sc_data;
532 }
533 mutex_exit(&cs->sc_lock);
534
535 if (data)
536 return data;
537
538 return malloc(size, M_DEVBUF, M_NOWAIT);
539 }
540
541 static void
542 cgd_putdata(struct dk_softc *dksc, void *data)
543 {
544 struct cgd_softc *cs = (struct cgd_softc *)dksc;
545
546 if (data == cs->sc_data) {
547 mutex_enter(&cs->sc_lock);
548 cs->sc_data_used = 0;
549 mutex_exit(&cs->sc_lock);
550 } else {
551 free(data, M_DEVBUF);
552 }
553 }
554
555 static int
556 cgd_diskstart(device_t dev, struct buf *bp)
557 {
558 struct cgd_softc *cs = device_private(dev);
559 struct dk_softc *dksc = &cs->sc_dksc;
560 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
561 struct buf *nbp;
562 void * addr;
563 void * newaddr;
564 daddr_t bn;
565 struct vnode *vp;
566
567 DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
568
569 bn = bp->b_rawblkno;
570
571 /*
572 * We attempt to allocate all of our resources up front, so that
573 * we can fail quickly if they are unavailable.
574 */
575 nbp = getiobuf(cs->sc_tvn, false);
576 if (nbp == NULL)
577 return EAGAIN;
578
579 /*
580 * If we are writing, then we need to encrypt the outgoing
581 * block into a new block of memory.
582 */
583 newaddr = addr = bp->b_data;
584 if ((bp->b_flags & B_READ) == 0) {
585 newaddr = cgd_getdata(dksc, bp->b_bcount);
586 if (!newaddr) {
587 putiobuf(nbp);
588 return EAGAIN;
589 }
590 cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
591 dg->dg_secsize, CGD_CIPHER_ENCRYPT);
592 }
593
594 nbp->b_data = newaddr;
595 nbp->b_flags = bp->b_flags;
596 nbp->b_oflags = bp->b_oflags;
597 nbp->b_cflags = bp->b_cflags;
598 nbp->b_iodone = cgdiodone;
599 nbp->b_proc = bp->b_proc;
600 nbp->b_blkno = btodb(bn * dg->dg_secsize);
601 nbp->b_bcount = bp->b_bcount;
602 nbp->b_private = bp;
603
604 BIO_COPYPRIO(nbp, bp);
605
606 if ((nbp->b_flags & B_READ) == 0) {
607 vp = nbp->b_vp;
608 mutex_enter(vp->v_interlock);
609 vp->v_numoutput++;
610 mutex_exit(vp->v_interlock);
611 }
612 VOP_STRATEGY(cs->sc_tvn, nbp);
613
614 return 0;
615 }
616
617 static void
618 cgdiodone(struct buf *nbp)
619 {
620 device_t self;
621 struct buf *obp = nbp->b_private;
622 struct cgd_softc *cs = getcgd_softc(obp->b_dev, &self);
623 struct dk_softc *dksc = &cs->sc_dksc;
624 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
625 daddr_t bn;
626
627 KDASSERT(cs);
628
629 DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
630 DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
631 obp, obp->b_bcount, obp->b_resid));
632 DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
633 " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
634 nbp->b_bcount));
635 if (nbp->b_error != 0) {
636 obp->b_error = nbp->b_error;
637 DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
638 obp->b_error));
639 }
640
641 /* Perform the decryption if we are reading.
642 *
643 * Note: use the blocknumber from nbp, since it is what
644 * we used to encrypt the blocks.
645 */
646
647 if (nbp->b_flags & B_READ) {
648 bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
649 cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
650 bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
651 }
652
653 /* If we allocated memory, free it now... */
654 if (nbp->b_data != obp->b_data)
655 cgd_putdata(dksc, nbp->b_data);
656
657 putiobuf(nbp);
658
659 /* Request is complete for whatever reason */
660 obp->b_resid = 0;
661 if (obp->b_error != 0)
662 obp->b_resid = obp->b_bcount;
663
664 dk_done(dksc, obp);
665 device_release(self);
666
667 dk_start(dksc, NULL);
668 }
669
670 static int
671 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
672 {
673 struct cgd_softc *sc = device_private(dev);
674 struct dk_softc *dksc = &sc->sc_dksc;
675 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
676 size_t nbytes, blksize;
677 void *buf;
678 int error;
679
680 /*
681 * dk_dump gives us units of disklabel sectors. Everything
682 * else in cgd uses units of diskgeom sectors. These had
683 * better agree; otherwise we need to figure out how to convert
684 * between them.
685 */
686 KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
687 "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
688 dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
689 blksize = dg->dg_secsize;
690
691 /*
692 * Compute the number of bytes in this request, which dk_dump
693 * has `helpfully' converted to a number of blocks for us.
694 */
695 nbytes = nblk*blksize;
696
697 /* Try to acquire a buffer to store the ciphertext. */
698 buf = cgd_getdata(dksc, nbytes);
699 if (buf == NULL)
700 /* Out of memory: give up. */
701 return ENOMEM;
702
703 /* Encrypt the caller's data into the temporary buffer. */
704 cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
705
706 /* Pass it on to the underlying disk device. */
707 error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
708
709 /* Release the buffer. */
710 cgd_putdata(dksc, buf);
711
712 /* Return any error from the underlying disk device. */
713 return error;
714 }
715
716 /* XXX: we should probably put these into dksubr.c, mostly */
717 static int
718 cgdread(dev_t dev, struct uio *uio, int flags)
719 {
720 device_t self;
721 int error;
722 struct cgd_softc *cs;
723 struct dk_softc *dksc;
724
725 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
726 (unsigned long long)dev, uio, flags));
727 GETCGD_SOFTC(cs, dev, self);
728 dksc = &cs->sc_dksc;
729 if (!DK_ATTACHED(dksc)) {
730 device_release(self);
731 return ENXIO;
732 }
733 error = physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
734 device_release(self);
735 return error;
736 }
737
738 /* XXX: we should probably put these into dksubr.c, mostly */
739 static int
740 cgdwrite(dev_t dev, struct uio *uio, int flags)
741 {
742 device_t self;
743 int error;
744 struct cgd_softc *cs;
745 struct dk_softc *dksc;
746
747 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
748 GETCGD_SOFTC(cs, dev, self);
749 dksc = &cs->sc_dksc;
750 if (!DK_ATTACHED(dksc)) {
751 device_release(self);
752 return ENXIO;
753 }
754 error = physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
755 device_release(self);
756 return error;
757 }
758
759 static int
760 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
761 {
762 device_t self;
763 struct cgd_softc *cs;
764 struct dk_softc *dksc;
765 int part = DISKPART(dev);
766 int pmask = 1 << part;
767 int error = 0;
768
769 DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
770 dev, cmd, data, flag, l));
771
772 switch (cmd) {
773 case CGDIOCGET:
774 return cgd_ioctl_get(dev, data, l);
775 case CGDIOCSET:
776 case CGDIOCCLR:
777 if ((flag & FWRITE) == 0)
778 return EBADF;
779 /* FALLTHROUGH */
780 default:
781 GETCGD_SOFTC(cs, dev, self);
782 dksc = &cs->sc_dksc;
783 break;
784 }
785
786 switch (cmd) {
787 case CGDIOCSET:
788 if (DK_ATTACHED(dksc))
789 error = EBUSY;
790 else
791 error = cgd_ioctl_set(cs, data, l);
792 break;
793 case CGDIOCCLR:
794 if (DK_BUSY(&cs->sc_dksc, pmask))
795 error = EBUSY;
796 else
797 error = cgd_ioctl_clr(cs, l);
798 break;
799 case DIOCCACHESYNC:
800 /*
801 * XXX Do we really need to care about having a writable
802 * file descriptor here?
803 */
804 if ((flag & FWRITE) == 0)
805 error = (EBADF);
806
807 /*
808 * We pass this call down to the underlying disk.
809 */
810 else
811 error = VOP_IOCTL(cs->sc_tvn, cmd, data, flag,
812 l->l_cred);
813 break;
814 case DIOCGSTRATEGY:
815 case DIOCSSTRATEGY:
816 if (!DK_ATTACHED(dksc)) {
817 error = ENOENT;
818 break;
819 }
820 /*FALLTHROUGH*/
821 default:
822 error = dk_ioctl(dksc, dev, cmd, data, flag, l);
823 break;
824 case CGDIOCGET:
825 KASSERT(0);
826 error = EINVAL;
827 break;
828 }
829 device_release(self);
830 return error;
831 }
832
833 static int
834 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
835 {
836 device_t self;
837 int error;
838 struct cgd_softc *cs;
839
840 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
841 dev, blkno, va, (unsigned long)size));
842 GETCGD_SOFTC(cs, dev, self);
843 error = dk_dump(&cs->sc_dksc, dev, blkno, va, size);
844 device_release(self);
845 return error;
846 }
847
848 /*
849 * XXXrcd:
850 * for now we hardcode the maximum key length.
851 */
852 #define MAX_KEYSIZE 1024
853
854 static const struct {
855 const char *n;
856 int v;
857 int d;
858 } encblkno[] = {
859 { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
860 { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
861 { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
862 };
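/*
 * `v' selects the cipher mode; `d' is the divisor later applied to
 * cf_blocksize: the legacy "encblkno"/"encblkno8" names keep the
 * historical bits-based block size, while "encblkno1" divides by 8
 * to obtain bytes.
 */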
863
864 /* ARGSUSED */
865 static int
866 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
867 {
868 struct cgd_ioctl *ci = data;
869 struct vnode *vp;
870 int ret;
871 size_t i;
872 size_t keybytes; /* key length in bytes */
873 const char *cp;
874 struct pathbuf *pb;
875 char *inbuf;
876 struct dk_softc *dksc = &cs->sc_dksc;
877
878 cp = ci->ci_disk;
879
880 ret = pathbuf_copyin(ci->ci_disk, &pb);
881 if (ret != 0) {
882 return ret;
883 }
884 ret = dk_lookup(pb, l, &vp);
885 pathbuf_destroy(pb);
886 if (ret != 0) {
887 return ret;
888 }
889
890 inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
891
892 if ((ret = cgdinit(cs, cp, vp, l)) != 0)
893 goto bail;
894
895 (void)memset(inbuf, 0, MAX_KEYSIZE);
896 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
897 if (ret)
898 goto bail;
899 cs->sc_cfuncs = cryptfuncs_find(inbuf);
900 if (!cs->sc_cfuncs) {
901 ret = EINVAL;
902 goto bail;
903 }
904
905 (void)memset(inbuf, 0, MAX_KEYSIZE);
906 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
907 if (ret)
908 goto bail;
909
910 for (i = 0; i < __arraycount(encblkno); i++)
911 if (strcmp(encblkno[i].n, inbuf) == 0)
912 break;
913
914 if (i == __arraycount(encblkno)) {
915 ret = EINVAL;
916 goto bail;
917 }
918
919 keybytes = ci->ci_keylen / 8 + 1;
920 if (keybytes > MAX_KEYSIZE) {
921 ret = EINVAL;
922 goto bail;
923 }
924
925 (void)memset(inbuf, 0, MAX_KEYSIZE);
926 ret = copyin(ci->ci_key, inbuf, keybytes);
927 if (ret)
928 goto bail;
929
930 cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
931 cs->sc_cdata.cf_mode = encblkno[i].v;
932 cs->sc_cdata.cf_keylen = ci->ci_keylen;
933 cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
934 &cs->sc_cdata.cf_blocksize);
935 if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
936 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
937 cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
938 cs->sc_cdata.cf_priv = NULL;
939 }
940
941 /*
942 * The blocksize is supposed to be in bytes. Unfortunately originally
943 * it was expressed in bits. For compatibility we maintain encblkno
944 * and encblkno8.
945 */
946 cs->sc_cdata.cf_blocksize /= encblkno[i].d;
947 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
948 if (!cs->sc_cdata.cf_priv) {
949 ret = EINVAL; /* XXX is this the right error? */
950 goto bail;
951 }
952 free(inbuf, M_TEMP);
953
954 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
955
956 cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
957 cs->sc_data_used = 0;
958
959 /* Attach the disk. */
960 dk_attach(dksc);
961 disk_attach(&dksc->sc_dkdev);
962
963 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
964
965 /* Discover wedges on this disk. */
966 dkwedge_discover(&dksc->sc_dkdev);
967
968 return 0;
969
970 bail:
971 free(inbuf, M_TEMP);
972 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
973 return ret;
974 }
975
976 /* ARGSUSED */
977 static int
978 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
979 {
980 struct dk_softc *dksc = &cs->sc_dksc;
981
982 if (!DK_ATTACHED(dksc))
983 return ENXIO;
984
985 /* Delete all of our wedges. */
986 dkwedge_delall(&dksc->sc_dkdev);
987
988 /* Kill off any queued buffers. */
989 dk_drain(dksc);
990 bufq_free(dksc->sc_bufq);
991
992 (void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
993 cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
994 free(cs->sc_tpath, M_DEVBUF);
995 free(cs->sc_data, M_DEVBUF);
996 cs->sc_data_used = 0;
997 dk_detach(dksc);
998 disk_detach(&dksc->sc_dkdev);
999
1000 return 0;
1001 }
1002
1003 static int
1004 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
1005 {
	device_t self;
	struct cgd_softc *cs = getcgd_softc(dev, &self);
	struct cgd_user *cgu;
	int unit;
	struct dk_softc *dksc;

	if (cs == NULL)
		return ENXIO;
	dksc = &cs->sc_dksc;
1011
1012 unit = CGDUNIT(dev);
1013 cgu = (struct cgd_user *)data;
1014
1015 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
1016 dev, unit, data, l));
1017
1018 if (cgu->cgu_unit == -1)
1019 cgu->cgu_unit = unit;
1020
1021 if (cgu->cgu_unit < 0) {
1022 device_release(self);
1023 return EINVAL; /* XXX: should this be ENXIO? */
1024 }
1025
1026 /*
1027 * XXX This appears to be redundant, given the initialization
1028 * XXX when it was declared. Leave it for now, but don't
1029 * XXX take an extra reference to the device!
1030 */
1031 cs = device_lookup_private(&cgd_cd, unit);
1032 if (cs == NULL || !DK_ATTACHED(dksc)) {
1033 cgu->cgu_dev = 0;
1034 cgu->cgu_alg[0] = '\0';
1035 cgu->cgu_blocksize = 0;
1036 cgu->cgu_mode = 0;
1037 cgu->cgu_keylen = 0;
1038 }
1039 else {
1040 cgu->cgu_dev = cs->sc_tdev;
1041 strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
1042 sizeof(cgu->cgu_alg));
1043 cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
1044 cgu->cgu_mode = cs->sc_cdata.cf_mode;
1045 cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
1046 }
1047 device_release(self);
1048 return 0;
1049 }
1050
1051 static int
1052 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
1053 struct lwp *l)
1054 {
1055 struct disk_geom *dg;
1056 int ret;
1057 char *tmppath;
1058 uint64_t psize;
1059 unsigned secsize;
1060 struct dk_softc *dksc = &cs->sc_dksc;
1061
1062 cs->sc_tvn = vp;
1063 cs->sc_tpath = NULL;
1064
1065 tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1066 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
1067 if (ret)
1068 goto bail;
1069 cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
1070 memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
1071
1072 cs->sc_tdev = vp->v_rdev;
1073
1074 if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
1075 goto bail;
1076
1077 if (psize == 0) {
1078 ret = ENODEV;
1079 goto bail;
1080 }
1081
1082 /*
1083 * XXX here we should probe the underlying device. If we
1084 * are accessing a partition of type RAW_PART, then
1085 * we should populate our initial geometry with the
1086 * geometry that we discover from the device.
1087 */
1088 dg = &dksc->sc_dkdev.dk_geom;
1089 memset(dg, 0, sizeof(*dg));
1090 dg->dg_secperunit = psize;
1091 dg->dg_secsize = secsize;
1092 dg->dg_ntracks = 1;
1093 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
1094 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
1095
1096 bail:
1097 free(tmppath, M_TEMP);
1098 if (ret && cs->sc_tpath)
1099 free(cs->sc_tpath, M_DEVBUF);
1100 return ret;
1101 }
1102
1103 /*
1104 * Our generic cipher entry point. This takes care of the
1105 * IV mode and passes off the work to the specific cipher.
1106 * We implement here the IV method ``encrypted block
1107 * number''.
1108 *
1109 * XXXrcd: for now we rely on our own crypto framework defined
1110 * in dev/cgd_crypto.c. This will change when we
1111 * get a generic kernel crypto framework.
1112 */
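/*
 * Each request is handled one sector (secsize bytes) at a time, and a
 * fresh IV is derived from that sector's block number, so identical
 * plaintext sectors do not encrypt to identical ciphertext.
 */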
1113
1114 static void
1115 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1116 {
1117 int i;
1118
	/* Set up the blkno in blkno_buf.  Here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation because it does not rely
1126 * on the size of buf (which is the blocksize of the cipher),
1127 * but allows daddr_t to grow without breaking existing
1128 * disks.
1129 *
1130 * Note that blkno2blkno_buf does not take a size as input,
1131 * and hence must be called on a pre-zeroed buffer of length
1132 * greater than or equal to sizeof(daddr_t).
1133 */
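	/*
	 * For example, with a 64-bit daddr_t, blkno 0x0102 is stored
	 * least-significant byte first: 02 01 00 00 00 00 00 00.
	 */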
1134 for (i=0; i < sizeof(daddr_t); i++) {
1135 *sbuf++ = blkno & 0xff;
1136 blkno >>= 8;
1137 }
1138 }
1139
1140 static void
1141 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
1142 size_t len, daddr_t blkno, size_t secsize, int dir)
1143 {
1144 char *dst = dstv;
1145 char *src = srcv;
1146 cfunc_cipher_prep *ciprep = cs->sc_cfuncs->cf_cipher_prep;
1147 cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
1148 struct uio dstuio;
1149 struct uio srcuio;
1150 struct iovec dstiov[2];
1151 struct iovec srciov[2];
1152 size_t blocksize = cs->sc_cdata.cf_blocksize;
1153 size_t todo;
1154 char blkno_buf[CGD_MAXBLOCKSIZE], *iv;
1155
1156 DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
1157
1158 DIAGCONDPANIC(len % blocksize != 0,
1159 ("cgd_cipher: len %% blocksize != 0"));
1160
1161 /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
1162 DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
1163 ("cgd_cipher: sizeof(daddr_t) > blocksize"));
1164
1165 DIAGCONDPANIC(blocksize > CGD_MAXBLOCKSIZE,
1166 ("cgd_cipher: blocksize > CGD_MAXBLOCKSIZE"));
1167
1168 dstuio.uio_iov = dstiov;
1169 dstuio.uio_iovcnt = 1;
1170
1171 srcuio.uio_iov = srciov;
1172 srcuio.uio_iovcnt = 1;
1173
1174 for (; len > 0; len -= todo) {
1175 todo = MIN(len, secsize);
1176
1177 dstiov[0].iov_base = dst;
1178 srciov[0].iov_base = src;
1179 dstiov[0].iov_len = todo;
1180 srciov[0].iov_len = todo;
1181
1182 memset(blkno_buf, 0x0, blocksize);
1183 blkno2blkno_buf(blkno_buf, blkno);
1184 IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
1185 blkno_buf, blocksize));
1186
1187 /*
1188 * Compute an initial IV. All ciphers
1189 * can convert blkno_buf in-place.
1190 */
1191 iv = blkno_buf;
1192 ciprep(cs->sc_cdata.cf_priv, iv, blkno_buf, blocksize, dir);
1193 IFDEBUG(CGDB_CRYPTO, hexprint("step 2: iv", iv, blocksize));
1194
1195 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, iv, dir);
1196
1197 dst += todo;
1198 src += todo;
1199 blkno++;
1200 }
1201 }
1202
1203 #ifdef DEBUG
1204 static void
1205 hexprint(const char *start, void *buf, int len)
1206 {
1207 char *c = buf;
1208
1209 DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
1210 printf("%s: len=%06d 0x", start, len);
1211 while (len--)
1212 printf("%02x", (unsigned char) *c++);
1213 }
1214 #endif
1215
1216 static void
1217 selftest(void)
1218 {
1219 struct cgd_softc cs;
1220 void *buf;
1221
1222 printf("running cgd selftest ");
1223
1224 for (size_t i = 0; i < __arraycount(selftests); i++) {
1225 const char *alg = selftests[i].alg;
1226 const uint8_t *key = selftests[i].key;
1227 int keylen = selftests[i].keylen;
1228 int txtlen = selftests[i].txtlen;
1229
1230 printf("%s-%d ", alg, keylen);
1231
1232 memset(&cs, 0, sizeof(cs));
1233
1234 cs.sc_cfuncs = cryptfuncs_find(alg);
1235 if (cs.sc_cfuncs == NULL)
1236 panic("%s not implemented", alg);
1237
1238 cs.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
1239 cs.sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
1240 cs.sc_cdata.cf_keylen = keylen;
1241
1242 cs.sc_cdata.cf_priv = cs.sc_cfuncs->cf_init(keylen,
1243 key, &cs.sc_cdata.cf_blocksize);
1244 if (cs.sc_cdata.cf_priv == NULL)
1245 panic("cf_priv is NULL");
1246 if (cs.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
1247 panic("bad block size %zu", cs.sc_cdata.cf_blocksize);
1248
1249 cs.sc_cdata.cf_blocksize /= 8;
1250
1251 buf = malloc(txtlen, M_DEVBUF, M_WAITOK);
1252 memcpy(buf, selftests[i].ptxt, txtlen);
1253
1254 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
1255 selftests[i].secsize, CGD_CIPHER_ENCRYPT);
1256 if (memcmp(buf, selftests[i].ctxt, txtlen) != 0)
1257 panic("encryption is broken");
1258
1259 cgd_cipher(&cs, buf, buf, txtlen, selftests[i].blkno,
1260 selftests[i].secsize, CGD_CIPHER_DECRYPT);
1261 if (memcmp(buf, selftests[i].ptxt, txtlen) != 0)
1262 panic("decryption is broken");
1263
1264 free(buf, M_DEVBUF);
1265 cs.sc_cfuncs->cf_destroy(cs.sc_cdata.cf_priv);
1266 }
1267
1268 printf("done\n");
1269 }
1270
1271 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr");
1272
1273 #ifdef _MODULE
1274 CFDRIVER_DECL(cgd, DV_DISK, NULL);
1275
1276 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
1277 #endif
1278
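/*
 * Module glue: the self-test runs first; when built as a module the
 * cfdriver, cfattach and {b,c}devsw entries are then registered, and are
 * unwound in reverse order if a later step fails.
 */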
1279 static int
1280 cgd_modcmd(modcmd_t cmd, void *arg)
1281 {
1282 int error = 0;
1283
1284 switch (cmd) {
1285 case MODULE_CMD_INIT:
1286 selftest();
1287 #ifdef _MODULE
1288 error = config_cfdriver_attach(&cgd_cd);
1289 if (error)
1290 break;
1291
1292 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1293 if (error) {
1294 config_cfdriver_detach(&cgd_cd);
1295 aprint_error("%s: unable to register cfattach for "
1296 "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1297 break;
1298 }
1299 /*
1300 * Attach the {b,c}devsw's
1301 */
1302 error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1303 &cgd_cdevsw, &cgd_cmajor);
1304
1305 /*
1306 * If devsw_attach fails, remove from autoconf database
1307 */
1308 if (error) {
1309 config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1310 config_cfdriver_detach(&cgd_cd);
1311 aprint_error("%s: unable to attach %s devsw, "
1312 "error %d", __func__, cgd_cd.cd_name, error);
1313 break;
1314 }
1315 #endif
1316 break;
1317
1318 case MODULE_CMD_FINI:
1319 #ifdef _MODULE
1320 /*
1321 * Remove {b,c}devsw's
1322 */
1323 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1324
1325 /*
1326 * Now remove device from autoconf database
1327 */
1328 error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1329 if (error) {
1330 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1331 &cgd_cdevsw, &cgd_cmajor);
1332 aprint_error("%s: failed to detach %s cfattach, "
1333 "error %d\n", __func__, cgd_cd.cd_name, error);
1334 break;
1335 }
1336 error = config_cfdriver_detach(&cgd_cd);
1337 if (error) {
1338 (void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1339 (void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1340 &cgd_cdevsw, &cgd_cmajor);
1341 aprint_error("%s: failed to detach %s cfdriver, "
1342 "error %d\n", __func__, cgd_cd.cd_name, error);
1343 break;
1344 }
1345 #endif
1346 break;
1347
1348 case MODULE_CMD_STAT:
1349 error = ENOTTY;
1350 break;
1351 default:
1352 error = ENOTTY;
1353 break;
1354 }
1355
1356 return error;
1357 }
1358