1 /* $NetBSD: cgd.c,v 1.108.2.9 2016/07/22 06:32:54 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.108.2.9 2016/07/22 06:32:54 pgoyette Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/buf.h>
41 #include <sys/bufq.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/pool.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/disk.h>
48 #include <sys/disklabel.h>
49 #include <sys/fcntl.h>
50 #include <sys/namei.h> /* for pathbuf */
51 #include <sys/vnode.h>
52 #include <sys/conf.h>
53 #include <sys/syslog.h>
54 #include <sys/localcount.h>
55
56 #include <dev/dkvar.h>
57 #include <dev/cgdvar.h>
58
59 #include <miscfs/specfs/specdev.h> /* for v_rdev */
60
61 #include "ioconf.h"
62
63 /* Entry Point Functions */
64
65 static dev_type_open(cgdopen);
66 static dev_type_close(cgdclose);
67 static dev_type_read(cgdread);
68 static dev_type_write(cgdwrite);
69 static dev_type_ioctl(cgdioctl);
70 static dev_type_strategy(cgdstrategy);
71 static dev_type_dump(cgddump);
72 static dev_type_size(cgdsize);
73
74 const struct bdevsw cgd_bdevsw = {
75 LOCALCOUNT_INITIALIZER
76 .d_open = cgdopen,
77 .d_close = cgdclose,
78 .d_strategy = cgdstrategy,
79 .d_ioctl = cgdioctl,
80 .d_dump = cgddump,
81 .d_psize = cgdsize,
82 .d_discard = nodiscard,
83 .d_flag = D_DISK
84 };
85
86 const struct cdevsw cgd_cdevsw = {
87 LOCALCOUNT_INITIALIZER
88 .d_open = cgdopen,
89 .d_close = cgdclose,
90 .d_read = cgdread,
91 .d_write = cgdwrite,
92 .d_ioctl = cgdioctl,
93 .d_stop = nostop,
94 .d_tty = notty,
95 .d_poll = nopoll,
96 .d_mmap = nommap,
97 .d_kqfilter = nokqfilter,
98 .d_discard = nodiscard,
99 .d_flag = D_DISK
100 };
101
102 static int cgd_match(device_t, cfdata_t, void *);
103 static void cgd_attach(device_t, device_t, void *);
104 static int cgd_detach(device_t, int);
105 static struct cgd_softc *cgd_spawn(int, device_t *);
106 static int cgd_destroy(device_t);
107
108 /* Internal Functions */
109
110 static int cgd_diskstart(device_t, struct buf *);
111 static void cgdiodone(struct buf *);
112 static int cgd_dumpblocks(device_t, void *, daddr_t, int);
113
114 static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
115 static int cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
116 static int cgd_ioctl_get(dev_t, void *, struct lwp *);
117 static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
118 struct lwp *);
119 static void cgd_cipher(struct cgd_softc *, void *, void *,
120 size_t, daddr_t, size_t, int);
121
122 static struct dkdriver cgddkdriver = {
123 .d_minphys = minphys,
124 .d_open = cgdopen,
125 .d_close = cgdclose,
126 .d_strategy = cgdstrategy,
127 .d_iosize = NULL,
128 .d_diskstart = cgd_diskstart,
129 .d_dumpblocks = cgd_dumpblocks,
130 .d_lastclose = NULL
131 };
132
133 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
134 cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
135 extern struct cfdriver cgd_cd;
136
137 /* DIAGNOSTIC and DEBUG definitions */
138
139 #if defined(CGDDEBUG) && !defined(DEBUG)
140 #define DEBUG
141 #endif
142
143 #ifdef DEBUG
144 int cgddebug = 0;
145
146 #define CGDB_FOLLOW 0x1
147 #define CGDB_IO 0x2
148 #define CGDB_CRYPTO 0x4
149
150 #define IFDEBUG(x,y) if (cgddebug & (x)) y
151 #define DPRINTF(x,y) IFDEBUG(x, printf y)
152 #define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)
153
154 static void hexprint(const char *, void *, int);
155
156 #else
157 #define IFDEBUG(x,y)
158 #define DPRINTF(x,y)
159 #define DPRINTF_FOLLOW(y)
160 #endif
161
162 #ifdef DIAGNOSTIC
163 #define DIAGPANIC(x) panic x
164 #define DIAGCONDPANIC(x,y) if (x) panic y
165 #else
166 #define DIAGPANIC(x)
167 #define DIAGCONDPANIC(x,y)
168 #endif
169
170 /* Global variables */
171
172 /* Utility Functions */
173
174 #define CGDUNIT(x) DISKUNIT(x)
175 #define GETCGD_SOFTC(_cs, x, _dv) \
176 if (!((_cs) = getcgd_softc(x, &_dv))) return ENXIO;
177
178 /* The code */
179
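/*
* cgd_release: look up the autoconf device for this unit and
* immediately drop the reference taken by the lookup; a no-op if
* the unit does not exist.
*/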
180 static void
181 cgd_release(dev_t dev)
182 {
183 int unit = CGDUNIT(dev);
184 device_t self;
185
186 self = device_lookup_acquire(&cgd_cd, unit);
187 if (self != NULL)
188 device_release(self);
189 }
190
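/*
* getcgd_softc: return the softc for the unit encoded in `dev',
* creating the pseudo-device via cgd_spawn() if it does not exist
* yet.  On success *self holds a referenced device_t that the
* caller must drop with device_release().
*/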
191 static struct cgd_softc *
192 getcgd_softc(dev_t dev, device_t *self)
193 {
194 int unit = CGDUNIT(dev);
195 struct cgd_softc *sc;
196
197 DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
198
199 *self = device_lookup_acquire(&cgd_cd, unit);
200
201 if (*self == NULL) {
202 sc = cgd_spawn(unit, self);
203 } else {
204 sc = device_private(*self);
205 }
206
207 return sc;
208 }
209
210 static int
211 cgd_match(device_t self, cfdata_t cfdata, void *aux)
212 {
213
214 return 1;
215 }
216
217 static void
218 cgd_attach(device_t parent, device_t self, void *aux)
219 {
220 struct cgd_softc *sc = device_private(self);
221
222 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
223 dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
224 disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);
225
226 if (!pmf_device_register(self, NULL, NULL))
227 aprint_error_dev(self,
228 "unable to register power management hooks\n");
229 }
230
231
232 static int
233 cgd_detach(device_t self, int flags)
234 {
235 int ret;
236 const int pmask = 1 << RAW_PART;
237 struct cgd_softc *sc = device_private(self);
238 struct dk_softc *dksc = &sc->sc_dksc;
239
240 if (DK_BUSY(dksc, pmask))
241 return EBUSY;
242
243 if (DK_ATTACHED(dksc) &&
244 (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
245 return ret;
246
247 disk_destroy(&dksc->sc_dkdev);
248 mutex_destroy(&sc->sc_lock);
249
250 return 0;
251 }
252
253 void
254 cgdattach(int num)
255 {
256 int error;
257
258 error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
259 if (error != 0)
260 aprint_error("%s: unable to register cfattach\n",
261 cgd_cd.cd_name);
262 }
263
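/*
* cgd_spawn: create a cgd pseudo-device instance for `unit' on the
* fly via config_attach_pseudo() and return its softc, leaving a
* reference to the new device in *self.
*/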
264 static struct cgd_softc *
265 cgd_spawn(int unit, device_t *self)
266 {
267 cfdata_t cf;
268
269 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
270 cf->cf_name = cgd_cd.cd_name;
271 cf->cf_atname = cgd_cd.cd_name;
272 cf->cf_unit = unit;
273 cf->cf_fstate = FSTATE_STAR;
274
275 if (config_attach_pseudo(cf) == NULL) {
276 printf("%s: config_attach_pseudo() failed\n", __func__);
277 return NULL;
278 }
279
280 *self = device_lookup_acquire(&cgd_cd, unit);
281 if (*self == NULL)
282 return NULL;
283
284 /*
285 * Note that we return while still holding a reference
286 * to the device!
287 */
288 return device_private(*self);
289 }
290
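/*
* cgd_destroy: detach the pseudo-device and free the cfdata that
* cgd_spawn() allocated for it.
*/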
291 static int
292 cgd_destroy(device_t dev)
293 {
294 int error;
295 cfdata_t cf;
296
297 cf = device_cfdata(dev);
298 error = config_detach(dev, DETACH_QUIET);
299 if (error)
300 return error;
301 free(cf, M_DEVBUF);
302 return 0;
303 }
304
305 static int
306 cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
307 {
308 device_t self;
309 int error;
310 struct cgd_softc *cs;
311
312 DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
313 GETCGD_SOFTC(cs, dev, self);
314 error = dk_open(&cs->sc_dksc, dev, flags, fmt, l);
315 device_release(self);
316 return error;
317 }
318
319 static int
320 cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
321 {
322 int error;
323 device_t self;
324 struct cgd_softc *cs;
325 struct dk_softc *dksc;
326
327 DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
328 GETCGD_SOFTC(cs, dev, self);
329 dksc = &cs->sc_dksc;
330 if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0) {
331 device_release(self);
332 return error;
333 }
334
335 if (!DK_ATTACHED(dksc)) {
336 if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
337 aprint_error_dev(dksc->sc_dev,
338 "unable to detach instance\n");
339 device_release(self);
340 return error;
341 }
342 }
343 device_release(self);
344 return error;
345 }
346
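/*
* cgdstrategy: block I/O entry point.  Sanity-check the request and
* hand it to the dk(4) layer, which calls back into cgd_diskstart()
* to do the real work.
*/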
347 static void
348 cgdstrategy(struct buf *bp)
349 {
350 device_t self;
351 struct cgd_softc *cs = getcgd_softc(bp->b_dev, &self);
352 struct dk_softc *dksc = &cs->sc_dksc;
353 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
354
355 DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
356 (long)bp->b_bcount));
357
358 /*
359 * Reject unaligned writes. We can encrypt and decrypt only
360 * complete disk sectors, and we let the ciphers require their
361 * buffers to be aligned to 32-bit boundaries.
362 */
363 if (bp->b_blkno < 0 ||
364 (bp->b_bcount % dg->dg_secsize) != 0 ||
365 ((uintptr_t)bp->b_data & 3) != 0) {
366 bp->b_error = EINVAL;
367 bp->b_resid = bp->b_bcount;
368 biodone(bp);
369 cgd_release(bp->b_dev);
370 device_release(self);
371 return;
372 }
373
374 /* XXXrcd: Should we test for (cs != NULL)? */
375 dk_strategy(&cs->sc_dksc, bp);
376 cgd_release(bp->b_dev);
377 device_release(self);
378 return;
379 }
380
381 static int
382 cgdsize(dev_t dev)
383 {
384 int retval;
385 device_t self;
386 struct cgd_softc *cs = getcgd_softc(dev, &self);
387
388 DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
389 if (!cs)
390 retval = -1;
391 else
392 retval = dk_size(&cs->sc_dksc, dev);
393
394 cgd_release(dev);
395 device_release(self);
396 return retval;
397 }
398
399 /*
400 * cgd_{get,put}data are functions that deal with getting a buffer
401 * for the new encrypted data. We have a buffer per device so that
402 * we can ensure that we can always have a transaction in flight.
403 * We use this buffer first so that we have one less piece of
404 * malloc'ed data at any given point.
405 */
406
407 static void *
408 cgd_getdata(struct dk_softc *dksc, unsigned long size)
409 {
410 struct cgd_softc *cs = (struct cgd_softc *)dksc;
411 void * data = NULL;
412
413 mutex_enter(&cs->sc_lock);
414 if (cs->sc_data_used == 0) {
415 cs->sc_data_used = 1;
416 data = cs->sc_data;
417 }
418 mutex_exit(&cs->sc_lock);
419
420 if (data)
421 return data;
422
423 return malloc(size, M_DEVBUF, M_NOWAIT);
424 }
425
426 static void
427 cgd_putdata(struct dk_softc *dksc, void *data)
428 {
429 struct cgd_softc *cs = (struct cgd_softc *)dksc;
430
431 if (data == cs->sc_data) {
432 mutex_enter(&cs->sc_lock);
433 cs->sc_data_used = 0;
434 mutex_exit(&cs->sc_lock);
435 } else {
436 free(data, M_DEVBUF);
437 }
438 }
439
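/*
* cgd_diskstart: start one request from the dk(4) queue.  Reads are
* passed straight through and decrypted on completion; writes are
* first encrypted into the per-device (or a temporary) buffer and
* the resulting nested buffer is sent to the underlying vnode.
*/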
440 static int
441 cgd_diskstart(device_t dev, struct buf *bp)
442 {
443 struct cgd_softc *cs = device_private(dev);
444 struct dk_softc *dksc = &cs->sc_dksc;
445 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
446 struct buf *nbp;
447 void * addr;
448 void * newaddr;
449 daddr_t bn;
450 struct vnode *vp;
451
452 DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));
453
454 bn = bp->b_rawblkno;
455
456 /*
457 * We attempt to allocate all of our resources up front, so that
458 * we can fail quickly if they are unavailable.
459 */
460 nbp = getiobuf(cs->sc_tvn, false);
461 if (nbp == NULL)
462 return EAGAIN;
463
464 /*
465 * If we are writing, then we need to encrypt the outgoing
466 * block into a new block of memory.
467 */
468 newaddr = addr = bp->b_data;
469 if ((bp->b_flags & B_READ) == 0) {
470 newaddr = cgd_getdata(dksc, bp->b_bcount);
471 if (!newaddr) {
472 putiobuf(nbp);
473 return EAGAIN;
474 }
475 cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
476 dg->dg_secsize, CGD_CIPHER_ENCRYPT);
477 }
478
479 nbp->b_data = newaddr;
480 nbp->b_flags = bp->b_flags;
481 nbp->b_oflags = bp->b_oflags;
482 nbp->b_cflags = bp->b_cflags;
483 nbp->b_iodone = cgdiodone;
484 nbp->b_proc = bp->b_proc;
485 nbp->b_blkno = btodb(bn * dg->dg_secsize);
486 nbp->b_bcount = bp->b_bcount;
487 nbp->b_private = bp;
488
489 BIO_COPYPRIO(nbp, bp);
490
491 if ((nbp->b_flags & B_READ) == 0) {
492 vp = nbp->b_vp;
493 mutex_enter(vp->v_interlock);
494 vp->v_numoutput++;
495 mutex_exit(vp->v_interlock);
496 }
497 VOP_STRATEGY(cs->sc_tvn, nbp);
498
499 return 0;
500 }
501
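/*
* cgdiodone: completion handler for the nested buffer.  Decrypts the
* data in place for reads, frees any ciphertext buffer used for
* writes, finishes the original request and kicks the queue again.
*/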
502 static void
503 cgdiodone(struct buf *nbp)
504 {
505 dev_t dev;
506 device_t self;
507 struct buf *obp = nbp->b_private;
508 struct cgd_softc *cs = getcgd_softc(obp->b_dev, &self);
509 struct dk_softc *dksc = &cs->sc_dksc;
510 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
511 daddr_t bn;
512
513 KDASSERT(cs);
514
515 DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
516 DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
517 obp, obp->b_bcount, obp->b_resid));
518 DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
519 " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
520 nbp->b_bcount));
521 if (nbp->b_error != 0) {
522 obp->b_error = nbp->b_error;
523 DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
524 obp->b_error));
525 }
526
527 /* Perform the decryption if we are reading.
528 *
529 * Note: use the blocknumber from nbp, since it is what
530 * we used to encrypt the blocks.
531 */
532
533 if (nbp->b_flags & B_READ) {
534 bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
535 cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
536 bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
537 }
538
539 /* If we allocated memory, free it now... */
540 if (nbp->b_data != obp->b_data)
541 cgd_putdata(dksc, nbp->b_data);
542
543 putiobuf(nbp);
544
545 /* Request is complete for whatever reason */
546 obp->b_resid = 0;
547 if (obp->b_error != 0)
548 obp->b_resid = obp->b_bcount;
549
550 /*
551 * copy the dev_t, finish the disk operation, and release the
552 * reference we're holding on to (from getcgd_softc() earlier)
553 */
554 dev = obp->b_dev;
555 dk_done(dksc, obp);
556 cgd_release(dev);
557 device_release(self);
558
559 dk_start(dksc, NULL);
560 }
561
562 static int
563 cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
564 {
565 struct cgd_softc *sc = device_private(dev);
566 struct dk_softc *dksc = &sc->sc_dksc;
567 struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
568 size_t nbytes, blksize;
569 void *buf;
570 int error;
571
572 /*
573 * dk_dump gives us units of disklabel sectors. Everything
574 * else in cgd uses units of diskgeom sectors. These had
575 * better agree; otherwise we need to figure out how to convert
576 * between them.
577 */
578 KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
579 "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
580 dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
581 blksize = dg->dg_secsize;
582
583 /*
584 * Compute the number of bytes in this request, which dk_dump
585 * has `helpfully' converted to a number of blocks for us.
586 */
587 nbytes = nblk*blksize;
588
589 /* Try to acquire a buffer to store the ciphertext. */
590 buf = cgd_getdata(dksc, nbytes);
591 if (buf == NULL)
592 /* Out of memory: give up. */
593 return ENOMEM;
594
595 /* Encrypt the caller's data into the temporary buffer. */
596 cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);
597
598 /* Pass it on to the underlying disk device. */
599 error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);
600
601 /* Release the buffer. */
602 cgd_putdata(dksc, buf);
603
604 /* Return any error from the underlying disk device. */
605 return error;
606 }
607
608 /* XXX: we should probably put these into dksubr.c, mostly */
609 static int
610 cgdread(dev_t dev, struct uio *uio, int flags)
611 {
612 device_t self;
613 int error;
614 struct cgd_softc *cs;
615 struct dk_softc *dksc;
616
617 DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
618 (unsigned long long)dev, uio, flags));
619 GETCGD_SOFTC(cs, dev, self);
620 dksc = &cs->sc_dksc;
621 if (!DK_ATTACHED(dksc)) {
device_release(self);
622 return ENXIO;
}
623 error = physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
624 device_release(self);
625 return error;
626 }
627
628 /* XXX: we should probably put these into dksubr.c, mostly */
629 static int
630 cgdwrite(dev_t dev, struct uio *uio, int flags)
631 {
632 device_t self;
633 int error;
634 struct cgd_softc *cs;
635 struct dk_softc *dksc;
636
637 DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
638 GETCGD_SOFTC(cs, dev, self);
639 dksc = &cs->sc_dksc;
640 if (!DK_ATTACHED(dksc)) {
641 device_release(self);
642 return ENXIO;
643 }
644 error = physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
645 device_release(self);
646 return error;
647 }
648
649 static int
650 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
651 {
652 device_t self;
653 struct cgd_softc *cs;
654 struct dk_softc *dksc;
655 int part = DISKPART(dev);
656 int pmask = 1 << part;
657 int error = 0;
658
659 DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
660 dev, cmd, data, flag, l));
661
662 switch (cmd) {
663 case CGDIOCGET:
664 return cgd_ioctl_get(dev, data, l);
665 case CGDIOCSET:
666 case CGDIOCCLR:
667 if ((flag & FWRITE) == 0)
668 return EBADF;
669 /* FALLTHROUGH */
670 default:
671 GETCGD_SOFTC(cs, dev, self);
672 dksc = &cs->sc_dksc;
673 break;
674 }
675
676 switch (cmd) {
677 case CGDIOCSET:
678 if (DK_ATTACHED(dksc))
679 error = EBUSY;
680 else
681 error = cgd_ioctl_set(cs, data, l);
682 break;
683 case CGDIOCCLR:
684 if (DK_BUSY(&cs->sc_dksc, pmask))
685 error = EBUSY;
686 else
687 error = cgd_ioctl_clr(cs, l);
688 break;
689 case DIOCCACHESYNC:
690 /*
691 * XXX Do we really need to care about having a writable
692 * file descriptor here?
693 */
694 if ((flag & FWRITE) == 0)
695 error = (EBADF);
696
697 /*
698 * We pass this call down to the underlying disk.
699 */
700 else
701 error = VOP_IOCTL(cs->sc_tvn, cmd, data, flag,
702 l->l_cred);
703 break;
704 case DIOCGSTRATEGY:
705 case DIOCSSTRATEGY:
706 if (!DK_ATTACHED(dksc))
707 error = ENOENT;
708 /*FALLTHROUGH*/
709 default:
710 if (error == 0)
711 error = dk_ioctl(dksc, dev, cmd, data, flag, l);
712 break;
713 case CGDIOCGET:
714 KASSERT(0);
715 error = EINVAL;
716 }
717 device_release(self);
718 return error;
719 }
720
721 static int
722 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
723 {
724 device_t self;
725 int error;
726 struct cgd_softc *cs;
727
728 DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
729 dev, blkno, va, (unsigned long)size));
730 GETCGD_SOFTC(cs, dev, self);
731 error = dk_dump(&cs->sc_dksc, dev, blkno, va, size);
732 device_release(self);
733 return error;
734 }
735
736 /*
737 * XXXrcd:
738 * for now we hardcode the maximum key length.
739 */
740 #define MAX_KEYSIZE 1024
741
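/*
* Supported IV methods.  `v' is the value stored in cf_mode and `d'
* is the divisor later applied to the block size: the legacy
* encblkno/encblkno8 methods keep the historical bits-based value,
* while encblkno1 converts it to bytes (see the comment near the
* division in cgd_ioctl_set()).
*/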
742 static const struct {
743 const char *n;
744 int v;
745 int d;
746 } encblkno[] = {
747 { "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
748 { "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
749 { "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
750 };
751
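/*
* cgd_ioctl_set: configure a cgd (CGDIOCSET).  Open the underlying
* device, select the cipher and IV method, copy in the key and
* attach the dk(4)/disk(9) layers.
*/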
752 /* ARGSUSED */
753 static int
754 cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
755 {
756 struct cgd_ioctl *ci = data;
757 struct vnode *vp;
758 int ret;
759 size_t i;
760 size_t keybytes; /* key length in bytes */
761 const char *cp;
762 struct pathbuf *pb;
763 char *inbuf;
764 struct dk_softc *dksc = &cs->sc_dksc;
765
766 cp = ci->ci_disk;
767
768 ret = pathbuf_copyin(ci->ci_disk, &pb);
769 if (ret != 0) {
770 return ret;
771 }
772 ret = dk_lookup(pb, l, &vp);
773 pathbuf_destroy(pb);
774 if (ret != 0) {
775 return ret;
776 }
777
778 inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
779
780 if ((ret = cgdinit(cs, cp, vp, l)) != 0)
781 goto bail;
782
783 (void)memset(inbuf, 0, MAX_KEYSIZE);
784 ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
785 if (ret)
786 goto bail;
787 cs->sc_cfuncs = cryptfuncs_find(inbuf);
788 if (!cs->sc_cfuncs) {
789 ret = EINVAL;
790 goto bail;
791 }
792
793 (void)memset(inbuf, 0, MAX_KEYSIZE);
794 ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
795 if (ret)
796 goto bail;
797
798 for (i = 0; i < __arraycount(encblkno); i++)
799 if (strcmp(encblkno[i].n, inbuf) == 0)
800 break;
801
802 if (i == __arraycount(encblkno)) {
803 ret = EINVAL;
804 goto bail;
805 }
806
807 keybytes = ci->ci_keylen / 8 + 1;
808 if (keybytes > MAX_KEYSIZE) {
809 ret = EINVAL;
810 goto bail;
811 }
812
813 (void)memset(inbuf, 0, MAX_KEYSIZE);
814 ret = copyin(ci->ci_key, inbuf, keybytes);
815 if (ret)
816 goto bail;
817
818 cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
819 cs->sc_cdata.cf_mode = encblkno[i].v;
820 cs->sc_cdata.cf_keylen = ci->ci_keylen;
821 cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
822 &cs->sc_cdata.cf_blocksize);
823 if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
824 log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
825 cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
826 cs->sc_cdata.cf_priv = NULL;
827 }
828
829 /*
830 * The blocksize is supposed to be in bytes. Unfortunately originally
831 * it was expressed in bits. For compatibility we maintain encblkno
832 * and encblkno8.
833 */
834 cs->sc_cdata.cf_blocksize /= encblkno[i].d;
835 (void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
836 if (!cs->sc_cdata.cf_priv) {
837 ret = EINVAL; /* XXX is this the right error? */
838 goto bail;
839 }
840 free(inbuf, M_TEMP);
841
842 bufq_alloc(&dksc->sc_bufq, "fcfs", 0);
843
844 cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
845 cs->sc_data_used = 0;
846
847 /* Attach the disk. */
848 dk_attach(dksc);
849 disk_attach(&dksc->sc_dkdev);
850
851 disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
852
853 /* Discover wedges on this disk. */
854 dkwedge_discover(&dksc->sc_dkdev);
855
856 return 0;
857
858 bail:
859 free(inbuf, M_TEMP);
860 (void)vn_close(vp, FREAD|FWRITE, l->l_cred);
861 return ret;
862 }
863
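/*
* cgd_ioctl_clr: unconfigure a cgd (CGDIOCCLR).  Delete any wedges,
* drain and free the buffer queue, close the underlying vnode,
* destroy the cipher state and detach the disk.
*/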
864 /* ARGSUSED */
865 static int
866 cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
867 {
868 struct dk_softc *dksc = &cs->sc_dksc;
869
870 if (!DK_ATTACHED(dksc))
871 return ENXIO;
872
873 /* Delete all of our wedges. */
874 dkwedge_delall(&dksc->sc_dkdev);
875
876 /* Kill off any queued buffers. */
877 dk_drain(dksc);
878 bufq_free(dksc->sc_bufq);
879
880 (void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
881 cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
882 free(cs->sc_tpath, M_DEVBUF);
883 free(cs->sc_data, M_DEVBUF);
884 cs->sc_data_used = 0;
885 dk_detach(dksc);
886 disk_detach(&dksc->sc_dkdev);
887
888 return 0;
889 }
890
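/*
* cgd_ioctl_get: report the configuration of a unit to userland
* (CGDIOCGET).  Returns a zeroed-out description if the unit is
* not configured.
*/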
891 static int
892 cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
893 {
894 device_t self;
895 struct cgd_softc *cs = getcgd_softc(dev, &self);
896 struct cgd_user *cgu;
897 int unit;
898 struct dk_softc *dksc;

if (cs == NULL)
return ENXIO;
dksc = &cs->sc_dksc;
899
900 unit = CGDUNIT(dev);
901 cgu = (struct cgd_user *)data;
902
903 DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
904 dev, unit, data, l));
905
906 if (cgu->cgu_unit == -1)
907 cgu->cgu_unit = unit;
908
909 if (cgu->cgu_unit < 0) {
910 cgd_release(dev);
911 device_release(self);
912 return EINVAL; /* XXX: should this be ENXIO? */
913 }
914
915 cs = device_lookup_private(&cgd_cd, unit);
916 if (cs == NULL || !DK_ATTACHED(dksc)) {
917 cgu->cgu_dev = 0;
918 cgu->cgu_alg[0] = '\0';
919 cgu->cgu_blocksize = 0;
920 cgu->cgu_mode = 0;
921 cgu->cgu_keylen = 0;
922 }
923 else {
924 cgu->cgu_dev = cs->sc_tdev;
925 strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
926 sizeof(cgu->cgu_alg));
927 cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
928 cgu->cgu_mode = cs->sc_cdata.cf_mode;
929 cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
930 }
931 cgd_release(dev);
932 device_release(self);
933 return 0;
934 }
935
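/*
* cgdinit: record the path and vnode of the underlying device and
* construct an initial geometry from its size and sector size.
*/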
936 static int
937 cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
938 struct lwp *l)
939 {
940 struct disk_geom *dg;
941 int ret;
942 char *tmppath;
943 uint64_t psize;
944 unsigned secsize;
945 struct dk_softc *dksc = &cs->sc_dksc;
946
947 cs->sc_tvn = vp;
948 cs->sc_tpath = NULL;
949
950 tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
951 ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
952 if (ret)
953 goto bail;
954 cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
955 memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);
956
957 cs->sc_tdev = vp->v_rdev;
958
959 if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
960 goto bail;
961
962 if (psize == 0) {
963 ret = ENODEV;
964 goto bail;
965 }
966
967 /*
968 * XXX here we should probe the underlying device. If we
969 * are accessing a partition of type RAW_PART, then
970 * we should populate our initial geometry with the
971 * geometry that we discover from the device.
972 */
973 dg = &dksc->sc_dkdev.dk_geom;
974 memset(dg, 0, sizeof(*dg));
975 dg->dg_secperunit = psize;
976 dg->dg_secsize = secsize;
977 dg->dg_ntracks = 1;
978 dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
979 dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;
980
981 bail:
982 free(tmppath, M_TEMP);
983 if (ret && cs->sc_tpath)
984 free(cs->sc_tpath, M_DEVBUF);
985 return ret;
986 }
987
988 /*
989 * Our generic cipher entry point. This takes care of the
990 * IV mode and passes off the work to the specific cipher.
991 * We implement here the IV method ``encrypted block
992 * number''.
993 *
994 * For the encryption case, we accomplish this by setting
995 * up a struct uio where the first iovec of the source is
996 * the blocknumber and the first iovec of the dest is a
997 * sink. We then call the cipher with an IV of zero, and
998 * the right thing happens.
999 *
1000 * For the decryption case, we use the same basic mechanism
1001 * for symmetry, but we encrypt the block number in the
1002 * first iovec.
1003 *
1004 * We mainly do this to avoid requiring the definition of
1005 * an ECB mode.
1006 *
1007 * XXXrcd: for now we rely on our own crypto framework defined
1008 * in dev/cgd_crypto.c. This will change when we
1009 * get a generic kernel crypto framework.
1010 */
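
/*
* Illustratively, for the encblkno1 method and one sector: with a
* zero IV and the block number prepended as one extra cipher block,
* a CBC cipher computes
*
*	C0 = E_k(blkno_buf ^ 0)		(discarded into `sink')
*	C1 = E_k(P1 ^ C0)
*	C2 = E_k(P2 ^ C1)	... and so on through the sector,
*
* so the encrypted block number acts as the effective IV for the
* sector's chain.  With the legacy encblkno/encblkno8 variants the
* block-number buffer spans eight cipher blocks, so the block number
* is pushed through eight CBC steps before the data.
*/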
1011
1012 static void
1013 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1014 {
1015 int i;
1016
1017 /* Set up the blkno in blkno_buf, here we do not care much
1018 * about the final layout of the information as long as we
1019 * can guarantee that each sector will have a different IV
1020 * and that the endianness of the machine will not affect
1021 * the representation that we have chosen.
1022 *
1023 * We choose this representation, because it does not rely
1024 * on the size of buf (which is the blocksize of the cipher),
1025 * but allows daddr_t to grow without breaking existing
1026 * disks.
1027 *
1028 * Note that blkno2blkno_buf does not take a size as input,
1029 * and hence must be called on a pre-zeroed buffer of length
1030 * greater than or equal to sizeof(daddr_t).
1031 */
1032 for (i=0; i < sizeof(daddr_t); i++) {
1033 *sbuf++ = blkno & 0xff;
1034 blkno >>= 8;
1035 }
1036 }
1037
1038 static void
1039 cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
1040 size_t len, daddr_t blkno, size_t secsize, int dir)
1041 {
1042 char *dst = dstv;
1043 char *src = srcv;
1044 cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
1045 struct uio dstuio;
1046 struct uio srcuio;
1047 struct iovec dstiov[2];
1048 struct iovec srciov[2];
1049 size_t blocksize = cs->sc_cdata.cf_blocksize;
1050 size_t todo;
1051 char sink[CGD_MAXBLOCKSIZE];
1052 char zero_iv[CGD_MAXBLOCKSIZE];
1053 char blkno_buf[CGD_MAXBLOCKSIZE];
1054
1055 DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));
1056
1057 DIAGCONDPANIC(len % blocksize != 0,
1058 ("cgd_cipher: len %% blocksize != 0"));
1059
1060 /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
1061 DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
1062 ("cgd_cipher: sizeof(daddr_t) > blocksize"));
1063
1064 memset(zero_iv, 0x0, blocksize);
1065
1066 dstuio.uio_iov = dstiov;
1067 dstuio.uio_iovcnt = 2;
1068
1069 srcuio.uio_iov = srciov;
1070 srcuio.uio_iovcnt = 2;
1071
1072 dstiov[0].iov_base = sink;
1073 dstiov[0].iov_len = blocksize;
1074 srciov[0].iov_base = blkno_buf;
1075 srciov[0].iov_len = blocksize;
1076
1077 for (; len > 0; len -= todo) {
1078 todo = MIN(len, secsize);
1079
1080 dstiov[1].iov_base = dst;
1081 srciov[1].iov_base = src;
1082 dstiov[1].iov_len = todo;
1083 srciov[1].iov_len = todo;
1084
1085 memset(blkno_buf, 0x0, blocksize);
1086 blkno2blkno_buf(blkno_buf, blkno);
1087 if (dir == CGD_CIPHER_DECRYPT) {
1088 dstuio.uio_iovcnt = 1;
1089 srcuio.uio_iovcnt = 1;
1090 IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
1091 blkno_buf, blocksize));
1092 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
1093 zero_iv, CGD_CIPHER_ENCRYPT);
1094 memcpy(blkno_buf, sink, blocksize);
1095 dstuio.uio_iovcnt = 2;
1096 srcuio.uio_iovcnt = 2;
1097 }
1098
1099 IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
1100 blkno_buf, blocksize));
1101 cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
1102 IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
1103 sink, blocksize));
1104
1105 dst += todo;
1106 src += todo;
1107 blkno++;
1108 }
1109 }
1110
1111 #ifdef DEBUG
1112 static void
1113 hexprint(const char *start, void *buf, int len)
1114 {
1115 char *c = buf;
1116
1117 DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
1118 printf("%s: len=%06d 0x", start, len);
1119 while (len--)
1120 printf("%02x", (unsigned char) *c++);
1121 }
1122 #endif
1123
1124 MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");
1125
1126 #ifdef _MODULE
1127 #include "ioconf.c"
1128 #endif
1129
1130 static int
1131 cgd_modcmd(modcmd_t cmd, void *arg)
1132 {
1133 int error = 0;
1134
1135 #ifdef _MODULE
1136 devmajor_t bmajor = -1, cmajor = -1;
1137 #endif
1138
1139 switch (cmd) {
1140 case MODULE_CMD_INIT:
1141 #ifdef _MODULE
1142 /*
1143 * Insert the driver into the autoconf database
1144 */
1145 error = config_init_component(cfdriver_ioconf_cgd,
1146 cfattach_ioconf_cgd, cfdata_ioconf_cgd);
1147 if (error) {
1148 aprint_error("%s: unable to init component"
1149 ", error %d", cgd_cd.cd_name, error);
1150 break;
1151 }
1152
1153 /*
1154 * Attach the {b,c}devsw's
1155 */
1156 error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
1157 &cgd_cdevsw, &cmajor);
1158
1159 /*
1160 * If devsw_attach fails, remove from autoconf database
1161 */
1162 if (error) {
1163 config_fini_component(cfdriver_ioconf_cgd,
1164 cfattach_ioconf_cgd, cfdata_ioconf_cgd);
1165 aprint_error("%s: unable to attach devsw"
1166 ", error %d", cgd_cd.cd_name, error);
1167 }
1168 #endif
1169 break;
1170
1171 case MODULE_CMD_FINI:
1172 #ifdef _MODULE
1173 /*
1174 * Remove {b,c}devsw's
1175 */
1176 devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1177
1178 /*
1179 * Now remove device from autoconf database
1180 */
1181 error = config_fini_component(cfdriver_ioconf_cgd,
1182 cfattach_ioconf_cgd, cfdata_ioconf_cgd);
1183
1184 /*
1185 * If removal fails, re-attach our {b,c}devsw's
1186 */
1187 if (error) {
1188 aprint_error("%s: failed to remove from autoconf"
1189 ", error %d", cgd_cd.cd_name, error);
1190 devsw_attach("cgd", &cgd_bdevsw, &bmajor,
1191 &cgd_cdevsw, &cmajor);
1192 }
1193 #endif
1194 break;
1195
1196 case MODULE_CMD_STAT:
1197 error = ENOTTY;
1198 break;
1199 default:
1200 error = ENOTTY;
1201 break;
1202 }
1203
1204 return error;
1205 }
1206