/* $NetBSD: cgd.c,v 1.94 2014/12/31 19:52:05 christos Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.94 2014/12/31 19:52:05 christos Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h> /* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h> /* for v_rdev */

/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

static int	cgd_match(device_t, cfdata_t, void *);
static void	cgd_attach(device_t, device_t, void *);
static int	cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static int	cgd_destroy(device_t);

/* Internal Functions */

static void	cgdstart(struct dk_softc *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
		    struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
		    size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

/* The code */

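/*
 * Return the softc for the cgd unit encoded in `dev', attaching a
 * new pseudo-device instance via cgd_spawn() on first use.
 */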
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}

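/*
 * Autoconfiguration glue.  cgd is a pseudo-device, so cgd_match()
 * unconditionally succeeds and cgd_attach() only sets up the
 * per-instance state.
 */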
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	dk_sc_init(&sc->sc_dksc, device_xname(self));
	sc->sc_dksc.sc_dev = self;
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "unable to register power management hooks\n");
}


static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if ((dksc->sc_flags & DKF_INITED) != 0 &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}

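/*
 * Create the autoconfiguration data for a new cgd unit and attach it
 * as a pseudo-device.  The cfdata is freed again in cgd_destroy().
 */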
static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	return device_private(config_attach_pseudo(cf));
}

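/*
 * Detach a pseudo-device instance created by cgd_spawn() and release
 * its cfdata.
 */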
static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;
	free(cf, M_DEVBUF);
	return 0;
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
		return error;

	if ((dksc->sc_flags & DKF_INITED) == 0) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned requests.  We can encrypt and decrypt only
	 * complete disk sectors, and we let the ciphers require their
	 * buffers to be aligned to 32-bit boundaries.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % DEV_BSIZE) != 0 ||
	    ((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	void *data = NULL;

	mutex_enter(&cs->sc_lock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	mutex_exit(&cs->sc_lock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;

	if (data == cs->sc_data) {
		mutex_enter(&cs->sc_lock);
		cs->sc_data_used = 0;
		mutex_exit(&cs->sc_lock);
	} else {
		free(data, M_DEVBUF);
	}
}

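/*
 * Start routine called from the dk layer.  Pull requests off the
 * bufq, allocate a nested buf for the underlying device, encrypt the
 * data for writes into the per-device (or a temporary) buffer, and
 * hand the nested buf to the backing vnode.  Stop early if resources
 * are unavailable; the remaining requests stay queued.
 */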
static void
cgdstart(struct dk_softc *dksc)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	struct buf *bp, *nbp;
#ifdef DIAGNOSTIC
	struct buf *qbp;
#endif
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

	while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {

		DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
		disk_busy(&dksc->sc_dkdev);

		bn = bp->b_rawblkno;

		/*
		 * We attempt to allocate all of our resources up front, so that
		 * we can fail quickly if they are unavailable.
		 */
		nbp = getiobuf(cs->sc_tvn, false);
		if (nbp == NULL) {
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		/*
		 * If we are writing, then we need to encrypt the outgoing
		 * block into a new block of memory.
		 */
		newaddr = addr = bp->b_data;
		if ((bp->b_flags & B_READ) == 0) {
			newaddr = cgd_getdata(dksc, bp->b_bcount);
			if (!newaddr) {
				putiobuf(nbp);
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				break;
			}
			cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
			    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
		}
		/* we now have all needed resources to process this buf */
#ifdef DIAGNOSTIC
		qbp = bufq_get(dksc->sc_bufq);
		KASSERT(bp == qbp);
#else
		(void)bufq_get(dksc->sc_bufq);
#endif
		nbp->b_data = newaddr;
		nbp->b_flags = bp->b_flags;
		nbp->b_oflags = bp->b_oflags;
		nbp->b_cflags = bp->b_cflags;
		nbp->b_iodone = cgdiodone;
		nbp->b_proc = bp->b_proc;
		nbp->b_blkno = bn;
		nbp->b_bcount = bp->b_bcount;
		nbp->b_private = bp;

		BIO_COPYPRIO(nbp, bp);

		if ((nbp->b_flags & B_READ) == 0) {
			vp = nbp->b_vp;
			mutex_enter(vp->v_interlock);
			vp->v_numoutput++;
			mutex_exit(vp->v_interlock);
		}
		VOP_STRATEGY(cs->sc_tvn, nbp);
	}
}

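/*
 * Completion handler for the nested buf.  For reads, decrypt the data
 * in place in the original buf; release the scratch buffer if one was
 * allocated, complete the original request and call cgdstart() to
 * process anything still queued.
 */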
static void
cgdiodone(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;
	struct cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	int s;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	s = splbio();
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	cgdstart(dksc);
	splx(s);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	error = disk_ioctl(&dksc->sc_dkdev, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	default:
		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
	case CGDIOCGET:
		KASSERT(0);
		return EINVAL;
	}
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

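/*
 * CGDIOCSET: configure the device.  Open the underlying disk, copy in
 * the algorithm name, IV method and key from userland, initialize the
 * cipher, and attach the disk so it can be used.  On failure the
 * underlying vnode is closed again.
 */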
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;		/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	dksc->sc_flags |= DKF_INITED;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Attach the disk. */
	disk_attach(&dksc->sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, dksc, 0 /* XXX ? (cause of PR 41704) */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

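/*
 * CGDIOCCLR: unconfigure the device.  Drain and free the buffer
 * queue, close the underlying vnode, destroy the cipher state and
 * detach the disk.
 */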
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	int s;
	struct dk_softc *dksc = &cs->sc_dksc;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(dksc->sc_bufq);
	splx(s);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	dksc->sc_flags &= ~DKF_INITED;
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

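/*
 * CGDIOCGET: report the current configuration of a unit back to
 * userland (backing device, cipher name, blocksize, mode and key
 * length), or zeroed fields if the unit is not configured.
 */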
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *cs = getcgd_softc(dev);
	struct cgd_user *cgu;
	int unit;
	struct dk_softc *dksc = &cs->sc_dksc;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || (dksc->sc_flags & DKF_INITED) == 0) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	return 0;
}

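/*
 * Record the backing vnode and path, verify that the underlying
 * device has a usable size, and fill in an initial fake geometry for
 * the dk layer.
 */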
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	cs->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	// XXX: Inherit?
	dg->dg_secsize = DEV_BSIZE;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = cs->sc_cdata.cf_blocksize;
	char sink[CGD_MAXBLOCKSIZE];
	char zero_iv[CGD_MAXBLOCKSIZE];
	char blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;
	dstiov[1].iov_len  = secsize;
	srciov[1].iov_len  = secsize;

	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);
#endif

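/*
 * Module control: register the cfdriver, cfattach and devsw entries
 * when loaded as a module, and unwind them again on unload.
 */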
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

#ifdef _MODULE
	devmajor_t bmajor = -1, cmajor = -1;
#endif

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach\n",
			    cgd_cd.cd_name);
			break;
		}

		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
		    &cgd_cdevsw, &cmajor);
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error)
			break;
		config_cfdriver_detach(&cgd_cd);
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}

	return error;
}