/*	$NetBSD: cgd.c,v 1.88 2014/06/14 07:39:00 hannken Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.88 2014/06/14 07:39:00 hannken Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/namei.h> /* for pathbuf */
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

#include <miscfs/specfs/specdev.h> /* for v_rdev */
/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_flag = D_DISK
};

const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_DISK
};

static int cgd_match(device_t, cfdata_t, void *);
static void cgd_attach(device_t, device_t, void *);
static int cgd_detach(device_t, int);
static struct cgd_softc	*cgd_spawn(int);
static int cgd_destroy(device_t);

/* Internal Functions */

static void	cgdstart(struct dk_softc *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

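/*
 * Glue handed to the dk_* helper routines (dk_open, dk_close, dk_strategy,
 * dk_size, dk_ioctl, dk_dump; see dev/dkvar.h): they manage the common
 * pseudo-disk state in dk_softc and call back into cgdstart to issue I/O.
 */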
static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};

CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
extern struct cfdriver cgd_cd;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO	0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

/* The code */

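/*
 * Look up the softc for a unit; if the instance does not exist yet it is
 * created on first use via cgd_spawn(), so simply opening /dev/cgdN is
 * enough to bring the pseudo-device into existence.
 */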
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}

static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}

static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	dk_sc_init(&sc->sc_dksc, device_xname(self));
	sc->sc_dksc.sc_dev = self;
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "unable to register power management hooks\n");
}


static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if ((dksc->sc_flags & DKF_INITED) != 0 &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

void
cgdattach(int num)
{
	int error;

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
}

static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	return device_private(config_attach_pseudo(cf));
}

static int
cgd_destroy(device_t dev)
{
	int error;
	cfdata_t cf;

	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;
	free(cf, M_DEVBUF);
	return 0;
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int error;
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(di, dksc, dev, flags, fmt, l)) != 0)
		return error;

	if ((dksc->sc_flags & DKF_INITED) == 0) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.  We can encrypt and decrypt only
	 * complete disk sectors, and we let the ciphers require their
	 * buffers to be aligned to 32-bit boundaries.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % DEV_BSIZE) != 0 ||
	    ((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	void *data = NULL;

	mutex_enter(&cs->sc_lock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	mutex_exit(&cs->sc_lock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;

	if (data == cs->sc_data) {
		mutex_enter(&cs->sc_lock);
		cs->sc_data_used = 0;
		mutex_exit(&cs->sc_lock);
	} else {
		free(data, M_DEVBUF);
	}
}

static void
cgdstart(struct dk_softc *dksc)
{
	struct cgd_softc *cs = (struct cgd_softc *)dksc;
	struct buf *bp, *nbp;
#ifdef DIAGNOSTIC
	struct buf *qbp;
#endif
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

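	/*
	 * Peek at the head of the queue first and only dequeue the buf once
	 * every resource it needs (the shadow iobuf and, for writes, the
	 * ciphertext buffer) has been obtained.  If an allocation fails we
	 * break out with the buf still queued; cgdiodone() calls cgdstart()
	 * again when an outstanding transaction completes, so it is retried.
	 */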
	while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {

		DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
		disk_busy(&dksc->sc_dkdev);

		bn = bp->b_rawblkno;

		/*
		 * We attempt to allocate all of our resources up front, so that
		 * we can fail quickly if they are unavailable.
		 */
		nbp = getiobuf(cs->sc_tvn, false);
		if (nbp == NULL) {
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		/*
		 * If we are writing, then we need to encrypt the outgoing
		 * block into a new block of memory.
		 */
		newaddr = addr = bp->b_data;
		if ((bp->b_flags & B_READ) == 0) {
			newaddr = cgd_getdata(dksc, bp->b_bcount);
			if (!newaddr) {
				putiobuf(nbp);
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				break;
			}
			cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
			    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
		}
		/* we now have all needed resources to process this buf */
#ifdef DIAGNOSTIC
		qbp = bufq_get(dksc->sc_bufq);
		KASSERT(bp == qbp);
#else
		(void)bufq_get(dksc->sc_bufq);
#endif
		nbp->b_data = newaddr;
		nbp->b_flags = bp->b_flags;
		nbp->b_oflags = bp->b_oflags;
		nbp->b_cflags = bp->b_cflags;
		nbp->b_iodone = cgdiodone;
		nbp->b_proc = bp->b_proc;
		nbp->b_blkno = bn;
		nbp->b_bcount = bp->b_bcount;
		nbp->b_private = bp;

		BIO_COPYPRIO(nbp, bp);

		if ((nbp->b_flags & B_READ) == 0) {
			vp = nbp->b_vp;
			mutex_enter(vp->v_interlock);
			vp->v_numoutput++;
			mutex_exit(vp->v_interlock);
		}
		VOP_STRATEGY(cs->sc_tvn, nbp);
	}
}

static void
cgdiodone(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;
	struct cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	int s;

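	/*
	 * nbp is the shadow buffer that was issued to the backing vnode by
	 * cgdstart(); obp, recovered from nbp->b_private, is the original
	 * logical request that must now be completed.
	 */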
	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	s = splbio();
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	cgdstart(dksc);
	splx(s);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;
	int part = DISKPART(dev);
	int pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET: /* don't call cgd_spawn() if the device isn't there */
		cs = NULL;
		dksc = NULL;
		break;
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	default:
		return dk_ioctl(di, dksc, dev, cmd, data, flag, l);
	}
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

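/*
 * Table of the IV generation methods accepted from userland: n is the name
 * matched against ci_ivmethod, v is the cipher mode stored in cf_mode, and
 * d is the divisor applied to the blocksize for the bits-versus-bytes
 * compatibility handling described in cgd_ioctl_set() below.
 */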
static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};

/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

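	/*
	 * inbuf stages the algorithm name, the IV method and the raw key
	 * copied in from userland; it is zeroed between uses and again
	 * before it is released, since it carries key material.
	 */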
	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	dksc->sc_flags |= DKF_INITED;

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Attach the disk. */
	disk_attach(&dksc->sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, dksc, 0 /* XXX ? (cause of PR 41704) */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, struct lwp *l)
{
	int s;
	struct dk_softc *dksc = &cs->sc_dksc;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(dksc->sc_bufq);
	splx(s);
	bufq_free(dksc->sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	dksc->sc_flags &= ~DKF_INITED;
	disk_detach(&dksc->sc_dkdev);

	return 0;
}

static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *cs = getcgd_softc(dev);
	struct cgd_user *cgu;
	int unit;
	struct dk_softc *dksc = &cs->sc_dksc;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || (dksc->sc_flags & DKF_INITED) == 0) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	return 0;
}

static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct disk_geom *dg;
	int ret;
	char *tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &cs->sc_dksc;

	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	cs->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	// XXX: Inherit?
	dg->dg_secsize = DEV_BSIZE;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * (1024 / dg->dg_secsize);
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = cs->sc_cdata.cf_blocksize;
	char sink[CGD_MAXBLOCKSIZE];
	char zero_iv[CGD_MAXBLOCKSIZE];
	char blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;
	dstiov[1].iov_len  = secsize;
	srciov[1].iov_len  = secsize;

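	/*
	 * Process the transfer one secsize chunk at a time.  For each chunk
	 * the first iovec feeds the zero-padded block number (blkno_buf)
	 * into the cipher and discards its ciphertext into sink; chaining
	 * from that block makes the encrypted block number act as the IV
	 * for the data in the second iovec.  For decryption the block
	 * number is encrypted separately first, so that the same IV is
	 * reconstructed before the data is deciphered.
	 */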
	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif

MODULE(MODULE_CLASS_DRIVER, cgd, "dk_subr");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);
#endif

static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

#ifdef _MODULE
	int bmajor = -1, cmajor = -1;
#endif

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_cfdriver_attach(&cgd_cd);
		if (error)
			break;

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			config_cfdriver_detach(&cgd_cd);
			aprint_error("%s: unable to register cfattach\n",
			    cgd_cd.cd_name);
			break;
		}

		error = devsw_attach("cgd", &cgd_bdevsw, &bmajor,
		    &cgd_cdevsw, &cmajor);
		if (error) {
			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
			config_cfdriver_detach(&cgd_cd);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error)
			break;
		config_cfdriver_detach(&cgd_cd);
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
#endif
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}

	return error;
}