cgd.c revision 1.41

/* $NetBSD: cgd.c,v 1.41 2006/11/25 21:13:23 christos Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.41 2006/11/25 21:13:23 christos Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/conf.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

/* Entry Point Functions */

void cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
        cgdopen, cgdclose, cgdstrategy, cgdioctl,
        cgddump, cgdsize, D_DISK
};

const struct cdevsw cgd_cdevsw = {
        cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
        nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Internal Functions */

static int cgdstart(struct dk_softc *, struct buf *);
static void cgdiodone(struct buf *);

static int cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int cgd_ioctl_clr(struct cgd_softc *, void *, struct lwp *);
static int cgdinit(struct cgd_softc *, const char *, struct vnode *,
            struct lwp *);
static void cgd_cipher(struct cgd_softc *, caddr_t, caddr_t,
            size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

static struct dk_intf the_dkintf = {
        DTYPE_CGD,
        "cgd",
        cgdopen,
        cgdclose,
        cgdstrategy,
        cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
        .d_strategy = cgdstrategy,
        .d_minphys = minphys,
};

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW 0x1
#define CGDB_IO 0x2
#define CGDB_CRYPTO 0x4

#define IFDEBUG(x,y) if (cgddebug & (x)) y
#define DPRINTF(x,y) IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)

static void hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x) panic x
#define DIAGCONDPANIC(x,y) if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

struct cgd_softc *cgd_softc;
int numcgd = 0;

/* Utility Functions */

#define CGDUNIT(x) DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x) if (!((_cs) = getcgd_softc(x))) return ENXIO

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
        int unit = CGDUNIT(dev);

        DPRINTF_FOLLOW(("getcgd_softc(0x%x): unit = %d\n", dev, unit));
        if (unit >= numcgd)
                return NULL;
        return &cgd_softc[unit];
}

/* The code */

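/*
 * Initialize one per-unit softc: zero it, give it its "cgd%d" name,
 * set up its lock and hook it into the dksubr pseudo-disk framework.
 */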
static void
cgdsoftc_init(struct cgd_softc *cs, int num)
{
        char sbuf[DK_XNAME_SIZE];

        memset(cs, 0x0, sizeof(*cs));
        snprintf(sbuf, DK_XNAME_SIZE, "cgd%d", num);
        simple_lock_init(&cs->sc_slock);
        dk_sc_init(&cs->sc_dksc, cs, sbuf);
        cs->sc_dksc.sc_dkdev.dk_driver = &cgddkdriver;
        pseudo_disk_init(&cs->sc_dksc.sc_dkdev);
}

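/*
 * Pseudo-device attach routine: allocate and initialize the requested
 * number of cgd units at autoconfiguration time.
 */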
void
cgdattach(int num)
{
        int i;

        DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
        if (num <= 0) {
                DIAGPANIC(("cgdattach: count <= 0"));
                return;
        }

        cgd_softc = (void *)malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
        if (!cgd_softc) {
                printf("WARNING: unable to malloc(9) memory for crypt disks\n");
                DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
                return;
        }

        numcgd = num;
        for (i=0; i<num; i++)
                cgdsoftc_init(&cgd_softc[i], i);
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgdopen(%d, %d)\n", dev, flags));
        GETCGD_SOFTC(cs, dev);
        return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgdclose(%d, %d)\n", dev, flags));
        GETCGD_SOFTC(cs, dev);
        return dk_close(di, &cs->sc_dksc, dev, flags, fmt, l);
}

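/*
 * Pass the buffer straight to the dksubr framework, which queues it
 * and eventually calls cgdstart() to do the real work.
 */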
static void
cgdstrategy(struct buf *bp)
{
        struct cgd_softc *cs = getcgd_softc(bp->b_dev);

        DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
            (long)bp->b_bcount));
        /* XXXrcd: Should we test for (cs != NULL)? */
        dk_strategy(di, &cs->sc_dksc, bp);
        return;
}

static int
cgdsize(dev_t dev)
{
        struct cgd_softc *cs = getcgd_softc(dev);

        DPRINTF_FOLLOW(("cgdsize(%d)\n", dev));
        if (!cs)
                return -1;
        return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
        struct cgd_softc *cs = dksc->sc_osc;
        caddr_t data = NULL;

        simple_lock(&cs->sc_slock);
        if (cs->sc_data_used == 0) {
                cs->sc_data_used = 1;
                data = cs->sc_data;
        }
        simple_unlock(&cs->sc_slock);

        if (data)
                return data;

        return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, caddr_t data)
{
        struct cgd_softc *cs = dksc->sc_osc;

        if (data == cs->sc_data) {
                simple_lock(&cs->sc_slock);
                cs->sc_data_used = 0;
                simple_unlock(&cs->sc_slock);
        } else {
                free(data, M_DEVBUF);
        }
}

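/*
 * Start one transfer on behalf of dksubr.  Writes are encrypted into
 * a bounce buffer before being handed to the underlying vnode; reads
 * go down as-is and are decrypted later in cgdiodone().
 */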
static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
        struct cgd_softc *cs = dksc->sc_osc;
        struct buf *nbp;
        caddr_t addr;
        caddr_t newaddr;
        daddr_t bn;

        DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
        disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

        bn = bp->b_rawblkno;

        /*
         * We attempt to allocate all of our resources up front, so that
         * we can fail quickly if they are unavailable.
         */

        nbp = getiobuf_nowait();
        if (nbp == NULL) {
                disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
                return -1;
        }

        /*
         * If we are writing, then we need to encrypt the outgoing
         * block into a new block of memory.  If we fail, then we
         * return an error and let the dksubr framework deal with it.
         */
        newaddr = addr = bp->b_data;
        if ((bp->b_flags & B_READ) == 0) {
                newaddr = cgd_getdata(dksc, bp->b_bcount);
                if (!newaddr) {
                        putiobuf(nbp);
                        disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
                        return -1;
                }
                cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
                    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
        }

        nbp->b_data = newaddr;
        nbp->b_flags = bp->b_flags | B_CALL;
        nbp->b_iodone = cgdiodone;
        nbp->b_proc = bp->b_proc;
        nbp->b_blkno = bn;
        nbp->b_vp = cs->sc_tvn;
        nbp->b_bcount = bp->b_bcount;
        nbp->b_private = bp;

        BIO_COPYPRIO(nbp, bp);

        if ((nbp->b_flags & B_READ) == 0) {
                V_INCR_NUMOUTPUT(nbp->b_vp);
        }
        VOP_STRATEGY(cs->sc_tvn, nbp);
        return 0;
}

/* expected to be called at splbio() */
static void
cgdiodone(struct buf *nbp)
{
        struct buf *obp = nbp->b_private;
        struct cgd_softc *cs = getcgd_softc(obp->b_dev);
        struct dk_softc *dksc = &cs->sc_dksc;

        KDASSERT(cs);

        DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
        DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
            obp, obp->b_bcount, obp->b_resid));
        DPRINTF(CGDB_IO, (" dev 0x%x, nbp %p bn %" PRId64 " addr %p bcnt %d\n",
            nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
            nbp->b_bcount));
        if (nbp->b_flags & B_ERROR) {
                obp->b_flags |= B_ERROR;
                obp->b_error = nbp->b_error ? nbp->b_error : EIO;

                printf("%s: error %d\n", dksc->sc_xname, obp->b_error);
        }

        /* Perform the decryption if we are reading.
         *
         * Note: use the blocknumber from nbp, since it is what
         * we used to encrypt the blocks.
         */

        if (nbp->b_flags & B_READ)
                cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
                    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

        /* If we allocated memory, free it now... */
        if (nbp->b_data != obp->b_data)
                cgd_putdata(dksc, nbp->b_data);

        putiobuf(nbp);

        /* Request is complete for whatever reason */
        obp->b_resid = 0;
        if (obp->b_flags & B_ERROR)
                obp->b_resid = obp->b_bcount;
        disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
            (obp->b_flags & B_READ));
        biodone(obp);
        dk_iodone(di, dksc);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;

        DPRINTF_FOLLOW(("cgdread(%d, %p, %d)\n", dev, uio, flags));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        if ((dksc->sc_flags & DKF_INITED) == 0)
                return ENXIO;
        return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;

        DPRINTF_FOLLOW(("cgdwrite(%d, %p, %d)\n", dev, uio, flags));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        if ((dksc->sc_flags & DKF_INITED) == 0)
                return ENXIO;
        return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

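/*
 * Handle the cgd-specific ioctls (CGDIOCSET/CGDIOCCLR) here; everything
 * else falls through to dk_ioctl().
 */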
static int
cgdioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;
        struct disk *dk;
        int ret;
        int part = DISKPART(dev);
        int pmask = 1 << part;

        DPRINTF_FOLLOW(("cgdioctl(%d, %ld, %p, %d, %p)\n",
            dev, cmd, data, flag, l));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        dk = &dksc->sc_dkdev;
        switch (cmd) {
        case CGDIOCSET:
        case CGDIOCCLR:
                if ((flag & FWRITE) == 0)
                        return EBADF;
        }

        switch (cmd) {
        case CGDIOCSET:
                if (dksc->sc_flags & DKF_INITED)
                        ret = EBUSY;
                else
                        ret = cgd_ioctl_set(cs, data, l);
                break;
        case CGDIOCCLR:
                if (!(dksc->sc_flags & DKF_INITED)) {
                        ret = ENXIO;
                        break;
                }
                if (DK_BUSY(&cs->sc_dksc, pmask)) {
                        ret = EBUSY;
                        break;
                }
                ret = cgd_ioctl_clr(cs, data, l);
                break;
        default:
                ret = dk_ioctl(di, dksc, dev, cmd, data, flag, l);
                break;
        }

        return ret;
}

static int
cgddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgddump(%d, %" PRId64 ", %p, %lu)\n", dev, blkno, va,
            (unsigned long)size));
        GETCGD_SOFTC(cs, dev);
        return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE 1024

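/*
 * CGDIOCSET: configure a unit.  Look up the backing device, copy in the
 * algorithm name, IV method and key from userland, initialize the cipher
 * and attach the pseudo-disk.
 *
 * For illustration only (not part of this file): a userland tool such as
 * cgdconfig(8) would drive this roughly as sketched below.  The field names
 * match struct cgd_ioctl as used here; the literal values are examples,
 * not requirements.
 *
 *      struct cgd_ioctl ci;
 *
 *      memset(&ci, 0, sizeof(ci));
 *      ci.ci_disk = "/dev/wd0e";        (path of the backing device)
 *      ci.ci_alg = "aes-cbc";           (looked up via cryptfuncs_find())
 *      ci.ci_ivmethod = "encblkno";     (the only method accepted below)
 *      ci.ci_key = key;                 (ci_keylen / 8 + 1 bytes are copied in)
 *      ci.ci_keylen = keylen_in_bits;
 *      ci.ci_blocksize = blocksize;     (cipher block size; rejected if > 4096)
 *      ioctl(fd, CGDIOCSET, &ci);       (fd: an open cgd device node)
 */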
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
        struct cgd_ioctl *ci = data;
        struct vnode *vp;
        int ret;
        int keybytes;                   /* key length in bytes */
        const char *cp;
        char *inbuf;

        cp = ci->ci_disk;
        if ((ret = dk_lookup(cp, l, &vp)) != 0)
                return ret;

        inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

        if ((ret = cgdinit(cs, cp, vp, l)) != 0)
                goto bail;

        (void)memset(inbuf, 0, MAX_KEYSIZE);
        ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
        if (ret)
                goto bail;
        cs->sc_cfuncs = cryptfuncs_find(inbuf);
        if (!cs->sc_cfuncs) {
                ret = EINVAL;
                goto bail;
        }

        /* right now we only support encblkno, so hard-code it */
        (void)memset(inbuf, 0, MAX_KEYSIZE);
        ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
        if (ret)
                goto bail;
        if (strcmp("encblkno", inbuf)) {
                ret = EINVAL;
                goto bail;
        }

        keybytes = ci->ci_keylen / 8 + 1;
        if (keybytes > MAX_KEYSIZE) {
                ret = EINVAL;
                goto bail;
        }
        (void)memset(inbuf, 0, MAX_KEYSIZE);
        ret = copyin(ci->ci_key, inbuf, keybytes);
        if (ret)
                goto bail;

        if (ci->ci_blocksize > 4096) {
                printf("cgd: large blocksize %zu\n", ci->ci_blocksize);
                ret = ENOMEM;
                goto bail;
        }

        cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
        cs->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO;
        cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
            &cs->sc_cdata.cf_blocksize);
        (void)memset(inbuf, 0, MAX_KEYSIZE);
        if (!cs->sc_cdata.cf_priv) {
                printf("cgd: unable to initialize cipher\n");
                ret = EINVAL;           /* XXX is this the right error? */
                goto bail;
        }
        free(inbuf, M_TEMP);

        bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);

        cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
        cs->sc_data_used = 0;

        cs->sc_dksc.sc_flags |= DKF_INITED;

        /* Attach the disk. */
        pseudo_disk_attach(&cs->sc_dksc.sc_dkdev);

        /* Try and read the disklabel. */
        dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

        /* Discover wedges on this disk. */
        dkwedge_discover(&cs->sc_dksc.sc_dkdev);

        return 0;

bail:
        free(inbuf, M_TEMP);
        (void)vn_close(vp, FREAD|FWRITE, l->l_cred, l);
        return ret;
}

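/*
 * CGDIOCCLR: tear a unit down.  Drain and free the buffer queue, close
 * the backing vnode, destroy the cipher state and detach the pseudo-disk.
 */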
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct lwp *l)
{
        int s;

        /* Delete all of our wedges. */
        dkwedge_delall(&cs->sc_dksc.sc_dkdev);

        /* Kill off any queued buffers. */
        s = splbio();
        bufq_drain(cs->sc_dksc.sc_bufq);
        splx(s);
        bufq_free(cs->sc_dksc.sc_bufq);

        (void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred, l);
        cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
        free(cs->sc_tpath, M_DEVBUF);
        free(cs->sc_data, M_DEVBUF);
        cs->sc_data_used = 0;
        cs->sc_dksc.sc_flags &= ~DKF_INITED;
        pseudo_disk_detach(&cs->sc_dksc.sc_dkdev);

        return 0;
}

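/*
 * Record the backing device for a unit: copy in its path, look up its
 * size via DIOCGPART and fabricate an initial geometry for the label.
 */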
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
    struct lwp *l)
{
        struct dk_geom *pdg;
        struct partinfo dpart;
        struct vattr va;
        size_t size;
        int maxsecsize = 0;
        int ret;
        char *tmppath;

        cs->sc_dksc.sc_size = 0;
        cs->sc_tvn = vp;
        cs->sc_tpath = NULL;

        tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
        if (ret)
                goto bail;
        cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
        memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

        if ((ret = VOP_GETATTR(vp, &va, l->l_cred, l)) != 0)
                goto bail;

        cs->sc_tdev = va.va_rdev;

        ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred, l);
        if (ret)
                goto bail;

        maxsecsize =
            ((dpart.disklab->d_secsize > maxsecsize) ?
            dpart.disklab->d_secsize : maxsecsize);
        size = dpart.part->p_size;

        if (!size) {
                ret = ENODEV;
                goto bail;
        }

        cs->sc_dksc.sc_size = size;

        /*
         * XXX here we should probe the underlying device.  If we
         *     are accessing a partition of type RAW_PART, then
         *     we should populate our initial geometry with the
         *     geometry that we discover from the device.
         */
        pdg = &cs->sc_dksc.sc_geom;
        pdg->pdg_secsize = DEV_BSIZE;
        pdg->pdg_ntracks = 1;
        pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
        pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
        free(tmppath, M_TEMP);
        if (ret && cs->sc_tpath)
                free(cs->sc_tpath, M_DEVBUF);
        return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

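/* Serialize the block number into blkno_buf, least significant byte first. */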
static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
        int i;

        /* Set up the blkno in blkno_buf, here we do not care much
         * about the final layout of the information as long as we
         * can guarantee that each sector will have a different IV
         * and that the endianness of the machine will not affect
         * the representation that we have chosen.
         *
         * We choose this representation, because it does not rely
         * on the size of buf (which is the blocksize of the cipher),
         * but allows daddr_t to grow without breaking existing
         * disks.
         *
         * Note that blkno2blkno_buf does not take a size as input,
         * and hence must be called on a pre-zeroed buffer of length
         * greater than or equal to sizeof(daddr_t).
         */
        for (i=0; i < sizeof(daddr_t); i++) {
                *sbuf++ = blkno & 0xff;
                blkno >>= 8;
        }
}

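/*
 * Encrypt or decrypt `len' bytes, one `secsize'-sized sector at a time,
 * using the encblkno IV method described above.
 */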
static void
cgd_cipher(struct cgd_softc *cs, caddr_t dst, caddr_t src,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
        cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
        struct uio dstuio;
        struct uio srcuio;
        struct iovec dstiov[2];
        struct iovec srciov[2];
        int blocksize = cs->sc_cdata.cf_blocksize;
        char sink[blocksize];
        char zero_iv[blocksize];
        char blkno_buf[blocksize];

        DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

        DIAGCONDPANIC(len % blocksize != 0,
            ("cgd_cipher: len %% blocksize != 0"));

        /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
        DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
            ("cgd_cipher: sizeof(daddr_t) > blocksize"));

        memset(zero_iv, 0x0, sizeof(zero_iv));

        dstuio.uio_iov = dstiov;
        dstuio.uio_iovcnt = 2;

        srcuio.uio_iov = srciov;
        srcuio.uio_iovcnt = 2;

        dstiov[0].iov_base = sink;
        dstiov[0].iov_len = blocksize;
        srciov[0].iov_base = blkno_buf;
        srciov[0].iov_len = blocksize;
        dstiov[1].iov_len = secsize;
        srciov[1].iov_len = secsize;

        for (; len > 0; len -= secsize) {
                dstiov[1].iov_base = dst;
                srciov[1].iov_base = src;

                memset(blkno_buf, 0x0, sizeof(blkno_buf));
                blkno2blkno_buf(blkno_buf, blkno);
                if (dir == CGD_CIPHER_DECRYPT) {
                        dstuio.uio_iovcnt = 1;
                        srcuio.uio_iovcnt = 1;
                        IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
                            blkno_buf, sizeof(blkno_buf)));
                        cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
                            zero_iv, CGD_CIPHER_ENCRYPT);
                        memcpy(blkno_buf, sink, blocksize);
                        dstuio.uio_iovcnt = 2;
                        srcuio.uio_iovcnt = 2;
                }

                IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
                    blkno_buf, sizeof(blkno_buf)));
                cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
                IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
                    sink, sizeof(sink)));

                dst += secsize;
                src += secsize;
                blkno++;
        }
}

#ifdef DEBUG
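/* Dump a buffer in hex; used by the CGDB_CRYPTO debug traces above. */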
static void
hexprint(const char *start, void *buf, int len)
{
        char *c = buf;

        DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
        printf("%s: len=%06d 0x", start, len);
        while (len--)
                printf("%02x", (unsigned) *c++);
}
#endif