/* $NetBSD: cgd.c,v 1.51.6.1 2008/05/16 02:23:48 yamt Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.51.6.1 2008/05/16 02:23:48 yamt Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	cgdopen, cgdclose, cgdstrategy, cgdioctl,
	cgddump, cgdsize, D_DISK
};

const struct cdevsw cgd_cdevsw = {
	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Internal Functions */

static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

struct cgd_softc *cgd_softc;
int numcgd = 0;

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int unit = CGDUNIT(dev);

	DPRINTF_FOLLOW(("getcgd_softc(0x%x): unit = %d\n", dev, unit));
	if (unit >= numcgd)
		return NULL;
	return &cgd_softc[unit];
}

/* The code */

static void
cgdsoftc_init(struct cgd_softc *cs, int num)
{
	char sbuf[DK_XNAME_SIZE];

	memset(cs, 0x0, sizeof(*cs));
	snprintf(sbuf, DK_XNAME_SIZE, "cgd%d", num);
	simple_lock_init(&cs->sc_slock);
	dk_sc_init(&cs->sc_dksc, cs, sbuf);
	disk_init(&cs->sc_dksc.sc_dkdev, cs->sc_dksc.sc_xname, &cgddkdriver);
}

void
cgdattach(int num)
{
	int i;

	DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
	if (num <= 0) {
		DIAGPANIC(("cgdattach: count <= 0"));
		return;
	}

	cgd_softc = (void *)malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
	if (!cgd_softc) {
		printf("WARNING: unable to malloc(9) memory for crypt disks\n");
		DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
		return;
	}

	numcgd = num;
	for (i = 0; i < num; i++)
		cgdsoftc_init(&cgd_softc[i], i);
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(%d, %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdclose(%d, %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_close(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));
	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(%d)\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */
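
/*
 * Note: when the per-device buffer is already in use, cgd_getdata()
 * falls back to malloc(9) with M_NOWAIT and may therefore return
 * NULL; cgdstart() checks for this and backs out of the transfer.
 */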

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct cgd_softc *cs = dksc->sc_osc;
	void *data = NULL;

	simple_lock(&cs->sc_slock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	simple_unlock(&cs->sc_slock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct cgd_softc *cs = dksc->sc_osc;

	if (data == cs->sc_data) {
		simple_lock(&cs->sc_slock);
		cs->sc_data_used = 0;
		simple_unlock(&cs->sc_slock);
	} else {
		free(data, M_DEVBUF);
	}
}

static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct cgd_softc *cs = dksc->sc_osc;
	struct buf *nbp;
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */

	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL) {
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		return -1;
	}

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			return -1;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(&vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);
	return 0;
}

/* expected to be called at splbio() */
static void
cgdiodone(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;
	struct cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%x, nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		printf("%s: error %d\n", dksc->sc_xname, obp->b_error);
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	dk_iodone(di, dksc);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(%d, %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(%d, %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct cgd_softc *cs;
	struct dk_softc *dksc;
	struct disk *dk;
	int ret;
	int part = DISKPART(dev);
	int pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(%d, %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	dk = &dksc->sc_dkdev;
	switch (cmd) {
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			ret = EBUSY;
		else
			ret = cgd_ioctl_set(cs, data, l);
		break;
	case CGDIOCCLR:
		if (!(dksc->sc_flags & DKF_INITED)) {
			ret = ENXIO;
			break;
		}
		if (DK_BUSY(&cs->sc_dksc, pmask)) {
			ret = EBUSY;
			break;
		}
		ret = cgd_ioctl_clr(cs, data, l);
		break;
	default:
		ret = dk_ioctl(di, dksc, dev, cmd, data, flag, l);
		break;
	}

	return ret;
}

static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(%d, %" PRId64 ", %p, %lu)\n", dev, blkno, va,
	    (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 * for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024
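
/*
 * Illustrative sketch (not part of the driver): a userland consumer,
 * in practice cgdconfig(8), configures a unit by filling in the
 * struct cgd_ioctl fields that cgd_ioctl_set() consumes below and
 * issuing CGDIOCSET on an open cgd device, roughly:
 *
 *	struct cgd_ioctl ci;
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/wd0e";	// path to the underlying device
 *	ci.ci_alg = "aes-cbc";		// looked up via cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno";	// only IV method accepted below
 *	ci.ci_key = keybuf;		// user pointer to raw key material
 *	ci.ci_keylen = 256;		// key length, in bits
 *	ci.ci_blocksize = 128;		// cipher block size parameter
 *	ioctl(fd, CGDIOCSET, &ci);	// fd: cgd unit opened for writing
 *
 * The field types, the "aes-cbc" name, and the example values are
 * assumptions drawn from the checks in cgd_ioctl_set(); see
 * dev/cgdvar.h for the authoritative layout.
 */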

/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t keybytes;			/* key length in bytes */
	const char *cp;
	char *inbuf;

	cp = ci->ci_disk;
	if ((ret = dk_lookup(cp, l, &vp, UIO_USERSPACE)) != 0)
		return ret;

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* right now we only support encblkno, so hard-code it */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;
	if (strcmp("encblkno", inbuf)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		printf("cgd: unable to initialize cipher\n");
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	cs->sc_dksc.sc_flags |= DKF_INITED;

	/* Attach the disk. */
	disk_attach(&cs->sc_dksc.sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&cs->sc_dksc.sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}

/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct lwp *l)
{
	int s;

	/* Delete all of our wedges. */
	dkwedge_delall(&cs->sc_dksc.sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(cs->sc_dksc.sc_bufq);
	splx(s);
	bufq_free(cs->sc_dksc.sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	cs->sc_dksc.sc_flags &= ~DKF_INITED;
	disk_detach(&cs->sc_dksc.sc_dkdev);

	return 0;
}

static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct dk_geom *pdg;
	struct partinfo dpart;
	struct vattr va;
	size_t size;
	int maxsecsize = 0;
	int ret;
	char *tmppath;

	cs->sc_dksc.sc_size = 0;
	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	if ((ret = VOP_GETATTR(vp, &va, l->l_cred)) != 0)
		goto bail;

	cs->sc_tdev = va.va_rdev;

	ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
	if (ret)
		goto bail;

	maxsecsize =
	    ((dpart.disklab->d_secsize > maxsecsize) ?
	    dpart.disklab->d_secsize : maxsecsize);
	size = dpart.part->p_size;

	if (!size) {
		ret = ENODEV;
		goto bail;
	}

	cs->sc_dksc.sc_size = size;

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	pdg = &cs->sc_dksc.sc_geom;
	pdg->pdg_secsize = DEV_BSIZE;
	pdg->pdg_ntracks = 1;
	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */
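
/*
 * Concretely, for one sector the uios built in cgd_cipher() look like
 * this (encryption case; decryption first encrypts blkno_buf by itself
 * and substitutes the result before the main pass):
 *
 *	srcuio: [0] blkno_buf  (blocksize bytes: block number, zero padded)
 *	        [1] source sector data      (secsize bytes)
 *	dstuio: [0] sink       (blocksize bytes: ciphertext is discarded)
 *	        [1] destination sector data (secsize bytes)
 *
 * With a zero IV, CBC chaining across iovec [0] leaves the encrypted
 * block number as the effective IV for the sector data in iovec [1].
 */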

static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i = 0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}
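
/*
 * Example (illustrative): with an 8-byte daddr_t and blkno == 0x0102,
 * the first bytes of a pre-zeroed blkno_buf become
 *
 *	02 01 00 00 00 00 00 00
 *
 * i.e. the block number is stored least-significant byte first, and
 * the remainder of the cipher-blocksize buffer stays zero.
 */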

static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char *dst = dstv;
	char *src = srcv;
	cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
	struct uio dstuio;
	struct uio srcuio;
	struct iovec dstiov[2];
	struct iovec srciov[2];
	size_t blocksize = cs->sc_cdata.cf_blocksize;
	char sink[blocksize];
	char zero_iv[blocksize];
	char blkno_buf[blocksize];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, sizeof(zero_iv));

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len = blocksize;
	dstiov[1].iov_len = secsize;
	srciov[1].iov_len = secsize;

	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, sizeof(blkno_buf));
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, sizeof(blkno_buf)));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, sizeof(blkno_buf)));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, sizeof(sink)));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}

#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char *c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif