/* $NetBSD: cgd.c,v 1.20 2004/10/04 11:12:09 yamt Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.20 2004/10/04 11:12:09 yamt Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/conf.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>

/* Entry Point Functions */

void cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
        cgdopen, cgdclose, cgdstrategy, cgdioctl,
        cgddump, cgdsize, D_DISK
};

const struct cdevsw cgd_cdevsw = {
        cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
        nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Internal Functions */

static int cgdstart(struct dk_softc *, struct buf *);
static void cgdiodone(struct buf *);

static int cgd_ioctl_set(struct cgd_softc *, void *, struct proc *);
static int cgd_ioctl_clr(struct cgd_softc *, void *, struct proc *);
static int cgdinit(struct cgd_softc *, char *, struct vnode *,
            struct proc *);
static void cgd_cipher(struct cgd_softc *, caddr_t, caddr_t,
            size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

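/*
 * This table is handed to the generic pseudo-disk helpers in dksubr.c
 * (dk_open, dk_close, dk_strategy, ...) so that they can call back into
 * the cgd-specific routines below.
 */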
static struct dk_intf the_dkintf = {
        DTYPE_CGD,
        "cgd",
        cgdopen,
        cgdclose,
        cgdstrategy,
        cgdstart,
};
static struct dk_intf *di = &the_dkintf;

/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW 0x1
#define CGDB_IO 0x2
#define CGDB_CRYPTO 0x4

#define IFDEBUG(x,y) if (cgddebug & (x)) y
#define DPRINTF(x,y) IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y) DPRINTF(CGDB_FOLLOW, y)

static void hexprint(char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x) panic x
#define DIAGCONDPANIC(x,y) if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif

/* Global variables */

struct cgd_softc *cgd_softc;
int numcgd = 0;

/* Utility Functions */

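/*
 * GETCGD_SOFTC() bails out of the calling function with ENXIO when the
 * unit number does not correspond to a configured cgd device.
 */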
#define CGDUNIT(x) DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x) if (!((_cs) = getcgd_softc(x))) return ENXIO

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
        int unit = CGDUNIT(dev);

        DPRINTF_FOLLOW(("getcgd_softc(0x%x): unit = %d\n", dev, unit));
        if (unit >= numcgd)
                return NULL;
        return &cgd_softc[unit];
}

/* The code */

static void
cgdsoftc_init(struct cgd_softc *cs, int num)
{
        char buf[DK_XNAME_SIZE];

        memset(cs, 0x0, sizeof(*cs));
        snprintf(buf, DK_XNAME_SIZE, "cgd%d", num);
        simple_lock_init(&cs->sc_slock);
        dk_sc_init(&cs->sc_dksc, cs, buf);
}

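/*
 * Pseudo-device attach routine, called at boot with the count given on
 * the "pseudo-device cgd N" line of the kernel configuration.  It
 * allocates and initializes the softc array for all units up front.
 */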
void
cgdattach(int num)
{
        int i;

        DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
        if (num <= 0) {
                DIAGPANIC(("cgdattach: count <= 0"));
                return;
        }

        cgd_softc = (void *)malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
        if (!cgd_softc) {
                printf("WARNING: unable to malloc(9) memory for crypt disks\n");
                DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
                return;
        }

        numcgd = num;
        for (i=0; i<num; i++)
                cgdsoftc_init(&cgd_softc[i], i);
}

static int
cgdopen(dev_t dev, int flags, int fmt, struct proc *p)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgdopen(%d, %d)\n", dev, flags));
        GETCGD_SOFTC(cs, dev);
        return dk_open(di, &cs->sc_dksc, dev, flags, fmt, p);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct proc *p)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgdclose(%d, %d)\n", dev, flags));
        GETCGD_SOFTC(cs, dev);
        return dk_close(di, &cs->sc_dksc, dev, flags, fmt, p);
}

static void
cgdstrategy(struct buf *bp)
{
        struct cgd_softc *cs = getcgd_softc(bp->b_dev);

        DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
            (long)bp->b_bcount));
        /* XXXrcd: Should we test for (cs != NULL)? */
        dk_strategy(di, &cs->sc_dksc, bp);
        return;
}

static int
cgdsize(dev_t dev)
{
        struct cgd_softc *cs = getcgd_softc(dev);

        DPRINTF_FOLLOW(("cgdsize(%d)\n", dev));
        if (!cs)
                return -1;
        return dk_size(di, &cs->sc_dksc, dev);
}

/*
 * cgd_{get,put}data manage the buffer that holds the newly encrypted
 * data.  We keep one such buffer per device so that we can always have
 * at least one transaction in flight.  We use this buffer first so that
 * we have one less piece of malloc'ed data at any given point.
 */
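/*
 * Note that if the per-device buffer is already in use, cgd_getdata()
 * falls back to malloc(9) with M_NOWAIT, which can fail; callers must be
 * prepared for a NULL return.
 */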

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
        struct cgd_softc *cs = dksc->sc_osc;
        caddr_t data = NULL;

        simple_lock(&cs->sc_slock);
        if (cs->sc_data_used == 0) {
                cs->sc_data_used = 1;
                data = cs->sc_data;
        }
        simple_unlock(&cs->sc_slock);

        if (data)
                return data;

        return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, caddr_t data)
{
        struct cgd_softc *cs = dksc->sc_osc;

        if (data == cs->sc_data) {
                simple_lock(&cs->sc_slock);
                cs->sc_data_used = 0;
                simple_unlock(&cs->sc_slock);
        } else {
                free(data, M_DEVBUF);
        }
}

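/*
 * cgdstart() is the start routine registered in the_dkintf; the generic
 * dksubr code calls it to issue the next queued transfer.  We translate
 * the block number, encrypt outgoing data into a private buffer, and
 * hand a new buf off to the underlying vnode with VOP_STRATEGY().
 */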
static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
        struct cgd_softc *cs = dksc->sc_osc;
        struct buf *nbp;
        struct partition *pp;
        caddr_t addr;
        caddr_t newaddr;
        daddr_t bn;
        int s;

        DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
        disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

        /* XXXrcd:
         * Translate partition relative blocks to absolute blocks;
         * this probably belongs (somehow) in dksubr.c, since it
         * is independent of the underlying code...  This will require
         * that the interface be expanded slightly, though.
         */
        bn = bp->b_blkno;
        if (DISKPART(bp->b_dev) != RAW_PART) {
                pp = &cs->sc_dksc.sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
                bn += pp->p_offset;
        }

        /*
         * We attempt to allocate all of our resources up front, so that
         * we can fail quickly if they are unavailable.
         */

        s = splbio();
        nbp = pool_get(&bufpool, PR_NOWAIT);
        splx(s);
        if (nbp == NULL) {
                disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
                return -1;
        }

        /*
         * If we are writing, then we need to encrypt the outgoing
         * block into a new block of memory.  If we fail, then we
         * return an error and let the dksubr framework deal with it.
         */
        newaddr = addr = bp->b_data;
        if ((bp->b_flags & B_READ) == 0) {
                newaddr = cgd_getdata(dksc, bp->b_bcount);
                if (!newaddr) {
                        s = splbio();
                        pool_put(&bufpool, nbp);
                        splx(s);
                        disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
                        return -1;
                }
                cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
                    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
        }

        BUF_INIT(nbp);
        nbp->b_data = newaddr;
        nbp->b_flags = bp->b_flags | B_CALL;
        nbp->b_iodone = cgdiodone;
        nbp->b_proc = bp->b_proc;
        nbp->b_blkno = bn;
        nbp->b_vp = cs->sc_tvn;
        nbp->b_bcount = bp->b_bcount;
        nbp->b_private = bp;

        BIO_COPYPRIO(nbp, bp);

        if ((nbp->b_flags & B_READ) == 0) {
                V_INCR_NUMOUTPUT(nbp->b_vp);
        }
        VOP_STRATEGY(cs->sc_tvn, nbp);
        return 0;
}

/* expected to be called at splbio() */
static void
cgdiodone(struct buf *nbp)
{
        struct buf *obp = nbp->b_private;
        struct cgd_softc *cs = getcgd_softc(obp->b_dev);
        struct dk_softc *dksc = &cs->sc_dksc;

        KDASSERT(cs);

        DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
        DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
            obp, obp->b_bcount, obp->b_resid));
        DPRINTF(CGDB_IO, (" dev 0x%x, nbp %p bn %" PRId64 " addr %p bcnt %d\n",
            nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
            nbp->b_bcount));
        if (nbp->b_flags & B_ERROR) {
                obp->b_flags |= B_ERROR;
                obp->b_error = nbp->b_error ? nbp->b_error : EIO;

                printf("%s: error %d\n", dksc->sc_xname, obp->b_error);
        }

        /* Perform the decryption if we are reading.
         *
         * Note: use the block number from nbp, since it is what
         * we used to encrypt the blocks.
         */

        if (nbp->b_flags & B_READ)
                cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
                    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

        /* If we allocated memory, free it now... */
        if (nbp->b_data != obp->b_data)
                cgd_putdata(dksc, nbp->b_data);

        pool_put(&bufpool, nbp);

        /* Request is complete for whatever reason */
        obp->b_resid = 0;
        if (obp->b_flags & B_ERROR)
                obp->b_resid = obp->b_bcount;
        disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
            (obp->b_flags & B_READ));
        biodone(obp);
        dk_iodone(di, dksc);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;

        DPRINTF_FOLLOW(("cgdread(%d, %p, %d)\n", dev, uio, flags));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        if ((dksc->sc_flags & DKF_INITED) == 0)
                return ENXIO;
        return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;

        DPRINTF_FOLLOW(("cgdwrite(%d, %p, %d)\n", dev, uio, flags));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        if ((dksc->sc_flags & DKF_INITED) == 0)
                return ENXIO;
        return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}

static int
cgdioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
        struct cgd_softc *cs;
        struct dk_softc *dksc;
        int ret;
        int part = DISKPART(dev);
        int pmask = 1 << part;

        DPRINTF_FOLLOW(("cgdioctl(%d, %ld, %p, %d, %p)\n",
            dev, cmd, data, flag, p));
        GETCGD_SOFTC(cs, dev);
        dksc = &cs->sc_dksc;
        switch (cmd) {
        case CGDIOCSET:
        case CGDIOCCLR:
                if ((flag & FWRITE) == 0)
                        return EBADF;
        }

        if ((ret = lockmgr(&dksc->sc_lock, LK_EXCLUSIVE, NULL)) != 0)
                return ret;

        switch (cmd) {
        case CGDIOCSET:
                if (dksc->sc_flags & DKF_INITED)
                        ret = EBUSY;
                else
                        ret = cgd_ioctl_set(cs, data, p);
                break;
        case CGDIOCCLR:
                if (!(dksc->sc_flags & DKF_INITED)) {
                        ret = ENXIO;
                        break;
                }
                if (DK_BUSY(&cs->sc_dksc, pmask)) {
                        ret = EBUSY;
                        break;
                }
                ret = cgd_ioctl_clr(cs, data, p);
                break;
        default:
                ret = dk_ioctl(di, dksc, dev, cmd, data, flag, p);
                break;
        }

        lockmgr(&dksc->sc_lock, LK_RELEASE, NULL);
        return ret;
}

static int
cgddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
        struct cgd_softc *cs;

        DPRINTF_FOLLOW(("cgddump(%d, %" PRId64 ", %p, %lu)\n", dev, blkno, va,
            (unsigned long)size));
        GETCGD_SOFTC(cs, dev);
        return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}

/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE 1024

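/*
 * Handle CGDIOCSET: look up and open the underlying device, select the
 * cipher and IV method, copy in the key, initialize the cipher state and
 * attach the pseudo-disk.
 */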
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct proc *p)
{
        struct cgd_ioctl *ci = data;
        struct vnode *vp;
        int ret;
        int keybytes; /* key length in bytes */
        char *cp;
        char inbuf[MAX_KEYSIZE];

        cp = ci->ci_disk;
        if ((ret = dk_lookup(cp, p, &vp)) != 0)
                return ret;

        if ((ret = cgdinit(cs, cp, vp, p)) != 0)
                goto bail;

        memset(inbuf, 0x0, sizeof(inbuf));
        ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
        if (ret)
                goto bail;
        cs->sc_cfuncs = cryptfuncs_find(inbuf);
        if (!cs->sc_cfuncs) {
                ret = EINVAL;
                goto bail;
        }

        /* right now we only support encblkno, so hard-code it */
        memset(inbuf, 0x0, sizeof(inbuf));
        ret = copyinstr(ci->ci_ivmethod, inbuf, sizeof(inbuf), NULL);
        if (ret)
                goto bail;
        if (strcmp("encblkno", inbuf)) {
                ret = EINVAL;
                goto bail;
        }

        keybytes = ci->ci_keylen / 8 + 1;
        if (keybytes > MAX_KEYSIZE) {
                ret = EINVAL;
                goto bail;
        }
        memset(inbuf, 0x0, sizeof(inbuf));
        ret = copyin(ci->ci_key, inbuf, keybytes);
        if (ret)
                goto bail;

        cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
        cs->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO;
        cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
            &cs->sc_cdata.cf_blocksize);
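        /* Clear the on-stack copy of the key now that the cipher has
         * initialized its own private state from it. */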
        memset(inbuf, 0x0, sizeof(inbuf));
        if (!cs->sc_cdata.cf_priv) {
                printf("cgd: unable to initialize cipher\n");
                ret = EINVAL; /* XXX is this the right error? */
                goto bail;
        }

        bufq_alloc(&cs->sc_dksc.sc_bufq, BUFQ_FCFS);

        cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
        cs->sc_data_used = 0;

        cs->sc_dksc.sc_flags |= DKF_INITED;

        /* Attach the disk. */
        disk_attach(&cs->sc_dksc.sc_dkdev);

        /* Try and read the disklabel. */
        dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

        return 0;

bail:
        (void)vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
        return ret;
}

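/*
 * Handle CGDIOCCLR: fail any queued buffers, release the underlying
 * vnode, destroy the cipher state and detach the pseudo-disk.
 */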
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct proc *p)
{
        struct buf *bp;
        int s;

        /* Kill off any queued buffers. */
        s = splbio();
        while ((bp = BUFQ_GET(&cs->sc_dksc.sc_bufq)) != NULL) {
                bp->b_error = EIO;
                bp->b_flags |= B_ERROR;
                bp->b_resid = bp->b_bcount;
                biodone(bp);
        }
        splx(s);
        bufq_free(&cs->sc_dksc.sc_bufq);

        (void)vn_close(cs->sc_tvn, FREAD|FWRITE, p->p_ucred, p);
        cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
        free(cs->sc_tpath, M_DEVBUF);
        free(cs->sc_data, M_DEVBUF);
        cs->sc_data_used = 0;
        cs->sc_dksc.sc_flags &= ~DKF_INITED;
        disk_detach(&cs->sc_dksc.sc_dkdev);

        return 0;
}

static int
cgdinit(struct cgd_softc *cs, char *cpath, struct vnode *vp,
    struct proc *p)
{
        struct dk_geom *pdg;
        struct partinfo dpart;
        struct vattr va;
        size_t size;
        int maxsecsize = 0;
        int ret;
        char tmppath[MAXPATHLEN];

        cs->sc_dksc.sc_size = 0;
        cs->sc_tvn = vp;

        memset(tmppath, 0x0, sizeof(tmppath));
        ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
        if (ret)
                goto bail;
        cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
        memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

        if ((ret = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0)
                goto bail;

        cs->sc_tdev = va.va_rdev;

        ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, p->p_ucred, p);
        if (ret)
                goto bail;

        maxsecsize =
            ((dpart.disklab->d_secsize > maxsecsize) ?
            dpart.disklab->d_secsize : maxsecsize);
        size = dpart.part->p_size;

        if (!size) {
                ret = ENODEV;
                goto bail;
        }

        cs->sc_dksc.sc_size = size;

        /*
         * XXX here we should probe the underlying device.  If we
         *     are accessing a partition of type RAW_PART, then
         *     we should populate our initial geometry with the
         *     geometry that we discover from the device.
         */
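        /* For now, fabricate a geometry of one track per cylinder and
         * roughly 1MB per cylinder (pdg_nsectors sectors of pdg_secsize
         * bytes each). */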
        pdg = &cs->sc_dksc.sc_geom;
        pdg->pdg_secsize = DEV_BSIZE;
        pdg->pdg_ntracks = 1;
        pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
        pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
        if (ret && cs->sc_tpath)
                free(cs->sc_tpath, M_DEVBUF);
        return ret;
}

/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the block number and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

static void
blkno2blkno_buf(char *buf, daddr_t blkno)
{
        int i;

        /* Set up the blkno in blkno_buf; here we do not care much
         * about the final layout of the information as long as we
         * can guarantee that each sector will have a different IV
         * and that the endianness of the machine will not affect
         * the representation that we have chosen.
         *
         * We choose this representation because it does not rely
         * on the size of buf (which is the blocksize of the cipher),
         * but allows daddr_t to grow without breaking existing
         * disks.
         *
         * Note that blkno2blkno_buf does not take a size as input,
         * and hence must be called on a pre-zeroed buffer of length
         * greater than or equal to sizeof(daddr_t).
         */
        for (i=0; i < sizeof(daddr_t); i++) {
                *buf++ = blkno & 0xff;
                blkno >>= 8;
        }
}

static void
cgd_cipher(struct cgd_softc *cs, caddr_t dst, caddr_t src,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
        cfunc_cipher *cipher = cs->sc_cfuncs->cf_cipher;
        struct uio dstuio;
        struct uio srcuio;
        struct iovec dstiov[2];
        struct iovec srciov[2];
        int blocksize = cs->sc_cdata.cf_blocksize;
        char sink[blocksize];
        char zero_iv[blocksize];
        char blkno_buf[blocksize];

        DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

        DIAGCONDPANIC(len % blocksize != 0,
            ("cgd_cipher: len %% blocksize != 0"));

        /* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
        DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
            ("cgd_cipher: sizeof(daddr_t) > blocksize"));

        memset(zero_iv, 0x0, sizeof(zero_iv));

        dstuio.uio_iov = dstiov;
        dstuio.uio_iovcnt = 2;

        srcuio.uio_iov = srciov;
        srcuio.uio_iovcnt = 2;

        dstiov[0].iov_base = sink;
        dstiov[0].iov_len = blocksize;
        srciov[0].iov_base = blkno_buf;
        srciov[0].iov_len = blocksize;
        dstiov[1].iov_len = secsize;
        srciov[1].iov_len = secsize;

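        /* Walk the transfer one secsize-sized sector at a time; each
         * sector gets an IV derived from its own block number. */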
        for (; len > 0; len -= secsize) {
                dstiov[1].iov_base = dst;
                srciov[1].iov_base = src;

                memset(blkno_buf, 0x0, sizeof(blkno_buf));
                blkno2blkno_buf(blkno_buf, blkno);
                if (dir == CGD_CIPHER_DECRYPT) {
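                        /*
                         * Encrypt the block number by itself first, so
                         * that blkno_buf carries the same encrypted
                         * block-number IV that was used when the sector
                         * was written.
                         */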
                        dstuio.uio_iovcnt = 1;
                        srcuio.uio_iovcnt = 1;
                        IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
                            blkno_buf, sizeof(blkno_buf)));
                        cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
                            zero_iv, CGD_CIPHER_ENCRYPT);
                        memcpy(blkno_buf, sink, blocksize);
                        dstuio.uio_iovcnt = 2;
                        srcuio.uio_iovcnt = 2;
                }

                IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
                    blkno_buf, sizeof(blkno_buf)));
                cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
                IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
                    sink, sizeof(sink)));

                dst += secsize;
                src += secsize;
                blkno++;
        }
}

#ifdef DEBUG
static void
hexprint(char *start, void *buf, int len)
{
        char *c = buf;

        DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
        printf("%s: len=%06d 0x", start, len);
        while (len--)
                printf("%02x", (unsigned) *c++);
}
#endif