cryptosoft.c revision 1.16 1 /* $NetBSD: cryptosoft.c,v 1.16 2007/02/17 00:28:25 daniel Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.16 2007/02/17 00:28:25 daniel Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35
36 #include <opencrypto/cryptodev.h>
37 #include <opencrypto/cryptosoft.h>
38 #include <opencrypto/xform.h>
39
40 #include <opencrypto/cryptosoft_xform.c>
41
/*
 * Scratch space large enough to hold the running context of any of the
 * supported hash transforms; swcr_authcompute() copies the precomputed
 * keyed context in here before folding in the payload.
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
};
50
/*
 * Session table: swcr_sessions[sid] is a linked list of per-algorithm
 * swcr_data contexts for session `sid'.  Slot 0 is never used.  The
 * table is grown (doubled) on demand by swcr_newsession().
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;		/* number of slots in swcr_sessions */
int32_t swcr_id = -1;			/* driver id from crypto_get_driverid() */
54
/*
 * Copy data to (COPYBACK) or from (COPYDATA) the request buffer,
 * dispatching on the buffer type `x': an mbuf chain for
 * CRYPTO_BUF_MBUF, otherwise a struct uio.  All arguments are
 * parenthesized so expression arguments expand safely under the casts.
 */
#define COPYBACK(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copyback((struct uio *)(a),(b),(c),(d)))
#define COPYDATA(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copydata((struct uio *)(a),(b),(c),(d)))
61
/* Forward declarations for the driver entry points registered below. */
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);
67
68 /*
69 * Apply a symmetric encryption/decryption algorithm.
70 */
71 static int
72 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
73 int outtype)
74 {
75 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
76 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
77 const struct swcr_enc_xform *exf;
78 int i, k, j, blks;
79 int count, ind;
80
81 exf = sw->sw_exf;
82 blks = exf->enc_xform->blocksize;
83
84 /* Check for non-padded data */
85 if (crd->crd_len % blks)
86 return EINVAL;
87
88 /* Initialize the IV */
89 if (crd->crd_flags & CRD_F_ENCRYPT) {
90 /* IV explicitly provided ? */
91 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
92 bcopy(crd->crd_iv, iv, blks);
93 else {
94 /* Get random IV */
95 for (i = 0;
96 i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
97 i += sizeof (u_int32_t)) {
98 u_int32_t temp = arc4random();
99
100 bcopy(&temp, iv + i, sizeof(u_int32_t));
101 }
102 /*
103 * What if the block size is not a multiple
104 * of sizeof (u_int32_t), which is the size of
105 * what arc4random() returns ?
106 */
107 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
108 u_int32_t temp = arc4random();
109
110 bcopy (&temp, iv + i,
111 EALG_MAX_BLOCK_LEN - i);
112 }
113 }
114
115 /* Do we need to write the IV */
116 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
117 COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
118 }
119
120 } else { /* Decryption */
121 /* IV explicitly provided ? */
122 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
123 bcopy(crd->crd_iv, iv, blks);
124 else {
125 /* Get IV off buf */
126 COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
127 }
128 }
129
130 ivp = iv;
131
132 if (outtype == CRYPTO_BUF_CONTIG) {
133 if (crd->crd_flags & CRD_F_ENCRYPT) {
134 for (i = crd->crd_skip;
135 i < crd->crd_skip + crd->crd_len; i += blks) {
136 /* XOR with the IV/previous block, as appropriate. */
137 if (i == crd->crd_skip)
138 for (k = 0; k < blks; k++)
139 buf[i + k] ^= ivp[k];
140 else
141 for (k = 0; k < blks; k++)
142 buf[i + k] ^= buf[i + k - blks];
143 exf->encrypt(sw->sw_kschedule, buf + i);
144 }
145 } else { /* Decrypt */
146 /*
147 * Start at the end, so we don't need to keep the encrypted
148 * block as the IV for the next block.
149 */
150 for (i = crd->crd_skip + crd->crd_len - blks;
151 i >= crd->crd_skip; i -= blks) {
152 exf->decrypt(sw->sw_kschedule, buf + i);
153
154 /* XOR with the IV/previous block, as appropriate */
155 if (i == crd->crd_skip)
156 for (k = 0; k < blks; k++)
157 buf[i + k] ^= ivp[k];
158 else
159 for (k = 0; k < blks; k++)
160 buf[i + k] ^= buf[i + k - blks];
161 }
162 }
163
164 return 0;
165 } else if (outtype == CRYPTO_BUF_MBUF) {
166 struct mbuf *m = (struct mbuf *) buf;
167
168 /* Find beginning of data */
169 m = m_getptr(m, crd->crd_skip, &k);
170 if (m == NULL)
171 return EINVAL;
172
173 i = crd->crd_len;
174
175 while (i > 0) {
176 /*
177 * If there's insufficient data at the end of
178 * an mbuf, we have to do some copying.
179 */
180 if (m->m_len < k + blks && m->m_len != k) {
181 m_copydata(m, k, blks, blk);
182
183 /* Actual encryption/decryption */
184 if (crd->crd_flags & CRD_F_ENCRYPT) {
185 /* XOR with previous block */
186 for (j = 0; j < blks; j++)
187 blk[j] ^= ivp[j];
188
189 exf->encrypt(sw->sw_kschedule, blk);
190
191 /*
192 * Keep encrypted block for XOR'ing
193 * with next block
194 */
195 bcopy(blk, iv, blks);
196 ivp = iv;
197 } else { /* decrypt */
198 /*
199 * Keep encrypted block for XOR'ing
200 * with next block
201 */
202 if (ivp == iv)
203 bcopy(blk, piv, blks);
204 else
205 bcopy(blk, iv, blks);
206
207 exf->decrypt(sw->sw_kschedule, blk);
208
209 /* XOR with previous block */
210 for (j = 0; j < blks; j++)
211 blk[j] ^= ivp[j];
212
213 if (ivp == iv)
214 bcopy(piv, iv, blks);
215 else
216 ivp = iv;
217 }
218
219 /* Copy back decrypted block */
220 m_copyback(m, k, blks, blk);
221
222 /* Advance pointer */
223 m = m_getptr(m, k + blks, &k);
224 if (m == NULL)
225 return EINVAL;
226
227 i -= blks;
228
229 /* Could be done... */
230 if (i == 0)
231 break;
232 }
233
234 /* Skip possibly empty mbufs */
235 if (k == m->m_len) {
236 for (m = m->m_next; m && m->m_len == 0;
237 m = m->m_next)
238 ;
239 k = 0;
240 }
241
242 /* Sanity check */
243 if (m == NULL)
244 return EINVAL;
245
246 /*
247 * Warning: idat may point to garbage here, but
248 * we only use it in the while() loop, only if
249 * there are indeed enough data.
250 */
251 idat = mtod(m, unsigned char *) + k;
252
253 while (m->m_len >= k + blks && i > 0) {
254 if (crd->crd_flags & CRD_F_ENCRYPT) {
255 /* XOR with previous block/IV */
256 for (j = 0; j < blks; j++)
257 idat[j] ^= ivp[j];
258
259 exf->encrypt(sw->sw_kschedule, idat);
260 ivp = idat;
261 } else { /* decrypt */
262 /*
263 * Keep encrypted block to be used
264 * in next block's processing.
265 */
266 if (ivp == iv)
267 bcopy(idat, piv, blks);
268 else
269 bcopy(idat, iv, blks);
270
271 exf->decrypt(sw->sw_kschedule, idat);
272
273 /* XOR with previous block/IV */
274 for (j = 0; j < blks; j++)
275 idat[j] ^= ivp[j];
276
277 if (ivp == iv)
278 bcopy(piv, iv, blks);
279 else
280 ivp = iv;
281 }
282
283 idat += blks;
284 k += blks;
285 i -= blks;
286 }
287 }
288
289 return 0; /* Done with mbuf encryption/decryption */
290 } else if (outtype == CRYPTO_BUF_IOV) {
291 struct uio *uio = (struct uio *) buf;
292
293 #ifdef __FreeBSD__
294 struct iovec *iov;
295 /* Find beginning of data */
296 iov = cuio_getptr(uio, crd->crd_skip, &k);
297 if (iov == NULL)
298 return EINVAL;
299
300 i = crd->crd_len;
301
302 while (i > 0) {
303 /*
304 * If there's insufficient data at the end of
305 * an iovec, we have to do some copying.
306 */
307 if (iov->iov_len < k + blks && iov->iov_len != k) {
308 cuio_copydata(uio, k, blks, blk);
309
310 /* Actual encryption/decryption */
311 if (crd->crd_flags & CRD_F_ENCRYPT) {
312 /* XOR with previous block */
313 for (j = 0; j < blks; j++)
314 blk[j] ^= ivp[j];
315
316 exf->encrypt(sw->sw_kschedule, blk);
317
318 /*
319 * Keep encrypted block for XOR'ing
320 * with next block
321 */
322 bcopy(blk, iv, blks);
323 ivp = iv;
324 } else { /* decrypt */
325 /*
326 * Keep encrypted block for XOR'ing
327 * with next block
328 */
329 if (ivp == iv)
330 bcopy(blk, piv, blks);
331 else
332 bcopy(blk, iv, blks);
333
334 exf->decrypt(sw->sw_kschedule, blk);
335
336 /* XOR with previous block */
337 for (j = 0; j < blks; j++)
338 blk[j] ^= ivp[j];
339
340 if (ivp == iv)
341 bcopy(piv, iv, blks);
342 else
343 ivp = iv;
344 }
345
346 /* Copy back decrypted block */
347 cuio_copyback(uio, k, blks, blk);
348
349 /* Advance pointer */
350 iov = cuio_getptr(uio, k + blks, &k);
351 if (iov == NULL)
352 return EINVAL;
353
354 i -= blks;
355
356 /* Could be done... */
357 if (i == 0)
358 break;
359 }
360
361 /*
362 * Warning: idat may point to garbage here, but
363 * we only use it in the while() loop, only if
364 * there are indeed enough data.
365 */
366 idat = (char *)iov->iov_base + k;
367
368 while (iov->iov_len >= k + blks && i > 0) {
369 if (crd->crd_flags & CRD_F_ENCRYPT) {
370 /* XOR with previous block/IV */
371 for (j = 0; j < blks; j++)
372 idat[j] ^= ivp[j];
373
374 exf->encrypt(sw->sw_kschedule, idat);
375 ivp = idat;
376 } else { /* decrypt */
377 /*
378 * Keep encrypted block to be used
379 * in next block's processing.
380 */
381 if (ivp == iv)
382 bcopy(idat, piv, blks);
383 else
384 bcopy(idat, iv, blks);
385
386 exf->decrypt(sw->sw_kschedule, idat);
387
388 /* XOR with previous block/IV */
389 for (j = 0; j < blks; j++)
390 idat[j] ^= ivp[j];
391
392 if (ivp == iv)
393 bcopy(piv, iv, blks);
394 else
395 ivp = iv;
396 }
397
398 idat += blks;
399 k += blks;
400 i -= blks;
401 }
402 }
403
404 return 0; /* Done with mbuf encryption/decryption */
405 #else /* !freebsd iov */
406 /* Find beginning of data */
407 count = crd->crd_skip;
408 ind = cuio_getptr(uio, count, &k);
409 if (ind == -1)
410 return EINVAL;
411
412 i = crd->crd_len;
413
414 while (i > 0) {
415 /*
416 * If there's insufficient data at the end,
417 * we have to do some copying.
418 */
419 if (uio->uio_iov[ind].iov_len < k + blks &&
420 uio->uio_iov[ind].iov_len != k) {
421 cuio_copydata(uio, k, blks, blk);
422
423 /* Actual encryption/decryption */
424 if (crd->crd_flags & CRD_F_ENCRYPT) {
425 /* XOR with previous block */
426 for (j = 0; j < blks; j++)
427 blk[j] ^= ivp[j];
428
429 exf->encrypt(sw->sw_kschedule, blk);
430
431 /*
432 * Keep encrypted block for XOR'ing
433 * with next block
434 */
435 bcopy(blk, iv, blks);
436 ivp = iv;
437 } else { /* decrypt */
438 /*
439 * Keep encrypted block for XOR'ing
440 * with next block
441 */
442 if (ivp == iv)
443 bcopy(blk, piv, blks);
444 else
445 bcopy(blk, iv, blks);
446
447 exf->decrypt(sw->sw_kschedule, blk);
448
449 /* XOR with previous block */
450 for (j = 0; j < blks; j++)
451 blk[j] ^= ivp[j];
452
453 if (ivp == iv)
454 bcopy(piv, iv, blks);
455 else
456 ivp = iv;
457 }
458
459 /* Copy back decrypted block */
460 cuio_copyback(uio, k, blks, blk);
461
462 count += blks;
463
464 /* Advance pointer */
465 ind = cuio_getptr(uio, count, &k);
466 if (ind == -1)
467 return (EINVAL);
468
469 i -= blks;
470
471 /* Could be done... */
472 if (i == 0)
473 break;
474 }
475
476 /*
477 * Warning: idat may point to garbage here, but
478 * we only use it in the while() loop, only if
479 * there are indeed enough data.
480 */
481 idat = ((caddr_t)uio->uio_iov[ind].iov_base) + k;
482
483 while (uio->uio_iov[ind].iov_len >= k + blks &&
484 i > 0) {
485 if (crd->crd_flags & CRD_F_ENCRYPT) {
486 /* XOR with previous block/IV */
487 for (j = 0; j < blks; j++)
488 idat[j] ^= ivp[j];
489
490 exf->encrypt(sw->sw_kschedule, idat);
491 ivp = idat;
492 } else { /* decrypt */
493 /*
494 * Keep encrypted block to be used
495 * in next block's processing.
496 */
497 if (ivp == iv)
498 bcopy(idat, piv, blks);
499 else
500 bcopy(idat, iv, blks);
501
502 exf->decrypt(sw->sw_kschedule, idat);
503
504 /* XOR with previous block/IV */
505 for (j = 0; j < blks; j++)
506 idat[j] ^= ivp[j];
507
508 if (ivp == iv)
509 bcopy(piv, iv, blks);
510 else
511 ivp = iv;
512 }
513
514 idat += blks;
515 count += blks;
516 k += blks;
517 i -= blks;
518 }
519 }
520 #endif
521 return 0; /* Done with mbuf encryption/decryption */
522 }
523
524 /* Unreachable */
525 return EINVAL;
526 }
527
/*
 * Compute a keyed-hash authenticator over the range described by crd
 * and write it back at crd->crd_inject (or into crp->crp_mac for the
 * iov case).  The per-session precomputed contexts in sw (sw_ictx,
 * sw_octx) supply the key material.  Returns 0 on success or EINVAL.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Start from the precomputed (inner/keyed) hash context. */
	bcopy(sw->sw_ictx, &ctx, axf->auth_hash->ctxsize);

	/* Fold the payload into the running context. */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
#ifdef __FreeBSD__
		/*XXX FIXME: handle iov case*/
		return EINVAL;
#else
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
		if (err) {
			return err;
		}
#endif
		break;
	default:
		return EINVAL;
	}

	/* Finalize according to the algorithm's keying construction. */
	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* HMAC: outer hash over the inner digest. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->auth_hash->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* KPDK: append the raw key (stored in sw_octx) and finish. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
		/* Plain (unkeyed) digest. */
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		bcopy(aalg, buf + crd->crd_inject, axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		bcopy(aalg, crp->crp_mac, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}
623
624 /*
625 * Apply a compression/decompression algorithm
626 */
627 static int
628 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
629 caddr_t buf, int outtype)
630 {
631 u_int8_t *data, *out;
632 const struct swcr_comp_algo *cxf;
633 int adj;
634 u_int32_t result;
635
636 cxf = sw->sw_cxf;
637
638 /* We must handle the whole buffer of data in one time
639 * then if there is not all the data in the mbuf, we must
640 * copy in a buffer.
641 */
642
643 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
644 if (data == NULL)
645 return (EINVAL);
646 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
647
648 if (crd->crd_flags & CRD_F_COMP)
649 result = cxf->compress(data, crd->crd_len, &out);
650 else
651 result = cxf->decompress(data, crd->crd_len, &out);
652
653 FREE(data, M_CRYPTO_DATA);
654 if (result == 0)
655 return EINVAL;
656
657 /* Copy back the (de)compressed data. m_copyback is
658 * extending the mbuf as necessary.
659 */
660 sw->sw_size = result;
661 /* Check the compressed size when doing compression */
662 if (crd->crd_flags & CRD_F_COMP) {
663 if (result > crd->crd_len) {
664 /* Compression was useless, we lost time */
665 FREE(out, M_CRYPTO_DATA);
666 return 0;
667 }
668 }
669
670 COPYBACK(outtype, buf, crd->crd_skip, result, out);
671 if (result < crd->crd_len) {
672 adj = result - crd->crd_len;
673 if (outtype == CRYPTO_BUF_MBUF) {
674 adj = result - crd->crd_len;
675 m_adj((struct mbuf *)buf, adj);
676 } else {
677 struct uio *uio = (struct uio *)buf;
678 int ind;
679
680 adj = crd->crd_len - result;
681 ind = uio->uio_iovcnt - 1;
682
683 while (adj > 0 && ind >= 0) {
684 if (adj < uio->uio_iov[ind].iov_len) {
685 uio->uio_iov[ind].iov_len -= adj;
686 break;
687 }
688
689 adj -= uio->uio_iov[ind].iov_len;
690 uio->uio_iov[ind].iov_len = 0;
691 ind--;
692 uio->uio_iovcnt--;
693 }
694 }
695 }
696 FREE(out, M_CRYPTO_DATA);
697 return 0;
698 }
699
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data context per
 * requested algorithm and linking them into swcr_sessions[*sid].  For
 * HMAC algorithms the inner/outer (ipad/opad) hash contexts are
 * precomputed here so per-request work is minimized.  Grows the
 * session table (doubling) when it is full.  Returns 0, EINVAL, or
 * ENOBUFS; on any failure the partially built session is torn down
 * via swcr_freesession().
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot; slot 0 is reserved. */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* No free slot (or no table yet): allocate/grow the table. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		bzero(swd, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per algorithm in the chain. */
	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		bzero(*swd, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Expand the key schedule for the chosen cipher. */
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			/* SHA-2 variant is selected by key length (bits). */
			if (cri->cri_klen == 256)
				axf = &swcr_auth_hash_hmac_sha2_256;
			else if (cri->cri_klen == 384)
				axf = &swcr_auth_hash_hmac_sha2_384;
			else if (cri->cri_klen == 512)
				axf = &swcr_auth_hash_hmac_sha2_512;
			else {
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
		authcommon:
			/*
			 * Precompute the HMAC inner (key^ipad) and outer
			 * (key^opad) hash contexts.  The key is XOR'ed
			 * in place and restored afterwards.
			 */
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			/* Restore the caller's key bytes. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Unkeyed digest: just an initialized context. */
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
921
922 /*
923 * Free a session.
924 */
925 static int
926 swcr_freesession(void *arg, u_int64_t tid)
927 {
928 struct swcr_data *swd;
929 const struct swcr_enc_xform *txf;
930 const struct swcr_auth_hash *axf;
931 const struct swcr_comp_algo *cxf;
932 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
933
934 if (sid > swcr_sesnum || swcr_sessions == NULL ||
935 swcr_sessions[sid] == NULL)
936 return EINVAL;
937
938 /* Silently accept and return */
939 if (sid == 0)
940 return 0;
941
942 while ((swd = swcr_sessions[sid]) != NULL) {
943 swcr_sessions[sid] = swd->sw_next;
944
945 switch (swd->sw_alg) {
946 case CRYPTO_DES_CBC:
947 case CRYPTO_3DES_CBC:
948 case CRYPTO_BLF_CBC:
949 case CRYPTO_CAST_CBC:
950 case CRYPTO_SKIPJACK_CBC:
951 case CRYPTO_RIJNDAEL128_CBC:
952 case CRYPTO_NULL_CBC:
953 txf = swd->sw_exf;
954
955 if (swd->sw_kschedule)
956 txf->zerokey(&(swd->sw_kschedule));
957 break;
958
959 case CRYPTO_MD5_HMAC:
960 case CRYPTO_SHA1_HMAC:
961 case CRYPTO_SHA2_HMAC:
962 case CRYPTO_RIPEMD160_HMAC:
963 case CRYPTO_NULL_HMAC:
964 axf = swd->sw_axf;
965
966 if (swd->sw_ictx) {
967 bzero(swd->sw_ictx, axf->auth_hash->ctxsize);
968 free(swd->sw_ictx, M_CRYPTO_DATA);
969 }
970 if (swd->sw_octx) {
971 bzero(swd->sw_octx, axf->auth_hash->ctxsize);
972 free(swd->sw_octx, M_CRYPTO_DATA);
973 }
974 break;
975
976 case CRYPTO_MD5_KPDK:
977 case CRYPTO_SHA1_KPDK:
978 axf = swd->sw_axf;
979
980 if (swd->sw_ictx) {
981 bzero(swd->sw_ictx, axf->auth_hash->ctxsize);
982 free(swd->sw_ictx, M_CRYPTO_DATA);
983 }
984 if (swd->sw_octx) {
985 bzero(swd->sw_octx, swd->sw_klen);
986 free(swd->sw_octx, M_CRYPTO_DATA);
987 }
988 break;
989
990 case CRYPTO_MD5:
991 case CRYPTO_SHA1:
992 axf = swd->sw_axf;
993
994 if (swd->sw_ictx)
995 free(swd->sw_ictx, M_CRYPTO_DATA);
996 break;
997
998 case CRYPTO_DEFLATE_COMP:
999 cxf = swd->sw_cxf;
1000 break;
1001 }
1002
1003 FREE(swd, M_CRYPTO_DATA);
1004 }
1005 return 0;
1006 }
1007
/*
 * Process a software request.
 *
 * Driver entry point: validate the request, resolve the session,
 * determine the buffer type from crp_flags, then walk the descriptor
 * chain dispatching each descriptor to the matching per-algorithm
 * handler.  Completion (success or error in crp_etype) is always
 * reported through crypto_done().
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	/* Low 32 bits of the session id index swcr_sessions[]. */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Classify the buffer so the workers know how to walk it. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* Null cipher: nothing to do. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				/* Report the (de)compressed length. */
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
1112
1113 static void
1114 swcr_init(void)
1115 {
1116 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1117 if (swcr_id < 0) {
1118 /* This should never happen */
1119 panic("Software crypto device cannot initialize!");
1120 }
1121
1122 crypto_register(swcr_id, CRYPTO_DES_CBC,
1123 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1124 #define REGISTER(alg) \
1125 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1126
1127 REGISTER(CRYPTO_3DES_CBC);
1128 REGISTER(CRYPTO_BLF_CBC);
1129 REGISTER(CRYPTO_CAST_CBC);
1130 REGISTER(CRYPTO_SKIPJACK_CBC);
1131 REGISTER(CRYPTO_NULL_CBC);
1132 REGISTER(CRYPTO_MD5_HMAC);
1133 REGISTER(CRYPTO_SHA1_HMAC);
1134 REGISTER(CRYPTO_SHA2_HMAC);
1135 REGISTER(CRYPTO_RIPEMD160_HMAC);
1136 REGISTER(CRYPTO_NULL_HMAC);
1137 REGISTER(CRYPTO_MD5_KPDK);
1138 REGISTER(CRYPTO_SHA1_KPDK);
1139 REGISTER(CRYPTO_MD5);
1140 REGISTER(CRYPTO_SHA1);
1141 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1142 REGISTER(CRYPTO_DEFLATE_COMP);
1143 #undef REGISTER
1144 }
1145
1146 #ifdef __FreeBSD__
1147 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)
1148 #endif
1149
1150 #ifdef __NetBSD__
/*
 * Pseudo-device init routine for software crypto.
 */
void swcryptoattach(int);

/*
 * NetBSD pseudo-device attach hook; `num' (the number of requested
 * instances) is unused — there is only ever one software driver.
 */
void
swcryptoattach(int num)
{

	swcr_init();
}
1162 #endif /* __NetBSD__ */
1163