/*	$NetBSD: cryptosoft.c,v 1.32 2011/05/23 13:46:54 drochner Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.32 2011/05/23 13:46:54 drochner Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35
36 #include "opt_ocf.h"
37 #include <opencrypto/cryptodev.h>
38 #include <opencrypto/cryptosoft.h>
39 #include <opencrypto/xform.h>
40
41 #include <opencrypto/cryptosoft_xform.c>
42
/*
 * Scratch area big enough to hold the running state of any hash
 * algorithm this driver supports.  swcr_authcompute() memcpy()s the
 * session's precomputed context into one of these before hashing the
 * request payload, so the per-session state is never modified.
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
};
51
/*
 * Session table.  Slots 1..swcr_sesnum-1 hold per-session state lists;
 * slot 0 is deliberately left empty (session id 0 is reserved).
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */
55
/*
 * Copy data to/from the request buffer, dispatching on the buffer type:
 * mbuf chain vs. uio.  (CRYPTO_BUF_CONTIG callers never use these.)
 *
 * The whole expansion and every argument are parenthesized so the
 * macros behave like ordinary expressions at any call site (e.g. when
 * an argument is itself an expression, or the macro is embedded in a
 * larger expression).
 */
#define COPYBACK(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copyback((struct uio *)(a),(b),(c),(d)))
#define COPYDATA(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copydata((struct uio *)(a),(b),(c),(d)))
62
/* Forward declarations for this driver's internal entry points. */
static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);
68
69 /*
70 * Apply a symmetric encryption/decryption algorithm.
71 */
72 static int
73 swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
74 int outtype)
75 {
76 char *buf = bufv;
77 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
78 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
79 const struct swcr_enc_xform *exf;
80 int i, k, j, blks, ivlen;
81 int count, ind;
82
83 exf = sw->sw_exf;
84 blks = exf->enc_xform->blocksize;
85 ivlen = exf->enc_xform->ivsize;
86 KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);
87
88 /* Check for non-padded data */
89 if (crd->crd_len % blks)
90 return EINVAL;
91
92 /* Initialize the IV */
93 if (crd->crd_flags & CRD_F_ENCRYPT) {
94 /* IV explicitly provided ? */
95 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
96 memcpy(iv, crd->crd_iv, ivlen);
97 else {
98 /* Get random IV */
99 for (i = 0;
100 i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
101 i += sizeof (u_int32_t)) {
102 u_int32_t temp = arc4random();
103
104 memcpy(iv + i, &temp, sizeof(u_int32_t));
105 }
106 /*
107 * What if the block size is not a multiple
108 * of sizeof (u_int32_t), which is the size of
109 * what arc4random() returns ?
110 */
111 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
112 u_int32_t temp = arc4random();
113
114 bcopy (&temp, iv + i,
115 EALG_MAX_BLOCK_LEN - i);
116 }
117 }
118
119 /* Do we need to write the IV */
120 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
121 COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
122 }
123
124 } else { /* Decryption */
125 /* IV explicitly provided ? */
126 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
127 memcpy(iv, crd->crd_iv, ivlen);
128 else {
129 /* Get IV off buf */
130 COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
131 }
132 }
133
134 ivp = iv;
135
136 if (exf->reinit)
137 exf->reinit(sw->sw_kschedule, iv);
138
139 if (outtype == CRYPTO_BUF_CONTIG) {
140 if (exf->reinit) {
141 for (i = crd->crd_skip;
142 i < crd->crd_skip + crd->crd_len; i += blks) {
143 if (crd->crd_flags & CRD_F_ENCRYPT) {
144 exf->encrypt(sw->sw_kschedule, buf + i);
145 } else {
146 exf->decrypt(sw->sw_kschedule, buf + i);
147 }
148 }
149 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
150 for (i = crd->crd_skip;
151 i < crd->crd_skip + crd->crd_len; i += blks) {
152 /* XOR with the IV/previous block, as appropriate. */
153 if (i == crd->crd_skip)
154 for (k = 0; k < blks; k++)
155 buf[i + k] ^= ivp[k];
156 else
157 for (k = 0; k < blks; k++)
158 buf[i + k] ^= buf[i + k - blks];
159 exf->encrypt(sw->sw_kschedule, buf + i);
160 }
161 } else { /* Decrypt */
162 /*
163 * Start at the end, so we don't need to keep the encrypted
164 * block as the IV for the next block.
165 */
166 for (i = crd->crd_skip + crd->crd_len - blks;
167 i >= crd->crd_skip; i -= blks) {
168 exf->decrypt(sw->sw_kschedule, buf + i);
169
170 /* XOR with the IV/previous block, as appropriate */
171 if (i == crd->crd_skip)
172 for (k = 0; k < blks; k++)
173 buf[i + k] ^= ivp[k];
174 else
175 for (k = 0; k < blks; k++)
176 buf[i + k] ^= buf[i + k - blks];
177 }
178 }
179
180 return 0;
181 } else if (outtype == CRYPTO_BUF_MBUF) {
182 struct mbuf *m = (struct mbuf *) buf;
183
184 /* Find beginning of data */
185 m = m_getptr(m, crd->crd_skip, &k);
186 if (m == NULL)
187 return EINVAL;
188
189 i = crd->crd_len;
190
191 while (i > 0) {
192 /*
193 * If there's insufficient data at the end of
194 * an mbuf, we have to do some copying.
195 */
196 if (m->m_len < k + blks && m->m_len != k) {
197 m_copydata(m, k, blks, blk);
198
199 /* Actual encryption/decryption */
200 if (exf->reinit) {
201 if (crd->crd_flags & CRD_F_ENCRYPT) {
202 exf->encrypt(sw->sw_kschedule,
203 blk);
204 } else {
205 exf->decrypt(sw->sw_kschedule,
206 blk);
207 }
208 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
209 /* XOR with previous block */
210 for (j = 0; j < blks; j++)
211 blk[j] ^= ivp[j];
212
213 exf->encrypt(sw->sw_kschedule, blk);
214
215 /*
216 * Keep encrypted block for XOR'ing
217 * with next block
218 */
219 memcpy(iv, blk, blks);
220 ivp = iv;
221 } else { /* decrypt */
222 /*
223 * Keep encrypted block for XOR'ing
224 * with next block
225 */
226 if (ivp == iv)
227 memcpy(piv, blk, blks);
228 else
229 memcpy(iv, blk, blks);
230
231 exf->decrypt(sw->sw_kschedule, blk);
232
233 /* XOR with previous block */
234 for (j = 0; j < blks; j++)
235 blk[j] ^= ivp[j];
236
237 if (ivp == iv)
238 memcpy(iv, piv, blks);
239 else
240 ivp = iv;
241 }
242
243 /* Copy back decrypted block */
244 m_copyback(m, k, blks, blk);
245
246 /* Advance pointer */
247 m = m_getptr(m, k + blks, &k);
248 if (m == NULL)
249 return EINVAL;
250
251 i -= blks;
252
253 /* Could be done... */
254 if (i == 0)
255 break;
256 }
257
258 /* Skip possibly empty mbufs */
259 if (k == m->m_len) {
260 for (m = m->m_next; m && m->m_len == 0;
261 m = m->m_next)
262 ;
263 k = 0;
264 }
265
266 /* Sanity check */
267 if (m == NULL)
268 return EINVAL;
269
270 /*
271 * Warning: idat may point to garbage here, but
272 * we only use it in the while() loop, only if
273 * there are indeed enough data.
274 */
275 idat = mtod(m, unsigned char *) + k;
276
277 while (m->m_len >= k + blks && i > 0) {
278 if (exf->reinit) {
279 if (crd->crd_flags & CRD_F_ENCRYPT) {
280 exf->encrypt(sw->sw_kschedule,
281 idat);
282 } else {
283 exf->decrypt(sw->sw_kschedule,
284 idat);
285 }
286 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
287 /* XOR with previous block/IV */
288 for (j = 0; j < blks; j++)
289 idat[j] ^= ivp[j];
290
291 exf->encrypt(sw->sw_kschedule, idat);
292 ivp = idat;
293 } else { /* decrypt */
294 /*
295 * Keep encrypted block to be used
296 * in next block's processing.
297 */
298 if (ivp == iv)
299 memcpy(piv, idat, blks);
300 else
301 memcpy(iv, idat, blks);
302
303 exf->decrypt(sw->sw_kschedule, idat);
304
305 /* XOR with previous block/IV */
306 for (j = 0; j < blks; j++)
307 idat[j] ^= ivp[j];
308
309 if (ivp == iv)
310 memcpy(iv, piv, blks);
311 else
312 ivp = iv;
313 }
314
315 idat += blks;
316 k += blks;
317 i -= blks;
318 }
319 }
320
321 return 0; /* Done with mbuf encryption/decryption */
322 } else if (outtype == CRYPTO_BUF_IOV) {
323 struct uio *uio = (struct uio *) buf;
324
325 /* Find beginning of data */
326 count = crd->crd_skip;
327 ind = cuio_getptr(uio, count, &k);
328 if (ind == -1)
329 return EINVAL;
330
331 i = crd->crd_len;
332
333 while (i > 0) {
334 /*
335 * If there's insufficient data at the end,
336 * we have to do some copying.
337 */
338 if (uio->uio_iov[ind].iov_len < k + blks &&
339 uio->uio_iov[ind].iov_len != k) {
340 cuio_copydata(uio, k, blks, blk);
341
342 /* Actual encryption/decryption */
343 if (exf->reinit) {
344 if (crd->crd_flags & CRD_F_ENCRYPT) {
345 exf->encrypt(sw->sw_kschedule,
346 blk);
347 } else {
348 exf->decrypt(sw->sw_kschedule,
349 blk);
350 }
351 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
352 /* XOR with previous block */
353 for (j = 0; j < blks; j++)
354 blk[j] ^= ivp[j];
355
356 exf->encrypt(sw->sw_kschedule, blk);
357
358 /*
359 * Keep encrypted block for XOR'ing
360 * with next block
361 */
362 memcpy(iv, blk, blks);
363 ivp = iv;
364 } else { /* decrypt */
365 /*
366 * Keep encrypted block for XOR'ing
367 * with next block
368 */
369 if (ivp == iv)
370 memcpy(piv, blk, blks);
371 else
372 memcpy(iv, blk, blks);
373
374 exf->decrypt(sw->sw_kschedule, blk);
375
376 /* XOR with previous block */
377 for (j = 0; j < blks; j++)
378 blk[j] ^= ivp[j];
379
380 if (ivp == iv)
381 memcpy(iv, piv, blks);
382 else
383 ivp = iv;
384 }
385
386 /* Copy back decrypted block */
387 cuio_copyback(uio, k, blks, blk);
388
389 count += blks;
390
391 /* Advance pointer */
392 ind = cuio_getptr(uio, count, &k);
393 if (ind == -1)
394 return (EINVAL);
395
396 i -= blks;
397
398 /* Could be done... */
399 if (i == 0)
400 break;
401 }
402
403 /*
404 * Warning: idat may point to garbage here, but
405 * we only use it in the while() loop, only if
406 * there are indeed enough data.
407 */
408 idat = ((char *)uio->uio_iov[ind].iov_base) + k;
409
410 while (uio->uio_iov[ind].iov_len >= k + blks &&
411 i > 0) {
412 if (exf->reinit) {
413 if (crd->crd_flags & CRD_F_ENCRYPT) {
414 exf->encrypt(sw->sw_kschedule,
415 idat);
416 } else {
417 exf->decrypt(sw->sw_kschedule,
418 idat);
419 }
420 } else if (crd->crd_flags & CRD_F_ENCRYPT) {
421 /* XOR with previous block/IV */
422 for (j = 0; j < blks; j++)
423 idat[j] ^= ivp[j];
424
425 exf->encrypt(sw->sw_kschedule, idat);
426 ivp = idat;
427 } else { /* decrypt */
428 /*
429 * Keep encrypted block to be used
430 * in next block's processing.
431 */
432 if (ivp == iv)
433 memcpy(piv, idat, blks);
434 else
435 memcpy(iv, idat, blks);
436
437 exf->decrypt(sw->sw_kschedule, idat);
438
439 /* XOR with previous block/IV */
440 for (j = 0; j < blks; j++)
441 idat[j] ^= ivp[j];
442
443 if (ivp == iv)
444 memcpy(iv, piv, blks);
445 else
446 ivp = iv;
447 }
448
449 idat += blks;
450 count += blks;
451 k += blks;
452 i -= blks;
453 }
454 }
455 return 0; /* Done with mbuf encryption/decryption */
456 }
457
458 /* Unreachable */
459 return EINVAL;
460 }
461
462 /*
463 * Compute keyed-hash authenticator.
464 */
465 int
466 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
467 const struct swcr_data *sw, void *buf, int outtype)
468 {
469 unsigned char aalg[AALG_MAX_RESULT_LEN];
470 const struct swcr_auth_hash *axf;
471 union authctx ctx;
472 int err;
473
474 if (sw->sw_ictx == 0)
475 return EINVAL;
476
477 axf = sw->sw_axf;
478
479 memcpy(&ctx, sw->sw_ictx, axf->auth_hash->ctxsize);
480
481 switch (outtype) {
482 case CRYPTO_BUF_CONTIG:
483 axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
484 break;
485 case CRYPTO_BUF_MBUF:
486 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
487 (int (*)(void*, void *, unsigned int)) axf->Update,
488 (void *) &ctx);
489 if (err)
490 return err;
491 break;
492 case CRYPTO_BUF_IOV:
493 err = cuio_apply((struct uio *) buf, crd->crd_skip,
494 crd->crd_len,
495 (int (*)(void *, void *, unsigned int)) axf->Update,
496 (void *) &ctx);
497 if (err) {
498 return err;
499 }
500 break;
501 default:
502 return EINVAL;
503 }
504
505 switch (sw->sw_alg) {
506 case CRYPTO_MD5_HMAC:
507 case CRYPTO_MD5_HMAC_96:
508 case CRYPTO_SHA1_HMAC:
509 case CRYPTO_SHA1_HMAC_96:
510 case CRYPTO_SHA2_256_HMAC:
511 case CRYPTO_SHA2_384_HMAC:
512 case CRYPTO_SHA2_512_HMAC:
513 case CRYPTO_RIPEMD160_HMAC:
514 case CRYPTO_RIPEMD160_HMAC_96:
515 if (sw->sw_octx == NULL)
516 return EINVAL;
517
518 axf->Final(aalg, &ctx);
519 memcpy(&ctx, sw->sw_octx, axf->auth_hash->ctxsize);
520 axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
521 axf->Final(aalg, &ctx);
522 break;
523
524 case CRYPTO_MD5_KPDK:
525 case CRYPTO_SHA1_KPDK:
526 if (sw->sw_octx == NULL)
527 return EINVAL;
528
529 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
530 axf->Final(aalg, &ctx);
531 break;
532
533 case CRYPTO_NULL_HMAC:
534 case CRYPTO_MD5:
535 case CRYPTO_SHA1:
536 axf->Final(aalg, &ctx);
537 break;
538 }
539
540 /* Inject the authentication data */
541 switch (outtype) {
542 case CRYPTO_BUF_CONTIG:
543 (void)memcpy((char *)buf + crd->crd_inject, aalg,
544 axf->auth_hash->authsize);
545 break;
546 case CRYPTO_BUF_MBUF:
547 m_copyback((struct mbuf *) buf, crd->crd_inject,
548 axf->auth_hash->authsize, aalg);
549 break;
550 case CRYPTO_BUF_IOV:
551 memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
552 break;
553 default:
554 return EINVAL;
555 }
556 return 0;
557 }
558
559 /*
560 * Apply a compression/decompression algorithm
561 */
562 static int
563 swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
564 void *buf, int outtype, int *res_size)
565 {
566 u_int8_t *data, *out;
567 const struct swcr_comp_algo *cxf;
568 int adj;
569 u_int32_t result;
570
571 cxf = sw->sw_cxf;
572
573 /* We must handle the whole buffer of data in one time
574 * then if there is not all the data in the mbuf, we must
575 * copy in a buffer.
576 */
577
578 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
579 if (data == NULL)
580 return (EINVAL);
581 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
582
583 if (crd->crd_flags & CRD_F_COMP)
584 result = cxf->compress(data, crd->crd_len, &out);
585 else
586 result = cxf->decompress(data, crd->crd_len, &out,
587 *res_size);
588
589 free(data, M_CRYPTO_DATA);
590 if (result == 0)
591 return EINVAL;
592
593 /* Copy back the (de)compressed data. m_copyback is
594 * extending the mbuf as necessary.
595 */
596 *res_size = (int)result;
597 /* Check the compressed size when doing compression */
598 if (crd->crd_flags & CRD_F_COMP &&
599 sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
600 result >= crd->crd_len) {
601 /* Compression was useless, we lost time */
602 free(out, M_CRYPTO_DATA);
603 return 0;
604 }
605
606 COPYBACK(outtype, buf, crd->crd_skip, result, out);
607 if (result < crd->crd_len) {
608 adj = result - crd->crd_len;
609 if (outtype == CRYPTO_BUF_MBUF) {
610 adj = result - crd->crd_len;
611 m_adj((struct mbuf *)buf, adj);
612 }
613 /* Don't adjust the iov_len, it breaks the kmem_free */
614 }
615 free(out, M_CRYPTO_DATA);
616 return 0;
617 }
618
619 /*
620 * Generate a new software session.
621 */
622 static int
623 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
624 {
625 struct swcr_data **swd;
626 const struct swcr_auth_hash *axf;
627 const struct swcr_enc_xform *txf;
628 const struct swcr_comp_algo *cxf;
629 u_int32_t i;
630 int k, error;
631
632 if (sid == NULL || cri == NULL)
633 return EINVAL;
634
635 if (swcr_sessions) {
636 for (i = 1; i < swcr_sesnum; i++)
637 if (swcr_sessions[i] == NULL)
638 break;
639 } else
640 i = 1; /* NB: to silence compiler warning */
641
642 if (swcr_sessions == NULL || i == swcr_sesnum) {
643 if (swcr_sessions == NULL) {
644 i = 1; /* We leave swcr_sessions[0] empty */
645 swcr_sesnum = CRYPTO_SW_SESSIONS;
646 } else
647 swcr_sesnum *= 2;
648
649 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
650 M_CRYPTO_DATA, M_NOWAIT);
651 if (swd == NULL) {
652 /* Reset session number */
653 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
654 swcr_sesnum = 0;
655 else
656 swcr_sesnum /= 2;
657 return ENOBUFS;
658 }
659
660 memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
661
662 /* Copy existing sessions */
663 if (swcr_sessions) {
664 memcpy(swd, swcr_sessions,
665 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
666 free(swcr_sessions, M_CRYPTO_DATA);
667 }
668
669 swcr_sessions = swd;
670 }
671
672 swd = &swcr_sessions[i];
673 *sid = i;
674
675 while (cri) {
676 *swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
677 if (*swd == NULL) {
678 swcr_freesession(NULL, i);
679 return ENOBUFS;
680 }
681 memset(*swd, 0, sizeof(struct swcr_data));
682
683 switch (cri->cri_alg) {
684 case CRYPTO_DES_CBC:
685 txf = &swcr_enc_xform_des;
686 goto enccommon;
687 case CRYPTO_3DES_CBC:
688 txf = &swcr_enc_xform_3des;
689 goto enccommon;
690 case CRYPTO_BLF_CBC:
691 txf = &swcr_enc_xform_blf;
692 goto enccommon;
693 case CRYPTO_CAST_CBC:
694 txf = &swcr_enc_xform_cast5;
695 goto enccommon;
696 case CRYPTO_SKIPJACK_CBC:
697 txf = &swcr_enc_xform_skipjack;
698 goto enccommon;
699 case CRYPTO_RIJNDAEL128_CBC:
700 txf = &swcr_enc_xform_rijndael128;
701 goto enccommon;
702 case CRYPTO_CAMELLIA_CBC:
703 txf = &swcr_enc_xform_camellia;
704 goto enccommon;
705 case CRYPTO_NULL_CBC:
706 txf = &swcr_enc_xform_null;
707 goto enccommon;
708 enccommon:
709 error = txf->setkey(&((*swd)->sw_kschedule),
710 cri->cri_key, cri->cri_klen / 8);
711 if (error) {
712 swcr_freesession(NULL, i);
713 return error;
714 }
715 (*swd)->sw_exf = txf;
716 break;
717
718 case CRYPTO_MD5_HMAC:
719 axf = &swcr_auth_hash_hmac_md5;
720 goto authcommon;
721 case CRYPTO_MD5_HMAC_96:
722 axf = &swcr_auth_hash_hmac_md5_96;
723 goto authcommon;
724 case CRYPTO_SHA1_HMAC:
725 axf = &swcr_auth_hash_hmac_sha1;
726 goto authcommon;
727 case CRYPTO_SHA1_HMAC_96:
728 axf = &swcr_auth_hash_hmac_sha1_96;
729 goto authcommon;
730 case CRYPTO_SHA2_256_HMAC:
731 axf = &swcr_auth_hash_hmac_sha2_256;
732 goto authcommon;
733 case CRYPTO_SHA2_384_HMAC:
734 axf = &swcr_auth_hash_hmac_sha2_384;
735 goto authcommon;
736 case CRYPTO_SHA2_512_HMAC:
737 axf = &swcr_auth_hash_hmac_sha2_512;
738 goto authcommon;
739 case CRYPTO_NULL_HMAC:
740 axf = &swcr_auth_hash_null;
741 goto authcommon;
742 case CRYPTO_RIPEMD160_HMAC:
743 axf = &swcr_auth_hash_hmac_ripemd_160;
744 goto authcommon;
745 case CRYPTO_RIPEMD160_HMAC_96:
746 axf = &swcr_auth_hash_hmac_ripemd_160_96;
747 goto authcommon; /* leave this for safety */
748 authcommon:
749 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
750 M_CRYPTO_DATA, M_NOWAIT);
751 if ((*swd)->sw_ictx == NULL) {
752 swcr_freesession(NULL, i);
753 return ENOBUFS;
754 }
755
756 (*swd)->sw_octx = malloc(axf->auth_hash->ctxsize,
757 M_CRYPTO_DATA, M_NOWAIT);
758 if ((*swd)->sw_octx == NULL) {
759 swcr_freesession(NULL, i);
760 return ENOBUFS;
761 }
762
763 for (k = 0; k < cri->cri_klen / 8; k++)
764 cri->cri_key[k] ^= HMAC_IPAD_VAL;
765
766 axf->Init((*swd)->sw_ictx);
767 axf->Update((*swd)->sw_ictx, cri->cri_key,
768 cri->cri_klen / 8);
769 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
770 axf->auth_hash->blocksize - (cri->cri_klen / 8));
771
772 for (k = 0; k < cri->cri_klen / 8; k++)
773 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
774
775 axf->Init((*swd)->sw_octx);
776 axf->Update((*swd)->sw_octx, cri->cri_key,
777 cri->cri_klen / 8);
778 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
779 axf->auth_hash->blocksize - (cri->cri_klen / 8));
780
781 for (k = 0; k < cri->cri_klen / 8; k++)
782 cri->cri_key[k] ^= HMAC_OPAD_VAL;
783 (*swd)->sw_axf = axf;
784 break;
785
786 case CRYPTO_MD5_KPDK:
787 axf = &swcr_auth_hash_key_md5;
788 goto auth2common;
789
790 case CRYPTO_SHA1_KPDK:
791 axf = &swcr_auth_hash_key_sha1;
792 auth2common:
793 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
794 M_CRYPTO_DATA, M_NOWAIT);
795 if ((*swd)->sw_ictx == NULL) {
796 swcr_freesession(NULL, i);
797 return ENOBUFS;
798 }
799
800 /* Store the key so we can "append" it to the payload */
801 (*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
802 M_NOWAIT);
803 if ((*swd)->sw_octx == NULL) {
804 swcr_freesession(NULL, i);
805 return ENOBUFS;
806 }
807
808 (*swd)->sw_klen = cri->cri_klen / 8;
809 memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
810 axf->Init((*swd)->sw_ictx);
811 axf->Update((*swd)->sw_ictx, cri->cri_key,
812 cri->cri_klen / 8);
813 axf->Final(NULL, (*swd)->sw_ictx);
814 (*swd)->sw_axf = axf;
815 break;
816
817 case CRYPTO_MD5:
818 axf = &swcr_auth_hash_md5;
819 goto auth3common;
820
821 case CRYPTO_SHA1:
822 axf = &swcr_auth_hash_sha1;
823 auth3common:
824 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
825 M_CRYPTO_DATA, M_NOWAIT);
826 if ((*swd)->sw_ictx == NULL) {
827 swcr_freesession(NULL, i);
828 return ENOBUFS;
829 }
830
831 axf->Init((*swd)->sw_ictx);
832 (*swd)->sw_axf = axf;
833 break;
834
835 case CRYPTO_DEFLATE_COMP:
836 cxf = &swcr_comp_algo_deflate;
837 (*swd)->sw_cxf = cxf;
838 break;
839
840 case CRYPTO_DEFLATE_COMP_NOGROW:
841 cxf = &swcr_comp_algo_deflate_nogrow;
842 (*swd)->sw_cxf = cxf;
843 break;
844
845 case CRYPTO_GZIP_COMP:
846 cxf = &swcr_comp_algo_gzip;
847 (*swd)->sw_cxf = cxf;
848 break;
849 default:
850 swcr_freesession(NULL, i);
851 return EINVAL;
852 }
853
854 (*swd)->sw_alg = cri->cri_alg;
855 cri = cri->cri_next;
856 swd = &((*swd)->sw_next);
857 }
858 return 0;
859 }
860
861 /*
862 * Free a session.
863 */
864 static int
865 swcr_freesession(void *arg, u_int64_t tid)
866 {
867 struct swcr_data *swd;
868 const struct swcr_enc_xform *txf;
869 const struct swcr_auth_hash *axf;
870 const struct swcr_comp_algo *cxf;
871 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
872
873 if (sid > swcr_sesnum || swcr_sessions == NULL ||
874 swcr_sessions[sid] == NULL)
875 return EINVAL;
876
877 /* Silently accept and return */
878 if (sid == 0)
879 return 0;
880
881 while ((swd = swcr_sessions[sid]) != NULL) {
882 swcr_sessions[sid] = swd->sw_next;
883
884 switch (swd->sw_alg) {
885 case CRYPTO_DES_CBC:
886 case CRYPTO_3DES_CBC:
887 case CRYPTO_BLF_CBC:
888 case CRYPTO_CAST_CBC:
889 case CRYPTO_SKIPJACK_CBC:
890 case CRYPTO_RIJNDAEL128_CBC:
891 case CRYPTO_CAMELLIA_CBC:
892 case CRYPTO_NULL_CBC:
893 txf = swd->sw_exf;
894
895 if (swd->sw_kschedule)
896 txf->zerokey(&(swd->sw_kschedule));
897 break;
898
899 case CRYPTO_MD5_HMAC:
900 case CRYPTO_MD5_HMAC_96:
901 case CRYPTO_SHA1_HMAC:
902 case CRYPTO_SHA1_HMAC_96:
903 case CRYPTO_SHA2_256_HMAC:
904 case CRYPTO_SHA2_384_HMAC:
905 case CRYPTO_SHA2_512_HMAC:
906 case CRYPTO_RIPEMD160_HMAC:
907 case CRYPTO_RIPEMD160_HMAC_96:
908 case CRYPTO_NULL_HMAC:
909 axf = swd->sw_axf;
910
911 if (swd->sw_ictx) {
912 memset(swd->sw_ictx, 0, axf->auth_hash->ctxsize);
913 free(swd->sw_ictx, M_CRYPTO_DATA);
914 }
915 if (swd->sw_octx) {
916 memset(swd->sw_octx, 0, axf->auth_hash->ctxsize);
917 free(swd->sw_octx, M_CRYPTO_DATA);
918 }
919 break;
920
921 case CRYPTO_MD5_KPDK:
922 case CRYPTO_SHA1_KPDK:
923 axf = swd->sw_axf;
924
925 if (swd->sw_ictx) {
926 memset(swd->sw_ictx, 0, axf->auth_hash->ctxsize);
927 free(swd->sw_ictx, M_CRYPTO_DATA);
928 }
929 if (swd->sw_octx) {
930 memset(swd->sw_octx, 0, swd->sw_klen);
931 free(swd->sw_octx, M_CRYPTO_DATA);
932 }
933 break;
934
935 case CRYPTO_MD5:
936 case CRYPTO_SHA1:
937 axf = swd->sw_axf;
938
939 if (swd->sw_ictx)
940 free(swd->sw_ictx, M_CRYPTO_DATA);
941 break;
942
943 case CRYPTO_DEFLATE_COMP:
944 case CRYPTO_DEFLATE_COMP_NOGROW:
945 case CRYPTO_GZIP_COMP:
946 cxf = swd->sw_cxf;
947 break;
948 }
949
950 free(swd, M_CRYPTO_DATA);
951 }
952 return 0;
953 }
954
955 /*
956 * Process a software request.
957 */
958 static int
959 swcr_process(void *arg, struct cryptop *crp, int hint)
960 {
961 struct cryptodesc *crd;
962 struct swcr_data *sw;
963 u_int32_t lid;
964 int type;
965
966 /* Sanity check */
967 if (crp == NULL)
968 return EINVAL;
969
970 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
971 crp->crp_etype = EINVAL;
972 goto done;
973 }
974
975 lid = crp->crp_sid & 0xffffffff;
976 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
977 crp->crp_etype = ENOENT;
978 goto done;
979 }
980
981 if (crp->crp_flags & CRYPTO_F_IMBUF) {
982 type = CRYPTO_BUF_MBUF;
983 } else if (crp->crp_flags & CRYPTO_F_IOV) {
984 type = CRYPTO_BUF_IOV;
985 } else {
986 type = CRYPTO_BUF_CONTIG;
987 }
988
989 /* Go through crypto descriptors, processing as we go */
990 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
991 /*
992 * Find the crypto context.
993 *
994 * XXX Note that the logic here prevents us from having
995 * XXX the same algorithm multiple times in a session
996 * XXX (or rather, we can but it won't give us the right
997 * XXX results). To do that, we'd need some way of differentiating
998 * XXX between the various instances of an algorithm (so we can
999 * XXX locate the correct crypto context).
1000 */
1001 for (sw = swcr_sessions[lid];
1002 sw && sw->sw_alg != crd->crd_alg;
1003 sw = sw->sw_next)
1004 ;
1005
1006 /* No such context ? */
1007 if (sw == NULL) {
1008 crp->crp_etype = EINVAL;
1009 goto done;
1010 }
1011
1012 switch (sw->sw_alg) {
1013 case CRYPTO_DES_CBC:
1014 case CRYPTO_3DES_CBC:
1015 case CRYPTO_BLF_CBC:
1016 case CRYPTO_CAST_CBC:
1017 case CRYPTO_SKIPJACK_CBC:
1018 case CRYPTO_RIJNDAEL128_CBC:
1019 case CRYPTO_CAMELLIA_CBC:
1020 if ((crp->crp_etype = swcr_encdec(crd, sw,
1021 crp->crp_buf, type)) != 0)
1022 goto done;
1023 break;
1024 case CRYPTO_NULL_CBC:
1025 crp->crp_etype = 0;
1026 break;
1027 case CRYPTO_MD5_HMAC:
1028 case CRYPTO_MD5_HMAC_96:
1029 case CRYPTO_SHA1_HMAC:
1030 case CRYPTO_SHA1_HMAC_96:
1031 case CRYPTO_SHA2_256_HMAC:
1032 case CRYPTO_SHA2_384_HMAC:
1033 case CRYPTO_SHA2_512_HMAC:
1034 case CRYPTO_RIPEMD160_HMAC:
1035 case CRYPTO_RIPEMD160_HMAC_96:
1036 case CRYPTO_NULL_HMAC:
1037 case CRYPTO_MD5_KPDK:
1038 case CRYPTO_SHA1_KPDK:
1039 case CRYPTO_MD5:
1040 case CRYPTO_SHA1:
1041 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1042 crp->crp_buf, type)) != 0)
1043 goto done;
1044 break;
1045
1046 case CRYPTO_DEFLATE_COMP:
1047 case CRYPTO_DEFLATE_COMP_NOGROW:
1048 case CRYPTO_GZIP_COMP:
1049 DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
1050 if ((crp->crp_etype = swcr_compdec(crd, sw,
1051 crp->crp_buf, type, &crp->crp_olen)) != 0)
1052 goto done;
1053 break;
1054
1055 default:
1056 /* Unknown/unsupported algorithm */
1057 crp->crp_etype = EINVAL;
1058 goto done;
1059 }
1060 }
1061
1062 done:
1063 DPRINTF(("request %p done\n", crp));
1064 crypto_done(crp);
1065 return 0;
1066 }
1067
1068 static void
1069 swcr_init(void)
1070 {
1071 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1072 if (swcr_id < 0) {
1073 /* This should never happen */
1074 panic("Software crypto device cannot initialize!");
1075 }
1076
1077 crypto_register(swcr_id, CRYPTO_DES_CBC,
1078 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1079 #define REGISTER(alg) \
1080 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1081
1082 REGISTER(CRYPTO_3DES_CBC);
1083 REGISTER(CRYPTO_BLF_CBC);
1084 REGISTER(CRYPTO_CAST_CBC);
1085 REGISTER(CRYPTO_SKIPJACK_CBC);
1086 REGISTER(CRYPTO_CAMELLIA_CBC);
1087 REGISTER(CRYPTO_NULL_CBC);
1088 REGISTER(CRYPTO_MD5_HMAC);
1089 REGISTER(CRYPTO_MD5_HMAC_96);
1090 REGISTER(CRYPTO_SHA1_HMAC);
1091 REGISTER(CRYPTO_SHA1_HMAC_96);
1092 REGISTER(CRYPTO_SHA2_256_HMAC);
1093 REGISTER(CRYPTO_SHA2_384_HMAC);
1094 REGISTER(CRYPTO_SHA2_512_HMAC);
1095 REGISTER(CRYPTO_RIPEMD160_HMAC);
1096 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1097 REGISTER(CRYPTO_NULL_HMAC);
1098 REGISTER(CRYPTO_MD5_KPDK);
1099 REGISTER(CRYPTO_SHA1_KPDK);
1100 REGISTER(CRYPTO_MD5);
1101 REGISTER(CRYPTO_SHA1);
1102 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1103 REGISTER(CRYPTO_DEFLATE_COMP);
1104 REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1105 REGISTER(CRYPTO_GZIP_COMP);
1106 #undef REGISTER
1107 }
1108
1109
1110 /*
1111 * Pseudo-device init routine for software crypto.
1112 */
1113 void swcryptoattach(int);
1114
1115 void
1116 swcryptoattach(int num)
1117 {
1118
1119 swcr_init();
1120 }
1121