/*	$NetBSD: cryptosoft.c,v 1.19 2008/02/02 04:46:29 tls Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.19 2008/02/02 04:46:29 tls Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35
36 #include <opencrypto/cryptodev.h>
37 #include <opencrypto/cryptosoft.h>
38 #include <opencrypto/xform.h>
39
40 #include <opencrypto/cryptosoft_xform.c>
41
42 union authctx {
43 MD5_CTX md5ctx;
44 SHA1_CTX sha1ctx;
45 RMD160_CTX rmd160ctx;
46 SHA256_CTX sha256ctx;
47 SHA384_CTX sha384ctx;
48 SHA512_CTX sha512ctx;
49 };
50
51 struct swcr_data **swcr_sessions = NULL;
52 u_int32_t swcr_sesnum = 0;
53 int32_t swcr_id = -1;
54
55 #define COPYBACK(x, a, b, c, d) \
56 (x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
57 : cuio_copyback((struct uio *)a,b,c,d)
58 #define COPYDATA(x, a, b, c, d) \
59 (x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
60 : cuio_copydata((struct uio *)a,b,c,d)
61
62 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, void *, int);
63 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, void *, int);
64 static int swcr_process(void *, struct cryptop *, int);
65 static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
66 static int swcr_freesession(void *, u_int64_t);
67
68 /*
69 * Apply a symmetric encryption/decryption algorithm.
70 */
71 static int
72 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, void *bufv,
73 int outtype)
74 {
75 char *buf = bufv;
76 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
77 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
78 const struct swcr_enc_xform *exf;
79 int i, k, j, blks;
80 int count, ind;
81
82 exf = sw->sw_exf;
83 blks = exf->enc_xform->blocksize;
84
85 /* Check for non-padded data */
86 if (crd->crd_len % blks)
87 return EINVAL;
88
89 /* Initialize the IV */
90 if (crd->crd_flags & CRD_F_ENCRYPT) {
91 /* IV explicitly provided ? */
92 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
93 bcopy(crd->crd_iv, iv, blks);
94 else {
95 /* Get random IV */
96 for (i = 0;
97 i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
98 i += sizeof (u_int32_t)) {
99 u_int32_t temp = arc4random();
100
101 bcopy(&temp, iv + i, sizeof(u_int32_t));
102 }
103 /*
104 * What if the block size is not a multiple
105 * of sizeof (u_int32_t), which is the size of
106 * what arc4random() returns ?
107 */
108 if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
109 u_int32_t temp = arc4random();
110
111 bcopy (&temp, iv + i,
112 EALG_MAX_BLOCK_LEN - i);
113 }
114 }
115
116 /* Do we need to write the IV */
117 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
118 COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
119 }
120
121 } else { /* Decryption */
122 /* IV explicitly provided ? */
123 if (crd->crd_flags & CRD_F_IV_EXPLICIT)
124 bcopy(crd->crd_iv, iv, blks);
125 else {
126 /* Get IV off buf */
127 COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
128 }
129 }
130
131 ivp = iv;
132
133 if (outtype == CRYPTO_BUF_CONTIG) {
134 if (crd->crd_flags & CRD_F_ENCRYPT) {
135 for (i = crd->crd_skip;
136 i < crd->crd_skip + crd->crd_len; i += blks) {
137 /* XOR with the IV/previous block, as appropriate. */
138 if (i == crd->crd_skip)
139 for (k = 0; k < blks; k++)
140 buf[i + k] ^= ivp[k];
141 else
142 for (k = 0; k < blks; k++)
143 buf[i + k] ^= buf[i + k - blks];
144 exf->encrypt(sw->sw_kschedule, buf + i);
145 }
146 } else { /* Decrypt */
147 /*
148 * Start at the end, so we don't need to keep the encrypted
149 * block as the IV for the next block.
150 */
151 for (i = crd->crd_skip + crd->crd_len - blks;
152 i >= crd->crd_skip; i -= blks) {
153 exf->decrypt(sw->sw_kschedule, buf + i);
154
155 /* XOR with the IV/previous block, as appropriate */
156 if (i == crd->crd_skip)
157 for (k = 0; k < blks; k++)
158 buf[i + k] ^= ivp[k];
159 else
160 for (k = 0; k < blks; k++)
161 buf[i + k] ^= buf[i + k - blks];
162 }
163 }
164
165 return 0;
166 } else if (outtype == CRYPTO_BUF_MBUF) {
167 struct mbuf *m = (struct mbuf *) buf;
168
169 /* Find beginning of data */
170 m = m_getptr(m, crd->crd_skip, &k);
171 if (m == NULL)
172 return EINVAL;
173
174 i = crd->crd_len;
175
176 while (i > 0) {
177 /*
178 * If there's insufficient data at the end of
179 * an mbuf, we have to do some copying.
180 */
181 if (m->m_len < k + blks && m->m_len != k) {
182 m_copydata(m, k, blks, blk);
183
184 /* Actual encryption/decryption */
185 if (crd->crd_flags & CRD_F_ENCRYPT) {
186 /* XOR with previous block */
187 for (j = 0; j < blks; j++)
188 blk[j] ^= ivp[j];
189
190 exf->encrypt(sw->sw_kschedule, blk);
191
192 /*
193 * Keep encrypted block for XOR'ing
194 * with next block
195 */
196 bcopy(blk, iv, blks);
197 ivp = iv;
198 } else { /* decrypt */
199 /*
200 * Keep encrypted block for XOR'ing
201 * with next block
202 */
203 if (ivp == iv)
204 bcopy(blk, piv, blks);
205 else
206 bcopy(blk, iv, blks);
207
208 exf->decrypt(sw->sw_kschedule, blk);
209
210 /* XOR with previous block */
211 for (j = 0; j < blks; j++)
212 blk[j] ^= ivp[j];
213
214 if (ivp == iv)
215 bcopy(piv, iv, blks);
216 else
217 ivp = iv;
218 }
219
220 /* Copy back decrypted block */
221 m_copyback(m, k, blks, blk);
222
223 /* Advance pointer */
224 m = m_getptr(m, k + blks, &k);
225 if (m == NULL)
226 return EINVAL;
227
228 i -= blks;
229
230 /* Could be done... */
231 if (i == 0)
232 break;
233 }
234
235 /* Skip possibly empty mbufs */
236 if (k == m->m_len) {
237 for (m = m->m_next; m && m->m_len == 0;
238 m = m->m_next)
239 ;
240 k = 0;
241 }
242
243 /* Sanity check */
244 if (m == NULL)
245 return EINVAL;
246
247 /*
248 * Warning: idat may point to garbage here, but
249 * we only use it in the while() loop, only if
250 * there are indeed enough data.
251 */
252 idat = mtod(m, unsigned char *) + k;
253
254 while (m->m_len >= k + blks && i > 0) {
255 if (crd->crd_flags & CRD_F_ENCRYPT) {
256 /* XOR with previous block/IV */
257 for (j = 0; j < blks; j++)
258 idat[j] ^= ivp[j];
259
260 exf->encrypt(sw->sw_kschedule, idat);
261 ivp = idat;
262 } else { /* decrypt */
263 /*
264 * Keep encrypted block to be used
265 * in next block's processing.
266 */
267 if (ivp == iv)
268 bcopy(idat, piv, blks);
269 else
270 bcopy(idat, iv, blks);
271
272 exf->decrypt(sw->sw_kschedule, idat);
273
274 /* XOR with previous block/IV */
275 for (j = 0; j < blks; j++)
276 idat[j] ^= ivp[j];
277
278 if (ivp == iv)
279 bcopy(piv, iv, blks);
280 else
281 ivp = iv;
282 }
283
284 idat += blks;
285 k += blks;
286 i -= blks;
287 }
288 }
289
290 return 0; /* Done with mbuf encryption/decryption */
291 } else if (outtype == CRYPTO_BUF_IOV) {
292 struct uio *uio = (struct uio *) buf;
293
294 /* Find beginning of data */
295 count = crd->crd_skip;
296 ind = cuio_getptr(uio, count, &k);
297 if (ind == -1)
298 return EINVAL;
299
300 i = crd->crd_len;
301
302 while (i > 0) {
303 /*
304 * If there's insufficient data at the end,
305 * we have to do some copying.
306 */
307 if (uio->uio_iov[ind].iov_len < k + blks &&
308 uio->uio_iov[ind].iov_len != k) {
309 cuio_copydata(uio, k, blks, blk);
310
311 /* Actual encryption/decryption */
312 if (crd->crd_flags & CRD_F_ENCRYPT) {
313 /* XOR with previous block */
314 for (j = 0; j < blks; j++)
315 blk[j] ^= ivp[j];
316
317 exf->encrypt(sw->sw_kschedule, blk);
318
319 /*
320 * Keep encrypted block for XOR'ing
321 * with next block
322 */
323 bcopy(blk, iv, blks);
324 ivp = iv;
325 } else { /* decrypt */
326 /*
327 * Keep encrypted block for XOR'ing
328 * with next block
329 */
330 if (ivp == iv)
331 bcopy(blk, piv, blks);
332 else
333 bcopy(blk, iv, blks);
334
335 exf->decrypt(sw->sw_kschedule, blk);
336
337 /* XOR with previous block */
338 for (j = 0; j < blks; j++)
339 blk[j] ^= ivp[j];
340
341 if (ivp == iv)
342 bcopy(piv, iv, blks);
343 else
344 ivp = iv;
345 }
346
347 /* Copy back decrypted block */
348 cuio_copyback(uio, k, blks, blk);
349
350 count += blks;
351
352 /* Advance pointer */
353 ind = cuio_getptr(uio, count, &k);
354 if (ind == -1)
355 return (EINVAL);
356
357 i -= blks;
358
359 /* Could be done... */
360 if (i == 0)
361 break;
362 }
363
364 /*
365 * Warning: idat may point to garbage here, but
366 * we only use it in the while() loop, only if
367 * there are indeed enough data.
368 */
369 idat = ((char *)uio->uio_iov[ind].iov_base) + k;
370
371 while (uio->uio_iov[ind].iov_len >= k + blks &&
372 i > 0) {
373 if (crd->crd_flags & CRD_F_ENCRYPT) {
374 /* XOR with previous block/IV */
375 for (j = 0; j < blks; j++)
376 idat[j] ^= ivp[j];
377
378 exf->encrypt(sw->sw_kschedule, idat);
379 ivp = idat;
380 } else { /* decrypt */
381 /*
382 * Keep encrypted block to be used
383 * in next block's processing.
384 */
385 if (ivp == iv)
386 bcopy(idat, piv, blks);
387 else
388 bcopy(idat, iv, blks);
389
390 exf->decrypt(sw->sw_kschedule, idat);
391
392 /* XOR with previous block/IV */
393 for (j = 0; j < blks; j++)
394 idat[j] ^= ivp[j];
395
396 if (ivp == iv)
397 bcopy(piv, iv, blks);
398 else
399 ivp = iv;
400 }
401
402 idat += blks;
403 count += blks;
404 k += blks;
405 i -= blks;
406 }
407 }
408 return 0; /* Done with mbuf encryption/decryption */
409 }
410
411 /* Unreachable */
412 return EINVAL;
413 }
414
415 /*
416 * Compute keyed-hash authenticator.
417 */
418 int
419 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
420 struct swcr_data *sw, void *buf, int outtype)
421 {
422 unsigned char aalg[AALG_MAX_RESULT_LEN];
423 const struct swcr_auth_hash *axf;
424 union authctx ctx;
425 int err;
426
427 if (sw->sw_ictx == 0)
428 return EINVAL;
429
430 axf = sw->sw_axf;
431
432 bcopy(sw->sw_ictx, &ctx, axf->auth_hash->ctxsize);
433
434 switch (outtype) {
435 case CRYPTO_BUF_CONTIG:
436 axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
437 break;
438 case CRYPTO_BUF_MBUF:
439 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
440 (int (*)(void*, void *, unsigned int)) axf->Update,
441 (void *) &ctx);
442 if (err)
443 return err;
444 break;
445 case CRYPTO_BUF_IOV:
446 err = cuio_apply((struct uio *) buf, crd->crd_skip,
447 crd->crd_len,
448 (int (*)(void *, void *, unsigned int)) axf->Update,
449 (void *) &ctx);
450 if (err) {
451 return err;
452 }
453 break;
454 default:
455 return EINVAL;
456 }
457
458 switch (sw->sw_alg) {
459 case CRYPTO_MD5_HMAC:
460 case CRYPTO_MD5_HMAC_96:
461 case CRYPTO_SHA1_HMAC:
462 case CRYPTO_SHA1_HMAC_96:
463 case CRYPTO_SHA2_HMAC:
464 case CRYPTO_RIPEMD160_HMAC:
465 case CRYPTO_RIPEMD160_HMAC_96:
466 if (sw->sw_octx == NULL)
467 return EINVAL;
468
469 axf->Final(aalg, &ctx);
470 bcopy(sw->sw_octx, &ctx, axf->auth_hash->ctxsize);
471 axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
472 axf->Final(aalg, &ctx);
473 break;
474
475 case CRYPTO_MD5_KPDK:
476 case CRYPTO_SHA1_KPDK:
477 if (sw->sw_octx == NULL)
478 return EINVAL;
479
480 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
481 axf->Final(aalg, &ctx);
482 break;
483
484 case CRYPTO_NULL_HMAC:
485 case CRYPTO_MD5:
486 case CRYPTO_SHA1:
487 axf->Final(aalg, &ctx);
488 break;
489 }
490
491 /* Inject the authentication data */
492 switch (outtype) {
493 case CRYPTO_BUF_CONTIG:
494 (void)memcpy((char *)buf + crd->crd_inject, aalg,
495 axf->auth_hash->authsize);
496 break;
497 case CRYPTO_BUF_MBUF:
498 m_copyback((struct mbuf *) buf, crd->crd_inject,
499 axf->auth_hash->authsize, aalg);
500 break;
501 case CRYPTO_BUF_IOV:
502 bcopy(aalg, crp->crp_mac, axf->auth_hash->authsize);
503 break;
504 default:
505 return EINVAL;
506 }
507 return 0;
508 }
509
510 /*
511 * Apply a compression/decompression algorithm
512 */
513 static int
514 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
515 void *buf, int outtype)
516 {
517 u_int8_t *data, *out;
518 const struct swcr_comp_algo *cxf;
519 int adj;
520 u_int32_t result;
521
522 cxf = sw->sw_cxf;
523
524 /* We must handle the whole buffer of data in one time
525 * then if there is not all the data in the mbuf, we must
526 * copy in a buffer.
527 */
528
529 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
530 if (data == NULL)
531 return (EINVAL);
532 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
533
534 if (crd->crd_flags & CRD_F_COMP)
535 result = cxf->compress(data, crd->crd_len, &out);
536 else
537 result = cxf->decompress(data, crd->crd_len, &out);
538
539 FREE(data, M_CRYPTO_DATA);
540 if (result == 0)
541 return EINVAL;
542
543 /* Copy back the (de)compressed data. m_copyback is
544 * extending the mbuf as necessary.
545 */
546 sw->sw_size = result;
547 /* Check the compressed size when doing compression */
548 if (crd->crd_flags & CRD_F_COMP) {
549 if (result > crd->crd_len) {
550 /* Compression was useless, we lost time */
551 FREE(out, M_CRYPTO_DATA);
552 return 0;
553 }
554 }
555
556 COPYBACK(outtype, buf, crd->crd_skip, result, out);
557 if (result < crd->crd_len) {
558 adj = result - crd->crd_len;
559 if (outtype == CRYPTO_BUF_MBUF) {
560 adj = result - crd->crd_len;
561 m_adj((struct mbuf *)buf, adj);
562 } else {
563 struct uio *uio = (struct uio *)buf;
564 int ind;
565
566 adj = crd->crd_len - result;
567 ind = uio->uio_iovcnt - 1;
568
569 while (adj > 0 && ind >= 0) {
570 if (adj < uio->uio_iov[ind].iov_len) {
571 uio->uio_iov[ind].iov_len -= adj;
572 break;
573 }
574
575 adj -= uio->uio_iov[ind].iov_len;
576 uio->uio_iov[ind].iov_len = 0;
577 ind--;
578 uio->uio_iovcnt--;
579 }
580 }
581 }
582 FREE(out, M_CRYPTO_DATA);
583 return 0;
584 }
585
586 /*
587 * Generate a new software session.
588 */
589 static int
590 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
591 {
592 struct swcr_data **swd;
593 const struct swcr_auth_hash *axf;
594 const struct swcr_enc_xform *txf;
595 const struct swcr_comp_algo *cxf;
596 u_int32_t i;
597 int k, error;
598
599 if (sid == NULL || cri == NULL)
600 return EINVAL;
601
602 if (swcr_sessions) {
603 for (i = 1; i < swcr_sesnum; i++)
604 if (swcr_sessions[i] == NULL)
605 break;
606 } else
607 i = 1; /* NB: to silence compiler warning */
608
609 if (swcr_sessions == NULL || i == swcr_sesnum) {
610 if (swcr_sessions == NULL) {
611 i = 1; /* We leave swcr_sessions[0] empty */
612 swcr_sesnum = CRYPTO_SW_SESSIONS;
613 } else
614 swcr_sesnum *= 2;
615
616 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
617 M_CRYPTO_DATA, M_NOWAIT);
618 if (swd == NULL) {
619 /* Reset session number */
620 if (swcr_sesnum == CRYPTO_SW_SESSIONS)
621 swcr_sesnum = 0;
622 else
623 swcr_sesnum /= 2;
624 return ENOBUFS;
625 }
626
627 bzero(swd, swcr_sesnum * sizeof(struct swcr_data *));
628
629 /* Copy existing sessions */
630 if (swcr_sessions) {
631 bcopy(swcr_sessions, swd,
632 (swcr_sesnum / 2) * sizeof(struct swcr_data *));
633 free(swcr_sessions, M_CRYPTO_DATA);
634 }
635
636 swcr_sessions = swd;
637 }
638
639 swd = &swcr_sessions[i];
640 *sid = i;
641
642 while (cri) {
643 *swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
644 if (*swd == NULL) {
645 swcr_freesession(NULL, i);
646 return ENOBUFS;
647 }
648 bzero(*swd, sizeof(struct swcr_data));
649
650 switch (cri->cri_alg) {
651 case CRYPTO_DES_CBC:
652 txf = &swcr_enc_xform_des;
653 goto enccommon;
654 case CRYPTO_3DES_CBC:
655 txf = &swcr_enc_xform_3des;
656 goto enccommon;
657 case CRYPTO_BLF_CBC:
658 txf = &swcr_enc_xform_blf;
659 goto enccommon;
660 case CRYPTO_CAST_CBC:
661 txf = &swcr_enc_xform_cast5;
662 goto enccommon;
663 case CRYPTO_SKIPJACK_CBC:
664 txf = &swcr_enc_xform_skipjack;
665 goto enccommon;
666 case CRYPTO_RIJNDAEL128_CBC:
667 txf = &swcr_enc_xform_rijndael128;
668 goto enccommon;
669 case CRYPTO_NULL_CBC:
670 txf = &swcr_enc_xform_null;
671 goto enccommon;
672 enccommon:
673 error = txf->setkey(&((*swd)->sw_kschedule),
674 cri->cri_key, cri->cri_klen / 8);
675 if (error) {
676 swcr_freesession(NULL, i);
677 return error;
678 }
679 (*swd)->sw_exf = txf;
680 break;
681
682 case CRYPTO_MD5_HMAC:
683 axf = &swcr_auth_hash_hmac_md5;
684 goto authcommon;
685 case CRYPTO_MD5_HMAC_96:
686 axf = &swcr_auth_hash_hmac_md5_96;
687 goto authcommon;
688 case CRYPTO_SHA1_HMAC:
689 axf = &swcr_auth_hash_hmac_sha1;
690 goto authcommon;
691 case CRYPTO_SHA1_HMAC_96:
692 axf = &swcr_auth_hash_hmac_sha1_96;
693 goto authcommon;
694 case CRYPTO_SHA2_HMAC:
695 if (cri->cri_klen == 256)
696 axf = &swcr_auth_hash_hmac_sha2_256;
697 else if (cri->cri_klen == 384)
698 axf = &swcr_auth_hash_hmac_sha2_384;
699 else if (cri->cri_klen == 512)
700 axf = &swcr_auth_hash_hmac_sha2_512;
701 else {
702 swcr_freesession(NULL, i);
703 return EINVAL;
704 }
705 goto authcommon;
706 case CRYPTO_NULL_HMAC:
707 axf = &swcr_auth_hash_null;
708 goto authcommon;
709 case CRYPTO_RIPEMD160_HMAC:
710 axf = &swcr_auth_hash_hmac_ripemd_160;
711 goto authcommon;
712 case CRYPTO_RIPEMD160_HMAC_96:
713 axf = &swcr_auth_hash_hmac_ripemd_160_96;
714 goto authcommon; /* leave this for safety */
715 authcommon:
716 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
717 M_CRYPTO_DATA, M_NOWAIT);
718 if ((*swd)->sw_ictx == NULL) {
719 swcr_freesession(NULL, i);
720 return ENOBUFS;
721 }
722
723 (*swd)->sw_octx = malloc(axf->auth_hash->ctxsize,
724 M_CRYPTO_DATA, M_NOWAIT);
725 if ((*swd)->sw_octx == NULL) {
726 swcr_freesession(NULL, i);
727 return ENOBUFS;
728 }
729
730 for (k = 0; k < cri->cri_klen / 8; k++)
731 cri->cri_key[k] ^= HMAC_IPAD_VAL;
732
733 axf->Init((*swd)->sw_ictx);
734 axf->Update((*swd)->sw_ictx, cri->cri_key,
735 cri->cri_klen / 8);
736 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
737 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
738
739 for (k = 0; k < cri->cri_klen / 8; k++)
740 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
741
742 axf->Init((*swd)->sw_octx);
743 axf->Update((*swd)->sw_octx, cri->cri_key,
744 cri->cri_klen / 8);
745 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
746 HMAC_BLOCK_LEN - (cri->cri_klen / 8));
747
748 for (k = 0; k < cri->cri_klen / 8; k++)
749 cri->cri_key[k] ^= HMAC_OPAD_VAL;
750 (*swd)->sw_axf = axf;
751 break;
752
753 case CRYPTO_MD5_KPDK:
754 axf = &swcr_auth_hash_key_md5;
755 goto auth2common;
756
757 case CRYPTO_SHA1_KPDK:
758 axf = &swcr_auth_hash_key_sha1;
759 auth2common:
760 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
761 M_CRYPTO_DATA, M_NOWAIT);
762 if ((*swd)->sw_ictx == NULL) {
763 swcr_freesession(NULL, i);
764 return ENOBUFS;
765 }
766
767 /* Store the key so we can "append" it to the payload */
768 (*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
769 M_NOWAIT);
770 if ((*swd)->sw_octx == NULL) {
771 swcr_freesession(NULL, i);
772 return ENOBUFS;
773 }
774
775 (*swd)->sw_klen = cri->cri_klen / 8;
776 bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
777 axf->Init((*swd)->sw_ictx);
778 axf->Update((*swd)->sw_ictx, cri->cri_key,
779 cri->cri_klen / 8);
780 axf->Final(NULL, (*swd)->sw_ictx);
781 (*swd)->sw_axf = axf;
782 break;
783
784 case CRYPTO_MD5:
785 axf = &swcr_auth_hash_md5;
786 goto auth3common;
787
788 case CRYPTO_SHA1:
789 axf = &swcr_auth_hash_sha1;
790 auth3common:
791 (*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
792 M_CRYPTO_DATA, M_NOWAIT);
793 if ((*swd)->sw_ictx == NULL) {
794 swcr_freesession(NULL, i);
795 return ENOBUFS;
796 }
797
798 axf->Init((*swd)->sw_ictx);
799 (*swd)->sw_axf = axf;
800 break;
801
802 case CRYPTO_DEFLATE_COMP:
803 cxf = &swcr_comp_algo_deflate;
804 (*swd)->sw_cxf = cxf;
805 break;
806 default:
807 swcr_freesession(NULL, i);
808 return EINVAL;
809 }
810
811 (*swd)->sw_alg = cri->cri_alg;
812 cri = cri->cri_next;
813 swd = &((*swd)->sw_next);
814 }
815 return 0;
816 }
817
818 /*
819 * Free a session.
820 */
821 static int
822 swcr_freesession(void *arg, u_int64_t tid)
823 {
824 struct swcr_data *swd;
825 const struct swcr_enc_xform *txf;
826 const struct swcr_auth_hash *axf;
827 const struct swcr_comp_algo *cxf;
828 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
829
830 if (sid > swcr_sesnum || swcr_sessions == NULL ||
831 swcr_sessions[sid] == NULL)
832 return EINVAL;
833
834 /* Silently accept and return */
835 if (sid == 0)
836 return 0;
837
838 while ((swd = swcr_sessions[sid]) != NULL) {
839 swcr_sessions[sid] = swd->sw_next;
840
841 switch (swd->sw_alg) {
842 case CRYPTO_DES_CBC:
843 case CRYPTO_3DES_CBC:
844 case CRYPTO_BLF_CBC:
845 case CRYPTO_CAST_CBC:
846 case CRYPTO_SKIPJACK_CBC:
847 case CRYPTO_RIJNDAEL128_CBC:
848 case CRYPTO_NULL_CBC:
849 txf = swd->sw_exf;
850
851 if (swd->sw_kschedule)
852 txf->zerokey(&(swd->sw_kschedule));
853 break;
854
855 case CRYPTO_MD5_HMAC:
856 case CRYPTO_MD5_HMAC_96:
857 case CRYPTO_SHA1_HMAC:
858 case CRYPTO_SHA1_HMAC_96:
859 case CRYPTO_SHA2_HMAC:
860 case CRYPTO_RIPEMD160_HMAC:
861 case CRYPTO_RIPEMD160_HMAC_96:
862 case CRYPTO_NULL_HMAC:
863 axf = swd->sw_axf;
864
865 if (swd->sw_ictx) {
866 bzero(swd->sw_ictx, axf->auth_hash->ctxsize);
867 free(swd->sw_ictx, M_CRYPTO_DATA);
868 }
869 if (swd->sw_octx) {
870 bzero(swd->sw_octx, axf->auth_hash->ctxsize);
871 free(swd->sw_octx, M_CRYPTO_DATA);
872 }
873 break;
874
875 case CRYPTO_MD5_KPDK:
876 case CRYPTO_SHA1_KPDK:
877 axf = swd->sw_axf;
878
879 if (swd->sw_ictx) {
880 bzero(swd->sw_ictx, axf->auth_hash->ctxsize);
881 free(swd->sw_ictx, M_CRYPTO_DATA);
882 }
883 if (swd->sw_octx) {
884 bzero(swd->sw_octx, swd->sw_klen);
885 free(swd->sw_octx, M_CRYPTO_DATA);
886 }
887 break;
888
889 case CRYPTO_MD5:
890 case CRYPTO_SHA1:
891 axf = swd->sw_axf;
892
893 if (swd->sw_ictx)
894 free(swd->sw_ictx, M_CRYPTO_DATA);
895 break;
896
897 case CRYPTO_DEFLATE_COMP:
898 cxf = swd->sw_cxf;
899 break;
900 }
901
902 FREE(swd, M_CRYPTO_DATA);
903 }
904 return 0;
905 }
906
907 /*
908 * Process a software request.
909 */
910 static int
911 swcr_process(void *arg, struct cryptop *crp, int hint)
912 {
913 struct cryptodesc *crd;
914 struct swcr_data *sw;
915 u_int32_t lid;
916 int type;
917
918 /* Sanity check */
919 if (crp == NULL)
920 return EINVAL;
921
922 if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
923 crp->crp_etype = EINVAL;
924 goto done;
925 }
926
927 lid = crp->crp_sid & 0xffffffff;
928 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
929 crp->crp_etype = ENOENT;
930 goto done;
931 }
932
933 if (crp->crp_flags & CRYPTO_F_IMBUF) {
934 type = CRYPTO_BUF_MBUF;
935 } else if (crp->crp_flags & CRYPTO_F_IOV) {
936 type = CRYPTO_BUF_IOV;
937 } else {
938 type = CRYPTO_BUF_CONTIG;
939 }
940
941 /* Go through crypto descriptors, processing as we go */
942 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
943 /*
944 * Find the crypto context.
945 *
946 * XXX Note that the logic here prevents us from having
947 * XXX the same algorithm multiple times in a session
948 * XXX (or rather, we can but it won't give us the right
949 * XXX results). To do that, we'd need some way of differentiating
950 * XXX between the various instances of an algorithm (so we can
951 * XXX locate the correct crypto context).
952 */
953 for (sw = swcr_sessions[lid];
954 sw && sw->sw_alg != crd->crd_alg;
955 sw = sw->sw_next)
956 ;
957
958 /* No such context ? */
959 if (sw == NULL) {
960 crp->crp_etype = EINVAL;
961 goto done;
962 }
963
964 switch (sw->sw_alg) {
965 case CRYPTO_DES_CBC:
966 case CRYPTO_3DES_CBC:
967 case CRYPTO_BLF_CBC:
968 case CRYPTO_CAST_CBC:
969 case CRYPTO_SKIPJACK_CBC:
970 case CRYPTO_RIJNDAEL128_CBC:
971 if ((crp->crp_etype = swcr_encdec(crd, sw,
972 crp->crp_buf, type)) != 0)
973 goto done;
974 break;
975 case CRYPTO_NULL_CBC:
976 crp->crp_etype = 0;
977 break;
978 case CRYPTO_MD5_HMAC:
979 case CRYPTO_MD5_HMAC_96:
980 case CRYPTO_SHA1_HMAC:
981 case CRYPTO_SHA1_HMAC_96:
982 case CRYPTO_SHA2_HMAC:
983 case CRYPTO_RIPEMD160_HMAC:
984 case CRYPTO_RIPEMD160_HMAC_96:
985 case CRYPTO_NULL_HMAC:
986 case CRYPTO_MD5_KPDK:
987 case CRYPTO_SHA1_KPDK:
988 case CRYPTO_MD5:
989 case CRYPTO_SHA1:
990 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
991 crp->crp_buf, type)) != 0)
992 goto done;
993 break;
994
995 case CRYPTO_DEFLATE_COMP:
996 if ((crp->crp_etype = swcr_compdec(crd, sw,
997 crp->crp_buf, type)) != 0)
998 goto done;
999 else
1000 crp->crp_olen = (int)sw->sw_size;
1001 break;
1002
1003 default:
1004 /* Unknown/unsupported algorithm */
1005 crp->crp_etype = EINVAL;
1006 goto done;
1007 }
1008 }
1009
1010 done:
1011 crypto_done(crp);
1012 return 0;
1013 }
1014
1015 static void
1016 swcr_init(void)
1017 {
1018 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1019 if (swcr_id < 0) {
1020 /* This should never happen */
1021 panic("Software crypto device cannot initialize!");
1022 }
1023
1024 crypto_register(swcr_id, CRYPTO_DES_CBC,
1025 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1026 #define REGISTER(alg) \
1027 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1028
1029 REGISTER(CRYPTO_3DES_CBC);
1030 REGISTER(CRYPTO_BLF_CBC);
1031 REGISTER(CRYPTO_CAST_CBC);
1032 REGISTER(CRYPTO_SKIPJACK_CBC);
1033 REGISTER(CRYPTO_NULL_CBC);
1034 REGISTER(CRYPTO_MD5_HMAC);
1035 REGISTER(CRYPTO_MD5_HMAC_96);
1036 REGISTER(CRYPTO_SHA1_HMAC);
1037 REGISTER(CRYPTO_SHA1_HMAC_96);
1038 REGISTER(CRYPTO_SHA2_HMAC);
1039 REGISTER(CRYPTO_RIPEMD160_HMAC);
1040 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1041 REGISTER(CRYPTO_NULL_HMAC);
1042 REGISTER(CRYPTO_MD5_KPDK);
1043 REGISTER(CRYPTO_SHA1_KPDK);
1044 REGISTER(CRYPTO_MD5);
1045 REGISTER(CRYPTO_SHA1);
1046 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1047 REGISTER(CRYPTO_DEFLATE_COMP);
1048 #undef REGISTER
1049 }
1050
1051
1052 /*
1053 * Pseudo-device init routine for software crypto.
1054 */
1055 void swcryptoattach(int);
1056
1057 void
1058 swcryptoattach(int num)
1059 {
1060
1061 swcr_init();
1062 }
1063