/*	$NetBSD: cryptosoft.c,v 1.36 2011/05/24 19:10:10 drochner Exp $ */
/*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.36 2011/05/24 19:10:10 drochner Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>

#include "opt_ocf.h"
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <opencrypto/cryptosoft_xform.c>

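/*
 * Scratch area big enough to hold the context of any hash we support.
 * swcr_authcompute() copies a precomputed context (sw_ictx/sw_octx)
 * into one of these before feeding it the payload, so the per-session
 * state itself is never modified.
 */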
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
};

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

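/*
 * Copy data to/from the request buffer, dispatching on the buffer
 * type: mbuf chains go through m_copyback()/m_copydata(), anything
 * else through the cuio_*() equivalents, which expect a struct uio.
 */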
#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
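/*
 * For plain block ciphers the CBC chaining is done here with explicit
 * XORs, one block at a time:
 *
 *	encrypt:  C[i] = E_k(P[i] ^ C[i-1]),	with C[-1] = IV
 *	decrypt:  P[i] = D_k(C[i]) ^ C[i-1]
 *
 * Transforms that supply exf->reinit (e.g. AES-CTR) keep their own
 * IV/counter state inside the key schedule, so each block is handed
 * to encrypt()/decrypt() as-is and no chaining is done here.
 */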
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = arc4random();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * EALG_MAX_BLOCK_LEN need not be a multiple of
			 * sizeof (u_int32_t), the amount arc4random()
			 * returns, so fill any remaining bytes as well.
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = arc4random();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	ivp = iv;

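	/*
	 * The three buffer layouts are handled separately below.  In
	 * every case ivp points at the current chaining value.  During
	 * in-place CBC decryption the ciphertext block is still needed
	 * after it has been overwritten, so it is stashed in iv/piv
	 * before decrypting; piv is a second scratch buffer used while
	 * ivp still points into iv.
	 */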
	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but we
			 * only dereference it inside the while() loop below,
			 * and only when there is enough data in the mbuf.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;		/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but we
			 * only dereference it inside the while() loop below,
			 * and only when there is enough data in the iovec.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0;		/* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
 */
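/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).  swcr_newsession()
 * precomputes the two partial contexts: sw_ictx holds H after absorbing
 * K ^ ipad and sw_octx holds H after absorbing K ^ opad, so per request
 * we only hash the payload and the inner digest.  For the KPDK variants
 * sw_octx instead holds the raw key, which is appended to the payload.
 */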
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    const struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
	case CRYPTO_AES_XCBC_MAC_96:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
    void *buf, int outtype, int *res_size)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines need to see the whole buffer at
	 * once, so if the data is not contiguous we copy it into a
	 * temporary flat buffer first.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out,
		    *res_size);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	*res_size = (int)result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP &&
	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
	    result >= crd->crd_len) {
		/* Compression was useless, we lost time */
		free(out, M_CRYPTO_DATA);
		return 0;
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (outtype == CRYPTO_BUF_MBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust iov_len; that would break the later kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
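/*
 * Sessions live in swcr_sessions[], a table of linked lists indexed by
 * session id; slot 0 is never used and the table is doubled whenever it
 * fills up.  Each cryptoini in the caller's chain gets its own swcr_data
 * entry, linked through sw_next, so one session can combine e.g. a
 * cipher and an authenticator.
 */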
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

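			/*
			 * Precompute the inner and outer contexts:
			 * sw_ictx absorbs (key ^ ipad) and sw_octx
			 * absorbs (key ^ opad), each padded out to the
			 * hash block size.  The three XOR passes below
			 * leave cri_key unchanged on exit, since
			 * ipad ^ (ipad ^ opad) ^ opad == 0.
			 */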
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
			    cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
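
/*
 * Illustrative sketch only (not part of this file): a typical consumer
 * builds a chain of cryptoini structures and hands it to the framework,
 * which may end up here when no hardware driver claims the algorithms.
 * Field and function names are as declared in <opencrypto/cryptodev.h>;
 * key lengths are in bits.
 *
 *	struct cryptoini cria, crie;
 *	u_int64_t sid;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	crie.cri_alg = CRYPTO_3DES_CBC;
 *	crie.cri_klen = 192;
 *	crie.cri_key = enckey;
 *	crie.cri_next = &cria;
 *
 *	memset(&cria, 0, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC_96;
 *	cria.cri_klen = 160;
 *	cria.cri_key = authkey;
 *
 *	error = crypto_newsession(&sid, &crie, 0);
 */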

/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	const struct swcr_enc_xform *txf;
	const struct swcr_auth_hash *axf;
	const struct swcr_comp_algo *cxf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				memset(swd->sw_ictx, 0, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				memset(swd->sw_octx, 0, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				memset(swd->sw_ictx, 0, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				memset(swd->sw_octx, 0, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
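/*
 * crypto_done() must be called for every request, so errors are
 * reported through crp->crp_etype rather than the return value;
 * swcr_process() itself only returns non-zero for a NULL crp.
 */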
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type, &crp->crp_olen)) != 0)
				goto done;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF(("request %p done\n", crp));
	crypto_done(crp);
	return 0;
}

static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (swcr_id < 0) {
		/* This should never happen */
		panic("Software crypto device cannot initialize!");
	}

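	/*
	 * The first crypto_register() call supplies the driver entry
	 * points; the REGISTER() calls that follow only announce the
	 * additional algorithms this driver supports, which is why they
	 * can pass NULL for the callbacks already on file.
	 */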
	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)

	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_MD5_HMAC_96);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC_96);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_AES_XCBC_MAC_96);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
	REGISTER(CRYPTO_GZIP_COMP);
#undef REGISTER
}


/*
 * Pseudo-device init routine for software crypto.
 */
void swcryptoattach(int);

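/*
 * Called when the "swcrypto" pseudo-device attaches (typically pulled in
 * by a "pseudo-device swcrypto" line in the kernel configuration); the
 * num argument is unused.
 */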
void
swcryptoattach(int num)
{

	swcr_init();
}