/*	$NetBSD: cryptosoft.c,v 1.45 2014/06/21 17:34:30 christos Exp $ */
/*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.45 2014/06/21 17:34:30 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/cprng.h>
#include <sys/module.h>
#include <sys/device.h>

#ifdef _KERNEL_OPT
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <opencrypto/cryptosoft_xform.c>

union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)
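
/*
 * The COPYBACK/COPYDATA macros hide the difference between mbuf chains
 * and iovec (uio) buffers; flat CRYPTO_BUF_CONTIG buffers are handled
 * separately in the code below.
 */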

static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static int swcr_combined(struct cryptop *, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = cprng_fast32();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what cprng_fast32() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = cprng_fast32();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}
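	/*
	 * At this point "iv" holds the IV: for encryption it was either
	 * supplied by the caller, produced by the transform's reinit hook,
	 * or generated randomly, and was written into the buffer at
	 * crd_inject unless CRD_F_IV_PRESENT was set; for decryption it was
	 * taken from crd_iv or read from the buffer.
	 */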

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}
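
				/*
				 * The ciphertext just processed is the
				 * chaining value for the next block, so it
				 * is saved before decryption: into piv while
				 * ivp still points at the original IV, and
				 * directly into iv afterwards.
				 */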

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;		/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0;		/* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    const struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

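		/*
		 * Outer application of HMAC: sw_ictx was preloaded with the
		 * hash state for K ^ ipad and sw_octx with the state for
		 * K ^ opad by swcr_newsession(), so finishing the inner hash
		 * and running the digest through the outer context below
		 * yields H(K ^ opad || H(K ^ ipad || message)).
		 */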
		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
	case CRYPTO_AES_XCBC_MAC_96:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}

/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);
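
	/*
	 * Both halves of the AEAD transform must be present: crde/swe carry
	 * the AES-GCM/GMAC cipher state and crda/swa the GHASH side, and the
	 * two are driven block by block below so the authentication always
	 * covers the ciphertext.
	 */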

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			memset(blk, 0, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		memset(blk, 0, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}
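
	/*
	 * The length block above is the final GHASH input required by GCM:
	 * a 16-byte block holding len(AAD) and len(ciphertext) in bits as
	 * 64-bit big-endian values; only the low 32 bits of each field are
	 * set since crd_len is a 32-bit byte count.
	 */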

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
    void *buf, int outtype, int *res_size)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data at once, so if the data
	 * is not contiguous in the mbuf, it is copied into a temporary
	 * buffer first.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out,
		    *res_size);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data. m_copyback extends the
	 * mbuf as necessary.
	 */
	*res_size = (int)result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP &&
	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
	    result >= crd->crd_len) {
		/* Compression was useless, we lost time */
		free(out, M_CRYPTO_DATA);
		return 0;
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust the iov_len, it breaks the kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}
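	/*
	 * The session table starts with CRYPTO_SW_SESSIONS entries and is
	 * doubled whenever it fills up; slot 0 is never handed out, so a
	 * session id of 0 stays invalid.
	 */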

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;
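
			/*
			 * The XOR passes above implement the standard HMAC
			 * key preprocessing: sw_ictx is left holding the hash
			 * state after absorbing K ^ ipad (padded to the block
			 * size) and sw_octx the state after K ^ opad, so
			 * swcr_authcompute() never needs the key itself; the
			 * final XOR with HMAC_OPAD_VAL restores cri_key.
			 */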

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
			    cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
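
/*
 * Usage sketch (illustrative only; enc_key/auth_key are placeholder key
 * buffers and the exact crypto_newsession() prototype lives in
 * cryptodev.h): a caller reaches swcr_newsession() by chaining cryptoini
 * records, one per algorithm, and asking the framework for a session.
 * cri_klen is given in bits.
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *	int error;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	memset(&cria, 0, sizeof(cria));
 *	crie.cri_alg = CRYPTO_RIJNDAEL128_CBC;
 *	crie.cri_klen = 128;
 *	crie.cri_key = enc_key;
 *	crie.cri_next = &cria;
 *	cria.cri_alg = CRYPTO_SHA1_HMAC_96;
 *	cria.cri_klen = 160;
 *	cria.cri_key = auth_key;
 *	error = crypto_newsession(&sid, &crie, 0);
 *
 * Each cryptoini in the chain ends up as one swcr_data entry in the
 * session's list above.
 */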

/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	const struct swcr_enc_xform *txf;
	const struct swcr_auth_hash *axf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				explicit_memset(swd->sw_octx, 0, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				explicit_memset(swd->sw_octx, 0, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of
		 * XXX differentiating between the various instances of an
		 * XXX algorithm (so we can locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crp->crp_etype = swcr_combined(crp, type);
			goto done;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type, &crp->crp_olen)) != 0)
				goto done;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF(("request %p done\n", crp));
	crypto_done(crp);
	return 0;
}

static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (swcr_id < 0) {
		/* This should never happen */
		panic("Software crypto device cannot initialize!");
	}

	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)

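	/*
	 * The driver callbacks (newsession/freesession/process) are supplied
	 * once, with the CRYPTO_DES_CBC registration above; the REGISTER()
	 * calls below announce the remaining algorithms for the same
	 * software driver id.
	 */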
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_AES_GCM_16);
	REGISTER(CRYPTO_AES_GMAC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_MD5_HMAC_96);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC_96);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_AES_XCBC_MAC_96);
	REGISTER(CRYPTO_AES_128_GMAC);
	REGISTER(CRYPTO_AES_192_GMAC);
	REGISTER(CRYPTO_AES_256_GMAC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
	REGISTER(CRYPTO_GZIP_COMP);
#undef REGISTER
}


/*
 * Pseudo-device init routine for software crypto.
 */
void swcryptoattach(int);

void
swcryptoattach(int num)
{

	swcr_init();
}

void swcrypto_attach(device_t, device_t, void *);

void
swcrypto_attach(device_t parent, device_t self, void *opaque)
{

	swcr_init();

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int swcrypto_detach(device_t, int);

int
swcrypto_detach(device_t self, int flag)
{
	if (swcr_id >= 0)
		crypto_unregister_all(swcr_id);
	return 0;
}

int swcrypto_match(device_t, cfdata_t, void *);

int
swcrypto_match(device_t parent, cfdata_t data, void *opaque)
{

	return 1;
}

MODULE(MODULE_CLASS_DRIVER, swcrypto,
	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
	swcrypto_detach, NULL, NULL, NULL);

static int swcryptoloc[] = { -1, -1 };

static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};

static int
swcrypto_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_cfdriver_attach(&swcrypto_cd);
		if (error) {
			return error;
		}

		error = config_cfattach_attach(swcrypto_cd.cd_name,
		    &swcrypto_ca);
		if (error) {
			config_cfdriver_detach(&swcrypto_cd);
			aprint_error("%s: unable to register cfattach\n",
			    swcrypto_cd.cd_name);

			return error;
		}

		error = config_cfdata_attach(swcrypto_cfdata, 1);
		if (error) {
			config_cfattach_detach(swcrypto_cd.cd_name,
			    &swcrypto_ca);
			config_cfdriver_detach(&swcrypto_cd);
			aprint_error("%s: unable to register cfdata\n",
			    swcrypto_cd.cd_name);

			return error;
		}

		(void)config_attach_pseudo(swcrypto_cfdata);

		return 0;
	case MODULE_CMD_FINI:
		error = config_cfdata_detach(swcrypto_cfdata);
		if (error) {
			return error;
		}

		config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
		config_cfdriver_detach(&swcrypto_cd);

		return 0;
	default:
		return ENOTTY;
	}
}