/*	$NetBSD: cryptosoft.c,v 1.51 2017/06/01 08:49:35 knakahara Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.51 2017/06/01 08:49:35 knakahara Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 #include <sys/module.h>
37 #include <sys/device.h>
38
39 #ifdef _KERNEL_OPT
40 #include "opt_ocf.h"
41 #endif
42
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
46
47 #include <opencrypto/cryptosoft_xform.c>
48
49 #include "ioconf.h"
50
/*
 * Scratch context big enough for any supported authentication
 * transform; which member is live is determined by the algorithm of
 * the session (sw_axf) using it.
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};
61
/*
 * Session table.  Slot 0 is deliberately left empty (see
 * swcr_newsession); valid session ids are 1..swcr_sesnum-1.
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */
65
/*
 * Copy data to/from a crypto buffer that is either an mbuf chain or a
 * uio; "x" (the CRYPTO_BUF_* type) selects which.  The whole expansion
 * and every argument are parenthesized so the macros are safe in any
 * expression context and with compound arguments.
 */
#define COPYBACK(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? \
	    m_copyback((struct mbuf *)(a), (b), (c), (d)) \
	    : cuio_copyback((struct uio *)(a), (b), (c), (d)))
#define COPYDATA(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? \
	    m_copydata((struct mbuf *)(a), (b), (c), (d)) \
	    : cuio_copydata((struct uio *)(a), (b), (c), (d)))
72
/* Forward declarations for the software crypto driver entry points. */
static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static int swcr_combined(struct cryptop *, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);
79
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd describes the operation: the region to transform
 * (crd_skip/crd_len), where the IV lives in the buffer (crd_inject),
 * and the direction/IV flags.  sw carries the per-session transform
 * and key schedule; bufv is the data buffer, interpreted according to
 * outtype (CRYPTO_BUF_CONTIG, CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV).
 *
 * Transforms that provide a reinit hook (counter-style modes) keep
 * their own chaining state; for all others this function performs CBC
 * chaining explicitly.  Returns 0 on success, EINVAL on malformed
 * input (non-block-multiple length, truncated mbuf chain/uio).
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			/* The transform generates the IV itself. */
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = cprng_fast32();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what arc4random() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = cprng_fast32();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	/* ivp always points at the block to XOR with next (CBC only). */
	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			/* Chaining is internal to the transform. */
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  piv holds it while
					 * ivp still points at iv (the previous
					 * ciphertext is still needed below).
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks in place within this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			/* Fast path: whole blocks in place within this iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0; /* Done with uio encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
475
/*
 * Compute keyed-hash authenticator.
 *
 * Hashes crd_len bytes starting at crd_skip of buf (contiguous, mbuf
 * chain or uio per outtype) using the session's auth transform, then
 * finalizes according to the algorithm family (HMAC outer hash, KPDK
 * key append, or plain digest) and writes the authenticator of
 * authsize bytes at crd_inject — except for IOV requests, where it is
 * stored in crp->crp_mac instead.  Returns 0 or an error code.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    const struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Start from the precomputed inner context (keyed at setup). */
	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	/* Algorithm-specific finalization. */
	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: outer hash over the inner digest. */
		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* KPDK: append the raw key (stored in sw_octx) and finalize. */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
	case CRYPTO_AES_XCBC_MAC_96:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		/* For iovecs the MAC is returned out-of-band in the request. */
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}
573
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).
 *
 * The request must carry exactly one cipher descriptor (AES_GCM_16 or
 * AES_GMAC) and one MAC descriptor (AES_{128,192,256}_GMAC); both are
 * located by walking the session chain.  Data is processed blksz
 * bytes at a time through a bounce buffer: on encryption the cipher
 * runs before the MAC update, on decryption the MAC sees the
 * ciphertext first.  Only mbuf and uio buffers are supported.
 * Returns 0 or EINVAL.
 */
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	/* Pair up each descriptor with its session context. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			memset(blk, 0, blksz);	/* zero-pad the short tail */
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			/* MAC is over the ciphertext: update before decrypt. */
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block: AAD and data bit lengths as big-endian 64-bit */
		memset(blk, 0, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}
710
711 /*
712 * Apply a compression/decompression algorithm
713 */
714 static int
715 swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
716 void *buf, int outtype, int *res_size)
717 {
718 u_int8_t *data, *out;
719 const struct swcr_comp_algo *cxf;
720 int adj;
721 u_int32_t result;
722
723 cxf = sw->sw_cxf;
724
725 /* We must handle the whole buffer of data in one time
726 * then if there is not all the data in the mbuf, we must
727 * copy in a buffer.
728 */
729
730 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
731 if (data == NULL)
732 return (EINVAL);
733 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
734
735 if (crd->crd_flags & CRD_F_COMP)
736 result = cxf->compress(data, crd->crd_len, &out);
737 else
738 result = cxf->decompress(data, crd->crd_len, &out,
739 *res_size);
740
741 free(data, M_CRYPTO_DATA);
742 if (result == 0)
743 return EINVAL;
744
745 /* Copy back the (de)compressed data. m_copyback is
746 * extending the mbuf as necessary.
747 */
748 *res_size = (int)result;
749 /* Check the compressed size when doing compression */
750 if (crd->crd_flags & CRD_F_COMP &&
751 sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
752 result >= crd->crd_len) {
753 /* Compression was useless, we lost time */
754 free(out, M_CRYPTO_DATA);
755 return 0;
756 }
757
758 COPYBACK(outtype, buf, crd->crd_skip, result, out);
759 if (result < crd->crd_len) {
760 adj = result - crd->crd_len;
761 if (outtype == CRYPTO_BUF_MBUF) {
762 m_adj((struct mbuf *)buf, adj);
763 }
764 /* Don't adjust the iov_len, it breaks the kmem_free */
765 }
766 free(out, M_CRYPTO_DATA);
767 return 0;
768 }
769
/*
 * Generate a new software session.
 *
 * Finds (growing the table if needed) a free slot in swcr_sessions,
 * stores its index in *sid, and builds one swcr_data record per
 * cryptoini in the chain: key schedules for ciphers, precomputed
 * inner/outer HMAC contexts for keyed hashes, stored keys for KPDK,
 * plain contexts for digests/GMAC, and transform pointers for
 * compression.  On any failure the partial session is torn down via
 * swcr_freesession.  Returns 0, EINVAL or ENOBUFS.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot; slot 0 is never used. */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		/* Table full (or nonexistent): allocate/double it. */
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per requested algorithm, chained via sw_next. */
	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Ciphers: expand the key into a schedule. */
			error = txf->setkey(&((*swd)->sw_kschedule),
					cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			/*
			 * HMAC: precompute the inner (key^ipad) and outer
			 * (key^opad) contexts; the key is XOR-toggled in
			 * place and restored afterwards.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK: {
			unsigned char digest[SHA1_DIGEST_LENGTH];
			CTASSERT(SHA1_DIGEST_LENGTH >= MD5_DIGEST_LENGTH);
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(digest, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
		}

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) digest. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			/* MACs with their own key-setup hook. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
				cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
1047
1048 /*
1049 * Free a session.
1050 */
1051 static int
1052 swcr_freesession(void *arg, u_int64_t tid)
1053 {
1054 struct swcr_data *swd;
1055 const struct swcr_enc_xform *txf;
1056 const struct swcr_auth_hash *axf;
1057 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1058
1059 if (sid > swcr_sesnum || swcr_sessions == NULL ||
1060 swcr_sessions[sid] == NULL)
1061 return EINVAL;
1062
1063 /* Silently accept and return */
1064 if (sid == 0)
1065 return 0;
1066
1067 while ((swd = swcr_sessions[sid]) != NULL) {
1068 swcr_sessions[sid] = swd->sw_next;
1069
1070 switch (swd->sw_alg) {
1071 case CRYPTO_DES_CBC:
1072 case CRYPTO_3DES_CBC:
1073 case CRYPTO_BLF_CBC:
1074 case CRYPTO_CAST_CBC:
1075 case CRYPTO_SKIPJACK_CBC:
1076 case CRYPTO_RIJNDAEL128_CBC:
1077 case CRYPTO_CAMELLIA_CBC:
1078 case CRYPTO_AES_CTR:
1079 case CRYPTO_AES_GCM_16:
1080 case CRYPTO_AES_GMAC:
1081 case CRYPTO_NULL_CBC:
1082 txf = swd->sw_exf;
1083
1084 if (swd->sw_kschedule)
1085 txf->zerokey(&(swd->sw_kschedule));
1086 break;
1087
1088 case CRYPTO_MD5_HMAC:
1089 case CRYPTO_MD5_HMAC_96:
1090 case CRYPTO_SHA1_HMAC:
1091 case CRYPTO_SHA1_HMAC_96:
1092 case CRYPTO_SHA2_256_HMAC:
1093 case CRYPTO_SHA2_384_HMAC:
1094 case CRYPTO_SHA2_512_HMAC:
1095 case CRYPTO_RIPEMD160_HMAC:
1096 case CRYPTO_RIPEMD160_HMAC_96:
1097 case CRYPTO_NULL_HMAC:
1098 axf = swd->sw_axf;
1099
1100 if (swd->sw_ictx) {
1101 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1102 free(swd->sw_ictx, M_CRYPTO_DATA);
1103 }
1104 if (swd->sw_octx) {
1105 explicit_memset(swd->sw_octx, 0, axf->ctxsize);
1106 free(swd->sw_octx, M_CRYPTO_DATA);
1107 }
1108 break;
1109
1110 case CRYPTO_MD5_KPDK:
1111 case CRYPTO_SHA1_KPDK:
1112 axf = swd->sw_axf;
1113
1114 if (swd->sw_ictx) {
1115 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1116 free(swd->sw_ictx, M_CRYPTO_DATA);
1117 }
1118 if (swd->sw_octx) {
1119 explicit_memset(swd->sw_octx, 0, swd->sw_klen);
1120 free(swd->sw_octx, M_CRYPTO_DATA);
1121 }
1122 break;
1123
1124 case CRYPTO_MD5:
1125 case CRYPTO_SHA1:
1126 case CRYPTO_AES_XCBC_MAC_96:
1127 case CRYPTO_AES_128_GMAC:
1128 case CRYPTO_AES_192_GMAC:
1129 case CRYPTO_AES_256_GMAC:
1130 axf = swd->sw_axf;
1131
1132 if (swd->sw_ictx) {
1133 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1134 free(swd->sw_ictx, M_CRYPTO_DATA);
1135 }
1136 break;
1137
1138 case CRYPTO_DEFLATE_COMP:
1139 case CRYPTO_DEFLATE_COMP_NOGROW:
1140 case CRYPTO_GZIP_COMP:
1141 break;
1142 }
1143
1144 free(swd, M_CRYPTO_DATA);
1145 }
1146 return 0;
1147 }
1148
/*
 * Process a software request.
 *
 * Driver "process" entry point: validates the request, determines the
 * buffer type from crp_flags, then walks the descriptor chain and
 * dispatches each descriptor to the matching handler (cipher, MAC,
 * combined GCM/GMAC, or compression) using the per-session context
 * found in swcr_sessions.  Errors are reported through crp->crp_etype;
 * crypto_done() is always called before returning 0.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Map the request flags onto the CRYPTO_BUF_* buffer type. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* Null cipher: data passes through untouched. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/*
			 * Combined mode consumes the whole descriptor
			 * chain itself, hence the goto rather than break.
			 */
			crp->crp_etype = swcr_combined(crp, type);
			goto done;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			DPRINTF("compdec for %d\n", sw->sw_alg);
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type, &crp->crp_olen)) != 0)
				goto done;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF("request %p done\n", crp);
	crypto_done(crp);
	return 0;
}
1271
1272 static void
1273 swcr_init(void)
1274 {
1275 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1276 if (swcr_id < 0) {
1277 /* This should never happen */
1278 panic("Software crypto device cannot initialize!");
1279 }
1280
1281 crypto_register(swcr_id, CRYPTO_DES_CBC,
1282 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1283 #define REGISTER(alg) \
1284 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1285
1286 REGISTER(CRYPTO_3DES_CBC);
1287 REGISTER(CRYPTO_BLF_CBC);
1288 REGISTER(CRYPTO_CAST_CBC);
1289 REGISTER(CRYPTO_SKIPJACK_CBC);
1290 REGISTER(CRYPTO_CAMELLIA_CBC);
1291 REGISTER(CRYPTO_AES_CTR);
1292 REGISTER(CRYPTO_AES_GCM_16);
1293 REGISTER(CRYPTO_AES_GMAC);
1294 REGISTER(CRYPTO_NULL_CBC);
1295 REGISTER(CRYPTO_MD5_HMAC);
1296 REGISTER(CRYPTO_MD5_HMAC_96);
1297 REGISTER(CRYPTO_SHA1_HMAC);
1298 REGISTER(CRYPTO_SHA1_HMAC_96);
1299 REGISTER(CRYPTO_SHA2_256_HMAC);
1300 REGISTER(CRYPTO_SHA2_384_HMAC);
1301 REGISTER(CRYPTO_SHA2_512_HMAC);
1302 REGISTER(CRYPTO_RIPEMD160_HMAC);
1303 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1304 REGISTER(CRYPTO_NULL_HMAC);
1305 REGISTER(CRYPTO_MD5_KPDK);
1306 REGISTER(CRYPTO_SHA1_KPDK);
1307 REGISTER(CRYPTO_MD5);
1308 REGISTER(CRYPTO_SHA1);
1309 REGISTER(CRYPTO_AES_XCBC_MAC_96);
1310 REGISTER(CRYPTO_AES_128_GMAC);
1311 REGISTER(CRYPTO_AES_192_GMAC);
1312 REGISTER(CRYPTO_AES_256_GMAC);
1313 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1314 REGISTER(CRYPTO_DEFLATE_COMP);
1315 REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1316 REGISTER(CRYPTO_GZIP_COMP);
1317 #undef REGISTER
1318 }
1319
1320
1321 /*
1322 * Pseudo-device init routine for software crypto.
1323 */
1324
1325 void
1326 swcryptoattach(int num)
1327 {
1328 /*
1329 * Nothing to do here, initialization is handled by the
1330 * module initialization code in swcrypto_attach() below).
1331 */
1332 }
1333
1334 void swcrypto_attach(device_t, device_t, void *);
1335
1336 void
1337 swcrypto_attach(device_t parent, device_t self, void *opaque)
1338 {
1339
1340 swcr_init();
1341
1342 if (!pmf_device_register(self, NULL, NULL))
1343 aprint_error_dev(self, "couldn't establish power handler\n");
1344 }
1345
1346 int swcrypto_detach(device_t, int);
1347
1348 int
1349 swcrypto_detach(device_t self, int flag)
1350 {
1351 pmf_device_deregister(self);
1352 if (swcr_id >= 0)
1353 crypto_unregister_all(swcr_id);
1354 return 0;
1355 }
1356
int swcrypto_match(device_t, cfdata_t, void *);

/*
 * Autoconf match hook: the software pseudo-device is always available,
 * so the match unconditionally succeeds.
 */
int
swcrypto_match(device_t parent, cfdata_t data, void *opaque)
{

	return 1;
}
1365
/*
 * Module glue: declare the swcrypto driver module (listing the crypto
 * and compression modules it depends on), its autoconf driver, and the
 * attachment binding match/attach/detach above.
 */
MODULE(MODULE_CLASS_DRIVER, swcrypto,
	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
	swcrypto_detach, NULL, NULL, NULL);
1373
/* Locator array for the pseudo-device: no locators apply. */
static int swcryptoloc[] = { -1, -1 };

/*
 * Static cfdata used by swcrypto_modcmd() to config_attach_pseudo()
 * a single swcrypto unit at module load time.
 */
static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL } /* terminator */
};
1388
1389 static int
1390 swcrypto_modcmd(modcmd_t cmd, void *arg)
1391 {
1392 int error;
1393
1394 switch (cmd) {
1395 case MODULE_CMD_INIT:
1396 error = config_cfdriver_attach(&swcrypto_cd);
1397 if (error) {
1398 return error;
1399 }
1400
1401 error = config_cfattach_attach(swcrypto_cd.cd_name,
1402 &swcrypto_ca);
1403 if (error) {
1404 config_cfdriver_detach(&swcrypto_cd);
1405 aprint_error("%s: unable to register cfattach\n",
1406 swcrypto_cd.cd_name);
1407
1408 return error;
1409 }
1410
1411 error = config_cfdata_attach(swcrypto_cfdata, 1);
1412 if (error) {
1413 config_cfattach_detach(swcrypto_cd.cd_name,
1414 &swcrypto_ca);
1415 config_cfdriver_detach(&swcrypto_cd);
1416 aprint_error("%s: unable to register cfdata\n",
1417 swcrypto_cd.cd_name);
1418
1419 return error;
1420 }
1421
1422 (void)config_attach_pseudo(swcrypto_cfdata);
1423
1424 return 0;
1425 case MODULE_CMD_FINI:
1426 error = config_cfdata_detach(swcrypto_cfdata);
1427 if (error) {
1428 return error;
1429 }
1430
1431 config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
1432 config_cfdriver_detach(&swcrypto_cd);
1433
1434 return 0;
1435 default:
1436 return ENOTTY;
1437 }
1438 }
1439