/*	$NetBSD: cryptosoft.c,v 1.52 2017/06/23 11:41:58 knakahara Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.52 2017/06/23 11:41:58 knakahara Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 #include <sys/module.h>
37 #include <sys/device.h>
38
39 #ifdef _KERNEL_OPT
40 #include "opt_ocf.h"
41 #endif
42
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
46
47 #include <opencrypto/cryptosoft_xform.c>
48
49 #include "ioconf.h"
50
/*
 * Scratch space big enough to hold the per-algorithm hash context of any
 * supported authentication transform; the live context is memcpy'd in
 * from the session's precomputed sw_ictx/sw_octx before use.
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};
61
/*
 * Session table: slot 0 is deliberately left empty, valid sessions live
 * in 1..swcr_sesnum-1; the table is grown by doubling in swcr_newsession.
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */
65
/*
 * Copy data to/from the request buffer, dispatching on the buffer type
 * (mbuf chain vs. uio).  Callers with CRYPTO_BUF_CONTIG buffers must
 * not use these; they handle contiguous memory directly.
 */
#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)
72
/* Forward declarations for the software crypto driver entry points. */
static	int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static	int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static	int swcr_combined(struct cryptop *, int);
static	int swcr_process(void *, struct cryptop *, int);
static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static	int swcr_freesession(void *, u_int64_t);

static	int swcryptoattach_internal(void);
81
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd describes the operation (crd_skip/crd_len region, crd_inject
 * offset for the IV, and flags), sw holds the session state (transform
 * and key schedule), bufv is the data buffer whose layout is selected
 * by outtype (CRYPTO_BUF_CONTIG, _MBUF or _IOV).  CBC chaining is done
 * here; transforms with a reinit hook (e.g. CTR-like modes) handle
 * their own chaining internally.  Returns 0 on success, EINVAL on
 * malformed input (non-block-multiple length, truncated chain).
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	/* Transforms with a reinit hook may use an IV shorter than a block. */
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			/* Transform generates its own IV into iv[]. */
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = cprng_fast32();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what cprng_fast32() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = cprng_fast32();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Gather one block that straddles mbufs. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: process blocks fully inside this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				/* Gather one block that straddles iovecs. */
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			/* Fast path: process blocks fully inside this iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0; /* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
477
478 /*
479 * Compute keyed-hash authenticator.
480 */
481 int
482 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
483 const struct swcr_data *sw, void *buf, int outtype)
484 {
485 unsigned char aalg[AALG_MAX_RESULT_LEN];
486 const struct swcr_auth_hash *axf;
487 union authctx ctx;
488 int err;
489
490 if (sw->sw_ictx == 0)
491 return EINVAL;
492
493 axf = sw->sw_axf;
494
495 memcpy(&ctx, sw->sw_ictx, axf->ctxsize);
496
497 switch (outtype) {
498 case CRYPTO_BUF_CONTIG:
499 axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
500 break;
501 case CRYPTO_BUF_MBUF:
502 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
503 (int (*)(void*, void *, unsigned int)) axf->Update,
504 (void *) &ctx);
505 if (err)
506 return err;
507 break;
508 case CRYPTO_BUF_IOV:
509 err = cuio_apply((struct uio *) buf, crd->crd_skip,
510 crd->crd_len,
511 (int (*)(void *, void *, unsigned int)) axf->Update,
512 (void *) &ctx);
513 if (err) {
514 return err;
515 }
516 break;
517 default:
518 return EINVAL;
519 }
520
521 switch (sw->sw_alg) {
522 case CRYPTO_MD5_HMAC:
523 case CRYPTO_MD5_HMAC_96:
524 case CRYPTO_SHA1_HMAC:
525 case CRYPTO_SHA1_HMAC_96:
526 case CRYPTO_SHA2_256_HMAC:
527 case CRYPTO_SHA2_384_HMAC:
528 case CRYPTO_SHA2_512_HMAC:
529 case CRYPTO_RIPEMD160_HMAC:
530 case CRYPTO_RIPEMD160_HMAC_96:
531 if (sw->sw_octx == NULL)
532 return EINVAL;
533
534 axf->Final(aalg, &ctx);
535 memcpy(&ctx, sw->sw_octx, axf->ctxsize);
536 axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
537 axf->Final(aalg, &ctx);
538 break;
539
540 case CRYPTO_MD5_KPDK:
541 case CRYPTO_SHA1_KPDK:
542 if (sw->sw_octx == NULL)
543 return EINVAL;
544
545 axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
546 axf->Final(aalg, &ctx);
547 break;
548
549 case CRYPTO_NULL_HMAC:
550 case CRYPTO_MD5:
551 case CRYPTO_SHA1:
552 case CRYPTO_AES_XCBC_MAC_96:
553 axf->Final(aalg, &ctx);
554 break;
555 }
556
557 /* Inject the authentication data */
558 switch (outtype) {
559 case CRYPTO_BUF_CONTIG:
560 (void)memcpy((char *)buf + crd->crd_inject, aalg,
561 axf->auth_hash->authsize);
562 break;
563 case CRYPTO_BUF_MBUF:
564 m_copyback((struct mbuf *) buf, crd->crd_inject,
565 axf->auth_hash->authsize, aalg);
566 break;
567 case CRYPTO_BUF_IOV:
568 memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
569 break;
570 default:
571 return EINVAL;
572 }
573 return 0;
574 }
575
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).  The session must carry both an encryption
 * descriptor (AES_GCM_16 or AES_GMAC) and an authentication descriptor
 * (AES_{128,192,256}_GMAC); contiguous buffers are not supported.
 */
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	/* Pair up the encryption and authentication descriptors. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	/* Both halves are required; axf/exf/blksz/ivlen are set below here. */
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			memset(blk, 0, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt-then-MAC. */
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			/* MAC over ciphertext, then decrypt. */
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block: big-endian bit lengths of AAD and payload */
		memset(blk, 0, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}
712
/*
 * Apply a compression/decompression algorithm.
 *
 * The (de)compressed size is reported through *res_size; on entry it is
 * also used as the output size limit for decompression.  Returns 0 on
 * success (including the NOGROW "compression was useless" case, where
 * the buffer is left untouched) or EINVAL on failure.
 */
static int
swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
    void *buf, int outtype, int *res_size)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out,
		    *res_size);

	free(data, M_CRYPTO_DATA);
	/* result == 0 means the transform failed and allocated nothing. */
	if (result == 0)
		return EINVAL;

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	*res_size = (int)result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP &&
	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
	    result >= crd->crd_len) {
		/* Compression was useless, we lost time */
		free(out, M_CRYPTO_DATA);
		return 0;
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		/* Output shrank: trim the trailing stale bytes. */
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust the iov_len, it breaks the kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
771
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data per descriptor
 * and initializing the transform-specific state (key schedule for
 * ciphers, precomputed inner/outer HMAC contexts, stored key for KPDK,
 * etc.).  The session id is returned through *sid.  The session table
 * is grown by doubling when full; slot 0 is never used.  Any failure
 * frees the partial session and returns an error number.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find a free slot (slot 0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		/* Table missing or full: (re)allocate it. */
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per cryptoini descriptor, chained via sw_next. */
	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Ciphers: expand the key schedule. */
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			/*
			 * HMAC: precompute the inner (key^ipad) and outer
			 * (key^opad) contexts; the key is XOR-toggled in
			 * place and restored before returning.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK: {
			unsigned char digest[SHA1_DIGEST_LENGTH];
			CTASSERT(SHA1_DIGEST_LENGTH >= MD5_DIGEST_LENGTH);
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			/* NOTE(review): digest is discarded; Final here only
			 * pads/flushes the context state. */
			axf->Final(digest, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
		}

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) digest. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			/* Keyed MACs with a Setkey hook. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
			    cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
1049
1050 /*
1051 * Free a session.
1052 */
1053 static int
1054 swcr_freesession(void *arg, u_int64_t tid)
1055 {
1056 struct swcr_data *swd;
1057 const struct swcr_enc_xform *txf;
1058 const struct swcr_auth_hash *axf;
1059 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1060
1061 if (sid > swcr_sesnum || swcr_sessions == NULL ||
1062 swcr_sessions[sid] == NULL)
1063 return EINVAL;
1064
1065 /* Silently accept and return */
1066 if (sid == 0)
1067 return 0;
1068
1069 while ((swd = swcr_sessions[sid]) != NULL) {
1070 swcr_sessions[sid] = swd->sw_next;
1071
1072 switch (swd->sw_alg) {
1073 case CRYPTO_DES_CBC:
1074 case CRYPTO_3DES_CBC:
1075 case CRYPTO_BLF_CBC:
1076 case CRYPTO_CAST_CBC:
1077 case CRYPTO_SKIPJACK_CBC:
1078 case CRYPTO_RIJNDAEL128_CBC:
1079 case CRYPTO_CAMELLIA_CBC:
1080 case CRYPTO_AES_CTR:
1081 case CRYPTO_AES_GCM_16:
1082 case CRYPTO_AES_GMAC:
1083 case CRYPTO_NULL_CBC:
1084 txf = swd->sw_exf;
1085
1086 if (swd->sw_kschedule)
1087 txf->zerokey(&(swd->sw_kschedule));
1088 break;
1089
1090 case CRYPTO_MD5_HMAC:
1091 case CRYPTO_MD5_HMAC_96:
1092 case CRYPTO_SHA1_HMAC:
1093 case CRYPTO_SHA1_HMAC_96:
1094 case CRYPTO_SHA2_256_HMAC:
1095 case CRYPTO_SHA2_384_HMAC:
1096 case CRYPTO_SHA2_512_HMAC:
1097 case CRYPTO_RIPEMD160_HMAC:
1098 case CRYPTO_RIPEMD160_HMAC_96:
1099 case CRYPTO_NULL_HMAC:
1100 axf = swd->sw_axf;
1101
1102 if (swd->sw_ictx) {
1103 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1104 free(swd->sw_ictx, M_CRYPTO_DATA);
1105 }
1106 if (swd->sw_octx) {
1107 explicit_memset(swd->sw_octx, 0, axf->ctxsize);
1108 free(swd->sw_octx, M_CRYPTO_DATA);
1109 }
1110 break;
1111
1112 case CRYPTO_MD5_KPDK:
1113 case CRYPTO_SHA1_KPDK:
1114 axf = swd->sw_axf;
1115
1116 if (swd->sw_ictx) {
1117 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1118 free(swd->sw_ictx, M_CRYPTO_DATA);
1119 }
1120 if (swd->sw_octx) {
1121 explicit_memset(swd->sw_octx, 0, swd->sw_klen);
1122 free(swd->sw_octx, M_CRYPTO_DATA);
1123 }
1124 break;
1125
1126 case CRYPTO_MD5:
1127 case CRYPTO_SHA1:
1128 case CRYPTO_AES_XCBC_MAC_96:
1129 case CRYPTO_AES_128_GMAC:
1130 case CRYPTO_AES_192_GMAC:
1131 case CRYPTO_AES_256_GMAC:
1132 axf = swd->sw_axf;
1133
1134 if (swd->sw_ictx) {
1135 explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1136 free(swd->sw_ictx, M_CRYPTO_DATA);
1137 }
1138 break;
1139
1140 case CRYPTO_DEFLATE_COMP:
1141 case CRYPTO_DEFLATE_COMP_NOGROW:
1142 case CRYPTO_GZIP_COMP:
1143 break;
1144 }
1145
1146 free(swd, M_CRYPTO_DATA);
1147 }
1148 return 0;
1149 }
1150
/*
 * Process a software request.
 *
 * Driver entry point: validates the request, determines the buffer
 * type from crp_flags, then walks the descriptor chain dispatching each
 * to the matching transform routine.  Errors are reported through
 * crp->crp_etype; crypto_done() is always called before returning.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* Null cipher: nothing to do. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* AEAD: swcr_combined consumes the whole chain. */
			crp->crp_etype = swcr_combined(crp, type);
			goto done;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			DPRINTF("compdec for %d\n", sw->sw_alg);
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type, &crp->crp_olen)) != 0)
				goto done;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF("request %p done\n", crp);
	crypto_done(crp);
	return 0;
}
1273
1274 static void
1275 swcr_init(void)
1276 {
1277 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1278 if (swcr_id < 0) {
1279 /* This should never happen */
1280 panic("Software crypto device cannot initialize!");
1281 }
1282
1283 crypto_register(swcr_id, CRYPTO_DES_CBC,
1284 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1285 #define REGISTER(alg) \
1286 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1287
1288 REGISTER(CRYPTO_3DES_CBC);
1289 REGISTER(CRYPTO_BLF_CBC);
1290 REGISTER(CRYPTO_CAST_CBC);
1291 REGISTER(CRYPTO_SKIPJACK_CBC);
1292 REGISTER(CRYPTO_CAMELLIA_CBC);
1293 REGISTER(CRYPTO_AES_CTR);
1294 REGISTER(CRYPTO_AES_GCM_16);
1295 REGISTER(CRYPTO_AES_GMAC);
1296 REGISTER(CRYPTO_NULL_CBC);
1297 REGISTER(CRYPTO_MD5_HMAC);
1298 REGISTER(CRYPTO_MD5_HMAC_96);
1299 REGISTER(CRYPTO_SHA1_HMAC);
1300 REGISTER(CRYPTO_SHA1_HMAC_96);
1301 REGISTER(CRYPTO_SHA2_256_HMAC);
1302 REGISTER(CRYPTO_SHA2_384_HMAC);
1303 REGISTER(CRYPTO_SHA2_512_HMAC);
1304 REGISTER(CRYPTO_RIPEMD160_HMAC);
1305 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1306 REGISTER(CRYPTO_NULL_HMAC);
1307 REGISTER(CRYPTO_MD5_KPDK);
1308 REGISTER(CRYPTO_SHA1_KPDK);
1309 REGISTER(CRYPTO_MD5);
1310 REGISTER(CRYPTO_SHA1);
1311 REGISTER(CRYPTO_AES_XCBC_MAC_96);
1312 REGISTER(CRYPTO_AES_128_GMAC);
1313 REGISTER(CRYPTO_AES_192_GMAC);
1314 REGISTER(CRYPTO_AES_256_GMAC);
1315 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1316 REGISTER(CRYPTO_DEFLATE_COMP);
1317 REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1318 REGISTER(CRYPTO_GZIP_COMP);
1319 #undef REGISTER
1320 }
1321
1322
1323 /*
1324 * Pseudo-device init routine for software crypto.
1325 */
1326
1327 void
1328 swcryptoattach(int num)
1329 {
1330 /*
1331 * swcrypto_attach() must be called after attached cpus, because
1332 * it calls softint_establish() through below call path.
1333 * swcr_init() => crypto_get_driverid() => crypto_init()
1334 * => crypto_init0()
1335 * If softint_establish() is called before attached cpus that ncpu == 0,
1336 * the softint handler is established to CPU#0 only.
1337 *
1338 * So, swcrypto_attach() must be called from not module_init_class()
1339 * but config_finalize() when it is built as builtin module.
1340 */
1341 swcryptoattach_internal();
1342 }
1343
void swcrypto_attach(device_t, device_t, void *);

/*
 * Autoconf attach function: register every software crypto transform
 * with the opencrypto framework and hook the device into power
 * management (no suspend/resume handlers are needed).
 */
void
swcrypto_attach(device_t parent, device_t self, void *opaque)
{

	swcr_init();

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
1355
1356 int swcrypto_detach(device_t, int);
1357
1358 int
1359 swcrypto_detach(device_t self, int flag)
1360 {
1361 pmf_device_deregister(self);
1362 if (swcr_id >= 0)
1363 crypto_unregister_all(swcr_id);
1364 return 0;
1365 }
1366
int swcrypto_match(device_t, cfdata_t, void *);

/*
 * Autoconf match function.  The software crypto pseudo-device is
 * always available, so the match unconditionally succeeds.
 */
int
swcrypto_match(device_t parent, cfdata_t data, void *opaque)
{

	return 1;
}
1375
/*
 * Module glue: the dependency list names the opencrypto core plus the
 * cipher/compression modules whose transforms swcr_init() registers.
 */
MODULE(MODULE_CLASS_DRIVER, swcrypto,
    "opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
    swcrypto_detach, NULL, NULL, NULL);

/* Wildcard locators for the pseudo-device instance. */
static int swcryptoloc[] = { -1, -1 };

/*
 * cfdata table handed to config_cfdata_attach()/config_attach_pseudo();
 * a single swcrypto0 instance, terminated by an all-NULL sentinel entry.
 */
static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};
1398
1399 /*
1400 * Internal attach routine.
1401 * Don't call before attached cpus.
1402 */
1403 static int
1404 swcryptoattach_internal(void)
1405 {
1406 int error;
1407
1408 error = config_cfdriver_attach(&swcrypto_cd);
1409 if (error) {
1410 return error;
1411 }
1412
1413 error = config_cfattach_attach(swcrypto_cd.cd_name, &swcrypto_ca);
1414 if (error) {
1415 config_cfdriver_detach(&swcrypto_cd);
1416 aprint_error("%s: unable to register cfattach\n",
1417 swcrypto_cd.cd_name);
1418
1419 return error;
1420 }
1421
1422 error = config_cfdata_attach(swcrypto_cfdata, 1);
1423 if (error) {
1424 config_cfattach_detach(swcrypto_cd.cd_name,
1425 &swcrypto_ca);
1426 config_cfdriver_detach(&swcrypto_cd);
1427 aprint_error("%s: unable to register cfdata\n",
1428 swcrypto_cd.cd_name);
1429
1430 return error;
1431 }
1432
1433 (void)config_attach_pseudo(swcrypto_cfdata);
1434
1435 return 0;
1436 }
1437
1438 static int
1439 swcrypto_modcmd(modcmd_t cmd, void *arg)
1440 {
1441 int error = 0;
1442
1443 switch (cmd) {
1444 case MODULE_CMD_INIT:
1445 #ifdef _MODULE
1446 error = swcryptoattach_internal();
1447 #endif
1448 return error;
1449 case MODULE_CMD_FINI:
1450 error = config_cfdata_detach(swcrypto_cfdata);
1451 if (error) {
1452 return error;
1453 }
1454
1455 config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
1456 config_cfdriver_detach(&swcrypto_cd);
1457
1458 return 0;
1459 default:
1460 return ENOTTY;
1461 }
1462 }
1463