crypto.c revision 1.11 1 /* $NetBSD: crypto.c,v 1.11 2005/11/25 16:16:46 thorpej Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3 /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.11 2005/11/25 16:16:46 thorpej Exp $");
28
29 /* XXX FIXME: should be defopt'ed */
30 #define CRYPTO_TIMING /* enable cryptop timing stuff */
31
32 #include <sys/param.h>
33 #include <sys/reboot.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/pool.h>
38 #include <opencrypto/cryptodev.h>
39 #include <sys/kthread.h>
40 #include <sys/once.h>
41
42 #include <opencrypto/xform.h> /* XXX for M_XDATA */
43
44 #ifdef __NetBSD__
45 #define splcrypto splnet
/* below are kludges to check what is still missing */
47 #define SWI_CRYPTO 17
48 #define register_swi(lvl, fn) \
49 softintr_establish(IPL_SOFTNET, (void (*)(void*))fn, NULL)
50 #define unregister_swi(lvl, fn) softintr_disestablish(softintr_cookie)
51 #define setsoftcrypto(x) softintr_schedule(x)
52
53 static void nanouptime(struct timespec *);
54 static void
55 nanouptime(struct timespec *tp)
56 {
57 struct timeval tv;
58 microtime(&tv);
59 TIMEVAL_TO_TIMESPEC(&tv, tp);
60 }
61
62 #endif
63
64 #define SESID2HID(sid) (((sid) >> 32) & 0xffffffff)
65
66 /*
67 * Crypto drivers register themselves by allocating a slot in the
68 * crypto_drivers table with crypto_get_driverid() and then registering
69 * each algorithm they support with crypto_register() and crypto_kregister().
70 */
71 static struct cryptocap *crypto_drivers;
72 static int crypto_drivers_num;
73 static void* softintr_cookie;
74
75 /*
76 * There are two queues for crypto requests; one for symmetric (e.g.
77 * cipher) operations and one for asymmetric (e.g. MOD) operations.
78 * See below for how synchronization is handled.
79 */
80 static TAILQ_HEAD(,cryptop) crp_q = /* request queues */
81 TAILQ_HEAD_INITIALIZER(crp_q);
82 static TAILQ_HEAD(,cryptkop) crp_kq =
83 TAILQ_HEAD_INITIALIZER(crp_kq);
84
85 /*
86 * There are two queues for processing completed crypto requests; one
87 * for the symmetric and one for the asymmetric ops. We only need one
88 * but have two to avoid type futzing (cryptop vs. cryptkop). See below
89 * for how synchronization is handled.
90 */
91 static TAILQ_HEAD(,cryptop) crp_ret_q = /* callback queues */
92 TAILQ_HEAD_INITIALIZER(crp_ret_q);
93 static TAILQ_HEAD(,cryptkop) crp_ret_kq =
94 TAILQ_HEAD_INITIALIZER(crp_ret_kq);
95
/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
100 struct pool cryptop_pool;
101 struct pool cryptodesc_pool;
102 int crypto_pool_initialized = 0;
103
104 #ifdef __NetBSD__
105 static void deferred_crypto_thread(void *arg);
106 #endif
107
108 int crypto_usercrypto = 1; /* userland may open /dev/crypto */
109 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
110 /*
111 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
112 * access to hardware versus software transforms as below:
113 *
114 * crypto_devallowsoft < 0: Force userlevel requests to use software
115 * transforms, always
116 * crypto_devallowsoft = 0: Use hardware if present, grant userlevel
117 * requests for non-accelerated transforms
118 * (handling the latter in software)
119 * crypto_devallowsoft > 0: Allow user requests only for transforms which
120 * are hardware-accelerated.
121 */
122 int crypto_devallowsoft = 1; /* only use hardware crypto */
123
124 #ifdef __FreeBSD__
125 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
126 &crypto_usercrypto, 0,
127 "Enable/disable user-mode access to crypto support");
128 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
129 &crypto_userasymcrypto, 0,
130 "Enable/disable user-mode access to asymmetric crypto support");
131 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
132 &crypto_devallowsoft, 0,
133 "Enable/disable use of software asym crypto support");
134 #endif
135
136 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
137
138 /*
139 * Synchronization: read carefully, this is non-trivial.
140 *
141 * Crypto requests are submitted via crypto_dispatch. Typically
142 * these come in from network protocols at spl0 (output path) or
143 * spl[,soft]net (input path).
144 *
145 * Requests are typically passed on the driver directly, but they
146 * may also be queued for processing by a software interrupt thread,
147 * cryptointr, that runs at splsoftcrypto. This thread dispatches
148 * the requests to crypto drivers (h/w or s/w) who call crypto_done
149 * when a request is complete. Hardware crypto drivers are assumed
150 * to register their IRQ's as network devices so their interrupt handlers
151 * and subsequent "done callbacks" happen at spl[imp,net].
152 *
153 * Completed crypto ops are queued for a separate kernel thread that
154 * handles the callbacks at spl0. This decoupling insures the crypto
155 * driver interrupt service routine is not delayed while the callback
156 * takes place and that callbacks are delivered after a context switch
157 * (as opposed to a software interrupt that clients must block).
158 *
159 * This scheme is not intended for SMP machines.
160 */
161 static void cryptointr(void); /* swi thread to dispatch ops */
162 static void cryptoret(void); /* kernel thread for callbacks*/
163 static struct proc *cryptoproc;
164 static void crypto_destroy(void);
165 static int crypto_invoke(struct cryptop *crp, int hint);
166 static int crypto_kinvoke(struct cryptkop *krp, int hint);
167
168 static struct cryptostats cryptostats;
169 static int crypto_timing = 0;
170
#ifdef __FreeBSD__
/*
 * FreeBSD sysctl knobs for statistics and timing.  The crypto_stats
 * node was previously declared twice; register it exactly once.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
180
/*
 * One-time framework initialization: allocate the driver capability
 * table, hook up the dispatch software interrupt and start (or, on
 * NetBSD, defer starting) the callback kernel thread.
 * Called exactly once via RUN_ONCE from crypto_init().
 */
static void
crypto_init0(void)
{
#ifdef __FreeBSD__
	int error;

	/* FreeBSD uses private zones instead of NetBSD pools. */
	cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
	cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
				0, 0, 1);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		return;
	}
#endif

	/* M_NOWAIT: may be called early; failure leaves the framework dead. */
	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	/* Soft interrupt that drains the request queues (cryptointr). */
	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
#ifdef __FreeBSD__
	error = kthread_create((void (*)(void *)) cryptoret, NULL,
		    &cryptoproc, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		crypto_destroy();
	}
#else
	/*
	 * NetBSD: kthread_create1 cannot run this early, so queue a
	 * hook that creates the cryptoret thread after boot.
	 */
	kthread_create( deferred_crypto_thread, NULL);
#endif
}
218
/*
 * Public initialization entry point.  Safe to call multiple times;
 * RUN_ONCE guarantees crypto_init0() executes only on the first call.
 */
void
crypto_init(void)
{
	ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}
226
227 static void
228 crypto_destroy(void)
229 {
230 /* XXX no wait to reclaim zones */
231 if (crypto_drivers != NULL)
232 free(crypto_drivers, M_CRYPTO_DATA);
233 unregister_swi(SWI_CRYPTO, cryptointr);
234 }
235
/*
 * Create a new crypto session.
 *
 * sid:  out-parameter; on success receives the 64-bit session id,
 *       driver index in the high 32 bits, driver-local id in the low.
 * cri:  linked list of algorithm/key descriptions the session needs.
 * hard: >0 require hardware, <0 require software, 0 take either.
 *
 * Returns 0 on success or an errno (EINVAL if no driver matches).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;	/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
				crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Compose sid: (hid << 32) | lid. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			/* First matching driver wins, success or not. */
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
311
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).  Drops the driver's session refcount, invokes the driver's
 * cleanup hook if it has one, and recycles the driver slot once a
 * CLEANUP-marked driver reaches zero sessions.
 *
 * Returns 0, or EINVAL/ENOENT for a bad table or session id.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}
360
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.  Doubles the capability
 * table when full.  Returns the slot index, or -1 on allocation
 * failure or table-size wrap-around.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	/* Idempotent; guarantees the table exists before we scan it. */
	crypto_init();

	s = splcrypto();
	/* A free slot has no process hook, no CLEANUP flag, no sessions. */
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	/* cc_sessions = 1 reserves the slot until crypto_register() unmarks it. */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
417
418 static struct cryptocap *
419 crypto_checkdriver(u_int32_t hid)
420 {
421 if (crypto_drivers == NULL)
422 return NULL;
423 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
424 }
425
/*
 * Register support for a key-related (asymmetric) algorithm.  This
 * routine is called once for each algorithm supported by a driver.
 * The first call also records the driver's kprocess callback and
 * argument.  Returns 0, or EINVAL for a bad driver id or algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: CRK_ALGORITM_MIN is the (misspelled) constant from cryptodev.h. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the callback. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
469
/*
 * Register support for a non-key-related (symmetric/hash) algorithm.
 * This routine is called once for each such algorithm supported by a
 * driver.  The first call also installs the driver's callbacks and
 * clears the cc_sessions "reserved" mark set by crypto_get_driverid().
 * Returns 0, or EINVAL for a bad driver id or algorithm.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* Only the first registration installs the callbacks. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
521
/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/*
			 * Save the session count, wipe the whole entry, then
			 * restore the count with CLEANUP set so in-flight
			 * sessions are detected and rerouted later.
			 */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
567
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		/* Same wipe-and-restore trick as crypto_unregister(). */
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
604
605 /*
606 * Clear blockage on a driver. The what parameter indicates whether
607 * the driver is now ready for cryptop's and/or cryptokop's.
608 */
609 int
610 crypto_unblock(u_int32_t driverid, int what)
611 {
612 struct cryptocap *cap;
613 int needwakeup, err, s;
614
615 s = splcrypto();
616 cap = crypto_checkdriver(driverid);
617 if (cap != NULL) {
618 needwakeup = 0;
619 if (what & CRYPTO_SYMQ) {
620 needwakeup |= cap->cc_qblocked;
621 cap->cc_qblocked = 0;
622 }
623 if (what & CRYPTO_ASYMQ) {
624 needwakeup |= cap->cc_kqblocked;
625 cap->cc_kqblocked = 0;
626 }
627 if (needwakeup) {
628 setsoftcrypto(softintr_cookie);
629 }
630 err = 0;
631 } else
632 err = EINVAL;
633 splx(s);
634
635 return err;
636 }
637
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Without CRYPTO_F_BATCH the op is handed to the driver immediately
 * (unless the driver is blocked); with it, the op is queued for the
 * soft interrupt thread so it can be batched.  Returns 0 or the
 * driver's error from crypto_invoke().
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread. This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		/* Only kick the swi on an empty->non-empty transition. */
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
703
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 *
 * Tries the driver directly first; queues the op if the driver is
 * blocked or returns ERESTART.  Returns 0 or an error from
 * crypto_kinvoke().
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptkop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
742
/*
 * Dispatch an asymmetric crypto request to the appropriate crypto device.
 * Scans the driver table for the first driver supporting krp->krp_op
 * (skipping software drivers when crypto_devallowsoft == 0).  On any
 * error the completion path (crypto_kdone) is run with krp_status set;
 * the function itself returns 0 except for invalid arguments.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		/* Record the chosen driver for later unblock/requeue. */
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
785
#ifdef CRYPTO_TIMING
/*
 * Fold the elapsed time since *tv into the timing-statistics record
 * ts (accumulator, min, max, count), then reset *tv to "now" so the
 * caller can measure the next stage of the same request.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, delta;

	nanouptime(&now);
	delta.tv_sec = now.tv_sec - tv->tv_sec;
	delta.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (delta.tv_nsec < 0) {
		/* Borrow a second to normalize the nanosecond field. */
		delta.tv_sec--;
		delta.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &delta, &delta);
	if (timespeccmp(&delta, &ts->min, <))
		ts->min = delta;
	if (timespeccmp(&delta, &ts->max, >))
		ts->max = delta;
	ts->count++;

	*tv = now;
}
#endif
809
/*
 * Dispatch a crypto request to the appropriate crypto device.
 * If the session's driver has unregistered, the session is migrated
 * to a new driver and the op completes with EAGAIN so the caller
 * resubmits.  Returns 0 (errors are delivered via crypto_done) or
 * the driver's return value, which may be ERESTART.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* A CLEANUP driver loses its session ref; forces migration below. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * Chain the CRD_INI records embedded in each descriptor so
		 * they form the cryptoini list crypto_newsession expects.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
869
870 /*
871 * Release a set of crypto descriptors.
872 */
873 void
874 crypto_freereq(struct cryptop *crp)
875 {
876 struct cryptodesc *crd;
877 int s;
878
879 if (crp == NULL)
880 return;
881
882 s = splcrypto();
883
884 while ((crd = crp->crp_desc) != NULL) {
885 crp->crp_desc = crd->crd_next;
886 pool_put(&cryptodesc_pool, crd);
887 }
888
889 pool_put(&cryptop_pool, crp);
890 splx(s);
891 }
892
/*
 * Acquire a zeroed crypto request carrying num zeroed descriptors
 * (linked through crp_desc).  Returns NULL if either pool is empty.
 * The pools are initialized lazily on first use, under splcrypto.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL);
		crypto_pool_initialized = 1;
	}

	/* Flags 0: do not sleep; may return NULL. */
	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			/* Free what we built so far; no partial requests. */
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		/* Prepend: descriptors end up in reverse order of allocation. */
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
936
/*
 * Invoke the callback on behalf of the driver (symmetric ops).
 * On NetBSD the direct-callback (CRYPTO_F_CBIMM) fast path is
 * compiled out (see comment below); completions are always queued
 * for the cryptoret kernel thread.
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
	 * has done its tsleep().
	 */
#ifndef __NetBSD__
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly. This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else
#endif /* __NetBSD__ */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed. Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Wake cryptoret only on the empty->non-empty transition. */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
994
/*
 * Invoke the callback on behalf of the driver (asymmetric ops).
 * Queues the completed cryptkop for the cryptoret kernel thread.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed. Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	/*
	 * NB: &crp_ret_q (not &crp_ret_kq) is the wakeup channel on
	 * purpose -- cryptoret tsleeps on &crp_ret_q for both queues.
	 */
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}
1018
1019 int
1020 crypto_getfeat(int *featp)
1021 {
1022 int hid, kalg, feat = 0;
1023 int s;
1024
1025 s = splcrypto();
1026
1027 if (crypto_userasymcrypto == 0)
1028 goto out;
1029
1030 for (hid = 0; hid < crypto_drivers_num; hid++) {
1031 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1032 crypto_devallowsoft == 0) {
1033 continue;
1034 }
1035 if (crypto_drivers[hid].cc_kprocess == NULL)
1036 continue;
1037 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1038 if ((crypto_drivers[hid].cc_kalg[kalg] &
1039 CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1040 feat |= 1 << kalg;
1041 }
1042 out:
1043 splx(s);
1044 *featp = feat;
1045 return (0);
1046 }
1047
1048 /*
1049 * Software interrupt thread to dispatch crypto requests.
1050 */
1051 static void
1052 cryptointr(void)
1053 {
1054 struct cryptop *crp, *submit;
1055 struct cryptkop *krp;
1056 struct cryptocap *cap;
1057 int result, hint, s;
1058
1059 printf("crypto softint\n");
1060 cryptostats.cs_intrs++;
1061 s = splcrypto();
1062 do {
1063 /*
1064 * Find the first element in the queue that can be
1065 * processed and look-ahead to see if multiple ops
1066 * are ready for the same driver.
1067 */
1068 submit = NULL;
1069 hint = 0;
1070 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1071 u_int32_t hid = SESID2HID(crp->crp_sid);
1072 cap = crypto_checkdriver(hid);
1073 if (cap == NULL || cap->cc_process == NULL) {
1074 /* Op needs to be migrated, process it. */
1075 if (submit == NULL)
1076 submit = crp;
1077 break;
1078 }
1079 if (!cap->cc_qblocked) {
1080 if (submit != NULL) {
1081 /*
1082 * We stop on finding another op,
1083 * regardless whether its for the same
1084 * driver or not. We could keep
1085 * searching the queue but it might be
1086 * better to just use a per-driver
1087 * queue instead.
1088 */
1089 if (SESID2HID(submit->crp_sid) == hid)
1090 hint = CRYPTO_HINT_MORE;
1091 break;
1092 } else {
1093 submit = crp;
1094 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1095 break;
1096 /* keep scanning for more are q'd */
1097 }
1098 }
1099 }
1100 if (submit != NULL) {
1101 TAILQ_REMOVE(&crp_q, submit, crp_next);
1102 result = crypto_invoke(submit, hint);
1103 if (result == ERESTART) {
1104 /*
1105 * The driver ran out of resources, mark the
1106 * driver ``blocked'' for cryptop's and put
1107 * the request back in the queue. It would
1108 * best to put the request back where we got
1109 * it but that's hard so for now we put it
1110 * at the front. This should be ok; putting
1111 * it at the end does not work.
1112 */
1113 /* XXX validate sid again? */
1114 crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1115 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1116 cryptostats.cs_blocks++;
1117 }
1118 }
1119
1120 /* As above, but for key ops */
1121 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1122 cap = crypto_checkdriver(krp->krp_hid);
1123 if (cap == NULL || cap->cc_kprocess == NULL) {
1124 /* Op needs to be migrated, process it. */
1125 break;
1126 }
1127 if (!cap->cc_kqblocked)
1128 break;
1129 }
1130 if (krp != NULL) {
1131 TAILQ_REMOVE(&crp_kq, krp, krp_next);
1132 result = crypto_kinvoke(krp, 0);
1133 if (result == ERESTART) {
1134 /*
1135 * The driver ran out of resources, mark the
1136 * driver ``blocked'' for cryptkop's and put
1137 * the request back in the queue. It would
1138 * best to put the request back where we got
1139 * it but that's hard so for now we put it
1140 * at the front. This should be ok; putting
1141 * it at the end does not work.
1142 */
1143 /* XXX validate sid again? */
1144 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1145 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1146 cryptostats.cs_kblocks++;
1147 }
1148 }
1149 } while (submit != NULL || krp != NULL);
1150 splx(s);
1151 }
1152
/*
 * Kernel thread to do callbacks.
 *
 * Sleeps on &crp_ret_q (the wakeup channel used by both crypto_done
 * and crypto_kdone), drains one entry from each return queue per
 * iteration, and drops to the callers' ipl while running callbacks.
 * Never returns.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			/* NOTE(review): counts wakeups, not deliveries -- looks odd but kept as-is. */
			cryptostats.cs_rets++;
		}
	}
}
1199
/*
 * Boot-time hook (queued by crypto_init0 via kthread_create) that
 * starts the cryptoret callback thread once kthread creation is
 * possible.  On failure the framework is torn down.  arg is unused;
 * the signature is dictated by kthread_create.
 */
static void
deferred_crypto_thread(void *arg)
{
	int error;

	error = kthread_create1((void (*)(void*)) cryptoret, NULL,
				&cryptoproc, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		crypto_destroy();
	}
}
1214
1215 #ifdef __FreeBSD__
1216 /*
1217 * Initialization code, both for static and dynamic loading.
1218 */
1219 static int
1220 crypto_modevent(module_t mod, int type, void *unused)
1221 {
1222 int error = EINVAL;
1223
1224 switch (type) {
1225 case MOD_LOAD:
1226 error = crypto_init();
1227 if (error == 0 && bootverbose)
1228 printf("crypto: <crypto core>\n");
1229 break;
1230 case MOD_UNLOAD:
1231 /*XXX disallow if active sessions */
1232 error = 0;
1233 crypto_destroy();
1234 break;
1235 }
1236 return error;
1237 }
/* FreeBSD module glue: register the "crypto" module and its event handler. */
static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};

MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1246 #endif /* __FreeBSD__ */
1247
1248
1249