crypto.c revision 1.12 1 /* $NetBSD: crypto.c,v 1.12 2006/01/16 21:45:38 yamt Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3 /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.12 2006/01/16 21:45:38 yamt Exp $");
28
29 /* XXX FIXME: should be defopt'ed */
30 #define CRYPTO_TIMING /* enable cryptop timing stuff */
31
32 #include <sys/param.h>
33 #include <sys/reboot.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/pool.h>
38 #include <opencrypto/cryptodev.h>
39 #include <sys/kthread.h>
40 #include <sys/once.h>
41
42 #include <opencrypto/xform.h> /* XXX for M_XDATA */
43
44 #ifdef __NetBSD__
45 #define splcrypto splnet
46 /* below is kludges to check whats still missing */
47 #define SWI_CRYPTO 17
48 #define register_swi(lvl, fn) \
49 softintr_establish(IPL_SOFTNET, (void (*)(void*))fn, NULL)
50 #define unregister_swi(lvl, fn) softintr_disestablish(softintr_cookie)
51 #define setsoftcrypto(x) softintr_schedule(x)
52
/*
 * Compatibility kludge: this NetBSD vintage lacks nanouptime(), so
 * approximate it from microtime().
 * NOTE(review): microtime() presumably yields wall-clock time rather
 * than time-since-boot, so CRYPTO_TIMING deltas that straddle a clock
 * step may be skewed -- confirm before trusting the timing stats.
 */
static void nanouptime(struct timespec *);
static void
nanouptime(struct timespec *tp)
{
	struct timeval tv;
	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, tp);
}
61
62 #endif
63
64 #define SESID2HID(sid) (((sid) >> 32) & 0xffffffff)
65
66 /*
67 * Crypto drivers register themselves by allocating a slot in the
68 * crypto_drivers table with crypto_get_driverid() and then registering
69 * each algorithm they support with crypto_register() and crypto_kregister().
70 */
71 static struct cryptocap *crypto_drivers;
72 static int crypto_drivers_num;
73 static void* softintr_cookie;
74
75 /*
76 * There are two queues for crypto requests; one for symmetric (e.g.
77 * cipher) operations and one for asymmetric (e.g. MOD) operations.
78 * See below for how synchronization is handled.
79 */
80 static TAILQ_HEAD(,cryptop) crp_q = /* request queues */
81 TAILQ_HEAD_INITIALIZER(crp_q);
82 static TAILQ_HEAD(,cryptkop) crp_kq =
83 TAILQ_HEAD_INITIALIZER(crp_kq);
84
85 /*
86 * There are two queues for processing completed crypto requests; one
87 * for the symmetric and one for the asymmetric ops. We only need one
88 * but have two to avoid type futzing (cryptop vs. cryptkop). See below
89 * for how synchronization is handled.
90 */
91 static TAILQ_HEAD(,cryptop) crp_ret_q = /* callback queues */
92 TAILQ_HEAD_INITIALIZER(crp_ret_q);
93 static TAILQ_HEAD(,cryptkop) crp_ret_kq =
94 TAILQ_HEAD_INITIALIZER(crp_ret_kq);
95
96 /*
97 * Crypto op and desciptor data structures are allocated
98 * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) .
99 */
100 struct pool cryptop_pool;
101 struct pool cryptodesc_pool;
102 int crypto_pool_initialized = 0;
103
104 #ifdef __NetBSD__
105 static void deferred_crypto_thread(void *arg);
106 #endif
107
108 int crypto_usercrypto = 1; /* userland may open /dev/crypto */
109 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
110 /*
111 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
112 * access to hardware versus software transforms as below:
113 *
114 * crypto_devallowsoft < 0: Force userlevel requests to use software
115 * transforms, always
116 * crypto_devallowsoft = 0: Use hardware if present, grant userlevel
117 * requests for non-accelerated transforms
118 * (handling the latter in software)
119 * crypto_devallowsoft > 0: Allow user requests only for transforms which
120 * are hardware-accelerated.
121 */
122 int crypto_devallowsoft = 1; /* only use hardware crypto */
123
124 #ifdef __FreeBSD__
125 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
126 &crypto_usercrypto, 0,
127 "Enable/disable user-mode access to crypto support");
128 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
129 &crypto_userasymcrypto, 0,
130 "Enable/disable user-mode access to asymmetric crypto support");
131 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
132 &crypto_devallowsoft, 0,
133 "Enable/disable use of software asym crypto support");
134 #endif
135
136 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
137
138 /*
139 * Synchronization: read carefully, this is non-trivial.
140 *
141 * Crypto requests are submitted via crypto_dispatch. Typically
142 * these come in from network protocols at spl0 (output path) or
143 * spl[,soft]net (input path).
144 *
145 * Requests are typically passed on the driver directly, but they
146 * may also be queued for processing by a software interrupt thread,
147 * cryptointr, that runs at splsoftcrypto. This thread dispatches
148 * the requests to crypto drivers (h/w or s/w) who call crypto_done
149 * when a request is complete. Hardware crypto drivers are assumed
150 * to register their IRQ's as network devices so their interrupt handlers
151 * and subsequent "done callbacks" happen at spl[imp,net].
152 *
153 * Completed crypto ops are queued for a separate kernel thread that
154 * handles the callbacks at spl0. This decoupling insures the crypto
155 * driver interrupt service routine is not delayed while the callback
156 * takes place and that callbacks are delivered after a context switch
157 * (as opposed to a software interrupt that clients must block).
158 *
159 * This scheme is not intended for SMP machines.
160 */
161 static void cryptointr(void); /* swi thread to dispatch ops */
162 static void cryptoret(void); /* kernel thread for callbacks*/
163 static struct proc *cryptoproc;
164 static void crypto_destroy(void);
165 static int crypto_invoke(struct cryptop *crp, int hint);
166 static int crypto_kinvoke(struct cryptkop *krp, int hint);
167
168 static struct cryptostats cryptostats;
169 static int crypto_timing = 0;
170
#ifdef __FreeBSD__
/*
 * FreeBSD-only sysctl knobs for the stats/timing globals above.
 * The crypto_stats node was declared twice here; the duplicate
 * SYSCTL_STRUCT (same parent, same name) has been removed -- a second
 * registration of the identical OID is at best redundant.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
180
181 static int
182 crypto_init0(void)
183 {
184 #ifdef __FreeBSD__
185 int error;
186
187 cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
188 cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
189 0, 0, 1);
190 if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
191 printf("crypto_init: cannot setup crypto zones\n");
192 return;
193 }
194 #endif
195
196 crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
197 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
198 if (crypto_drivers == NULL) {
199 printf("crypto_init: cannot malloc driver table\n");
200 return 0;
201 }
202 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
203
204 softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
205 #ifdef __FreeBSD__
206 error = kthread_create((void (*)(void *)) cryptoret, NULL,
207 &cryptoproc, "cryptoret");
208 if (error) {
209 printf("crypto_init: cannot start cryptoret thread; error %d",
210 error);
211 crypto_destroy();
212 }
213 #else
214 /* defer thread creation until after boot */
215 kthread_create( deferred_crypto_thread, NULL);
216 #endif
217 return 0;
218 }
219
/*
 * Public initialization entry point.  Safe to call any number of times
 * from any driver attach path; RUN_ONCE guarantees crypto_init0() runs
 * exactly once.  Any error from crypto_init0 is discarded here.
 */
void
crypto_init(void)
{
	ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}
227
/*
 * Tear down what crypto_init0() set up: release the driver table and
 * the dispatch software interrupt.  Called only on init failure (and
 * from the FreeBSD module-unload path).
 */
static void
crypto_destroy(void)
{
	/* XXX no wait to reclaim zones */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	unregister_swi(SWI_CRYPTO, cryptointr);
}
236
237 /*
238 * Create a new session.
239 */
/*
 * Create a new session.
 *
 * Scans the driver table and binds the session to the first driver that
 * supports every algorithm in the 'cri' chain.  'hard' selects driver
 * class: > 0 hardware only, < 0 software only, 0 either.  On success
 * *sid encodes the driver index in the upper 32 bits and the
 * driver-local session id in the lower 32 (see SESID2HID) and the
 * driver's session count is bumped.  Returns 0 or an errno (EINVAL when
 * no usable driver is found, or whatever cc_newsession reports).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		/* cr == NULL iff the scan fell off the end: all supported. */
		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
				crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Compose sid = (hid << 32) | lid. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
312
313 /*
314 * Delete an existing session (or a reserved session on an unregistered
315 * driver).
316 */
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 *
 * Drops the driver's session refcount, invokes the driver's
 * cc_freesession callback if present, and -- when the driver was
 * previously marked CRYPTOCAP_F_CLEANUP and this was its last session --
 * zeroes the slot so crypto_get_driverid() can reuse it.
 * Returns 0, EINVAL (no driver table), ENOENT (bad driver index), or the
 * driver callback's error.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}
361
362 /*
363 * Return an unused driver id. Used by drivers prior to registering
364 * support for the algorithms they handle.
365 */
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 *
 * A free slot has no process callback, no pending CLEANUP, and no
 * sessions.  If the table is full it is doubled (copy + swap under
 * splcrypto).  The chosen slot is marked busy by setting cc_sessions=1;
 * crypto_register() resets it to 0 once real registration happens.
 * Returns the driver index, or -1 on allocation failure/size overflow.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	/* Make sure the framework itself is up before handing out ids. */
	crypto_init();

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
418
419 static struct cryptocap *
420 crypto_checkdriver(u_int32_t hid)
421 {
422 if (crypto_drivers == NULL)
423 return NULL;
424 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
425 }
426
427 /*
428 * Register support for a key-related algorithm. This routine
429 * is called once for each algorithm supported a driver.
430 */
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported a driver.
 *
 * Validates kalg against [CRK_ALGORITM_MIN..CRK_ALGORITHM_MAX]
 * (NB: "ALGORITM" spelling comes from the cryptodev.h constant) and
 * records the flags.  The first kregister call also installs the
 * driver's key-process callback/arg.  Returns 0 or EINVAL.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the callback. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
470
471 /*
472 * Register support for a non-key-related algorithm. This routine
473 * is called once for each such algorithm supported by a driver.
474 */
/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 *
 * Records per-algorithm flags and the maximum operation length.  The
 * first call for a driver also installs its session/process callbacks
 * and clears the cc_sessions "reserved" mark set by
 * crypto_get_driverid().  Returns 0 or EINVAL.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* Only the first registration installs the callbacks. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
522
523 /*
524 * Unregister a crypto driver. If there are pending sessions using it,
525 * leave enough information around so that subsequent calls using those
526 * sessions will correctly detect the driver has been unregistered and
527 * reroute requests.
528 */
/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 *
 * Clears one algorithm; if it was the driver's last, the slot is wiped
 * (bzero) and -- when sessions are still outstanding -- re-marked with
 * CRYPTOCAP_F_CLEANUP plus the saved session count so
 * crypto_freesession() can finish the reclaim later.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* NB: deliberately zero first, then restore marks. */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
568
569 /*
570 * Unregister all algorithms associated with a crypto driver.
571 * If there are pending sessions using it, leave enough information
572 * around so that subsequent calls using those sessions will
573 * correctly detect the driver has been unregistered and reroute
574 * requests.
575 */
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 *
 * Same zero-then-restore pattern as crypto_unregister(), applied to
 * every algorithm at once.  Returns 0 or EINVAL for a bad driverid.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
605
606 /*
607 * Clear blockage on a driver. The what parameter indicates whether
608 * the driver is now ready for cryptop's and/or cryptokop's.
609 */
610 int
611 crypto_unblock(u_int32_t driverid, int what)
612 {
613 struct cryptocap *cap;
614 int needwakeup, err, s;
615
616 s = splcrypto();
617 cap = crypto_checkdriver(driverid);
618 if (cap != NULL) {
619 needwakeup = 0;
620 if (what & CRYPTO_SYMQ) {
621 needwakeup |= cap->cc_qblocked;
622 cap->cc_qblocked = 0;
623 }
624 if (what & CRYPTO_ASYMQ) {
625 needwakeup |= cap->cc_kqblocked;
626 cap->cc_kqblocked = 0;
627 }
628 if (needwakeup) {
629 setsoftcrypto(softintr_cookie);
630 }
631 err = 0;
632 } else
633 err = EINVAL;
634 splx(s);
635
636 return err;
637 }
638
639 /*
640 * Dispatch a crypto request to a driver or queue
641 * it, to be processed by the kernel thread.
642 */
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Without CRYPTO_F_BATCH the op is handed straight to the driver; an
 * ERESTART reply marks the driver blocked and requeues the op at the
 * head.  With CRYPTO_F_BATCH the op is appended to crp_q and the
 * software interrupt is scheduled only on the empty->non-empty
 * transition (a later insert finds the swi already pending).
 * Returns the driver's result or 0 when queued.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread. This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
704
705 /*
706 * Add an asymetric crypto request to a queue,
707 * to be processed by the kernel thread.
708 */
/*
 * Add an asymetric crypto request to a queue,
 * to be processed by the kernel thread.
 *
 * Mirrors the non-batch path of crypto_dispatch(): try the driver
 * directly, mark it kq-blocked and requeue at the head on ERESTART;
 * if the driver is unknown or already blocked, append to crp_kq for
 * the swi thread.  Returns the driver result or 0 when queued.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked (or unknown), just queue the op
		 * until it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
743
744 /*
745 * Dispatch an assymetric crypto request to the appropriate crypto devices.
746 */
/*
 * Dispatch an assymetric crypto request to the appropriate crypto devices.
 *
 * Picks the first driver supporting krp->krp_op (skipping software
 * drivers when crypto_devallowsoft == 0) and runs its kprocess
 * callback.  Any error -- including ENODEV when no driver matches --
 * is reported through krp->krp_status + crypto_kdone(); the function
 * itself returns 0 in those cases, so only the driver callback can
 * yield a nonzero (e.g. ERESTART) return.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
786
787 #ifdef CRYPTO_TIMING
788 static void
789 crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
790 {
791 struct timespec now, t;
792
793 nanouptime(&now);
794 t.tv_sec = now.tv_sec - tv->tv_sec;
795 t.tv_nsec = now.tv_nsec - tv->tv_nsec;
796 if (t.tv_nsec < 0) {
797 t.tv_sec--;
798 t.tv_nsec += 1000000000;
799 }
800 timespecadd(&ts->acc, &t, &t);
801 if (timespeccmp(&t, &ts->min, <))
802 ts->min = t;
803 if (timespeccmp(&t, &ts->max, >))
804 ts->max = t;
805 ts->count++;
806
807 *tv = now;
808 }
809 #endif
810
811 /*
812 * Dispatch a crypto request to the appropriate crypto devices.
813 */
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 *
 * If the session's driver has unregistered (no cc_process), the
 * descriptor chain is re-linked into a cryptoini chain, a replacement
 * session is opened on another driver, and the op completes with
 * EAGAIN so the caller resubmits against the migrated session.
 * Otherwise the driver's process callback runs and its return value
 * (possibly ERESTART) is passed back to the dispatcher.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver mid-teardown: release our session reference. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
870
871 /*
872 * Release a set of crypto descriptors.
873 */
874 void
875 crypto_freereq(struct cryptop *crp)
876 {
877 struct cryptodesc *crd;
878 int s;
879
880 if (crp == NULL)
881 return;
882
883 s = splcrypto();
884
885 while ((crd = crp->crp_desc) != NULL) {
886 crp->crp_desc = crd->crd_next;
887 pool_put(&cryptodesc_pool, crd);
888 }
889
890 pool_put(&cryptop_pool, crp);
891 splx(s);
892 }
893
894 /*
895 * Acquire a set of crypto descriptors.
896 */
/*
 * Acquire a set of crypto descriptors.
 *
 * Allocates one cryptop plus 'num' zeroed cryptodesc entries linked
 * (LIFO) onto crp_desc.  The pools are created lazily on first use,
 * protected by splcrypto.  Allocation is non-blocking (pool_get flag 0);
 * on any failure everything already obtained is released via
 * crypto_freereq() and NULL is returned.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			/* NB: crypto_freereq re-enters splcrypto; harmless. */
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
937
938 /*
939 * Invoke the callback on behalf of the driver.
940 */
/*
 * Invoke the callback on behalf of the driver.
 *
 * Normal path: append the finished op to crp_ret_q and wake the
 * cryptoret thread (only on the empty->non-empty transition, since a
 * pending wakeup covers later inserts).  On non-NetBSD builds a
 * CRYPTO_F_CBIMM op is called back directly instead -- disabled here
 * because of the wakeup-before-tsleep race described below.
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
	 * has done its tsleep().
	 */
#ifndef __NetBSD__
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly. This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else
#endif /* __NetBSD__ */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed. Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
995
996 /*
997 * Invoke the callback on behalf of the driver.
998 */
/*
 * Invoke the callback on behalf of the driver (key-op variant).
 *
 * Queues the finished cryptkop on crp_ret_kq for the cryptoret thread.
 * NB: the wakeup channel is &crp_ret_q (not crp_ret_kq) because
 * cryptoret tsleep()s on &crp_ret_q and drains both return queues.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed. Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}
1019
1020 int
1021 crypto_getfeat(int *featp)
1022 {
1023 int hid, kalg, feat = 0;
1024 int s;
1025
1026 s = splcrypto();
1027
1028 if (crypto_userasymcrypto == 0)
1029 goto out;
1030
1031 for (hid = 0; hid < crypto_drivers_num; hid++) {
1032 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1033 crypto_devallowsoft == 0) {
1034 continue;
1035 }
1036 if (crypto_drivers[hid].cc_kprocess == NULL)
1037 continue;
1038 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1039 if ((crypto_drivers[hid].cc_kalg[kalg] &
1040 CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1041 feat |= 1 << kalg;
1042 }
1043 out:
1044 splx(s);
1045 *featp = feat;
1046 return (0);
1047 }
1048
1049 /*
1050 * Software interrupt thread to dispatch crypto requests.
1051 */
1052 static void
1053 cryptointr(void)
1054 {
1055 struct cryptop *crp, *submit;
1056 struct cryptkop *krp;
1057 struct cryptocap *cap;
1058 int result, hint, s;
1059
1060 printf("crypto softint\n");
1061 cryptostats.cs_intrs++;
1062 s = splcrypto();
1063 do {
1064 /*
1065 * Find the first element in the queue that can be
1066 * processed and look-ahead to see if multiple ops
1067 * are ready for the same driver.
1068 */
1069 submit = NULL;
1070 hint = 0;
1071 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1072 u_int32_t hid = SESID2HID(crp->crp_sid);
1073 cap = crypto_checkdriver(hid);
1074 if (cap == NULL || cap->cc_process == NULL) {
1075 /* Op needs to be migrated, process it. */
1076 if (submit == NULL)
1077 submit = crp;
1078 break;
1079 }
1080 if (!cap->cc_qblocked) {
1081 if (submit != NULL) {
1082 /*
1083 * We stop on finding another op,
1084 * regardless whether its for the same
1085 * driver or not. We could keep
1086 * searching the queue but it might be
1087 * better to just use a per-driver
1088 * queue instead.
1089 */
1090 if (SESID2HID(submit->crp_sid) == hid)
1091 hint = CRYPTO_HINT_MORE;
1092 break;
1093 } else {
1094 submit = crp;
1095 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1096 break;
1097 /* keep scanning for more are q'd */
1098 }
1099 }
1100 }
1101 if (submit != NULL) {
1102 TAILQ_REMOVE(&crp_q, submit, crp_next);
1103 result = crypto_invoke(submit, hint);
1104 if (result == ERESTART) {
1105 /*
1106 * The driver ran out of resources, mark the
1107 * driver ``blocked'' for cryptop's and put
1108 * the request back in the queue. It would
1109 * best to put the request back where we got
1110 * it but that's hard so for now we put it
1111 * at the front. This should be ok; putting
1112 * it at the end does not work.
1113 */
1114 /* XXX validate sid again? */
1115 crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1116 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1117 cryptostats.cs_blocks++;
1118 }
1119 }
1120
1121 /* As above, but for key ops */
1122 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1123 cap = crypto_checkdriver(krp->krp_hid);
1124 if (cap == NULL || cap->cc_kprocess == NULL) {
1125 /* Op needs to be migrated, process it. */
1126 break;
1127 }
1128 if (!cap->cc_kqblocked)
1129 break;
1130 }
1131 if (krp != NULL) {
1132 TAILQ_REMOVE(&crp_kq, krp, krp_next);
1133 result = crypto_kinvoke(krp, 0);
1134 if (result == ERESTART) {
1135 /*
1136 * The driver ran out of resources, mark the
1137 * driver ``blocked'' for cryptkop's and put
1138 * the request back in the queue. It would
1139 * best to put the request back where we got
1140 * it but that's hard so for now we put it
1141 * at the front. This should be ok; putting
1142 * it at the end does not work.
1143 */
1144 /* XXX validate sid again? */
1145 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1146 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1147 cryptostats.cs_kblocks++;
1148 }
1149 }
1150 } while (submit != NULL || krp != NULL);
1151 splx(s);
1152 }
1153
1154 /*
1155 * Kernel thread to do callbacks.
1156 */
/*
 * Kernel thread to do callbacks.
 *
 * Loops forever: pops one completed op from each return queue, drops
 * the spl while running the user callbacks (so the driver ISRs are not
 * delayed), then re-raises it; sleeps on &crp_ret_q when both queues
 * are empty (crypto_done/crypto_kdone wake this channel).
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			/* NB: counts wakeups, not ops returned. */
			cryptostats.cs_rets++;
		}
	}
}
1200
1201 static void
1203 deferred_crypto_thread(void *arg)
1204 {
1205 int error;
1206
1207 error = kthread_create1((void (*)(void*)) cryptoret, NULL,
1208 &cryptoproc, "cryptoret");
1209 if (error) {
1210 printf("crypto_init: cannot start cryptoret thread; error %d",
1211 error);
1212 crypto_destroy();
1213 }
1214 }
1215
1216 #ifdef __FreeBSD__
1217 /*
1218 * Initialization code, both for static and dynamic loading.
1219 */
1220 static int
1221 crypto_modevent(module_t mod, int type, void *unused)
1222 {
1223 int error = EINVAL;
1224
1225 switch (type) {
1226 case MOD_LOAD:
1227 error = crypto_init();
1228 if (error == 0 && bootverbose)
1229 printf("crypto: <crypto core>\n");
1230 break;
1231 case MOD_UNLOAD:
1232 /*XXX disallow if active sessions */
1233 error = 0;
1234 crypto_destroy();
1235 break;
1236 }
1237 return error;
1238 }
1239 static moduledata_t crypto_mod = {
1240 "crypto",
1241 crypto_modevent,
1242 0
1243 };
1244
1245 MODULE_VERSION(crypto, 1);
1246 DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1247 #endif /* __FreeBSD__ */
1248
1249
1250