/*	$NetBSD: crypto.c,v 1.20 2007/07/09 21:11:31 ad Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3 /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.20 2007/07/09 21:11:31 ad Exp $");
28
29 /* XXX FIXME: should be defopt'ed */
30 #define CRYPTO_TIMING /* enable cryptop timing stuff */
31
32 #include <sys/param.h>
33 #include <sys/reboot.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/pool.h>
38 #include <opencrypto/cryptodev.h>
39 #include <sys/kthread.h>
40 #include <sys/once.h>
41 #include <sys/sysctl.h>
42
43 #include <opencrypto/xform.h> /* XXX for M_XDATA */
44
45 #ifdef __NetBSD__
46 #define splcrypto splnet
47 /* below is kludges to check whats still missing */
48 #define SWI_CRYPTO 17
49 #define register_swi(lvl, fn) \
50 softintr_establish(IPL_SOFTNET, (void (*)(void*))fn, NULL)
51 #define unregister_swi(lvl, fn) softintr_disestablish(softintr_cookie)
52 #define setsoftcrypto(x) softintr_schedule(x)
53 #endif
54
55 #define SESID2HID(sid) (((sid) >> 32) & 0xffffffff)
56
57 /*
58 * Crypto drivers register themselves by allocating a slot in the
59 * crypto_drivers table with crypto_get_driverid() and then registering
60 * each algorithm they support with crypto_register() and crypto_kregister().
61 */
62 static struct cryptocap *crypto_drivers;
63 static int crypto_drivers_num;
64 static void* softintr_cookie;
65
66 /*
67 * There are two queues for crypto requests; one for symmetric (e.g.
68 * cipher) operations and one for asymmetric (e.g. MOD) operations.
69 * See below for how synchronization is handled.
70 */
71 static TAILQ_HEAD(,cryptop) crp_q = /* request queues */
72 TAILQ_HEAD_INITIALIZER(crp_q);
73 static TAILQ_HEAD(,cryptkop) crp_kq =
74 TAILQ_HEAD_INITIALIZER(crp_kq);
75
76 /*
77 * There are two queues for processing completed crypto requests; one
78 * for the symmetric and one for the asymmetric ops. We only need one
79 * but have two to avoid type futzing (cryptop vs. cryptkop). See below
80 * for how synchronization is handled.
81 */
82 static TAILQ_HEAD(,cryptop) crp_ret_q = /* callback queues */
83 TAILQ_HEAD_INITIALIZER(crp_ret_q);
84 static TAILQ_HEAD(,cryptkop) crp_ret_kq =
85 TAILQ_HEAD_INITIALIZER(crp_ret_kq);
86
87 /*
88 * Crypto op and desciptor data structures are allocated
89 * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) .
90 */
91 struct pool cryptop_pool;
92 struct pool cryptodesc_pool;
93 int crypto_pool_initialized = 0;
94
95 int crypto_usercrypto = 1; /* userland may open /dev/crypto */
96 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
97 /*
98 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
99 * access to hardware versus software transforms as below:
100 *
101 * crypto_devallowsoft < 0: Force userlevel requests to use software
102 * transforms, always
103 * crypto_devallowsoft = 0: Use hardware if present, grant userlevel
104 * requests for non-accelerated transforms
105 * (handling the latter in software)
106 * crypto_devallowsoft > 0: Allow user requests only for transforms which
107 * are hardware-accelerated.
108 */
109 int crypto_devallowsoft = 1; /* only use hardware crypto */
110
111 #ifdef __FreeBSD__
112 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
113 &crypto_usercrypto, 0,
114 "Enable/disable user-mode access to crypto support");
115 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
116 &crypto_userasymcrypto, 0,
117 "Enable/disable user-mode access to asymmetric crypto support");
118 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
119 &crypto_devallowsoft, 0,
120 "Enable/disable use of software asym crypto support");
121 #endif
#ifdef __NetBSD__
/*
 * Create the opencrypto sysctl nodes under kern:
 * kern.usercrypto, kern.userasymcrypto and kern.cryptodevallowsoft,
 * each a read-write int backed directly by the corresponding global.
 * The "kern" node is (re)declared first so the children can attach.
 */
SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
	/* Anchor node: must exist before the leaves below are created. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	/* Gate userland access to /dev/crypto (see crypto_usercrypto). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	/* Gate userland asymmetric (key) operations. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	/* Hardware-vs-software policy knob; semantics described above. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}
#endif
153
154 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
155
156 /*
157 * Synchronization: read carefully, this is non-trivial.
158 *
159 * Crypto requests are submitted via crypto_dispatch. Typically
160 * these come in from network protocols at spl0 (output path) or
161 * spl[,soft]net (input path).
162 *
163 * Requests are typically passed on the driver directly, but they
164 * may also be queued for processing by a software interrupt thread,
165 * cryptointr, that runs at splsoftcrypto. This thread dispatches
166 * the requests to crypto drivers (h/w or s/w) who call crypto_done
167 * when a request is complete. Hardware crypto drivers are assumed
168 * to register their IRQ's as network devices so their interrupt handlers
169 * and subsequent "done callbacks" happen at spl[imp,net].
170 *
171 * Completed crypto ops are queued for a separate kernel thread that
172 * handles the callbacks at spl0. This decoupling insures the crypto
173 * driver interrupt service routine is not delayed while the callback
174 * takes place and that callbacks are delivered after a context switch
175 * (as opposed to a software interrupt that clients must block).
176 *
177 * This scheme is not intended for SMP machines.
178 */
179 static void cryptointr(void); /* swi thread to dispatch ops */
180 static void cryptoret(void); /* kernel thread for callbacks*/
181 static struct lwp *cryptothread;
182 static void crypto_destroy(void);
183 static int crypto_invoke(struct cryptop *crp, int hint);
184 static int crypto_kinvoke(struct cryptkop *krp, int hint);
185
186 static struct cryptostats cryptostats;
187 static int crypto_timing = 0;
188
#ifdef __FreeBSD__
/*
 * FreeBSD-only sysctl glue for the statistics/timing globals.
 * Note: the original had the crypto_stats SYSCTL_STRUCT declared
 * twice, which would register the same OID twice; declare it once.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
198
199 static int
200 crypto_init0(void)
201 {
202 int error;
203
204 #ifdef __FreeBSD__
205 cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
206 cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
207 0, 0, 1);
208 if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
209 printf("crypto_init: cannot setup crypto zones\n");
210 return;
211 }
212 #endif
213
214 crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
215 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
216 if (crypto_drivers == NULL) {
217 printf("crypto_init: cannot malloc driver table\n");
218 return 0;
219 }
220 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
221
222 softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
223 #ifdef __FreeBSD__
224 error = kthread_create((void (*)(void *)) cryptoret, NULL,
225 &cryptothread, "cryptoret");
226 #else
227 error = kthread_create(PRI_NONE, 0, NULL, (void (*)(void*))cryptoret,
228 NULL, &cryptothread, "cryptoret");
229 #endif
230 if (error) {
231 printf("crypto_init: cannot start cryptoret thread; error %d",
232 error);
233 crypto_destroy();
234 }
235
236 return 0;
237 }
238
/*
 * Public entry point: perform one-time framework initialization.
 * Safe to call repeatedly and from multiple paths (drivers call it
 * from crypto_get_driverid()); RUN_ONCE guarantees crypto_init0()
 * executes at most once.  Note the crypto_init0() result is not
 * propagated to callers here.
 */
void
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}
246
247 static void
248 crypto_destroy(void)
249 {
250 /* XXX no wait to reclaim zones */
251 if (crypto_drivers != NULL)
252 free(crypto_drivers, M_CRYPTO_DATA);
253 unregister_swi(SWI_CRYPTO, cryptointr);
254 }
255
256 /*
257 * Create a new session.
258 */
/*
 * Create a new crypto session.
 *
 * sid:  out-parameter; on success holds the 64-bit session id:
 *       driver table index (hid) in the upper 32 bits, driver-local
 *       session id in the lower 32 (see SESID2HID()).
 * cri:  linked list (cri_next) of algorithm descriptors the session
 *       must support in full.
 * hard: >0 require a hardware driver, <0 require software, 0 accept
 *       either.
 *
 * Returns 0 on success, EINVAL if no usable driver was found, or
 * the error from the driver's cc_newsession() callback.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/*
		 * See if all the algorithms are supported.  After the
		 * loop, cr == NULL iff every entry in the chain matched.
		 */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;	/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
				crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Pack <hid, lid> into the 64-bit sid. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
331
332 /*
333 * Delete an existing session (or a reserved session on an unregistered
334 * driver).
335 */
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 *
 * Decrements the driver's session count, invokes the driver's
 * cc_freesession() callback if it has one, and — if the driver was
 * marked CRYPTOCAP_F_CLEANUP and this was its last session — zeroes
 * the capability slot so crypto_get_driverid() can reuse it.
 *
 * Returns 0 on success, EINVAL/ENOENT for a bad sid, or the driver
 * callback's error.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}
380
381 /*
382 * Return an unused driver id. Used by drivers prior to registering
383 * support for the algorithms they handle.
384 */
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 *
 * A free slot has no process callback, is not awaiting cleanup, and
 * has no sessions.  If the table is full it is doubled in size (the
 * old table is copied and freed under splcrypto).  The chosen slot is
 * reserved by setting cc_sessions = 1 ("Mark"); crypto_register()
 * clears it again once the driver actually registers an algorithm.
 *
 * Returns the driver index, or -1 on allocation failure/overflow.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	/* Make sure the framework itself is up (idempotent). */
	crypto_init();

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
437
438 static struct cryptocap *
439 crypto_checkdriver(u_int32_t hid)
440 {
441 if (crypto_drivers == NULL)
442 return NULL;
443 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
444 }
445
446 /*
447 * Register support for a key-related algorithm. This routine
448 * is called once for each algorithm supported a driver.
449 */
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported a driver.
 *
 * Records the capability in cc_kalg[] and, the first time through,
 * installs the driver's key-op callback and argument.
 * (NOTE(review): "CRK_ALGORITM_MIN" spelling comes from the
 * cryptodev.h header — confirm before "fixing" it here.)
 *
 * Returns 0 on success, EINVAL for a bad driver id or algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* First registration installs the callback; later ones
		 * only add algorithms. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
489
490 /*
491 * Register support for a non-key-related algorithm. This routine
492 * is called once for each such algorithm supported by a driver.
493 */
/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 *
 * Records the capability and max operation length, and on the first
 * call installs the driver's callbacks and clears the cc_sessions
 * reservation made by crypto_get_driverid() ("Unmark").
 *
 * Returns 0 on success, EINVAL for a bad driver id or algorithm.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* First registration installs callbacks and releases the
		 * slot reservation taken in crypto_get_driverid(). */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
541
542 /*
543 * Unregister a crypto driver. If there are pending sessions using it,
544 * leave enough information around so that subsequent calls using those
545 * sessions will correctly detect the driver has been unregistered and
546 * reroute requests.
547 */
/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 *
 * Removes one algorithm; if it was the driver's last algorithm the
 * whole capability slot is cleared.  The session count is preserved
 * across the bzero() (saved in `ses' first) and the slot is flagged
 * CRYPTOCAP_F_CLEANUP so crypto_freesession() can reclaim it later.
 *
 * Returns 0 on success, EINVAL if the driver/algorithm is unknown.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
587
588 /*
589 * Unregister all algorithms associated with a crypto driver.
590 * If there are pending sessions using it, leave enough information
591 * around so that subsequent calls using those sessions will
592 * correctly detect the driver has been unregistered and reroute
593 * requests.
594 */
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 *
 * Same reclamation protocol as crypto_unregister(): the session
 * count survives the bzero() and CRYPTOCAP_F_CLEANUP defers slot
 * reuse until the last session is freed.
 *
 * Returns 0 on success, EINVAL for an unknown driver id.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
624
625 /*
626 * Clear blockage on a driver. The what parameter indicates whether
627 * the driver is now ready for cryptop's and/or cryptokop's.
628 */
629 int
630 crypto_unblock(u_int32_t driverid, int what)
631 {
632 struct cryptocap *cap;
633 int needwakeup, err, s;
634
635 s = splcrypto();
636 cap = crypto_checkdriver(driverid);
637 if (cap != NULL) {
638 needwakeup = 0;
639 if (what & CRYPTO_SYMQ) {
640 needwakeup |= cap->cc_qblocked;
641 cap->cc_qblocked = 0;
642 }
643 if (what & CRYPTO_ASYMQ) {
644 needwakeup |= cap->cc_kqblocked;
645 cap->cc_kqblocked = 0;
646 }
647 if (needwakeup) {
648 setsoftcrypto(softintr_cookie);
649 }
650 err = 0;
651 } else
652 err = EINVAL;
653 splx(s);
654
655 return err;
656 }
657
658 /*
659 * Dispatch a crypto request to a driver or queue
660 * it, to be processed by the kernel thread.
661 */
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Without CRYPTO_F_BATCH the op is invoked immediately unless the
 * driver is blocked; with CRYPTO_F_BATCH it is always queued for the
 * soft interrupt, which can batch ops for the same driver.
 *
 * Returns 0 or the crypto_invoke() result.  NOTE(review): when the
 * driver returns ERESTART the op is re-queued but ERESTART is still
 * returned to the caller — confirm callers treat that as "queued".
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread. This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.  Only kick the softint on the
		 * empty->non-empty transition; it drains the queue.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
723
724 /*
725 * Add an asymetric crypto request to a queue,
726 * to be processed by the kernel thread.
727 */
/*
 * Add an asymetric crypto request to a queue,
 * to be processed by the kernel thread.
 *
 * Invokes the driver directly when it exists and is not blocked;
 * otherwise queues the op on crp_kq for the soft interrupt (which
 * also handles migration when the driver is gone).
 *
 * NOTE(review): like crypto_dispatch(), an ERESTART result is
 * returned to the caller even though the op was re-queued.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked (or unknown), just queue the op
		 * until it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
762
763 /*
764 * Dispatch an assymetric crypto request to the appropriate crypto devices.
765 */
/*
 * Dispatch an assymetric crypto request to the appropriate crypto devices.
 *
 * Scans the driver table for the first driver that supports krp_op
 * (skipping software drivers when crypto_devallowsoft == 0) and hands
 * the op to its cc_kprocess callback.  Errors (including ENODEV when
 * no driver matches) are delivered through krp->krp_status via
 * crypto_kdone(), not via the return value.
 *
 * Returns 0 normally; EINVAL only for a NULL/callback-less krp.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		/* Remember which driver got the op (used on requeue). */
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
805
#ifdef CRYPTO_TIMING
/*
 * Fold one timing sample into `ts': compute the elapsed time since
 * *tv, add it to the running total (acc), track min/max of the
 * per-sample delta, bump the sample count, and reset *tv to now so
 * the caller can chain measurements across pipeline stages.
 *
 * Fix: NetBSD's timespecadd(tsp, usp, vsp) is the three-argument
 * form (vsp = tsp + usp).  The original FreeBSD code used the
 * two-argument accumulate form; the ported call
 * timespecadd(&ts->acc, &t, &t) never updated the accumulator and
 * clobbered `t' before the min/max comparisons.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		/* Borrow from the seconds field. */
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &ts->acc);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif
829
830 /*
831 * Dispatch a crypto request to the appropriate crypto devices.
832 */
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 *
 * Validates the op, reaps CLEANUP-marked sessions, and either calls
 * the driver's cc_process callback or — if the driver has vanished —
 * migrates the session to another driver and completes the op with
 * EAGAIN so the caller resubmits it.
 *
 * Returns 0 (errors delivered via crp_etype/crypto_done), EINVAL for
 * a malformed op, or the driver's cc_process result (e.g. ERESTART).
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver awaiting cleanup: drop our session reference. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 * The CRD_INI members of the descriptor chain are linked
		 * into a cryptoini list so crypto_newsession() can
		 * re-establish an equivalent session elsewhere.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
889
890 /*
891 * Release a set of crypto descriptors.
892 */
893 void
894 crypto_freereq(struct cryptop *crp)
895 {
896 struct cryptodesc *crd;
897 int s;
898
899 if (crp == NULL)
900 return;
901
902 s = splcrypto();
903
904 while ((crd = crp->crp_desc) != NULL) {
905 crp->crp_desc = crd->crd_next;
906 pool_put(&cryptodesc_pool, crd);
907 }
908
909 pool_put(&cryptop_pool, crp);
910 splx(s);
911 }
912
913 /*
914 * Acquire a set of crypto descriptors.
915 */
/*
 * Acquire a set of crypto descriptors.
 *
 * Allocates a zeroed cryptop with `num' zeroed cryptodesc entries
 * chained (LIFO) on crp_desc.  The pools are initialized lazily on
 * first use, protected by splcrypto.  Returns NULL if either pool
 * is exhausted (partial allocations are released via
 * crypto_freereq()).
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	/* Lazy one-time pool setup, serialized by splcrypto. */
	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
			  0, "cryptop", NULL, IPL_NET);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
			  0, "cryptodesc", NULL, IPL_NET);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			/* Out of descriptors: undo what we built so far. */
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		/* Push onto the front of the chain. */
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
956
957 /*
958 * Invoke the callback on behalf of the driver.
959 */
/*
 * Invoke the callback on behalf of the driver.
 *
 * Called by drivers when a symmetric op completes.  On non-NetBSD,
 * CRYPTO_F_CBIMM runs the callback inline; on NetBSD the op is
 * always queued on crp_ret_q for the cryptoret thread (see the
 * wake-before-tsleep note below).
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
	 * has done its tsleep().
	 */
#ifndef __NetBSD__
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly. This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else
#endif /* __NetBSD__ */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed. Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Only wake cryptoret on the empty->non-empty edge. */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
1014
1015 /*
1016 * Invoke the callback on behalf of the driver.
1017 */
/*
 * Invoke the callback on behalf of the driver.
 *
 * Key-op counterpart of crypto_done(): queue the completed cryptkop
 * on crp_ret_kq for the cryptoret thread.  The wakeup is on
 * &crp_ret_q (not crp_ret_kq) because cryptoret sleeps on that one
 * channel for both return queues.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed. Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}
1038
1039 int
1040 crypto_getfeat(int *featp)
1041 {
1042 int hid, kalg, feat = 0;
1043 int s;
1044
1045 s = splcrypto();
1046
1047 if (crypto_userasymcrypto == 0)
1048 goto out;
1049
1050 for (hid = 0; hid < crypto_drivers_num; hid++) {
1051 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1052 crypto_devallowsoft == 0) {
1053 continue;
1054 }
1055 if (crypto_drivers[hid].cc_kprocess == NULL)
1056 continue;
1057 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1058 if ((crypto_drivers[hid].cc_kalg[kalg] &
1059 CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1060 feat |= 1 << kalg;
1061 }
1062 out:
1063 splx(s);
1064 *featp = feat;
1065 return (0);
1066 }
1067
1068 /*
1069 * Software interrupt thread to dispatch crypto requests.
1070 */
1071 static void
1072 cryptointr(void)
1073 {
1074 struct cryptop *crp, *submit;
1075 struct cryptkop *krp;
1076 struct cryptocap *cap;
1077 int result, hint, s;
1078
1079 printf("crypto softint\n");
1080 cryptostats.cs_intrs++;
1081 s = splcrypto();
1082 do {
1083 /*
1084 * Find the first element in the queue that can be
1085 * processed and look-ahead to see if multiple ops
1086 * are ready for the same driver.
1087 */
1088 submit = NULL;
1089 hint = 0;
1090 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1091 u_int32_t hid = SESID2HID(crp->crp_sid);
1092 cap = crypto_checkdriver(hid);
1093 if (cap == NULL || cap->cc_process == NULL) {
1094 /* Op needs to be migrated, process it. */
1095 if (submit == NULL)
1096 submit = crp;
1097 break;
1098 }
1099 if (!cap->cc_qblocked) {
1100 if (submit != NULL) {
1101 /*
1102 * We stop on finding another op,
1103 * regardless whether its for the same
1104 * driver or not. We could keep
1105 * searching the queue but it might be
1106 * better to just use a per-driver
1107 * queue instead.
1108 */
1109 if (SESID2HID(submit->crp_sid) == hid)
1110 hint = CRYPTO_HINT_MORE;
1111 break;
1112 } else {
1113 submit = crp;
1114 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1115 break;
1116 /* keep scanning for more are q'd */
1117 }
1118 }
1119 }
1120 if (submit != NULL) {
1121 TAILQ_REMOVE(&crp_q, submit, crp_next);
1122 result = crypto_invoke(submit, hint);
1123 if (result == ERESTART) {
1124 /*
1125 * The driver ran out of resources, mark the
1126 * driver ``blocked'' for cryptop's and put
1127 * the request back in the queue. It would
1128 * best to put the request back where we got
1129 * it but that's hard so for now we put it
1130 * at the front. This should be ok; putting
1131 * it at the end does not work.
1132 */
1133 /* XXX validate sid again? */
1134 crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1135 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1136 cryptostats.cs_blocks++;
1137 }
1138 }
1139
1140 /* As above, but for key ops */
1141 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1142 cap = crypto_checkdriver(krp->krp_hid);
1143 if (cap == NULL || cap->cc_kprocess == NULL) {
1144 /* Op needs to be migrated, process it. */
1145 break;
1146 }
1147 if (!cap->cc_kqblocked)
1148 break;
1149 }
1150 if (krp != NULL) {
1151 TAILQ_REMOVE(&crp_kq, krp, krp_next);
1152 result = crypto_kinvoke(krp, 0);
1153 if (result == ERESTART) {
1154 /*
1155 * The driver ran out of resources, mark the
1156 * driver ``blocked'' for cryptkop's and put
1157 * the request back in the queue. It would
1158 * best to put the request back where we got
1159 * it but that's hard so for now we put it
1160 * at the front. This should be ok; putting
1161 * it at the end does not work.
1162 */
1163 /* XXX validate sid again? */
1164 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1165 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1166 cryptostats.cs_kblocks++;
1167 }
1168 }
1169 } while (submit != NULL || krp != NULL);
1170 splx(s);
1171 }
1172
1173 /*
1174 * Kernel thread to do callbacks.
1175 */
/*
 * Kernel thread to do callbacks.
 *
 * Dequeues completed ops from crp_ret_q/crp_ret_kq (at splcrypto)
 * and runs their callbacks with the ipl lowered, so driver interrupt
 * service is never delayed by a slow callback.  Sleeps on &crp_ret_q
 * when both queues are empty; crypto_done()/crypto_kdone() wake it
 * on that channel.  Never returns.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			/* Both queues empty: wait for more completions.
			 * (Note cs_rets is bumped per wakeup, not per op.) */
			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			cryptostats.cs_rets++;
		}
	}
}
1219
1220
#ifdef __FreeBSD__
/*
 * Initialization code, both for static and dynamic loading.
 *
 * NOTE(review): this FreeBSD-only branch assigns
 * `error = crypto_init();' but crypto_init() in this file returns
 * void, so this block would not compile if __FreeBSD__ were defined
 * — presumably it tracks a FreeBSD variant of crypto_init() that
 * returns int; confirm before enabling.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	return error;
}

/* Module glue: register crypto_modevent with the module system. */
static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};

MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
#endif /* __FreeBSD__ */
1254
1255
1256