/*	$NetBSD: crypto.c,v 1.27 2008/04/10 22:48:42 tls Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.27 2008/04/10 22:48:42 tls Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>

#include "opt_ocf.h"
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

kcondvar_t cryptoret_cv;
kmutex_t crypto_mtx;

/* below are kludges for residual code written to FreeBSD interfaces */
#define	SWI_CRYPTO 17
#define	register_swi(lvl, fn)	\
	softint_establish(SOFTINT_NET, (void (*)(void *))fn, NULL)
#define	unregister_swi(lvl, fn)	softint_disestablish(softintr_cookie)
#define	setsoftcrypto(x)	softint_schedule(x)

#define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
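/*
 * A session id packs the driver table index (hid) into the upper 32 bits
 * and the driver-private session id (lid) into the lower 32 bits; see
 * crypto_newsession() below.  E.g. hid 2 with lid 7 yields the sid
 * 0x0000000200000007.
 */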

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct cryptocap *crypto_drivers;
static	int crypto_drivers_num;
static	void *softintr_cookie;
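
/*
 * An illustrative registration sequence for a hypothetical driver with
 * softc "sc" and callbacks mydrv_newsession/mydrv_freesession/mydrv_process
 * (placeholder names, not part of this file):
 *
 *	int32_t drvid = crypto_get_driverid(0);
 *	if (drvid >= 0)
 *		crypto_register(drvid, CRYPTO_DES_CBC, 0, 0,
 *		    mydrv_newsession, mydrv_freesession,
 *		    mydrv_process, sc);
 */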

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static	TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
		TAILQ_HEAD_INITIALIZER(crp_q);
static	TAILQ_HEAD(,cryptkop) crp_kq =
		TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static	TAILQ_HEAD(crprethead, cryptop) crp_ret_q =	/* callback queues */
		TAILQ_HEAD_INITIALIZER(crp_ret_q);
static	TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq =
		TAILQ_HEAD_INITIALIZER(crp_ret_kq);

/*
 * XXX these functions are ghastly hacks for when the submission
 * XXX routines discover that a request which was not marked CBIMM
 * XXX is already done, and must be yanked from the retq (where
 * XXX _done() put it), as cryptoret won't get the chance.  The queue
 * XXX is walked backwards as the request is generally the last one
 * XXX queued.
 *
 * call with the lock held, or else.
 */
int
crypto_ret_q_remove(struct cryptop *crp)
{
	struct cryptop *acrp;

	TAILQ_FOREACH_REVERSE(acrp, &crp_ret_q, crprethead, crp_next) {
		if (acrp == crp) {
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
			crp->crp_flags &= (~CRYPTO_F_ONRETQ);
			return 1;
		}
	}
	return 0;
}

int
crypto_ret_kq_remove(struct cryptkop *krp)
{
	struct cryptkop *akrp;

	TAILQ_FOREACH_REVERSE(akrp, &crp_ret_kq, krprethead, krp_next) {
		if (akrp == krp) {
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
			krp->krp_flags &= (~CRYPTO_F_ONRETQ);
			return 1;
		}
	}
	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is sysctl'able (kern.cryptodevallowsoft below) and
 * controls access to hardware versus software transforms as follows:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */
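
/*
 * For example, given the semantics described above, userlevel requests
 * could be forced to software transforms via the sysctl node created
 * below:
 *
 *	sysctl -w kern.cryptodevallowsoft=-1
 */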

SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) which call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to from a software interrupt, which clients would have
 * to block).
 *
 * This scheme is not intended for SMP machines.
 */
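
/*
 * In sketch form, a client supplies a callback and submits a request;
 * the callback name here is hypothetical:
 *
 *	static int
 *	myproto_cb(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype == EAGAIN)
 *			return crypto_dispatch(crp);	(session migrated)
 *		... consume the result, then crypto_freereq(crp) ...
 *		return 0;
 *	}
 *
 * crp->crp_callback is set to myproto_cb before crypto_dispatch(); the
 * cryptoret thread later runs it at thread context, or crypto_done()
 * calls it directly when CRYPTO_F_CBIMM is set.
 */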
static	void cryptointr(void);		/* swi thread to dispatch ops */
static	void cryptoret(void);		/* kernel thread for callbacks */
static	struct lwp *cryptothread;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
#endif

static int
crypto_init0(void)
{
	int error;

	mutex_init(&crypto_mtx, MUTEX_DEFAULT, IPL_NET);
	cv_init(&cryptoret_cv, "crypto_wait");
	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
	    0, "cryptop", NULL, IPL_NET);
	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
	    0, "cryptodesc", NULL, IPL_NET);
	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
	    0, "cryptkop", NULL, IPL_NET);

	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return 0;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    (void (*)(void *))cryptoret, NULL, &cryptothread, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
		    error);
		crypto_destroy();
	}

	return 0;
}

void
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}

static void
crypto_destroy(void)
{
	/* XXX no wait to reclaim zones */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	unregister_swi(SWI_CRYPTO, cryptointr);
}

/*
 * Create a new session.  Must be called with crypto_mtx held.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	KASSERT(mutex_owned(&crypto_mtx));

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session
			 * XXX layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
			    crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	return err;
}
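
/*
 * A minimal calling sketch for a single-transform session (key setup
 * and the remaining cryptoini fields are elided):
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *	int error;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_DES_CBC;
 *	...
 *	mutex_spin_enter(&crypto_mtx);
 *	error = crypto_newsession(&sid, &cri, 0);	(0: hw or sw)
 *	mutex_spin_exit(&crypto_mtx);
 */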

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).  Must be called with crypto_mtx mutex held.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;

	KASSERT(mutex_owned(&crypto_mtx));

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession) {
		err = crypto_drivers[hid].cc_freesession(
		    crypto_drivers[hid].cc_arg, sid);
	} else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	crypto_init();		/* XXX oh, this is foul! */

	mutex_spin_enter(&crypto_mtx);
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_spin_exit(&crypto_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			mutex_spin_exit(&crypto_mtx);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_spin_exit(&crypto_mtx);

	return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_spin_enter(&crypto_mtx);

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid, kalg, flags);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_spin_exit(&crypto_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t *, struct cryptoini *),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	mutex_spin_enter(&crypto_mtx);

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid, alg, flags, maxoplen);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_spin_exit(&crypto_mtx);
	return err;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	mutex_spin_enter(&crypto_mtx);

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just
				 * mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_spin_exit(&crypto_mtx);
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 *
 * XXX careful.  Don't change this to call crypto_unregister() for each
 * XXX registered algorithm unless you drop the mutex across the calls;
 * XXX you can't take it recursively.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	mutex_spin_enter(&crypto_mtx);
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX;
		    i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_spin_exit(&crypto_mtx);
	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup, err;

	mutex_spin_enter(&crypto_mtx);
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		needwakeup = 0;
		if (what & CRYPTO_SYMQ) {
			needwakeup |= cap->cc_qblocked;
			cap->cc_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			needwakeup |= cap->cc_kqblocked;
			cap->cc_kqblocked = 0;
		}
		err = 0;
		mutex_spin_exit(&crypto_mtx);
		if (needwakeup)
			setsoftcrypto(softintr_cookie);
	} else {
		err = EINVAL;
		mutex_spin_exit(&crypto_mtx);
	}

	return err;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int result;

	mutex_spin_enter(&crypto_mtx);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			mutex_spin_exit(&crypto_mtx);
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				mutex_spin_enter(&crypto_mtx);
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
				mutex_spin_exit(&crypto_mtx);
			}
			goto out_released;
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty) {
			mutex_spin_exit(&crypto_mtx);
			setsoftcrypto(softintr_cookie);
			result = 0;
			goto out_released;
		}

		result = 0;
	}

	mutex_spin_exit(&crypto_mtx);
out_released:
	return result;
}
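
/*
 * A minimal submission sketch, reusing the hypothetical myproto_cb
 * from above (descriptor/buffer setup via fields such as crp_buf,
 * crp_ilen, and the crd fields from cryptodev.h is abbreviated):
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return ENOMEM;
 *	crp->crp_sid = sid;			(from crypto_newsession)
 *	crp->crp_callback = myproto_cb;
 *	... fill crp->crp_desc and buffer fields ...
 *	error = crypto_dispatch(crp);
 */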

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int result;

	mutex_spin_enter(&crypto_mtx);
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		mutex_spin_exit(&crypto_mtx);
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptkop's and put
			 * the op on the queue.
			 */
			mutex_spin_enter(&crypto_mtx);
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
			mutex_spin_exit(&crypto_mtx);
		}
	} else {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
		mutex_spin_exit(&crypto_mtx);
	}

	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		pool_put(&cryptkop_pool, krp);
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
		    crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void *, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		mutex_enter(&crypto_mtx);
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
		mutex_exit(&crypto_mtx);
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		mutex_enter(&crypto_mtx);
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		mutex_exit(&crypto_mtx);

		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF(("calling process for %08x\n", (uint32_t)crp));
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}
	pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));
	cv_init(&crp->crp_cv, "crydev");

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif

	crp->crp_flags |= CRYPTO_F_DONE;

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		mutex_spin_enter(&crypto_mtx);
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		DPRINTF(("crypto_done: queueing %08x\n", (uint32_t)crp));
		crp->crp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		if (wasempty) {
			DPRINTF(("crypto_done: waking cryptoret, %08x "
			    "hit empty queue.\n", (uint32_t)crp));
			cv_signal(&cryptoret_cv);
		}
		mutex_spin_exit(&crypto_mtx);
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		mutex_spin_enter(&crypto_mtx);
		wasempty = TAILQ_EMPTY(&crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
		if (wasempty)
			cv_signal(&cryptoret_cv);
		mutex_spin_exit(&crypto_mtx);
	}
}

int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	mutex_spin_enter(&crypto_mtx);

	if (crypto_userasymcrypto == 0)
		goto out;

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}
out:
	mutex_spin_exit(&crypto_mtx);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint;

	printf("crypto softint\n");
	cryptostats.cs_intrs++;
	mutex_spin_enter(&crypto_mtx);
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether it's for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning; more may be queued */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			mutex_spin_exit(&crypto_mtx);
			result = crypto_invoke(submit, hint);
			/* we must retake the mutex here, as the TAILQ op
			   and kinvoke below may need it.  sigh. */
			mutex_spin_enter(&crypto_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			mutex_spin_exit(&crypto_mtx);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			mutex_spin_enter(&crypto_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
	} while (submit != NULL || krp != NULL);
	mutex_spin_exit(&crypto_mtx);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;

	mutex_spin_enter(&crypto_mtx);
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop the mutex before calling any callbacks. */
		if (crp == NULL && krp == NULL) {
			cryptostats.cs_rets++;
			cv_wait(&cryptoret_cv, &crypto_mtx);
			continue;
		}

		mutex_spin_exit(&crypto_mtx);

		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&crypto_mtx);
	}
}