/*	$NetBSD: crypto.c,v 1.74 2017/05/24 09:57:36 knakahara Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.74 2017/05/24 09:57:36 knakahara Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

static kmutex_t crypto_q_mtx;
static kmutex_t crypto_ret_q_mtx;
static kcondvar_t cryptoret_cv;

/* Below are kludges for residual code written to FreeBSD interfaces. */
#define	SWI_CRYPTO 17
#define	register_swi(lvl, fn) \
	softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, (void (*)(void *))fn, NULL)
#define	unregister_swi(lvl, fn) softint_disestablish(softintr_cookie)
#define	setsoftcrypto(x)		\
	do {				\
		kpreempt_disable();	\
		softint_schedule(x);	\
		kpreempt_enable();	\
	} while (0)

int crypto_ret_q_check(struct cryptop *);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static kmutex_t crypto_drv_mtx;
static struct cryptocap *crypto_drivers;
static int crypto_drivers_num;
static void *softintr_cookie;
static int crypto_exit_flag;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
		TAILQ_HEAD_INITIALIZER(crp_q);
static TAILQ_HEAD(,cryptkop) crp_kq =
		TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static TAILQ_HEAD(crprethead, cryptop) crp_ret_q =	/* callback queues */
		TAILQ_HEAD_INITIALIZER(crp_ret_q);
static TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq =
		TAILQ_HEAD_INITIALIZER(crp_ret_kq);

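/*
 * Accounting for the return queues.  Each queue tracks its current
 * length, a count of requests dropped because the queue was full, and
 * a (sysctl-tunable) maximum length; all are maintained via the
 * macros below.
 */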
#define	DEFINIT_CRYPTO_Q_LEN(name)		\
	static int crypto_##name##_len = 0

#define	DEFINIT_CRYPTO_Q_DROPS(name)		\
	static int crypto_##name##_drops = 0

#define	CRYPTO_Q_MAXLEN 0
#define	DEFINIT_CRYPTO_Q_MAXLEN(name)		\
	static int crypto_##name##_maxlen = CRYPTO_Q_MAXLEN

#define	CRYPTO_Q_INC(name)			\
	do {					\
		crypto_##name##_len++;		\
	} while (0)

#define	CRYPTO_Q_DEC(name)			\
	do {					\
		crypto_##name##_len--;		\
	} while (0)

#define	CRYPTO_Q_INC_DROPS(name)		\
	do {					\
		crypto_##name##_drops++;	\
	} while (0)

#define	CRYPTO_Q_IS_FULL(name)					\
	(crypto_##name##_maxlen > 0				\
	    && (crypto_##name##_len > crypto_##name##_maxlen))
/*
 * current queue length.
 */
DEFINIT_CRYPTO_Q_LEN(crp_ret_q);
DEFINIT_CRYPTO_Q_LEN(crp_ret_kq);

/*
 * queue dropped count.
 */
DEFINIT_CRYPTO_Q_DROPS(crp_ret_q);
DEFINIT_CRYPTO_Q_DROPS(crp_ret_kq);

/*
 * queue length limit.
 * default value is 0.  <=0 means unlimited.
 */
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_q);
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_kq);

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * need to make percpu?
 */
static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "usercrypto",
		SYSCTL_DESCR("Enable/disable user-mode access to "
		    "crypto support"),
		NULL, 0, &crypto_usercrypto, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "userasymcrypto",
		SYSCTL_DESCR("Enable/disable user-mode access to "
		    "asymmetric crypto support"),
		NULL, 0, &crypto_userasymcrypto, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "cryptodevallowsoft",
		SYSCTL_DESCR("Enable/disable use of software "
		    "asymmetric crypto support"),
		NULL, 0, &crypto_devallowsoft, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "opencrypto",
		SYSCTL_DESCR("opencrypto related entries"),
		NULL, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "crypto_ret_q",
		SYSCTL_DESCR("crypto_ret_q related entries"),
		NULL, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "len",
		SYSCTL_DESCR("Current queue length"),
		sysctl_opencrypto_q_len, 0,
		(void *)&crypto_crp_ret_q_len, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "drops",
		SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		sysctl_opencrypto_q_drops, 0,
		(void *)&crypto_crp_ret_q_drops, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxlen",
		SYSCTL_DESCR("Maximum allowed queue length"),
		sysctl_opencrypto_q_maxlen, 0,
		(void *)&crypto_crp_ret_q_maxlen, 0,
		CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retkqnode,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "crypto_ret_kq",
		SYSCTL_DESCR("crypto_ret_kq related entries"),
		NULL, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "len",
		SYSCTL_DESCR("Current queue length"),
		sysctl_opencrypto_q_len, 0,
		(void *)&crypto_crp_ret_kq_len, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "drops",
		SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		sysctl_opencrypto_q_drops, 0,
		(void *)&crypto_crp_ret_kq_drops, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxlen",
		SYSCTL_DESCR("Maximum allowed queue length"),
		sysctl_opencrypto_q_maxlen, 0,
		(void *)&crypto_crp_ret_kq_maxlen, 0,
		CTL_CREATE, CTL_EOL);
}
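
/*
 * The knobs created above can be inspected and tuned from userland
 * with sysctl(8), for example (values illustrative only):
 *
 *	sysctl kern.usercrypto
 *	sysctl -w opencrypto.crypto_ret_q.maxlen=128
 */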

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static void cryptointr(void);		/* swi thread to dispatch ops */
static void cryptoret(void);		/* kernel thread for callbacks */
static struct lwp *cryptothread;
static int crypto_destroy(bool);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

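/*
 * One-time initialization: set up the locks, condvar, pools and the
 * driver table, register the crypto software interrupt and start the
 * cryptoret kernel thread.
 */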
static int
crypto_init0(void)
{
	int error;

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&crypto_q_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&crypto_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
	cv_init(&cryptoret_cv, "crypto_w");
	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
	    0, "cryptop", NULL, IPL_NET);
	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
	    0, "cryptodesc", NULL, IPL_NET);
	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
	    0, "cryptkop", NULL, IPL_NET);

	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    (void (*)(void *))cryptoret, NULL, &cryptothread, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d\n",
		    error);
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

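/*
 * Public initialization entry point; safe to call repeatedly, the
 * underlying initialization runs only once.
 */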
int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

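/*
 * Tear the framework down.  When exit_kthread is true, first make the
 * cryptoret thread exit, refusing with EBUSY while requests or driver
 * sessions are still outstanding.
 */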
static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		mutex_spin_enter(&crypto_ret_q_mtx);

		/* if we have any in-progress requests, don't unload */
		if (!TAILQ_EMPTY(&crp_q) || !TAILQ_EMPTY(&crp_kq)) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}

		for (i = 0; i < crypto_drivers_num; i++)
			if (crypto_drivers[i].cc_sessions != 0)
				break;
		if (i < crypto_drivers_num) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}

		/* kick the cryptoret thread and wait for it to exit */
		crypto_exit_flag = 1;
		cv_signal(&cryptoret_cv);

		while (crypto_exit_flag != 0)
			cv_wait(&cryptoret_cv, &crypto_ret_q_mtx);
		mutex_spin_exit(&crypto_ret_q_mtx);
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	unregister_swi(SWI_CRYPTO, cryptointr);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	mutex_exit(&crypto_drv_mtx);

	pool_destroy(&cryptop_pool);
	pool_destroy(&cryptodesc_pool);
	pool_destroy(&cryptkop_pool);

	cv_destroy(&cryptoret_cv);

	mutex_destroy(&crypto_ret_q_mtx);
	mutex_destroy(&crypto_q_mtx);
	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0) {
				DPRINTF("alg %d not supported\n", cr->cri_alg);
				break;
			}

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session
			 * XXX layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;	/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
			    crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			} else {
				DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
				    hid, err);
			}
			goto done;
			/*break;*/
		}
	}
done:
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;

	mutex_enter(&crypto_drv_mtx);

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine the driver (hardware) ID. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession) {
		err = crypto_drivers[hid].cc_freesession(
		    crypto_drivers[hid].cc_arg, sid);
	} else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		memset(&crypto_drivers[hid], 0, sizeof(struct cryptocap));

done:
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}

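/*
 * Look up the driver table entry for the given driver ID; returns
 * NULL if the table is not allocated or the ID is out of range.
 */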
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid,
			    kalg,
			    flags
			);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid,
			    alg,
			    flags,
			    maxoplen
			);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_exit(&crypto_drv_mtx);
	return err;
}

static int
crypto_unregister_locked(u_int32_t driverid, int alg, bool all)
{
	int i;
	u_int32_t ses;
	struct cryptocap *cap;
	bool lastalg = true;

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	cap = crypto_checkdriver(driverid);
	if (cap == NULL || (!all && cap->cc_alg[alg] == 0))
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		memset(cap, 0, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;

	mutex_enter(&crypto_drv_mtx);
	err = crypto_unregister_locked(driverid, alg, false);
	mutex_exit(&crypto_drv_mtx);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;

	mutex_enter(&crypto_drv_mtx);
	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(driverid, i, true);
		if (err)
			break;
	}
	mutex_exit(&crypto_drv_mtx);

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	mutex_spin_enter(&crypto_q_mtx);
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		mutex_spin_exit(&crypto_q_mtx);
		return EINVAL;
	}

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	mutex_spin_exit(&crypto_q_mtx);
	if (needwakeup)
		setsoftcrypto(softintr_cookie);

	return 0;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid;
	int result;
	struct cryptocap *cap;

	KASSERT(crp != NULL);

	hid = CRYPTO_SESID2HID(crp->crp_sid);

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);
		if (wasempty)
			setsoftcrypto(softintr_cookie);

		return 0;
	}

	mutex_spin_enter(&crypto_q_mtx);

	cap = crypto_checkdriver(hid);
	/*
	 * TODO:
	 * If we could guarantee that the driver stays valid until
	 * crypto_unregister() completes, this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must be detached, so this request will migrate
		 * to other drivers in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	/*
	 * TODO:
	 * cap->cc_qblocked should be protected by a spin lock other than
	 * crypto_q_mtx.
	 */
	if (cap->cc_qblocked != 0) {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	mutex_spin_exit(&crypto_q_mtx);
	result = crypto_invoke(crp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		crypto_drivers[hid].cc_qblocked = 1;
		TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
		cryptostats.cs_blocks++;
		mutex_spin_exit(&crypto_q_mtx);

		/*
		 * The crp was enqueued on crp_q, so no error has
		 * occurred; therefore this function must not return
		 * an error.
		 */
		result = 0;
	}

	return result;
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int result;

	KASSERT(krp != NULL);

	mutex_spin_enter(&crypto_q_mtx);
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	/*
	 * TODO:
	 * If we could guarantee that the driver stays valid until
	 * crypto_unregister() completes, this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	if (cap->cc_kqblocked != 0) {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	mutex_spin_exit(&crypto_q_mtx);
	result = crypto_kinvoke(krp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
		TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;
		mutex_spin_exit(&crypto_q_mtx);

		/*
		 * The krp was enqueued on crp_kq, so no error has
		 * occurred; therefore this function must not return
		 * an error.
		 */
		result = 0;
	}

	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);

	/* Sanity checks. */
	if (krp->krp_callback == NULL) {
		cv_destroy(&krp->krp_cv);
		pool_put(&cryptkop_pool, krp);
		return EINVAL;
	}

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = crypto_drivers[hid].cc_kprocess;
		arg = crypto_drivers[hid].cc_karg;
		mutex_exit(&crypto_drv_mtx);
		krp->krp_hid = hid;
		error = (*process)(arg, krp, hint);
	} else {
		mutex_exit(&crypto_drv_mtx);
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
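/*
 * Update the timing statistics in ts with the time elapsed since *tv
 * (accumulated total, min, max and count), then reset *tv to the
 * current time.
 */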
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &ts->acc);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;

	KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp->crp_callback == NULL) {
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = CRYPTO_SESID2HID(crp->crp_sid);

	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = crypto_drivers[hid].cc_process;
		arg = crypto_drivers[hid].cc_arg;

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		return (*process)(arg, crp, hint);
	} else {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;

		crypto_done(crp);
		return 0;
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}
	pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	/*
	 * When crp_ret_q is full, refuse to allocate a new request here,
	 * so that an error callback cannot overflow crp_ret_q.
	 */
	if (CRYPTO_Q_IS_FULL(crp_ret_q)) {
		CRYPTO_Q_INC_DROPS(crp_ret_q);
		return NULL;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		mutex_spin_enter(&crypto_ret_q_mtx);
		crp->crp_flags |= CRYPTO_F_DONE;
		mutex_spin_exit(&crypto_ret_q_mtx);

#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		mutex_spin_enter(&crypto_ret_q_mtx);
		crp->crp_flags |= CRYPTO_F_DONE;
#if 0
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * TODO:
			 * If crp->crp_flags & CRYPTO_F_USER and the used
			 * encryption driver does all the processing in
			 * the same context, we can skip enqueueing crp_ret_q
			 * and cv_signal(&cryptoret_cv).
			 */
			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
		} else
#endif
		{
			wasempty = TAILQ_EMPTY(&crp_ret_q);
			DPRINTF("lid[%u]: queueing %p\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
			crp->crp_flags |= CRYPTO_F_ONRETQ;
			TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
			CRYPTO_Q_INC(crp_ret_q);
			if (wasempty) {
				DPRINTF("lid[%u]: waking cryptoret, "
				    "crp %p hit empty queue.\n",
				    CRYPTO_SESID2LID(crp->crp_sid), crp);
				cv_signal(&cryptoret_cv);
			}
		}
		mutex_spin_exit(&crypto_ret_q_mtx);
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		mutex_spin_enter(&crypto_ret_q_mtx);
		wasempty = TAILQ_EMPTY(&crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
		CRYPTO_Q_INC(crp_ret_kq);
		if (wasempty)
			cv_signal(&cryptoret_cv);
		mutex_spin_exit(&crypto_ret_q_mtx);
	}
}

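/*
 * Report which asymmetric (key) operations userland may use, as a
 * bitmask with one bit per CRK_* algorithm, honouring the
 * crypto_userasymcrypto and crypto_devallowsoft knobs.
 */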
int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	if (crypto_userasymcrypto == 0)
		return 0;

	mutex_enter(&crypto_drv_mtx);

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return 0;
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	int result, hint;

	cryptostats.cs_intrs++;
	mutex_spin_enter(&crypto_q_mtx);
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, &crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0)
				continue;

			/*
			 * skip batch crp until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * found first crp which is neither blocked nor batch.
			 */
			submit = crp;
			/*
			 * batch crp can be processed much later, so clear hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			mutex_spin_exit(&crypto_q_mtx);
			result = crypto_invoke(submit, hint);
			/* we must take here as the TAILQ op or kinvoke
			   may need this mutex below.  sigh. */
			mutex_spin_enter(&crypto_q_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, &crp_kq, krp_next, knext) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			mutex_spin_exit(&crypto_q_mtx);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			mutex_spin_enter(&crypto_q_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
	} while (submit != NULL || krp != NULL);
	mutex_spin_exit(&crypto_q_mtx);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;

	mutex_spin_enter(&crypto_ret_q_mtx);
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
			CRYPTO_Q_DEC(crp_ret_q);
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
			CRYPTO_Q_DEC(crp_ret_kq);
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop before calling any callbacks. */
		if (crp == NULL && krp == NULL) {

			/* Check for the exit condition. */
			if (crypto_exit_flag != 0) {

				/* Time to die. */
				crypto_exit_flag = 0;
				cv_broadcast(&cryptoret_cv);
				mutex_spin_exit(&crypto_ret_q_mtx);
				kthread_exit(0);
			}

			cryptostats.cs_rets++;
			cv_wait(&cryptoret_cv, &crypto_ret_q_mtx);
			continue;
		}

		mutex_spin_exit(&crypto_ret_q_mtx);

		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&crypto_ret_q_mtx);
	}
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}