      1 /*	$NetBSD: crypto.c,v 1.99 2017/07/31 04:23:48 knakahara Exp $ */
      2 /*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
      3 /*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/
      4 
      5 /*-
      6  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to The NetBSD Foundation
     10  * by Coyote Point Systems, Inc.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31  * POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 /*
     35  * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
     36  *
     37  * This code was written by Angelos D. Keromytis in Athens, Greece, in
     38  * February 2000. Network Security Technologies Inc. (NSTI) kindly
     39  * supported the development of this code.
     40  *
     41  * Copyright (c) 2000, 2001 Angelos D. Keromytis
     42  *
     43  * Permission to use, copy, and modify this software with or without fee
     44  * is hereby granted, provided that this entire notice is included in
     45  * all source code copies of any software which is or includes a copy or
     46  * modification of this software.
     47  *
     48  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
     49  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
     50  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
     51  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
     52  * PURPOSE.
     53  */
     54 
     55 #include <sys/cdefs.h>
     56 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.99 2017/07/31 04:23:48 knakahara Exp $");
     57 
     58 #include <sys/param.h>
     59 #include <sys/reboot.h>
     60 #include <sys/systm.h>
     61 #include <sys/proc.h>
     62 #include <sys/pool.h>
     63 #include <sys/kthread.h>
     64 #include <sys/once.h>
     65 #include <sys/sysctl.h>
     66 #include <sys/intr.h>
     67 #include <sys/errno.h>
     68 #include <sys/module.h>
     69 #include <sys/xcall.h>
     70 #include <sys/device.h>
     71 #include <sys/cpu.h>
     72 #include <sys/percpu.h>
     73 #include <sys/kmem.h>
     74 
     75 #if defined(_KERNEL_OPT)
     76 #include "opt_ocf.h"
     77 #endif
     78 
     79 #include <opencrypto/cryptodev.h>
     80 #include <opencrypto/xform.h>			/* XXX for M_XDATA */
     81 
     82 /*
     83  * Crypto drivers register themselves by allocating a slot in the
     84  * crypto_drivers table with crypto_get_driverid() and then registering
     85  * each algorithm they support with crypto_register() and crypto_kregister().
     86  */
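
/*
 * Illustrative sketch (hypothetical, not compiled): how a driver would
 * use this registration interface.  The driver name, softc and callback
 * functions are placeholders; crypto_get_driverid() and crypto_register()
 * are the real entry points defined later in this file.
 */
#if 0
static int
example_attach(struct example_softc *sc)
{
	int32_t hid;

	/* Allocate a slot in the crypto_drivers table. */
	hid = crypto_get_driverid(0);
	if (hid < 0)
		return ENXIO;
	sc->sc_cid = hid;

	/* Advertise one algorithm and hand over the driver callbacks. */
	return crypto_register(hid, CRYPTO_3DES_CBC, 0, 0,
	    example_newsession, example_freesession, example_process, sc);
}
#endif
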
     87 /* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
     88 static struct {
     89 	kmutex_t mtx;
     90 	int num;
     91 	struct cryptocap *list;
     92 } crypto_drv __cacheline_aligned;
     93 #define crypto_drv_mtx		(crypto_drv.mtx)
     94 #define crypto_drivers_num	(crypto_drv.num)
     95 #define crypto_drivers		(crypto_drv.list)
     96 
     97 static	void *crypto_q_si;
     98 static	void *crypto_ret_si;
     99 
    100 /*
    101  * There are two queues for crypto requests; one for symmetric (e.g.
    102  * cipher) operations and one for asymmetric (e.g. MOD) operations.
    103  * See below for how synchronization is handled.
    104  */
    105 TAILQ_HEAD(crypto_crp_q, cryptop);
    106 TAILQ_HEAD(crypto_crp_kq, cryptkop);
    107 struct crypto_crp_qs {
    108 	struct crypto_crp_q crp_q;
    109 	struct crypto_crp_kq crp_kq;
    110 };
    111 static percpu_t *crypto_crp_qs_percpu;
    112 
    113 static inline struct crypto_crp_qs *
    114 crypto_get_crp_qs(int *s)
    115 {
    116 
    117 	KASSERT(s != NULL);
    118 
    119 	*s = splsoftnet();
    120 	return percpu_getref(crypto_crp_qs_percpu);
    121 }
    122 
    123 static inline void
    124 crypto_put_crp_qs(int *s)
    125 {
    126 
    127 	KASSERT(s != NULL);
    128 
    129 	percpu_putref(crypto_crp_qs_percpu);
    130 	splx(*s);
    131 }
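
/*
 * Usage sketch (hypothetical, not compiled): the per-CPU request queues
 * must be bracketed by crypto_get_crp_qs()/crypto_put_crp_qs(), which
 * raise splsoftnet and hold the percpu reference so the caller stays on
 * its own CPU's queue pair.  This mirrors what crypto_dispatch() does.
 */
#if 0
static void
example_enqueue(struct cryptop *crp)
{
	struct crypto_crp_qs *qs;
	int s;

	qs = crypto_get_crp_qs(&s);
	TAILQ_INSERT_TAIL(&qs->crp_q, crp, crp_next);
	crypto_put_crp_qs(&s);
}
#endif
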
    132 
    133 static void
    134 crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
    135 {
    136 	struct crypto_crp_qs *qs_pc = p;
    137 	bool *isempty = arg;
    138 
    139 	if (!TAILQ_EMPTY(&qs_pc->crp_q) || !TAILQ_EMPTY(&qs_pc->crp_kq))
    140 		*isempty = true;
    141 }
    142 
    143 static void
    144 crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
    145 {
    146 	struct crypto_crp_qs *qs = p;
    147 
    148 	TAILQ_INIT(&qs->crp_q);
    149 	TAILQ_INIT(&qs->crp_kq);
    150 }
    151 
    152 /*
    153  * There are two queues for processing completed crypto requests; one
    154  * for the symmetric and one for the asymmetric ops.  We only need one
    155  * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
    156  * for how synchronization is handled.
    157  */
    158 TAILQ_HEAD(crypto_crp_ret_q, cryptop);
    159 TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
    160 struct crypto_crp_ret_qs {
    161 	kmutex_t crp_ret_q_mtx;
    162 	bool crp_ret_q_exit_flag;
    163 
    164 	struct crypto_crp_ret_q crp_ret_q;
    165 	int crp_ret_q_len;
    166 	int crp_ret_q_maxlen; /* queue length limit. <=0 means unlimited. */
    167 	int crp_ret_q_drops;
    168 
    169 	struct crypto_crp_ret_kq crp_ret_kq;
    170 	int crp_ret_kq_len;
    171 	int crp_ret_kq_maxlen; /* queue length limit. <=0 means unlimited. */
    172 	int crp_ret_kq_drops;
    173 };
    174 struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;
    175 
    176 
    177 static inline struct crypto_crp_ret_qs *
    178 crypto_get_crp_ret_qs(struct cpu_info *ci)
    179 {
    180 	u_int cpuid;
    181 	struct crypto_crp_ret_qs *qs;
    182 
    183 	KASSERT(ci != NULL);
    184 
    185 	cpuid = cpu_index(ci);
    186 	qs = crypto_crp_ret_qs_list[cpuid];
    187 	mutex_enter(&qs->crp_ret_q_mtx);
    188 	return qs;
    189 }
    190 
    191 static inline void
    192 crypto_put_crp_ret_qs(struct cpu_info *ci)
    193 {
    194 	u_int cpuid;
    195 	struct crypto_crp_ret_qs *qs;
    196 
    197 	KASSERT(ci != NULL);
    198 
    199 	cpuid = cpu_index(ci);
    200 	qs = crypto_crp_ret_qs_list[cpuid];
    201 	mutex_exit(&qs->crp_ret_q_mtx);
    202 }
    203 
    204 #ifndef CRYPTO_RET_Q_MAXLEN
    205 #define CRYPTO_RET_Q_MAXLEN 0
    206 #endif
    207 #ifndef CRYPTO_RET_KQ_MAXLEN
    208 #define CRYPTO_RET_KQ_MAXLEN 0
    209 #endif
    210 
    211 static int
    212 sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
    213 {
    214 	int error, len = 0;
    215 	struct sysctlnode node = *rnode;
    216 
    217 	for (int i = 0; i < ncpu; i++) {
    218 		struct crypto_crp_ret_qs *qs;
    219 		struct cpu_info *ci = cpu_lookup(i);
    220 
    221 		qs = crypto_get_crp_ret_qs(ci);
    222 		len += qs->crp_ret_q_len;
    223 		crypto_put_crp_ret_qs(ci);
    224 	}
    225 
    226 	node.sysctl_data = &len;
    227 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    228 	if (error || newp == NULL)
    229 		return error;
    230 
    231 	return 0;
    232 }
    233 
    234 static int
    235 sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
    236 {
    237 	int error, drops = 0;
    238 	struct sysctlnode node = *rnode;
    239 
    240 	for (int i = 0; i < ncpu; i++) {
    241 		struct crypto_crp_ret_qs *qs;
    242 		struct cpu_info *ci = cpu_lookup(i);
    243 
    244 		qs = crypto_get_crp_ret_qs(ci);
    245 		drops += qs->crp_ret_q_drops;
    246 		crypto_put_crp_ret_qs(ci);
    247 	}
    248 
    249 	node.sysctl_data = &drops;
    250 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    251 	if (error || newp == NULL)
    252 		return error;
    253 
    254 	return 0;
    255 }
    256 
    257 static int
    258 sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
    259 {
    260 	int error, maxlen;
    261 	struct crypto_crp_ret_qs *qs;
    262 	struct sysctlnode node = *rnode;
    263 
     264 	/* each crp_ret_q_maxlen is the same. */
    265 	qs = crypto_get_crp_ret_qs(curcpu());
    266 	maxlen = qs->crp_ret_q_maxlen;
    267 	crypto_put_crp_ret_qs(curcpu());
    268 
    269 	node.sysctl_data = &maxlen;
    270 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    271 	if (error || newp == NULL)
    272 		return error;
    273 
    274 	for (int i = 0; i < ncpu; i++) {
    275 		struct cpu_info *ci = cpu_lookup(i);
    276 
    277 		qs = crypto_get_crp_ret_qs(ci);
    278 		qs->crp_ret_q_maxlen = maxlen;
    279 		crypto_put_crp_ret_qs(ci);
    280 	}
    281 
    282 	return 0;
    283 }
    284 
    285 static int
    286 sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
    287 {
    288 	int error, len = 0;
    289 	struct sysctlnode node = *rnode;
    290 
    291 	for (int i = 0; i < ncpu; i++) {
    292 		struct crypto_crp_ret_qs *qs;
    293 		struct cpu_info *ci = cpu_lookup(i);
    294 
    295 		qs = crypto_get_crp_ret_qs(ci);
    296 		len += qs->crp_ret_kq_len;
    297 		crypto_put_crp_ret_qs(ci);
    298 	}
    299 
    300 	node.sysctl_data = &len;
    301 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    302 	if (error || newp == NULL)
    303 		return error;
    304 
    305 	return 0;
    306 }
    307 
    308 static int
    309 sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
    310 {
    311 	int error, drops = 0;
    312 	struct sysctlnode node = *rnode;
    313 
    314 	for (int i = 0; i < ncpu; i++) {
    315 		struct crypto_crp_ret_qs *qs;
    316 		struct cpu_info *ci = cpu_lookup(i);
    317 
    318 		qs = crypto_get_crp_ret_qs(ci);
    319 		drops += qs->crp_ret_kq_drops;
    320 		crypto_put_crp_ret_qs(ci);
    321 	}
    322 
    323 	node.sysctl_data = &drops;
    324 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    325 	if (error || newp == NULL)
    326 		return error;
    327 
    328 	return 0;
    329 }
    330 
    331 static int
    332 sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
    333 {
    334 	int error, maxlen;
    335 	struct crypto_crp_ret_qs *qs;
    336 	struct sysctlnode node = *rnode;
    337 
    338 	/* each crp_ret_kq_maxlen is the same. */
    339 	qs = crypto_get_crp_ret_qs(curcpu());
    340 	maxlen = qs->crp_ret_kq_maxlen;
    341 	crypto_put_crp_ret_qs(curcpu());
    342 
    343 	node.sysctl_data = &maxlen;
    344 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    345 	if (error || newp == NULL)
    346 		return error;
    347 
    348 	for (int i = 0; i < ncpu; i++) {
    349 		struct cpu_info *ci = cpu_lookup(i);
    350 
    351 		qs = crypto_get_crp_ret_qs(ci);
    352 		qs->crp_ret_kq_maxlen = maxlen;
    353 		crypto_put_crp_ret_qs(ci);
    354 	}
    355 
    356 	return 0;
    357 }
    358 
    359 /*
     360  * Crypto op and descriptor data structures are allocated
     361  * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
    362  */
    363 struct pool cryptop_pool;
    364 struct pool cryptodesc_pool;
    365 struct pool cryptkop_pool;
    366 
    367 int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
    368 int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
    369 /*
    370  * cryptodevallowsoft is (intended to be) sysctl'able, controlling
    371  * access to hardware versus software transforms as below:
    372  *
    373  * crypto_devallowsoft < 0:  Force userlevel requests to use software
    374  *                              transforms, always
    375  * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
    376  *                              requests for non-accelerated transforms
    377  *                              (handling the latter in software)
    378  * crypto_devallowsoft > 0:  Allow user requests only for transforms which
    379  *                               are hardware-accelerated.
    380  */
     381 int	crypto_devallowsoft = 1;	/* allow user reqs only for h/w transforms */
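
/*
 * Example: the three knobs above are exposed under kern.* by
 * sysctl_opencrypto_setup() below, so an administrator can, e.g., force
 * userlevel requests onto software transforms with
 *
 *	sysctl -w kern.cryptodevallowsoft=-1
 */
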
    382 
    383 static void
    384 sysctl_opencrypto_setup(struct sysctllog **clog)
    385 {
    386 	const struct sysctlnode *ocnode;
    387 	const struct sysctlnode *retqnode, *retkqnode;
    388 
    389 	sysctl_createv(clog, 0, NULL, NULL,
    390 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    391 		       CTLTYPE_INT, "usercrypto",
    392 		       SYSCTL_DESCR("Enable/disable user-mode access to "
    393 			   "crypto support"),
    394 		       NULL, 0, &crypto_usercrypto, 0,
    395 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    396 	sysctl_createv(clog, 0, NULL, NULL,
    397 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    398 		       CTLTYPE_INT, "userasymcrypto",
    399 		       SYSCTL_DESCR("Enable/disable user-mode access to "
    400 			   "asymmetric crypto support"),
    401 		       NULL, 0, &crypto_userasymcrypto, 0,
    402 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    403 	sysctl_createv(clog, 0, NULL, NULL,
    404 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    405 		       CTLTYPE_INT, "cryptodevallowsoft",
    406 		       SYSCTL_DESCR("Enable/disable use of software "
    407 			   "asymmetric crypto support"),
    408 		       NULL, 0, &crypto_devallowsoft, 0,
    409 		       CTL_KERN, CTL_CREATE, CTL_EOL);
    410 
    411 	sysctl_createv(clog, 0, NULL, &ocnode,
    412 		       CTLFLAG_PERMANENT,
    413 		       CTLTYPE_NODE, "opencrypto",
    414 		       SYSCTL_DESCR("opencrypto related entries"),
    415 		       NULL, 0, NULL, 0,
    416 		       CTL_CREATE, CTL_EOL);
    417 
    418 	sysctl_createv(clog, 0, &ocnode, &retqnode,
    419 		       CTLFLAG_PERMANENT,
    420 		       CTLTYPE_NODE, "crypto_ret_q",
    421 		       SYSCTL_DESCR("crypto_ret_q related entries"),
    422 		       NULL, 0, NULL, 0,
    423 		       CTL_CREATE, CTL_EOL);
    424 	sysctl_createv(clog, 0, &retqnode, NULL,
    425 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
    426 		       CTLTYPE_INT, "len",
    427 		       SYSCTL_DESCR("Current queue length"),
    428 		       sysctl_opencrypto_q_len, 0,
    429 		       NULL, 0,
    430 		       CTL_CREATE, CTL_EOL);
    431 	sysctl_createv(clog, 0, &retqnode, NULL,
    432 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
    433 		       CTLTYPE_INT, "drops",
    434 		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
    435 		       sysctl_opencrypto_q_drops, 0,
    436 		       NULL, 0,
    437 		       CTL_CREATE, CTL_EOL);
    438 	sysctl_createv(clog, 0, &retqnode, NULL,
    439 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    440 		       CTLTYPE_INT, "maxlen",
    441 		       SYSCTL_DESCR("Maximum allowed queue length"),
    442 		       sysctl_opencrypto_q_maxlen, 0,
    443 		       NULL, 0,
    444 		       CTL_CREATE, CTL_EOL);
    445 
    446 
    447 	sysctl_createv(clog, 0, &ocnode, &retkqnode,
    448 		       CTLFLAG_PERMANENT,
    449 		       CTLTYPE_NODE, "crypto_ret_kq",
    450 		       SYSCTL_DESCR("crypto_ret_kq related entries"),
    451 		       NULL, 0, NULL, 0,
    452 		       CTL_CREATE, CTL_EOL);
    453 	sysctl_createv(clog, 0, &retkqnode, NULL,
    454 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
    455 		       CTLTYPE_INT, "len",
    456 		       SYSCTL_DESCR("Current queue length"),
    457 		       sysctl_opencrypto_kq_len, 0,
    458 		       NULL, 0,
    459 		       CTL_CREATE, CTL_EOL);
    460 	sysctl_createv(clog, 0, &retkqnode, NULL,
    461 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
    462 		       CTLTYPE_INT, "drops",
    463 		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
    464 		       sysctl_opencrypto_kq_drops, 0,
    465 		       NULL, 0,
    466 		       CTL_CREATE, CTL_EOL);
    467 	sysctl_createv(clog, 0, &retkqnode, NULL,
    468 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    469 		       CTLTYPE_INT, "maxlen",
    470 		       SYSCTL_DESCR("Maximum allowed queue length"),
    471 		       sysctl_opencrypto_kq_maxlen, 0,
    472 		       NULL, 0,
    473 		       CTL_CREATE, CTL_EOL);
    474 }
    475 
    476 /*
    477  * Synchronization: read carefully, this is non-trivial.
    478  *
    479  * Crypto requests are submitted via crypto_dispatch.  Typically
    480  * these come in from network protocols at spl0 (output path) or
    481  * spl[,soft]net (input path).
    482  *
     483  * Requests are typically passed on to the driver directly, but they
    484  * may also be queued for processing by a software interrupt thread,
    485  * cryptointr, that runs at splsoftcrypto.  This thread dispatches
    486  * the requests to crypto drivers (h/w or s/w) who call crypto_done
    487  * when a request is complete.  Hardware crypto drivers are assumed
    488  * to register their IRQ's as network devices so their interrupt handlers
    489  * and subsequent "done callbacks" happen at spl[imp,net].
    490  *
    491  * Completed crypto ops are queued for a separate kernel thread that
     492  * handles the callbacks at spl0.  This decoupling ensures the crypto
    493  * driver interrupt service routine is not delayed while the callback
    494  * takes place and that callbacks are delivered after a context switch
    495  * (as opposed to a software interrupt that clients must block).
    496  *
    497  * This scheme is not intended for SMP machines.
    498  */
    499 static	void cryptointr(void *);	/* swi thread to dispatch ops */
     500 static	void cryptoret_softint(void *);	/* softint to run callbacks */
    501 static	int crypto_destroy(bool);
    502 static	int crypto_invoke(struct cryptop *crp, int hint);
    503 static	int crypto_kinvoke(struct cryptkop *krp, int hint);
    504 
    505 static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
    506 static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
    507 static struct cryptocap *crypto_checkdriver(u_int32_t);
    508 static void crypto_driver_lock(struct cryptocap *);
    509 static void crypto_driver_unlock(struct cryptocap *);
    510 static void crypto_driver_clear(struct cryptocap *);
    511 
    512 static int crypto_init_finalize(device_t);
    513 
    514 static struct cryptostats cryptostats;
    515 #ifdef CRYPTO_TIMING
    516 static	int crypto_timing = 0;
    517 #endif
    518 
    519 static struct sysctllog *sysctl_opencrypto_clog;
    520 
    521 static int
    522 crypto_crp_ret_qs_init(void)
    523 {
    524 	int i, j;
    525 
    526 	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
    527 	    KM_NOSLEEP);
    528 	if (crypto_crp_ret_qs_list == NULL) {
     529 		printf("crypto_init: cannot allocate crypto_crp_ret_qs_list\n");
    530 		return ENOMEM;
    531 	}
    532 
    533 	for (i = 0; i < ncpu; i++) {
    534 		struct crypto_crp_ret_qs *qs;
    535 		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_NOSLEEP);
    536 		if (qs == NULL)
    537 			break;
    538 
    539 		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
    540 		qs->crp_ret_q_exit_flag = false;
    541 
    542 		TAILQ_INIT(&qs->crp_ret_q);
    543 		qs->crp_ret_q_len = 0;
    544 		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
    545 		qs->crp_ret_q_drops = 0;
    546 
    547 		TAILQ_INIT(&qs->crp_ret_kq);
    548 		qs->crp_ret_kq_len = 0;
    549 		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
    550 		qs->crp_ret_kq_drops = 0;
    551 
    552 		crypto_crp_ret_qs_list[i] = qs;
    553 	}
    554 	if (i == ncpu)
    555 		return 0;
    556 
    557 	for (j = 0; j < i; j++) {
    558 		struct crypto_crp_ret_qs *qs = crypto_crp_ret_qs_list[j];
    559 
    560 		mutex_destroy(&qs->crp_ret_q_mtx);
    561 		kmem_free(qs, sizeof(struct crypto_crp_ret_qs));
    562 	}
    563 	kmem_free(crypto_crp_ret_qs_list, sizeof(struct crypto_crp_ret_qs *) * ncpu);
    564 
    565 	return ENOMEM;
    566 }
    567 
    568 static int
    569 crypto_init0(void)
    570 {
    571 	int error;
    572 
    573 	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
    574 	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
    575 		  0, "cryptop", NULL, IPL_NET);
    576 	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
    577 		  0, "cryptodesc", NULL, IPL_NET);
    578 	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
    579 		  0, "cryptkop", NULL, IPL_NET);
    580 
    581 	crypto_crp_qs_percpu = percpu_alloc(sizeof(struct crypto_crp_qs));
    582 	percpu_foreach(crypto_crp_qs_percpu, crypto_crp_qs_init_pc, NULL);
    583 
    584 	error = crypto_crp_ret_qs_init();
    585 	if (error) {
    586 		printf("crypto_init: cannot malloc ret_q list\n");
    587 		return ENOMEM;
    588 	}
    589 
    590 	crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
    591 	    sizeof(struct cryptocap), KM_NOSLEEP);
    592 	if (crypto_drivers == NULL) {
    593 		printf("crypto_init: cannot malloc driver table\n");
    594 		return ENOMEM;
    595 	}
    596 	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
    597 
    598 	crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
    599 	if (crypto_q_si == NULL) {
    600 		printf("crypto_init: cannot establish request queue handler\n");
    601 		return crypto_destroy(false);
    602 	}
    603 
    604 	/*
    605 	 * Some encryption devices (such as mvcesa) are attached before
     606 	 * ipi_sysinit().  That would trigger an assertion in ipi_register(),
     607 	 * because the crypto_ret_si softint uses SOFTINT_RCPU.
    608 	 */
    609 	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
    610 		printf("crypto_init: cannot register crypto_init_finalize\n");
    611 		return crypto_destroy(false);
    612 	}
    613 
    614 	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);
    615 
    616 	return 0;
    617 }
    618 
    619 static int
    620 crypto_init_finalize(device_t self __unused)
    621 {
    622 
    623 	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
    624 	    &cryptoret_softint, NULL);
    625 	KASSERT(crypto_ret_si != NULL);
    626 
    627 	return 0;
    628 }
    629 
    630 int
    631 crypto_init(void)
    632 {
    633 	static ONCE_DECL(crypto_init_once);
    634 
    635 	return RUN_ONCE(&crypto_init_once, crypto_init0);
    636 }
    637 
    638 static int
    639 crypto_destroy(bool exit_kthread)
    640 {
    641 	int i;
    642 
    643 	if (exit_kthread) {
    644 		struct cryptocap *cap = NULL;
    645 		uint64_t where;
    646 		bool is_busy = false;
    647 
    648 		/* if we have any in-progress requests, don't unload */
    649 		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
    650 				   &is_busy);
    651 		if (is_busy)
    652 			return EBUSY;
    653 		/* FIXME:
     654 		 * prohibit enqueueing to crp_q and crp_kq after here.
    655 		 */
    656 
    657 		mutex_enter(&crypto_drv_mtx);
    658 		for (i = 0; i < crypto_drivers_num; i++) {
    659 			cap = crypto_checkdriver(i);
    660 			if (cap == NULL)
    661 				continue;
    662 			if (cap->cc_sessions != 0) {
    663 				mutex_exit(&crypto_drv_mtx);
    664 				return EBUSY;
    665 			}
    666 		}
    667 		mutex_exit(&crypto_drv_mtx);
    668 		/* FIXME:
     669 		 * prohibit touching crypto_drivers[] and its elements after here.
    670 		 */
    671 
    672 		/*
    673 		 * Ensure cryptoret_softint() is never scheduled and then wait
    674 		 * for last softint_execute().
    675 		 */
    676 		for (i = 0; i < ncpu; i++) {
    677 			struct crypto_crp_ret_qs *qs;
    678 			struct cpu_info *ci = cpu_lookup(i);
    679 
    680 			qs = crypto_get_crp_ret_qs(ci);
    681 			qs->crp_ret_q_exit_flag = true;
    682 			crypto_put_crp_ret_qs(ci);
    683 		}
    684 		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
    685 		xc_wait(where);
    686 	}
    687 
    688 	if (sysctl_opencrypto_clog != NULL)
    689 		sysctl_teardown(&sysctl_opencrypto_clog);
    690 
    691 	if (crypto_ret_si != NULL)
    692 		softint_disestablish(crypto_ret_si);
    693 
    694 	if (crypto_q_si != NULL)
    695 		softint_disestablish(crypto_q_si);
    696 
    697 	mutex_enter(&crypto_drv_mtx);
    698 	if (crypto_drivers != NULL)
    699 		kmem_free(crypto_drivers,
    700 		    crypto_drivers_num * sizeof(struct cryptocap));
    701 	mutex_exit(&crypto_drv_mtx);
    702 
    703 	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));
    704 
    705 	pool_destroy(&cryptop_pool);
    706 	pool_destroy(&cryptodesc_pool);
    707 	pool_destroy(&cryptkop_pool);
    708 
    709 	mutex_destroy(&crypto_drv_mtx);
    710 
    711 	return 0;
    712 }
    713 
    714 static bool
    715 crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
    716 {
    717 	struct cryptoini *cr;
    718 
    719 	for (cr = cri; cr; cr = cr->cri_next)
    720 		if (cap->cc_alg[cr->cri_alg] == 0) {
    721 			DPRINTF("alg %d not supported\n", cr->cri_alg);
    722 			return false;
    723 		}
    724 
    725 	return true;
    726 }
    727 
    728 #define CRYPTO_ACCEPT_HARDWARE 0x1
    729 #define CRYPTO_ACCEPT_SOFTWARE 0x2
    730 /*
    731  * The algorithm we use here is pretty stupid; just use the
    732  * first driver that supports all the algorithms we need.
    733  * If there are multiple drivers we choose the driver with
    734  * the fewest active sessions. We prefer hardware-backed
    735  * drivers to software ones.
    736  *
    737  * XXX We need more smarts here (in real life too, but that's
    738  * XXX another story altogether).
    739  */
    740 static struct cryptocap *
    741 crypto_select_driver_lock(struct cryptoini *cri, int hard)
    742 {
    743 	u_int32_t hid;
    744 	int accept;
    745 	struct cryptocap *cap, *best;
    746 
    747 	best = NULL;
    748 	/*
     749 	 * hard == 0 means both hardware and software drivers may be used.
     750 	 * Hardware drivers are preferred over software ones, so search
     751 	 * the hardware drivers first.
    752 	 */
    753 	if (hard >= 0)
    754 		accept = CRYPTO_ACCEPT_HARDWARE;
    755 	else
    756 		accept = CRYPTO_ACCEPT_SOFTWARE;
    757 again:
    758 	for (hid = 0; hid < crypto_drivers_num; hid++) {
    759 		cap = crypto_checkdriver(hid);
    760 		if (cap == NULL)
    761 			continue;
    762 
    763 		crypto_driver_lock(cap);
    764 
    765 		/*
    766 		 * If it's not initialized or has remaining sessions
    767 		 * referencing it, skip.
    768 		 */
    769 		if (cap->cc_newsession == NULL ||
    770 		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
    771 			crypto_driver_unlock(cap);
    772 			continue;
    773 		}
    774 
    775 		/* Hardware required -- ignore software drivers. */
    776 		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
    777 		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
    778 			crypto_driver_unlock(cap);
    779 			continue;
    780 		}
    781 		/* Software required -- ignore hardware drivers. */
    782 		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
    783 		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
    784 			crypto_driver_unlock(cap);
    785 			continue;
    786 		}
    787 
    788 		/* See if all the algorithms are supported. */
    789 		if (crypto_driver_suitable(cap, cri)) {
    790 			if (best == NULL) {
    791 				/* keep holding crypto_driver_lock(cap) */
    792 				best = cap;
    793 				continue;
    794 			} else if (cap->cc_sessions < best->cc_sessions) {
    795 				crypto_driver_unlock(best);
    796 				/* keep holding crypto_driver_lock(cap) */
    797 				best = cap;
    798 				continue;
    799 			}
    800 		}
    801 
    802 		crypto_driver_unlock(cap);
    803 	}
    804 	if (best == NULL && hard == 0
    805 	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
    806 		accept = CRYPTO_ACCEPT_SOFTWARE;
    807 		goto again;
    808 	}
    809 
    810 	return best;
    811 }
    812 
    813 /*
    814  * Create a new session.
    815  */
    816 int
    817 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
    818 {
    819 	struct cryptocap *cap;
    820 	int err = EINVAL;
    821 
    822 	mutex_enter(&crypto_drv_mtx);
    823 
    824 	cap = crypto_select_driver_lock(cri, hard);
    825 	if (cap != NULL) {
    826 		u_int32_t hid, lid;
    827 
    828 		hid = cap - crypto_drivers;
    829 		/*
    830 		 * Can't do everything in one session.
    831 		 *
    832 		 * XXX Fix this. We need to inject a "virtual" session layer right
    833 		 * XXX about here.
    834 		 */
    835 
    836 		/* Call the driver initialization routine. */
    837 		lid = hid;		/* Pass the driver ID. */
    838 		crypto_driver_unlock(cap);
    839 		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
    840 		crypto_driver_lock(cap);
    841 		if (err == 0) {
    842 			(*sid) = hid;
    843 			(*sid) <<= 32;
    844 			(*sid) |= (lid & 0xffffffff);
    845 			(cap->cc_sessions)++;
    846 		} else {
    847 			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
    848 			    hid, err);
    849 		}
    850 		crypto_driver_unlock(cap);
    851 	}
    852 
    853 	mutex_exit(&crypto_drv_mtx);
    854 
    855 	return err;
    856 }
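
/*
 * Consumer sketch (hypothetical, not compiled): create a session and
 * submit one request through the interfaces in this file.  The buffer,
 * key and example_callback() are placeholders, error handling is kept
 * minimal, and the int-returning callback type from cryptodev.h is
 * assumed.
 */
#if 0
static int
example_submit(void *buf, int len, void *key)
{
	struct cryptoini cri;
	struct cryptop *crp;
	u_int64_t sid;

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = CRYPTO_3DES_CBC;
	cri.cri_klen = 192;			/* key length in bits */
	cri.cri_key = key;
	if (crypto_newsession(&sid, &cri, 0))	/* hard == 0: h/w or s/w */
		return EINVAL;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return ENOMEM;
	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_buf = buf;
	crp->crp_callback = example_callback;	/* invoked by crypto_done() */
	crp->crp_desc->crd_alg = CRYPTO_3DES_CBC;
	crp->crp_desc->crd_len = len;
	return crypto_dispatch(crp);
}
#endif
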
    857 
    858 /*
    859  * Delete an existing session (or a reserved session on an unregistered
    860  * driver).
    861  */
    862 int
    863 crypto_freesession(u_int64_t sid)
    864 {
    865 	struct cryptocap *cap;
    866 	int err = 0;
    867 
     868 	/* Look up the driver from the HID in the upper 32 bits of sid. */
    869 	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
    870 	if (cap == NULL)
    871 		return ENOENT;
    872 
    873 	if (cap->cc_sessions)
    874 		(cap->cc_sessions)--;
    875 
    876 	/* Call the driver cleanup routine, if available. */
    877 	if (cap->cc_freesession)
    878 		err = cap->cc_freesession(cap->cc_arg, sid);
    879 	else
    880 		err = 0;
    881 
    882 	/*
    883 	 * If this was the last session of a driver marked as invalid,
    884 	 * make the entry available for reuse.
    885 	 */
    886 	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
    887 		crypto_driver_clear(cap);
    888 
    889 	crypto_driver_unlock(cap);
    890 	return err;
    891 }
    892 
    893 static bool
    894 crypto_checkdriver_initialized(const struct cryptocap *cap)
    895 {
    896 
    897 	return cap->cc_process != NULL ||
    898 	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
    899 	    cap->cc_sessions != 0;
    900 }
    901 
    902 /*
    903  * Return an unused driver id.  Used by drivers prior to registering
    904  * support for the algorithms they handle.
    905  */
    906 int32_t
    907 crypto_get_driverid(u_int32_t flags)
    908 {
    909 	struct cryptocap *newdrv;
    910 	struct cryptocap *cap = NULL;
    911 	int i;
    912 
    913 	(void)crypto_init();		/* XXX oh, this is foul! */
    914 
    915 	mutex_enter(&crypto_drv_mtx);
    916 	for (i = 0; i < crypto_drivers_num; i++) {
    917 		cap = crypto_checkdriver_uninit(i);
    918 		if (cap == NULL || crypto_checkdriver_initialized(cap))
    919 			continue;
    920 		break;
    921 	}
    922 
    923 	/* Out of entries, allocate some more. */
    924 	if (cap == NULL) {
    925 		/* Be careful about wrap-around. */
    926 		if (2 * crypto_drivers_num <= crypto_drivers_num) {
    927 			mutex_exit(&crypto_drv_mtx);
    928 			printf("crypto: driver count wraparound!\n");
    929 			return -1;
    930 		}
    931 
    932 		newdrv = kmem_zalloc(2 * crypto_drivers_num *
    933 		    sizeof(struct cryptocap), KM_NOSLEEP);
    934 		if (newdrv == NULL) {
    935 			mutex_exit(&crypto_drv_mtx);
    936 			printf("crypto: no space to expand driver table!\n");
    937 			return -1;
    938 		}
    939 
    940 		memcpy(newdrv, crypto_drivers,
    941 		    crypto_drivers_num * sizeof(struct cryptocap));
    942 		kmem_free(crypto_drivers,
    943 		    crypto_drivers_num * sizeof(struct cryptocap));
    944 
    945 		crypto_drivers_num *= 2;
    946 		crypto_drivers = newdrv;
    947 
    948 		cap = crypto_checkdriver_uninit(i);
    949 		KASSERT(cap != NULL);
    950 	}
    951 
    952 	/* NB: state is zero'd on free */
    953 	cap->cc_sessions = 1;	/* Mark */
    954 	cap->cc_flags = flags;
    955 	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);
    956 
    957 	if (bootverbose)
    958 		printf("crypto: assign driver %u, flags %u\n", i, flags);
    959 
    960 	mutex_exit(&crypto_drv_mtx);
    961 
    962 	return i;
    963 }
    964 
    965 static struct cryptocap *
    966 crypto_checkdriver_lock(u_int32_t hid)
    967 {
    968 	struct cryptocap *cap;
    969 
    970 	KASSERT(crypto_drivers != NULL);
    971 
    972 	if (hid >= crypto_drivers_num)
    973 		return NULL;
    974 
    975 	cap = &crypto_drivers[hid];
    976 	mutex_enter(&cap->cc_lock);
    977 	return cap;
    978 }
    979 
    980 /*
     981  * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
     982  * following two situations:
    983  *     - crypto_drivers[] may not be allocated
    984  *     - crypto_drivers[hid] may not be initialized
    985  */
    986 static struct cryptocap *
    987 crypto_checkdriver_uninit(u_int32_t hid)
    988 {
    989 
    990 	KASSERT(mutex_owned(&crypto_drv_mtx));
    991 
    992 	if (crypto_drivers == NULL)
    993 		return NULL;
    994 
    995 	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
    996 }
    997 
     998 /*
     999  * Look up an initialized driver entry.  Returns NULL if the driver table
    1000  * is not allocated, hid is out of range, or crypto_drivers[hid] has not
    1001  * been initialized; use crypto_checkdriver_uninit() for the cases where
    1002  * that can legitimately happen.
    1003  */
   1004 static struct cryptocap *
   1005 crypto_checkdriver(u_int32_t hid)
   1006 {
   1007 
   1008 	KASSERT(mutex_owned(&crypto_drv_mtx));
   1009 
   1010 	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
   1011 		return NULL;
   1012 
   1013 	struct cryptocap *cap = &crypto_drivers[hid];
   1014 	return crypto_checkdriver_initialized(cap) ? cap : NULL;
   1015 }
   1016 
   1017 static inline void
   1018 crypto_driver_lock(struct cryptocap *cap)
   1019 {
   1020 
   1021 	KASSERT(cap != NULL);
   1022 
   1023 	mutex_enter(&cap->cc_lock);
   1024 }
   1025 
   1026 static inline void
   1027 crypto_driver_unlock(struct cryptocap *cap)
   1028 {
   1029 
   1030 	KASSERT(cap != NULL);
   1031 
   1032 	mutex_exit(&cap->cc_lock);
   1033 }
   1034 
   1035 static void
   1036 crypto_driver_clear(struct cryptocap *cap)
   1037 {
   1038 
   1039 	if (cap == NULL)
   1040 		return;
   1041 
   1042 	KASSERT(mutex_owned(&cap->cc_lock));
   1043 
   1044 	cap->cc_sessions = 0;
   1045 	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
   1046 	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
   1047 	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
   1048 	cap->cc_flags = 0;
   1049 	cap->cc_qblocked = 0;
   1050 	cap->cc_kqblocked = 0;
   1051 
   1052 	cap->cc_arg = NULL;
   1053 	cap->cc_newsession = NULL;
   1054 	cap->cc_process = NULL;
   1055 	cap->cc_freesession = NULL;
   1056 	cap->cc_kprocess = NULL;
   1057 }
   1058 
   1059 /*
   1060  * Register support for a key-related algorithm.  This routine
    1061  * is called once for each algorithm supported by a driver.
   1062  */
   1063 int
   1064 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
   1065     int (*kprocess)(void *, struct cryptkop *, int),
   1066     void *karg)
   1067 {
   1068 	struct cryptocap *cap;
   1069 	int err;
   1070 
   1071 	mutex_enter(&crypto_drv_mtx);
   1072 
   1073 	cap = crypto_checkdriver_lock(driverid);
   1074 	if (cap != NULL &&
   1075 	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
   1076 		/*
   1077 		 * XXX Do some performance testing to determine placing.
   1078 		 * XXX We probably need an auxiliary data structure that
   1079 		 * XXX describes relative performances.
   1080 		 */
   1081 
   1082 		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
   1083 		if (bootverbose) {
    1084 			printf("crypto: driver %u registers key alg %u"
   1085 			       " flags %u\n",
   1086 				driverid,
   1087 				kalg,
   1088 				flags
   1089 			);
   1090 		}
   1091 
   1092 		if (cap->cc_kprocess == NULL) {
   1093 			cap->cc_karg = karg;
   1094 			cap->cc_kprocess = kprocess;
   1095 		}
   1096 		err = 0;
   1097 	} else
   1098 		err = EINVAL;
   1099 
   1100 	mutex_exit(&crypto_drv_mtx);
   1101 	return err;
   1102 }
   1103 
   1104 /*
   1105  * Register support for a non-key-related algorithm.  This routine
   1106  * is called once for each such algorithm supported by a driver.
   1107  */
   1108 int
   1109 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
   1110     u_int32_t flags,
   1111     int (*newses)(void *, u_int32_t*, struct cryptoini*),
   1112     int (*freeses)(void *, u_int64_t),
   1113     int (*process)(void *, struct cryptop *, int),
   1114     void *arg)
   1115 {
   1116 	struct cryptocap *cap;
   1117 	int err;
   1118 
   1119 	cap = crypto_checkdriver_lock(driverid);
   1120 	if (cap == NULL)
   1121 		return EINVAL;
   1122 
   1123 	/* NB: algorithms are in the range [1..max] */
   1124 	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
   1125 		/*
   1126 		 * XXX Do some performance testing to determine placing.
   1127 		 * XXX We probably need an auxiliary data structure that
   1128 		 * XXX describes relative performances.
   1129 		 */
   1130 
   1131 		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
   1132 		cap->cc_max_op_len[alg] = maxoplen;
   1133 		if (bootverbose) {
   1134 			printf("crypto: driver %u registers alg %u "
   1135 				"flags %u maxoplen %u\n",
   1136 				driverid,
   1137 				alg,
   1138 				flags,
   1139 				maxoplen
   1140 			);
   1141 		}
   1142 
   1143 		if (cap->cc_process == NULL) {
   1144 			cap->cc_arg = arg;
   1145 			cap->cc_newsession = newses;
   1146 			cap->cc_process = process;
   1147 			cap->cc_freesession = freeses;
   1148 			cap->cc_sessions = 0;		/* Unmark */
   1149 		}
   1150 		err = 0;
   1151 	} else
   1152 		err = EINVAL;
   1153 
   1154 	crypto_driver_unlock(cap);
   1155 
   1156 	return err;
   1157 }
   1158 
   1159 static int
   1160 crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
   1161 {
   1162 	int i;
   1163 	u_int32_t ses;
   1164 	bool lastalg = true;
   1165 
   1166 	KASSERT(cap != NULL);
   1167 	KASSERT(mutex_owned(&cap->cc_lock));
   1168 
   1169 	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
   1170 		return EINVAL;
   1171 
   1172 	if (!all && cap->cc_alg[alg] == 0)
   1173 		return EINVAL;
   1174 
   1175 	cap->cc_alg[alg] = 0;
   1176 	cap->cc_max_op_len[alg] = 0;
   1177 
   1178 	if (all) {
   1179 		if (alg != CRYPTO_ALGORITHM_MAX)
   1180 			lastalg = false;
   1181 	} else {
   1182 		/* Was this the last algorithm ? */
   1183 		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
   1184 			if (cap->cc_alg[i] != 0) {
   1185 				lastalg = false;
   1186 				break;
   1187 			}
   1188 	}
   1189 	if (lastalg) {
   1190 		ses = cap->cc_sessions;
   1191 		crypto_driver_clear(cap);
   1192 		if (ses != 0) {
   1193 			/*
   1194 			 * If there are pending sessions, just mark as invalid.
   1195 			 */
   1196 			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
   1197 			cap->cc_sessions = ses;
   1198 		}
   1199 	}
   1200 
   1201 	return 0;
   1202 }
   1203 
   1204 /*
   1205  * Unregister a crypto driver. If there are pending sessions using it,
   1206  * leave enough information around so that subsequent calls using those
   1207  * sessions will correctly detect the driver has been unregistered and
   1208  * reroute requests.
   1209  */
   1210 int
   1211 crypto_unregister(u_int32_t driverid, int alg)
   1212 {
   1213 	int err;
   1214 	struct cryptocap *cap;
   1215 
    1216 	cap = crypto_checkdriver_lock(driverid);
        	if (cap == NULL)
        		return EINVAL;
    1217 	err = crypto_unregister_locked(cap, alg, false);
    1218 	crypto_driver_unlock(cap);
   1219 
   1220 	return err;
   1221 }
   1222 
   1223 /*
   1224  * Unregister all algorithms associated with a crypto driver.
   1225  * If there are pending sessions using it, leave enough information
   1226  * around so that subsequent calls using those sessions will
   1227  * correctly detect the driver has been unregistered and reroute
   1228  * requests.
   1229  */
   1230 int
   1231 crypto_unregister_all(u_int32_t driverid)
   1232 {
   1233 	int err, i;
   1234 	struct cryptocap *cap;
   1235 
    1236 	cap = crypto_checkdriver_lock(driverid);
        	if (cap == NULL)
        		return EINVAL;
    1237 	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
   1238 		err = crypto_unregister_locked(cap, i, true);
   1239 		if (err)
   1240 			break;
   1241 	}
   1242 	crypto_driver_unlock(cap);
   1243 
   1244 	return err;
   1245 }
   1246 
   1247 /*
   1248  * Clear blockage on a driver.  The what parameter indicates whether
   1249  * the driver is now ready for cryptop's and/or cryptokop's.
   1250  */
   1251 int
   1252 crypto_unblock(u_int32_t driverid, int what)
   1253 {
   1254 	struct cryptocap *cap;
   1255 	int needwakeup = 0;
   1256 
   1257 	cap = crypto_checkdriver_lock(driverid);
   1258 	if (cap == NULL)
   1259 		return EINVAL;
   1260 
   1261 	if (what & CRYPTO_SYMQ) {
   1262 		needwakeup |= cap->cc_qblocked;
   1263 		cap->cc_qblocked = 0;
   1264 	}
   1265 	if (what & CRYPTO_ASYMQ) {
   1266 		needwakeup |= cap->cc_kqblocked;
   1267 		cap->cc_kqblocked = 0;
   1268 	}
   1269 	crypto_driver_unlock(cap);
   1270 	if (needwakeup) {
   1271 		kpreempt_disable();
   1272 		softint_schedule(crypto_q_si);
   1273 		kpreempt_enable();
   1274 	}
   1275 
   1276 	return 0;
   1277 }
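
/*
 * Driver-side sketch (hypothetical, not compiled): a driver whose
 * process() callback returned ERESTART has been marked blocked; once it
 * frees resources (e.g. in its interrupt handler) it re-enables
 * dispatch with crypto_unblock().  The softc and its fields are made up.
 */
#if 0
static void
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/* ... reclaim completed hardware descriptors ... */
	if (sc->sc_needs_unblock) {
		sc->sc_needs_unblock = false;
		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
	}
}
#endif
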
   1278 
   1279 /*
   1280  * Dispatch a crypto request to a driver or queue
   1281  * it, to be processed by the kernel thread.
   1282  */
   1283 int
   1284 crypto_dispatch(struct cryptop *crp)
   1285 {
   1286 	int result, s;
   1287 	struct cryptocap *cap;
   1288 	struct crypto_crp_qs *crp_qs;
   1289 	struct crypto_crp_q *crp_q;
   1290 
   1291 	KASSERT(crp != NULL);
   1292 
   1293 	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);
   1294 
   1295 	cryptostats.cs_ops++;
   1296 
   1297 #ifdef CRYPTO_TIMING
   1298 	if (crypto_timing)
   1299 		nanouptime(&crp->crp_tstamp);
   1300 #endif
   1301 
   1302 	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
   1303 		int wasempty;
   1304 		/*
   1305 		 * Caller marked the request as ``ok to delay'';
   1306 		 * queue it for the swi thread.  This is desirable
   1307 		 * when the operation is low priority and/or suitable
   1308 		 * for batching.
   1309 		 *
    1310 		 * List order does not matter for batched jobs.
   1311 		 */
   1312 		crp_qs = crypto_get_crp_qs(&s);
   1313 		crp_q = &crp_qs->crp_q;
   1314 		wasempty  = TAILQ_EMPTY(crp_q);
   1315 		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
   1316 		crypto_put_crp_qs(&s);
   1317 		crp_q = NULL;
   1318 		if (wasempty) {
   1319 			kpreempt_disable();
   1320 			softint_schedule(crypto_q_si);
   1321 			kpreempt_enable();
   1322 		}
   1323 
   1324 		return 0;
   1325 	}
   1326 
   1327 	crp_qs = crypto_get_crp_qs(&s);
   1328 	crp_q = &crp_qs->crp_q;
   1329 	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
   1330 	/*
   1331 	 * TODO:
    1332 	 * If we could ensure the driver remains valid until its
    1333 	 * crypto_unregister() has completed, this migration would not be required.
   1334 	 */
   1335 	if (cap == NULL) {
   1336 		/*
    1337 		 * The driver must have been detached, so this request will be
    1338 		 * migrated to another driver by cryptointr() later.
   1339 		 */
   1340 		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
   1341 		result = 0;
   1342 		goto out;
   1343 	}
   1344 
   1345 	if (cap->cc_qblocked != 0) {
   1346 		crypto_driver_unlock(cap);
   1347 		/*
   1348 		 * The driver is blocked, just queue the op until
   1349 		 * it unblocks and the swi thread gets kicked.
   1350 		 */
   1351 		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
   1352 		result = 0;
   1353 		goto out;
   1354 	}
   1355 
   1356 	/*
   1357 	 * Caller marked the request to be processed
   1358 	 * immediately; dispatch it directly to the
   1359 	 * driver unless the driver is currently blocked.
   1360 	 */
   1361 	crypto_driver_unlock(cap);
   1362 	result = crypto_invoke(crp, 0);
   1363 	if (result == ERESTART) {
   1364 		/*
   1365 		 * The driver ran out of resources, mark the
   1366 		 * driver ``blocked'' for cryptop's and put
   1367 		 * the op on the queue.
   1368 		 */
   1369 		crypto_driver_lock(cap);
   1370 		cap->cc_qblocked = 1;
   1371 		crypto_driver_unlock(cap);
   1372 		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
   1373 		cryptostats.cs_blocks++;
   1374 
   1375 		/*
    1376 		 * The crp has been enqueued to crp_q, i.e. no error
    1377 		 * has occurred, so this function must not return an
    1378 		 * error.
   1379 		 */
   1380 		result = 0;
   1381 	}
   1382 
   1383 out:
   1384 	crypto_put_crp_qs(&s);
   1385 	return result;
   1386 }
   1387 
   1388 /*
    1389  * Add an asymmetric crypto request to a queue,
   1390  * to be processed by the kernel thread.
   1391  */
   1392 int
   1393 crypto_kdispatch(struct cryptkop *krp)
   1394 {
   1395 	int result, s;
   1396 	struct cryptocap *cap;
   1397 	struct crypto_crp_qs *crp_qs;
   1398 	struct crypto_crp_kq *crp_kq;
   1399 
   1400 	KASSERT(krp != NULL);
   1401 
   1402 	cryptostats.cs_kops++;
   1403 
   1404 	crp_qs = crypto_get_crp_qs(&s);
   1405 	crp_kq = &crp_qs->crp_kq;
   1406 	cap = crypto_checkdriver_lock(krp->krp_hid);
   1407 	/*
   1408 	 * TODO:
    1409 	 * If we could ensure the driver remains valid until its
    1410 	 * crypto_unregister() has completed, this migration would not be required.
   1411 	 */
   1412 	if (cap == NULL) {
   1413 		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
   1414 		result = 0;
   1415 		goto out;
   1416 	}
   1417 
   1418 	if (cap->cc_kqblocked != 0) {
   1419 		crypto_driver_unlock(cap);
   1420 		/*
   1421 		 * The driver is blocked, just queue the op until
   1422 		 * it unblocks and the swi thread gets kicked.
   1423 		 */
   1424 		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
   1425 		result = 0;
   1426 		goto out;
   1427 	}
   1428 
   1429 	crypto_driver_unlock(cap);
   1430 	result = crypto_kinvoke(krp, 0);
   1431 	if (result == ERESTART) {
   1432 		/*
   1433 		 * The driver ran out of resources, mark the
    1434 		 * driver ``blocked'' for cryptkop's and put
   1435 		 * the op on the queue.
   1436 		 */
   1437 		crypto_driver_lock(cap);
   1438 		cap->cc_kqblocked = 1;
   1439 		crypto_driver_unlock(cap);
   1440 		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
   1441 		cryptostats.cs_kblocks++;
   1442 
   1443 		/*
    1444 		 * The krp has been enqueued to crp_kq, i.e. no error
    1445 		 * has occurred, so this function must not return an
    1446 		 * error.
   1447 		 */
   1448 		result = 0;
   1449 	}
   1450 
   1451 out:
   1452 	crypto_put_crp_qs(&s);
   1453 	return result;
   1454 }
   1455 
   1456 /*
    1457  * Dispatch an asymmetric crypto request to the appropriate crypto devices.
   1458  */
   1459 static int
   1460 crypto_kinvoke(struct cryptkop *krp, int hint)
   1461 {
   1462 	struct cryptocap *cap = NULL;
   1463 	u_int32_t hid;
   1464 	int error;
   1465 
   1466 	KASSERT(krp != NULL);
   1467 
   1468 	/* Sanity checks. */
   1469 	if (krp->krp_callback == NULL) {
   1470 		cv_destroy(&krp->krp_cv);
   1471 		crypto_kfreereq(krp);
   1472 		return EINVAL;
   1473 	}
   1474 
   1475 	mutex_enter(&crypto_drv_mtx);
   1476 	for (hid = 0; hid < crypto_drivers_num; hid++) {
   1477 		cap = crypto_checkdriver(hid);
   1478 		if (cap == NULL)
   1479 			continue;
   1480 		crypto_driver_lock(cap);
   1481 		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
   1482 		    crypto_devallowsoft == 0) {
   1483 			crypto_driver_unlock(cap);
   1484 			continue;
   1485 		}
   1486 		if (cap->cc_kprocess == NULL) {
   1487 			crypto_driver_unlock(cap);
   1488 			continue;
   1489 		}
   1490 		if ((cap->cc_kalg[krp->krp_op] &
   1491 			CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
   1492 			crypto_driver_unlock(cap);
   1493 			continue;
   1494 		}
   1495 		break;
   1496 	}
    1497 	mutex_exit(&crypto_drv_mtx);
        	/*
        	 * If the loop ended without a match, cap may still point at the
        	 * last examined (and already unlocked) entry, so check hid too.
        	 */
    1498 	if (hid < crypto_drivers_num && cap != NULL) {
   1499 		int (*process)(void *, struct cryptkop *, int);
   1500 		void *arg;
   1501 
   1502 		process = cap->cc_kprocess;
   1503 		arg = cap->cc_karg;
   1504 		krp->krp_hid = hid;
   1505 		krp->reqcpu = curcpu();
   1506 		crypto_driver_unlock(cap);
   1507 		error = (*process)(arg, krp, hint);
   1508 	} else {
   1509 		error = ENODEV;
   1510 	}
   1511 
   1512 	if (error) {
   1513 		krp->krp_status = error;
   1514 		crypto_kdone(krp);
   1515 	}
   1516 	return 0;
   1517 }
   1518 
   1519 #ifdef CRYPTO_TIMING
   1520 static void
   1521 crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
   1522 {
   1523 	struct timespec now, t;
   1524 
   1525 	nanouptime(&now);
   1526 	t.tv_sec = now.tv_sec - tv->tv_sec;
   1527 	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
   1528 	if (t.tv_nsec < 0) {
   1529 		t.tv_sec--;
   1530 		t.tv_nsec += 1000000000;
   1531 	}
   1532 	timespecadd(&ts->acc, &t, &t);
   1533 	if (timespeccmp(&t, &ts->min, <))
   1534 		ts->min = t;
   1535 	if (timespeccmp(&t, &ts->max, >))
   1536 		ts->max = t;
   1537 	ts->count++;
   1538 
   1539 	*tv = now;
   1540 }
   1541 #endif
   1542 
   1543 /*
   1544  * Dispatch a crypto request to the appropriate crypto devices.
   1545  */
   1546 static int
   1547 crypto_invoke(struct cryptop *crp, int hint)
   1548 {
   1549 	struct cryptocap *cap;
   1550 
   1551 	KASSERT(crp != NULL);
   1552 
   1553 #ifdef CRYPTO_TIMING
   1554 	if (crypto_timing)
   1555 		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
   1556 #endif
   1557 	/* Sanity checks. */
   1558 	if (crp->crp_callback == NULL) {
   1559 		return EINVAL;
   1560 	}
   1561 	if (crp->crp_desc == NULL) {
   1562 		crp->crp_etype = EINVAL;
   1563 		crypto_done(crp);
   1564 		return 0;
   1565 	}
   1566 
   1567 	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
   1568 	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
   1569 		int (*process)(void *, struct cryptop *, int);
   1570 		void *arg;
   1571 
   1572 		process = cap->cc_process;
   1573 		arg = cap->cc_arg;
   1574 		crp->reqcpu = curcpu();
   1575 
   1576 		/*
   1577 		 * Invoke the driver to process the request.
   1578 		 */
   1579 		DPRINTF("calling process for %p\n", crp);
   1580 		crypto_driver_unlock(cap);
   1581 		return (*process)(arg, crp, hint);
   1582 	} else {
   1583 		struct cryptodesc *crd;
   1584 		u_int64_t nid = 0;
   1585 
   1586 		if (cap != NULL)
   1587 			crypto_driver_unlock(cap);
   1588 
   1589 		/*
   1590 		 * Driver has unregistered; migrate the session and return
   1591 		 * an error to the caller so they'll resubmit the op.
   1592 		 */
   1593 		crypto_freesession(crp->crp_sid);
   1594 
   1595 		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
   1596 			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
   1597 
   1598 		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
   1599 			crp->crp_sid = nid;
   1600 
   1601 		crp->crp_etype = EAGAIN;
   1602 
   1603 		crypto_done(crp);
   1604 		return 0;
   1605 	}
   1606 }
   1607 
   1608 /*
   1609  * Release a set of crypto descriptors.
   1610  */
   1611 void
   1612 crypto_freereq(struct cryptop *crp)
   1613 {
   1614 	struct cryptodesc *crd;
   1615 
   1616 	if (crp == NULL)
   1617 		return;
   1618 	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);
   1619 
   1620 	/* sanity check */
   1621 	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
   1622 		panic("crypto_freereq() freeing crp on RETQ\n");
   1623 	}
   1624 
   1625 	while ((crd = crp->crp_desc) != NULL) {
   1626 		crp->crp_desc = crd->crd_next;
   1627 		pool_put(&cryptodesc_pool, crd);
   1628 	}
   1629 	pool_put(&cryptop_pool, crp);
   1630 }
   1631 
   1632 /*
   1633  * Acquire a set of crypto descriptors.
   1634  */
   1635 struct cryptop *
   1636 crypto_getreq(int num)
   1637 {
   1638 	struct cryptodesc *crd;
   1639 	struct cryptop *crp;
   1640 	struct crypto_crp_ret_qs *qs;
   1641 
   1642 	/*
    1643 	 * When crp_ret_q is full, fail the allocation here so that the
    1644 	 * error callback path cannot overflow crp_ret_q.
   1645 	 */
   1646 	qs = crypto_get_crp_ret_qs(curcpu());
   1647 	if (qs->crp_ret_q_maxlen > 0
   1648 	    && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
   1649 		qs->crp_ret_q_drops++;
   1650 		crypto_put_crp_ret_qs(curcpu());
   1651 		return NULL;
   1652 	}
   1653 	crypto_put_crp_ret_qs(curcpu());
   1654 
   1655 	crp = pool_get(&cryptop_pool, 0);
   1656 	if (crp == NULL) {
   1657 		return NULL;
   1658 	}
   1659 	memset(crp, 0, sizeof(struct cryptop));
   1660 
   1661 	while (num--) {
   1662 		crd = pool_get(&cryptodesc_pool, 0);
   1663 		if (crd == NULL) {
   1664 			crypto_freereq(crp);
   1665 			return NULL;
   1666 		}
   1667 
   1668 		memset(crd, 0, sizeof(struct cryptodesc));
   1669 		crd->crd_next = crp->crp_desc;
   1670 		crp->crp_desc = crd;
   1671 	}
   1672 
   1673 	return crp;
   1674 }
   1675 
   1676 /*
   1677  * Release a set of asymmetric crypto descriptors.
   1678  * Currently, support one descriptor only.
    1679  * Currently only one descriptor is supported.
   1680 void
   1681 crypto_kfreereq(struct cryptkop *krp)
   1682 {
   1683 
   1684 	if (krp == NULL)
   1685 		return;
   1686 
   1687 	DPRINTF("krp %p\n", krp);
   1688 
   1689 	/* sanity check */
   1690 	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
   1691 		panic("crypto_kfreereq() freeing krp on RETQ\n");
   1692 	}
   1693 
   1694 	pool_put(&cryptkop_pool, krp);
   1695 }
   1696 
   1697 /*
   1698  * Acquire a set of asymmetric crypto descriptors.
   1699  * Currently, support one descriptor only.
    1700  * Currently only one descriptor is supported.
   1701 struct cryptkop *
   1702 crypto_kgetreq(int num __unused, int prflags)
   1703 {
   1704 	struct cryptkop *krp;
   1705 	struct crypto_crp_ret_qs *qs;
   1706 
   1707 	/*
    1708 	 * When crp_ret_kq is full, fail the allocation here so that the
    1709 	 * error callback path cannot overflow crp_ret_kq.
   1710 	 */
   1711 	qs = crypto_get_crp_ret_qs(curcpu());
   1712 	if (qs->crp_ret_kq_maxlen > 0
   1713 	    && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
   1714 		qs->crp_ret_kq_drops++;
   1715 		crypto_put_crp_ret_qs(curcpu());
   1716 		return NULL;
   1717 	}
   1718 	crypto_put_crp_ret_qs(curcpu());
   1719 
   1720 	krp = pool_get(&cryptkop_pool, prflags);
   1721 	if (krp == NULL) {
   1722 		return NULL;
   1723 	}
   1724 	memset(krp, 0, sizeof(struct cryptkop));
   1725 
   1726 	return krp;
   1727 }
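
/*
 * Sketch (hypothetical, not compiled): allocate and dispatch an
 * asymmetric request with the helpers above.  example_kcallback() is a
 * placeholder whose signature must match krp_callback; CRK_MOD_EXP
 * takes three input parameters (base, exponent, modulus) and one output.
 */
#if 0
static int
example_ksubmit(void)
{
	struct cryptkop *krp;

	krp = crypto_kgetreq(1, PR_NOWAIT);
	if (krp == NULL)
		return ENOMEM;
	krp->krp_op = CRK_MOD_EXP;
	krp->krp_iparams = 3;
	krp->krp_oparams = 1;
	/* ... fill krp->krp_param[] with crparam buffers ... */
	krp->krp_callback = example_kcallback;	/* invoked by crypto_kdone() */
	return crypto_kdispatch(krp);
}
#endif
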
   1728 
   1729 /*
   1730  * Invoke the callback on behalf of the driver.
   1731  */
   1732 void
   1733 crypto_done(struct cryptop *crp)
   1734 {
   1735 
   1736 	KASSERT(crp != NULL);
   1737 
   1738 	if (crp->crp_etype != 0)
   1739 		cryptostats.cs_errs++;
   1740 #ifdef CRYPTO_TIMING
   1741 	if (crypto_timing)
   1742 		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
   1743 #endif
   1744 	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);
   1745 
   1746 	/*
   1747 	 * Normal case; queue the callback for the softint.
   1748 	 *
   1749 	 * The return queue is manipulated by the return softint
   1750 	 * and, potentially, by crypto device drivers calling
   1751 	 * back to mark operations completed.  Thus we need
   1752 	 * to hold the per-CPU return queue lock while manipulating it.
   1753 	 */
   1754 	if (crp->crp_flags & CRYPTO_F_CBIMM) {
   1755 		/*
   1756 		 * Do the callback directly.  This is ok when the
   1757 		 * callback routine does very little (e.g. the
   1758 		 * /dev/crypto callback method just does a wakeup).
   1759 		 */
   1760 		crp->crp_flags |= CRYPTO_F_DONE;
   1761 
   1762 #ifdef CRYPTO_TIMING
   1763 		if (crypto_timing) {
   1764 			/*
   1765 			 * NB: We must copy the timestamp before
   1766 			 * doing the callback as the cryptop is
   1767 			 * likely to be reclaimed.
   1768 			 */
   1769 			struct timespec t = crp->crp_tstamp;
   1770 			crypto_tstat(&cryptostats.cs_cb, &t);
   1771 			crp->crp_callback(crp);
   1772 			crypto_tstat(&cryptostats.cs_finis, &t);
   1773 		} else
   1774 #endif
   1775 		crp->crp_callback(crp);
   1776 	} else {
   1777 		crp->crp_flags |= CRYPTO_F_DONE;
   1778 #if 0
   1779 		if (crp->crp_flags & CRYPTO_F_USER) {
   1780 			/*
   1781 			 * TODO:
   1782 			 * If crp->crp_flags & CRYPTO_F_USER and the used
   1783 			 * encryption driver does all the processing in
   1784 			 * the same context, we can skip enqueueing crp_ret_q
   1785 			 * and softint_schedule(crypto_ret_si).
   1786 			 */
   1787 			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
   1788 				CRYPTO_SESID2LID(crp->crp_sid), crp);
   1789 		} else
   1790 #endif
   1791 		{
   1792 			int wasempty;
   1793 			struct crypto_crp_ret_qs *qs;
   1794 			struct crypto_crp_ret_q *crp_ret_q;
   1795 
   1796 			qs = crypto_get_crp_ret_qs(crp->reqcpu);
   1797 			crp_ret_q = &qs->crp_ret_q;
   1798 			wasempty = TAILQ_EMPTY(crp_ret_q);
   1799 			DPRINTF("lid[%u]: queueing %p\n",
   1800 				CRYPTO_SESID2LID(crp->crp_sid), crp);
   1801 			crp->crp_flags |= CRYPTO_F_ONRETQ;
   1802 			TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
   1803 			qs->crp_ret_q_len++;
   1804 			if (wasempty && !qs->crp_ret_q_exit_flag) {
   1805 				DPRINTF("lid[%u]: waking cryptoret,"
   1806 					"crp %p hit empty queue\n.",
   1807 					CRYPTO_SESID2LID(crp->crp_sid), crp);
   1808 				softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
   1809 			}
   1810 			crypto_put_crp_ret_qs(crp->reqcpu);
   1811 		}
   1812 	}
   1813 }
   1814 
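        /*
         * Example (illustrative sketch, compiled out): the driver side of
         * the contract above.  A driver's registered process routine
         * completes the request and must call crypto_done() exactly once,
         * with any error recorded in crp_etype first.  example_transform
         * is an assumed helper.
         */
        #if 0
        static int
        example_process(void *arg, struct cryptop *crp, int hint)
        {
        	int error;

        	error = example_transform(arg, crp);
        	if (error == ERESTART)
        		return ERESTART;	/* out of resources; dispatcher requeues */

        	crp->crp_etype = error;		/* 0 on success */
        	crypto_done(crp);
        	return 0;
        }
        #endif
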
   1815 /*
   1816  * Invoke the callback on behalf of the driver (asymmetric operations).
   1817  */
   1818 void
   1819 crypto_kdone(struct cryptkop *krp)
   1820 {
   1821 
   1822 	KASSERT(krp != NULL);
   1823 
   1824 	if (krp->krp_status != 0)
   1825 		cryptostats.cs_kerrs++;
   1826 
   1827 	krp->krp_flags |= CRYPTO_F_DONE;
   1828 
   1829 	/*
   1830 	 * The return queue is manipulated by the return softint
   1831 	 * and, potentially, by crypto device drivers calling
   1832 	 * back to mark operations completed.  Thus we need
   1833 	 * to hold the per-CPU return queue lock while manipulating it.
   1834 	 */
   1835 	if (krp->krp_flags & CRYPTO_F_CBIMM) {
   1836 		krp->krp_callback(krp);
   1837 	} else {
   1838 		int wasempty;
   1839 		struct crypto_crp_ret_qs *qs;
   1840 		struct crypto_crp_ret_kq *crp_ret_kq;
   1841 
   1842 		qs = crypto_get_crp_ret_qs(krp->reqcpu);
   1843 		crp_ret_kq = &qs->crp_ret_kq;
   1844 
   1845 		wasempty = TAILQ_EMPTY(crp_ret_kq);
   1846 		krp->krp_flags |= CRYPTO_F_ONRETQ;
   1847 		TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
   1848 		qs->crp_ret_kq_len++;
   1849 		if (wasempty && !qs->crp_ret_q_exit_flag)
   1850 			softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
   1851 		crypto_put_crp_ret_qs(krp->reqcpu);
   1852 	}
   1853 }
   1854 
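        /*
         * Example (illustrative sketch, compiled out): the asymmetric
         * counterpart.  A driver's kprocess routine stores the result in
         * krp_status and hands the request back via crypto_kdone().
         * example_modexp is an assumed helper.
         */
        #if 0
        static int
        example_kprocess(void *arg, struct cryptkop *krp, int hint)
        {

        	krp->krp_status = example_modexp(arg, krp);
        	crypto_kdone(krp);
        	return 0;
        }
        #endif
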
   1855 int
   1856 crypto_getfeat(int *featp)
   1857 {
   1858 
   1859 	if (crypto_userasymcrypto == 0) {
   1860 		*featp = 0;
   1861 		return 0;
   1862 	}
   1863 
   1864 	mutex_enter(&crypto_drv_mtx);
   1865 
   1866 	int feat = 0;
   1867 	for (int hid = 0; hid < crypto_drivers_num; hid++) {
   1868 		struct cryptocap *cap;
   1869 		cap = crypto_checkdriver(hid);
   1870 		if (cap == NULL)
   1871 			continue;
   1872 
   1873 		crypto_driver_lock(cap);
   1874 
   1875 		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
   1876 		    crypto_devallowsoft == 0)
   1877 			goto unlock;
   1878 
   1879 		if (cap->cc_kprocess == NULL)
   1880 			goto unlock;
   1881 
   1882 		for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
   1883 			if ((cap->cc_kalg[kalg] &
   1884 			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
   1885 				feat |= 1 << kalg;
   1886 
   1887 unlock:		crypto_driver_unlock(cap);
   1888 	}
   1889 
   1890 	mutex_exit(&crypto_drv_mtx);
   1891 	*featp = feat;
   1892 	return 0;
   1893 }
   1894 
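        /*
         * Example (illustrative sketch, compiled out): since the feature
         * word sets bit kalg for each supported CRK_ algorithm (see the
         * loop above), a caller tests support with a shift.
         */
        #if 0
        	int feat;

        	if (crypto_getfeat(&feat) == 0 &&
        	    (feat & (1 << CRK_MOD_EXP)) != 0) {
        		/* modular exponentiation is available */
        	}
        #endif
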
   1895 /*
   1896  * Software interrupt handler to dispatch crypto requests.
   1897  */
   1898 static void
   1899 cryptointr(void *arg __unused)
   1900 {
   1901 	struct cryptop *crp, *submit, *cnext;
   1902 	struct cryptkop *krp, *knext;
   1903 	struct cryptocap *cap;
   1904 	struct crypto_crp_qs *crp_qs;
   1905 	struct crypto_crp_q *crp_q;
   1906 	struct crypto_crp_kq *crp_kq;
   1907 	int result, hint, s;
   1908 
   1909 	cryptostats.cs_intrs++;
   1910 	crp_qs = crypto_get_crp_qs(&s);
   1911 	crp_q = &crp_qs->crp_q;
   1912 	crp_kq = &crp_qs->crp_kq;
   1913 	do {
   1914 		/*
   1915 		 * Find the first element in the queue that can be
   1916 		 * processed and look-ahead to see if multiple ops
   1917 		 * are ready for the same driver.
   1918 		 */
   1919 		submit = NULL;
   1920 		hint = 0;
   1921 		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
   1922 			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
   1923 			cap = crypto_checkdriver_lock(hid);
   1924 			if (cap == NULL || cap->cc_process == NULL) {
   1925 				if (cap != NULL)
   1926 					crypto_driver_unlock(cap);
   1927 				/* Op needs to be migrated, process it. */
   1928 				submit = crp;
   1929 				break;
   1930 			}
   1931 
   1932 			/*
   1933 			 * skip blocked crp regardless of CRYPTO_F_BATCH
   1934 			 */
   1935 			if (cap->cc_qblocked != 0) {
   1936 				crypto_driver_unlock(cap);
   1937 				continue;
   1938 			}
   1939 			crypto_driver_unlock(cap);
   1940 
   1941 			/*
   1942 			 * skip batch crp until the end of crp_q
   1943 			 */
   1944 			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
   1945 				if (submit == NULL) {
   1946 					submit = crp;
   1947 				} else {
   1948 					if (CRYPTO_SESID2HID(submit->crp_sid)
   1949 					    == hid)
   1950 						hint = CRYPTO_HINT_MORE;
   1951 				}
   1952 
   1953 				continue;
   1954 			}
   1955 
   1956 			/*
   1957 			 * found the first crp which is neither blocked nor batched.
   1958 			 */
   1959 			submit = crp;
   1960 			/*
   1961 			 * batched crps can be processed much later, so clear the hint.
   1962 			 */
   1963 			hint = 0;
   1964 			break;
   1965 		}
   1966 		if (submit != NULL) {
   1967 			TAILQ_REMOVE(crp_q, submit, crp_next);
   1968 			result = crypto_invoke(submit, hint);
   1969 			/* the percpu queue is still held here; the
   1970 			   TAILQ reinsertion below relies on that. */
   1971 			if (result == ERESTART) {
   1972 				/*
   1973 				 * The driver ran out of resources, mark the
   1974 				 * driver ``blocked'' for cryptops and put
   1975 				 * the request back in the queue.  It would
   1976 				 * be best to put the request back where we
   1977 				 * got it, but that's hard, so for now we put it
   1978 				 * at the front.  This should be ok; putting
   1979 				 * it at the end does not work.
   1980 				 */
   1981 				/* validate sid again */
   1982 				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
   1983 				if (cap == NULL) {
   1984 					/* migrate again, sigh... */
   1985 					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
   1986 				} else {
   1987 					cap->cc_qblocked = 1;
   1988 					crypto_driver_unlock(cap);
   1989 					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
   1990 					cryptostats.cs_blocks++;
   1991 				}
   1992 			}
   1993 		}
   1994 
   1995 		/* As above, but for key ops */
   1996 		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
   1997 			cap = crypto_checkdriver_lock(krp->krp_hid);
   1998 			if (cap == NULL || cap->cc_kprocess == NULL) {
   1999 				if (cap != NULL)
   2000 					crypto_driver_unlock(cap);
   2001 				/* Op needs to be migrated, process it. */
   2002 				break;
   2003 			}
   2004 			if (!cap->cc_kqblocked) {
   2005 				crypto_driver_unlock(cap);
   2006 				break;
   2007 			}
   2008 			crypto_driver_unlock(cap);
   2009 		}
   2010 		if (krp != NULL) {
   2011 			TAILQ_REMOVE(crp_kq, krp, krp_next);
   2012 			result = crypto_kinvoke(krp, 0);
   2013 			/* the percpu queue remains held for the reinsertion below. */
   2014 			if (result == ERESTART) {
   2015 				/*
   2016 				 * The driver ran out of resources, mark the
   2017 				 * driver ``blocked'' for cryptkops and put
   2018 				 * the request back in the queue.  It would
   2019 				 * be best to put the request back where we
   2020 				 * got it, but that's hard, so for now we put it
   2021 				 * at the front.  This should be ok; putting
   2022 				 * it at the end does not work.
   2023 				 */
   2024 				/* validate sid again */
   2025 				cap = crypto_checkdriver_lock(krp->krp_hid);
   2026 				if (cap == NULL) {
   2027 					/* migrate again, sigh... */
   2028 					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
   2029 				} else {
   2030 					cap->cc_kqblocked = 1;
   2031 					crypto_driver_unlock(cap);
   2032 					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
   2033 					cryptostats.cs_kblocks++;
   2034 				}
   2035 			}
   2036 		}
   2037 	} while (submit != NULL || krp != NULL);
   2038 	crypto_put_crp_qs(&s);
   2039 }
   2040 
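        /*
         * Example (illustrative sketch, compiled out): submitters opt into
         * the look-ahead above by setting CRYPTO_F_BATCH; such requests may
         * be deferred to the end of the scan, and a driver sees
         * CRYPTO_HINT_MORE when more of its work is already queued.
         */
        #if 0
        	crp->crp_flags |= CRYPTO_F_BATCH;	/* may be coalesced */
        	(void)crypto_dispatch(crp);
        #endif
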
   2041 /*
   2042  * Softint handler to run request callbacks.
   2043  */
   2044 static void
   2045 cryptoret_softint(void *arg __unused)
   2046 {
   2047 	struct crypto_crp_ret_qs *qs;
   2048 	struct crypto_crp_ret_q *crp_ret_q;
   2049 	struct crypto_crp_ret_kq *crp_ret_kq;
   2050 
   2051 	qs = crypto_get_crp_ret_qs(curcpu());
   2052 	crp_ret_q = &qs->crp_ret_q;
   2053 	crp_ret_kq = &qs->crp_ret_kq;
   2054 	for (;;) {
   2055 		struct cryptop *crp;
   2056 		struct cryptkop *krp;
   2057 
   2058 		crp = TAILQ_FIRST(crp_ret_q);
   2059 		if (crp != NULL) {
   2060 			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
   2061 			qs->crp_ret_q_len--;
   2062 			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
   2063 		}
   2064 		krp = TAILQ_FIRST(crp_ret_kq);
   2065 		if (krp != NULL) {
   2066 			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
   2067 			qs->crp_ret_kq_len--;	/* kq counter, not the crp one */
   2068 			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
   2069 		}
   2070 
   2071 		/* drop the queue lock before running any callbacks. */
   2072 		if (crp == NULL && krp == NULL)
   2073 			break;
   2074 
   2075 		mutex_spin_exit(&qs->crp_ret_q_mtx);
   2076 		if (crp != NULL) {
   2077 #ifdef CRYPTO_TIMING
   2078 			if (crypto_timing) {
   2079 				/*
   2080 				 * NB: We must copy the timestamp before
   2081 				 * doing the callback as the cryptop is
   2082 				 * likely to be reclaimed.
   2083 				 */
   2084 				struct timespec t = crp->crp_tstamp;
   2085 				crypto_tstat(&cryptostats.cs_cb, &t);
   2086 				crp->crp_callback(crp);
   2087 				crypto_tstat(&cryptostats.cs_finis, &t);
   2088 			} else
   2089 #endif
   2090 			{
   2091 				crp->crp_callback(crp);
   2092 			}
   2093 		}
   2094 		if (krp != NULL)
   2095 			krp->krp_callback(krp);
   2096 
   2097 		mutex_spin_enter(&qs->crp_ret_q_mtx);
   2098 	}
   2099 	crypto_put_crp_ret_qs(curcpu());
   2100 }
   2101 
   2102 /* NetBSD module interface */
   2103 
   2104 MODULE(MODULE_CLASS_MISC, opencrypto, NULL);
   2105 
   2106 static int
   2107 opencrypto_modcmd(modcmd_t cmd, void *opaque)
   2108 {
   2109 	int error = 0;
   2110 
   2111 	switch (cmd) {
   2112 	case MODULE_CMD_INIT:
   2113 #ifdef _MODULE
   2114 		error = crypto_init();
   2115 #endif
   2116 		break;
   2117 	case MODULE_CMD_FINI:
   2118 #ifdef _MODULE
   2119 		error = crypto_destroy(true);
   2120 #endif
   2121 		break;
   2122 	default:
   2123 		error = ENOTTY;
   2124 	}
   2125 	return error;
   2126 }
   2127
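        /*
         * Usage note: when built as a module, "modload opencrypto" /
         * "modunload opencrypto" drive the MODULE_CMD_INIT / MODULE_CMD_FINI
         * cases above; the #ifdef _MODULE guards mean a built-in kernel
         * initializes through another path.
         */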