Home | History | Annotate | Line # | Download | only in opencrypto
crypto.c revision 1.21
      1 /*	$NetBSD: crypto.c,v 1.21 2007/10/08 16:18:05 ad Exp $ */
      2 /*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
      3 /*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/
      4 
      5 /*
      6  * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
      7  *
      8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
      9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
     10  * supported the development of this code.
     11  *
     12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
     13  *
     14  * Permission to use, copy, and modify this software with or without fee
     15  * is hereby granted, provided that this entire notice is included in
     16  * all source code copies of any software which is or includes a copy or
     17  * modification of this software.
     18  *
     19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
     20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
     21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
     22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
     23  * PURPOSE.
     24  */
     25 
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.21 2007/10/08 16:18:05 ad Exp $");
     28 
     29 /* XXX FIXME: should be defopt'ed */
     30 #define CRYPTO_TIMING			/* enable cryptop timing stuff */
     31 
     32 #include <sys/param.h>
     33 #include <sys/reboot.h>
     34 #include <sys/systm.h>
     35 #include <sys/malloc.h>
     36 #include <sys/proc.h>
     37 #include <sys/pool.h>
     38 #include <sys/kthread.h>
     39 #include <sys/once.h>
     40 #include <sys/sysctl.h>
     41 #include <sys/intr.h>
     42 
     43 #include <opencrypto/cryptodev.h>
     44 #include <opencrypto/xform.h>			/* XXX for M_XDATA */
     45 
     46 #ifdef __NetBSD__
     47   #define splcrypto splnet
     48   /* below is kludges to check whats still missing */
     49   #define SWI_CRYPTO 17
     50   #define register_swi(lvl, fn)  \
     51   softint_establish(SOFTINT_NET, (void (*)(void*))fn, NULL)
     52   #define unregister_swi(lvl, fn)  softint_disestablish(softintr_cookie)
     53   #define setsoftcrypto(x) softint_schedule(x)
     54 #endif
     55 
     56 #define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
     57 
     58 /*
     59  * Crypto drivers register themselves by allocating a slot in the
     60  * crypto_drivers table with crypto_get_driverid() and then registering
     61  * each algorithm they support with crypto_register() and crypto_kregister().
     62  */
     63 static	struct cryptocap *crypto_drivers;
     64 static	int crypto_drivers_num;
     65 static	void* softintr_cookie;
     66 
     67 /*
     68  * There are two queues for crypto requests; one for symmetric (e.g.
     69  * cipher) operations and one for asymmetric (e.g. MOD) operations.
     70  * See below for how synchronization is handled.
     71  */
     72 static	TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
     73 		TAILQ_HEAD_INITIALIZER(crp_q);
     74 static	TAILQ_HEAD(,cryptkop) crp_kq =
     75 		TAILQ_HEAD_INITIALIZER(crp_kq);
     76 
     77 /*
     78  * There are two queues for processing completed crypto requests; one
     79  * for the symmetric and one for the asymmetric ops.  We only need one
     80  * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
     81  * for how synchronization is handled.
     82  */
     83 static	TAILQ_HEAD(,cryptop) crp_ret_q =	/* callback queues */
     84 		TAILQ_HEAD_INITIALIZER(crp_ret_q);
     85 static	TAILQ_HEAD(,cryptkop) crp_ret_kq =
     86 		TAILQ_HEAD_INITIALIZER(crp_ret_kq);
     87 
     88 /*
     89  * Crypto op and desciptor data structures are allocated
     90  * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) .
     91  */
     92 struct pool cryptop_pool;
     93 struct pool cryptodesc_pool;
     94 int crypto_pool_initialized = 0;
     95 
     96 int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
     97 int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
     98 /*
     99  * cryptodevallowsoft is (intended to be) sysctl'able, controlling
    100  * access to hardware versus software transforms as below:
    101  *
    102  * crypto_devallowsoft < 0:  Force userlevel requests to use software
    103  *                              transforms, always
    104  * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
    105  *                              requests for non-accelerated transforms
    106  *                              (handling the latter in software)
    107  * crypto_devallowsoft > 0:  Allow user requests only for transforms which
    108  *                               are hardware-accelerated.
    109  */
    110 int	crypto_devallowsoft = 1;	/* only use hardware crypto */
    111 
    112 #ifdef __FreeBSD__
    113 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
    114 	   &crypto_usercrypto, 0,
    115 	   "Enable/disable user-mode access to crypto support");
    116 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
    117 	   &crypto_userasymcrypto, 0,
    118 	   "Enable/disable user-mode access to asymmetric crypto support");
    119 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
    120 	   &crypto_devallowsoft, 0,
    121 	   "Enable/disable use of software asym crypto support");
    122 #endif
    123 #ifdef __NetBSD__
/*
 * Attach the opencrypto policy knobs under CTL_KERN:
 *   kern.usercrypto         - allow userland to open /dev/crypto
 *   kern.userasymcrypto     - allow userland asymmetric-crypto requests
 *   kern.cryptodevallowsoft - software vs. hardware transform policy
 * Each node is read/write and maps directly onto the corresponding
 * global policy variable defined above.
 */
SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
	/* Ensure the "kern" root node exists before hanging leaves off it. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}
    153 #endif
    154 
    155 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
    156 
    157 /*
    158  * Synchronization: read carefully, this is non-trivial.
    159  *
    160  * Crypto requests are submitted via crypto_dispatch.  Typically
    161  * these come in from network protocols at spl0 (output path) or
    162  * spl[,soft]net (input path).
    163  *
    164  * Requests are typically passed on the driver directly, but they
    165  * may also be queued for processing by a software interrupt thread,
    166  * cryptointr, that runs at splsoftcrypto.  This thread dispatches
    167  * the requests to crypto drivers (h/w or s/w) who call crypto_done
    168  * when a request is complete.  Hardware crypto drivers are assumed
    169  * to register their IRQ's as network devices so their interrupt handlers
    170  * and subsequent "done callbacks" happen at spl[imp,net].
    171  *
    172  * Completed crypto ops are queued for a separate kernel thread that
    173  * handles the callbacks at spl0.  This decoupling insures the crypto
    174  * driver interrupt service routine is not delayed while the callback
    175  * takes place and that callbacks are delivered after a context switch
    176  * (as opposed to a software interrupt that clients must block).
    177  *
    178  * This scheme is not intended for SMP machines.
    179  */
    180 static	void cryptointr(void);		/* swi thread to dispatch ops */
    181 static	void cryptoret(void);		/* kernel thread for callbacks*/
    182 static	struct lwp *cryptothread;
    183 static	void crypto_destroy(void);
    184 static	int crypto_invoke(struct cryptop *crp, int hint);
    185 static	int crypto_kinvoke(struct cryptkop *krp, int hint);
    186 
    187 static struct cryptostats cryptostats;
    188 static	int crypto_timing = 0;
    189 
#ifdef __FreeBSD__
/*
 * Export the statistics structure and the timing toggle on FreeBSD.
 * NB: the original code declared the crypto_stats SYSCTL_STRUCT twice,
 * which is a duplicate definition; keep a single instance.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
    199 
    200 static int
    201 crypto_init0(void)
    202 {
    203 	int error;
    204 
    205 #ifdef __FreeBSD__
    206 	cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
    207 	cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
    208 				0, 0, 1);
    209 	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
    210 		printf("crypto_init: cannot setup crypto zones\n");
    211 		return;
    212 	}
    213 #endif
    214 
    215 	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
    216 	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
    217 	if (crypto_drivers == NULL) {
    218 		printf("crypto_init: cannot malloc driver table\n");
    219 		return 0;
    220 	}
    221 	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
    222 
    223 	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
    224 #ifdef __FreeBSD__
    225 	error = kthread_create((void (*)(void *)) cryptoret, NULL,
    226 		    &cryptothread, "cryptoret");
    227 #else
    228 	error = kthread_create(PRI_NONE, 0, NULL, (void (*)(void*))cryptoret,
    229 	    NULL, &cryptothread, "cryptoret");
    230 #endif
    231 	if (error) {
    232 		printf("crypto_init: cannot start cryptoret thread; error %d",
    233 			error);
    234 		crypto_destroy();
    235 	}
    236 
    237 	return 0;
    238 }
    239 
/*
 * Public initialization entry point; safe to call more than once.
 * The real work in crypto_init0() runs exactly once via RUN_ONCE.
 * NB: any error returned by crypto_init0() is discarded here.
 */
void
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}
    247 
    248 static void
    249 crypto_destroy(void)
    250 {
    251 	/* XXX no wait to reclaim zones */
    252 	if (crypto_drivers != NULL)
    253 		free(crypto_drivers, M_CRYPTO_DATA);
    254 	unregister_swi(SWI_CRYPTO, cryptointr);
    255 }
    256 
    257 /*
    258  * Create a new session.
    259  */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;	/* default: no capable driver found */
	int s;

	/* Driver table and per-driver session counts are spl-protected. */
	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	/* hard > 0: hardware only; hard < 0: software only; 0: either. */
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
					crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/*
				 * Encode the session id: driver index in
				 * the high 32 bits (see SESID2HID), the
				 * driver-local id in the low 32 bits.
				 */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
    332 
    333 /*
    334  * Delete an existing session (or a reserved session on an unregistered
    335  * driver).
    336  */
    337 int
    338 crypto_freesession(u_int64_t sid)
    339 {
    340 	u_int32_t hid;
    341 	int err = 0;
    342 	int s;
    343 
    344 	s = splcrypto();
    345 
    346 	if (crypto_drivers == NULL) {
    347 		err = EINVAL;
    348 		goto done;
    349 	}
    350 
    351 	/* Determine two IDs. */
    352 	hid = SESID2HID(sid);
    353 
    354 	if (hid >= crypto_drivers_num) {
    355 		err = ENOENT;
    356 		goto done;
    357 	}
    358 
    359 	if (crypto_drivers[hid].cc_sessions)
    360 		crypto_drivers[hid].cc_sessions--;
    361 
    362 	/* Call the driver cleanup routine, if available. */
    363 	if (crypto_drivers[hid].cc_freesession)
    364 		err = crypto_drivers[hid].cc_freesession(
    365 				crypto_drivers[hid].cc_arg, sid);
    366 	else
    367 		err = 0;
    368 
    369 	/*
    370 	 * If this was the last session of a driver marked as invalid,
    371 	 * make the entry available for reuse.
    372 	 */
    373 	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
    374 	    crypto_drivers[hid].cc_sessions == 0)
    375 		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
    376 
    377 done:
    378 	splx(s);
    379 	return err;
    380 }
    381 
    382 /*
    383  * Return an unused driver id.  Used by drivers prior to registering
    384  * support for the algorithms they handle.
    385  */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	/* Make sure the framework (and the driver table) exists. */
	crypto_init();

	s = splcrypto();
	/* A free slot has no process hook, is not mid-cleanup and holds
	 * no sessions (cc_sessions doubles as a "reserved" mark below). */
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		/* Copy old entries into the new table before freeing it;
		 * all of this happens under splcrypto so no one can see
		 * the table mid-swap. */
		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
    438 
    439 static struct cryptocap *
    440 crypto_checkdriver(u_int32_t hid)
    441 {
    442 	if (crypto_drivers == NULL)
    443 		return NULL;
    444 	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
    445 }
    446 
    447 /*
    448  * Register support for a key-related algorithm.  This routine
    449  * is called once for each algorithm supported a driver.
    450  */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: CRK_ALGORITM_MIN is the historical (misspelled) constant
	 * name from cryptodev.h; do not "fix" it here. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* First key-alg registration wires up the process hook;
		 * later registrations only add algorithm capabilities. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
    490 
    491 /*
    492  * Register support for a non-key-related algorithm.  This routine
    493  * is called once for each such algorithm supported by a driver.
    494  */
    495 int
    496 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    497     u_int32_t flags,
    498     int (*newses)(void*, u_int32_t*, struct cryptoini*),
    499     int (*freeses)(void*, u_int64_t),
    500     int (*process)(void*, struct cryptop *, int),
    501     void *arg)
    502 {
    503 	struct cryptocap *cap;
    504 	int s, err;
    505 
    506 	s = splcrypto();
    507 
    508 	cap = crypto_checkdriver(driverid);
    509 	/* NB: algorithms are in the range [1..max] */
    510 	if (cap != NULL &&
    511 	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
    512 		/*
    513 		 * XXX Do some performance testing to determine placing.
    514 		 * XXX We probably need an auxiliary data structure that
    515 		 * XXX describes relative performances.
    516 		 */
    517 
    518 		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
    519 		cap->cc_max_op_len[alg] = maxoplen;
    520 		if (bootverbose)
    521 			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
    522 				, driverid
    523 				, alg
    524 				, flags
    525 				, maxoplen
    526 			);
    527 
    528 		if (cap->cc_process == NULL) {
    529 			cap->cc_arg = arg;
    530 			cap->cc_newsession = newses;
    531 			cap->cc_process = process;
    532 			cap->cc_freesession = freeses;
    533 			cap->cc_sessions = 0;		/* Unmark */
    534 		}
    535 		err = 0;
    536 	} else
    537 		err = EINVAL;
    538 
    539 	splx(s);
    540 	return err;
    541 }
    542 
    543 /*
    544  * Unregister a crypto driver. If there are pending sessions using it,
    545  * leave enough information around so that subsequent calls using those
    546  * sessions will correctly detect the driver has been unregistered and
    547  * reroute requests.
    548  */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		/* Withdraw support for this single algorithm. */
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* Save the live-session count across the wipe so it
			 * can be restored below. */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
    588 
    589 /*
    590  * Unregister all algorithms associated with a crypto driver.
    591  * If there are pending sessions using it, leave enough information
    592  * around so that subsequent calls using those sessions will
    593  * correctly detect the driver has been unregistered and reroute
    594  * requests.
    595  */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		/* Withdraw every algorithm this driver advertised. */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		/* Save the live-session count across the wipe so it can
		 * be restored below. */
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
    625 
    626 /*
    627  * Clear blockage on a driver.  The what parameter indicates whether
    628  * the driver is now ready for cryptop's and/or cryptokop's.
    629  */
    630 int
    631 crypto_unblock(u_int32_t driverid, int what)
    632 {
    633 	struct cryptocap *cap;
    634 	int needwakeup, err, s;
    635 
    636 	s = splcrypto();
    637 	cap = crypto_checkdriver(driverid);
    638 	if (cap != NULL) {
    639 		needwakeup = 0;
    640 		if (what & CRYPTO_SYMQ) {
    641 			needwakeup |= cap->cc_qblocked;
    642 			cap->cc_qblocked = 0;
    643 		}
    644 		if (what & CRYPTO_ASYMQ) {
    645 			needwakeup |= cap->cc_kqblocked;
    646 			cap->cc_kqblocked = 0;
    647 		}
    648 		if (needwakeup) {
    649 			setsoftcrypto(softintr_cookie);
    650 		}
    651 		err = 0;
    652 	} else
    653 		err = EINVAL;
    654 	splx(s);
    655 
    656 	return err;
    657 }
    658 
    659 /*
    660  * Dispatch a crypto request to a driver or queue
    661  * it, to be processed by the kernel thread.
    662  */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 *
				 * NB: ERESTART is still returned to the
				 * caller even though the op was queued.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked (or the id maps to no
			 * driver), just queue the op until it unblocks
			 * and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.  Only kick the swi when the queue was
		 * empty; otherwise a kick is already pending.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
    724 
    725 /*
    726  * Add an asymetric crypto request to a queue,
    727  * to be processed by the kernel thread.
    728  */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptop's and put
			 * the op on the queue.
			 *
			 * NOTE(review): crypto_kinvoke() below always
			 * returns 0, so this branch appears unreachable
			 * as written -- confirm before relying on it.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked (or krp_hid maps to no driver);
		 * just queue the op until it unblocks and the swi
		 * thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
    763 
    764 /*
    765  * Dispatch an assymetric crypto request to the appropriate crypto devices.
    766  */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		/* No way to deliver a result; drop the request. */
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	/*
	 * Pick the first driver that supports krp_op, honoring the
	 * crypto_devallowsoft policy for software implementations.
	 */
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	/*
	 * Failures (including ENODEV) are reported through the callback
	 * via krp_status/crypto_kdone; this function always returns 0.
	 */
	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
    806 
    807 #ifdef CRYPTO_TIMING
    808 static void
    809 crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
    810 {
    811 	struct timespec now, t;
    812 
    813 	nanouptime(&now);
    814 	t.tv_sec = now.tv_sec - tv->tv_sec;
    815 	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
    816 	if (t.tv_nsec < 0) {
    817 		t.tv_sec--;
    818 		t.tv_nsec += 1000000000;
    819 	}
    820 	timespecadd(&ts->acc, &t, &t);
    821 	if (timespeccmp(&t, &ts->min, <))
    822 		ts->min = t;
    823 	if (timespeccmp(&t, &ts->max, >))
    824 		ts->max = t;
    825 	ts->count++;
    826 
    827 	*tv = now;
    828 }
    829 #endif
    830 
    831 /*
    832  * Dispatch a crypto request to the appropriate crypto devices.
    833  */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		/* No way to report completion; release the request. */
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		/* Nothing to do; deliver EINVAL through the callback. */
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver is being torn down; drop our session on it. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * Link the per-descriptor cryptoini records into one list
		 * so crypto_newsession() sees all required algorithms.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		/* EAGAIN tells the caller to resubmit with the new sid. */
		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
    890 
    891 /*
    892  * Release a set of crypto descriptors.
    893  */
    894 void
    895 crypto_freereq(struct cryptop *crp)
    896 {
    897 	struct cryptodesc *crd;
    898 	int s;
    899 
    900 	if (crp == NULL)
    901 		return;
    902 
    903 	s = splcrypto();
    904 
    905 	while ((crd = crp->crp_desc) != NULL) {
    906 		crp->crp_desc = crd->crd_next;
    907 		pool_put(&cryptodesc_pool, crd);
    908 	}
    909 
    910 	pool_put(&cryptop_pool, crp);
    911 	splx(s);
    912 }
    913 
    914 /*
    915  * Acquire a set of crypto descriptors.
    916  */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	/* Lazily create the backing pools on first use (spl-protected). */
	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL, IPL_NET);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL, IPL_NET);
		crypto_pool_initialized = 1;
	}

	/* Non-sleeping allocation; may fail and return NULL. */
	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	/* Prepend 'num' zeroed descriptors onto crp->crp_desc. */
	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			/* Partial failure: release everything built so far. */
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
    957 
    958 /*
    959  * Invoke the callback on behalf of the driver.
    960  */
    961 void
    962 crypto_done(struct cryptop *crp)
    963 {
    964 	if (crp->crp_etype != 0)
    965 		cryptostats.cs_errs++;
    966 #ifdef CRYPTO_TIMING
    967 	if (crypto_timing)
    968 		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
    969 #endif
    970 	/*
    971 	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
    972 	 * has done its tsleep().
    973 	 */
    974 #ifndef __NetBSD__
    975 	if (crp->crp_flags & CRYPTO_F_CBIMM) {
    976 		/*
    977 		 * Do the callback directly.  This is ok when the
    978 		 * callback routine does very little (e.g. the
    979 		 * /dev/crypto callback method just does a wakeup).
    980 		 */
    981 #ifdef CRYPTO_TIMING
    982 		if (crypto_timing) {
    983 			/*
    984 			 * NB: We must copy the timestamp before
    985 			 * doing the callback as the cryptop is
    986 			 * likely to be reclaimed.
    987 			 */
    988 			struct timespec t = crp->crp_tstamp;
    989 			crypto_tstat(&cryptostats.cs_cb, &t);
    990 			crp->crp_callback(crp);
    991 			crypto_tstat(&cryptostats.cs_finis, &t);
    992 		} else
    993 #endif
    994 			crp->crp_callback(crp);
    995 	} else
    996 #endif /* __NetBSD__ */
    997 	{
    998 		int s, wasempty;
    999 		/*
   1000 		 * Normal case; queue the callback for the thread.
   1001 		 *
   1002 		 * The return queue is manipulated by the swi thread
   1003 		 * and, potentially, by crypto device drivers calling
   1004 		 * back to mark operations completed.  Thus we need
   1005 		 * to mask both while manipulating the return queue.
   1006 		 */
   1007 		s = splcrypto();
   1008 		wasempty = TAILQ_EMPTY(&crp_ret_q);
   1009 		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
   1010 		if (wasempty)
   1011 			wakeup_one(&crp_ret_q);
   1012 		splx(s);
   1013 	}
   1014 }
   1015 
   1016 /*
   1017  * Invoke the callback on behalf of the driver.
   1018  */
   1019 void
   1020 crypto_kdone(struct cryptkop *krp)
   1021 {
   1022 	int s, wasempty;
   1023 
   1024 	if (krp->krp_status != 0)
   1025 		cryptostats.cs_kerrs++;
   1026 	/*
   1027 	 * The return queue is manipulated by the swi thread
   1028 	 * and, potentially, by crypto device drivers calling
   1029 	 * back to mark operations completed.  Thus we need
   1030 	 * to mask both while manipulating the return queue.
   1031 	 */
   1032 	s = splcrypto();
   1033 	wasempty = TAILQ_EMPTY(&crp_ret_kq);
   1034 	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
   1035 	if (wasempty)
   1036 		wakeup_one(&crp_ret_q);
   1037 	splx(s);
   1038 }
   1039 
   1040 int
   1041 crypto_getfeat(int *featp)
   1042 {
   1043 	int hid, kalg, feat = 0;
   1044 	int s;
   1045 
   1046 	s = splcrypto();
   1047 
   1048 	if (crypto_userasymcrypto == 0)
   1049 		goto out;
   1050 
   1051 	for (hid = 0; hid < crypto_drivers_num; hid++) {
   1052 		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
   1053 		    crypto_devallowsoft == 0) {
   1054 			continue;
   1055 		}
   1056 		if (crypto_drivers[hid].cc_kprocess == NULL)
   1057 			continue;
   1058 		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
   1059 			if ((crypto_drivers[hid].cc_kalg[kalg] &
   1060 			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
   1061 				feat |=  1 << kalg;
   1062 	}
   1063 out:
   1064 	splx(s);
   1065 	*featp = feat;
   1066 	return (0);
   1067 }
   1068 
   1069 /*
   1070  * Software interrupt thread to dispatch crypto requests.
   1071  */
   1072 static void
   1073 cryptointr(void)
   1074 {
   1075 	struct cryptop *crp, *submit;
   1076 	struct cryptkop *krp;
   1077 	struct cryptocap *cap;
   1078 	int result, hint, s;
   1079 
   1080 	printf("crypto softint\n");
   1081 	cryptostats.cs_intrs++;
   1082 	s = splcrypto();
   1083 	do {
   1084 		/*
   1085 		 * Find the first element in the queue that can be
   1086 		 * processed and look-ahead to see if multiple ops
   1087 		 * are ready for the same driver.
   1088 		 */
   1089 		submit = NULL;
   1090 		hint = 0;
   1091 		TAILQ_FOREACH(crp, &crp_q, crp_next) {
   1092 			u_int32_t hid = SESID2HID(crp->crp_sid);
   1093 			cap = crypto_checkdriver(hid);
   1094 			if (cap == NULL || cap->cc_process == NULL) {
   1095 				/* Op needs to be migrated, process it. */
   1096 				if (submit == NULL)
   1097 					submit = crp;
   1098 				break;
   1099 			}
   1100 			if (!cap->cc_qblocked) {
   1101 				if (submit != NULL) {
   1102 					/*
   1103 					 * We stop on finding another op,
   1104 					 * regardless whether its for the same
   1105 					 * driver or not.  We could keep
   1106 					 * searching the queue but it might be
   1107 					 * better to just use a per-driver
   1108 					 * queue instead.
   1109 					 */
   1110 					if (SESID2HID(submit->crp_sid) == hid)
   1111 						hint = CRYPTO_HINT_MORE;
   1112 					break;
   1113 				} else {
   1114 					submit = crp;
   1115 					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
   1116 						break;
   1117 					/* keep scanning for more are q'd */
   1118 				}
   1119 			}
   1120 		}
   1121 		if (submit != NULL) {
   1122 			TAILQ_REMOVE(&crp_q, submit, crp_next);
   1123 			result = crypto_invoke(submit, hint);
   1124 			if (result == ERESTART) {
   1125 				/*
   1126 				 * The driver ran out of resources, mark the
   1127 				 * driver ``blocked'' for cryptop's and put
   1128 				 * the request back in the queue.  It would
   1129 				 * best to put the request back where we got
   1130 				 * it but that's hard so for now we put it
   1131 				 * at the front.  This should be ok; putting
   1132 				 * it at the end does not work.
   1133 				 */
   1134 				/* XXX validate sid again? */
   1135 				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
   1136 				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
   1137 				cryptostats.cs_blocks++;
   1138 			}
   1139 		}
   1140 
   1141 		/* As above, but for key ops */
   1142 		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
   1143 			cap = crypto_checkdriver(krp->krp_hid);
   1144 			if (cap == NULL || cap->cc_kprocess == NULL) {
   1145 				/* Op needs to be migrated, process it. */
   1146 				break;
   1147 			}
   1148 			if (!cap->cc_kqblocked)
   1149 				break;
   1150 		}
   1151 		if (krp != NULL) {
   1152 			TAILQ_REMOVE(&crp_kq, krp, krp_next);
   1153 			result = crypto_kinvoke(krp, 0);
   1154 			if (result == ERESTART) {
   1155 				/*
   1156 				 * The driver ran out of resources, mark the
   1157 				 * driver ``blocked'' for cryptkop's and put
   1158 				 * the request back in the queue.  It would
   1159 				 * best to put the request back where we got
   1160 				 * it but that's hard so for now we put it
   1161 				 * at the front.  This should be ok; putting
   1162 				 * it at the end does not work.
   1163 				 */
   1164 				/* XXX validate sid again? */
   1165 				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
   1166 				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
   1167 				cryptostats.cs_kblocks++;
   1168 			}
   1169 		}
   1170 	} while (submit != NULL || krp != NULL);
   1171 	splx(s);
   1172 }
   1173 
   1174 /*
   1175  * Kernel thread to do callbacks.
   1176  */
   1177 static void
   1178 cryptoret(void)
   1179 {
   1180 	struct cryptop *crp;
   1181 	struct cryptkop *krp;
   1182 	int s;
   1183 
   1184 	s = splcrypto();
   1185 	for (;;) {
   1186 		crp = TAILQ_FIRST(&crp_ret_q);
   1187 		if (crp != NULL)
   1188 			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
   1189 		krp = TAILQ_FIRST(&crp_ret_kq);
   1190 		if (krp != NULL)
   1191 			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
   1192 
   1193 		if (crp != NULL || krp != NULL) {
   1194 			splx(s);		/* lower ipl for callbacks */
   1195 			if (crp != NULL) {
   1196 #ifdef CRYPTO_TIMING
   1197 				if (crypto_timing) {
   1198 					/*
   1199 					 * NB: We must copy the timestamp before
   1200 					 * doing the callback as the cryptop is
   1201 					 * likely to be reclaimed.
   1202 					 */
   1203 					struct timespec t = crp->crp_tstamp;
   1204 					crypto_tstat(&cryptostats.cs_cb, &t);
   1205 					crp->crp_callback(crp);
   1206 					crypto_tstat(&cryptostats.cs_finis, &t);
   1207 				} else
   1208 #endif
   1209 					crp->crp_callback(crp);
   1210 			}
   1211 			if (krp != NULL)
   1212 				krp->krp_callback(krp);
   1213 			s  = splcrypto();
   1214 		} else {
   1215 			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
   1216 			cryptostats.cs_rets++;
   1217 		}
   1218 	}
   1219 }
   1220 
   1221 
#ifdef __FreeBSD__
/*
 * Initialization code, both for static and dynamic loading.
 *
 * FreeBSD-only module glue: dead code in a NetBSD build, retained
 * for merge compatibility with the upstream FreeBSD source.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	/* Any other event type: EINVAL (initial value of error). */
	return error;
}
static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};

MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
#endif /* __FreeBSD__ */
   1255 
   1256 
   1257