/*	$NetBSD: crypto.c,v 1.92.2.2 2017/07/18 06:01:37 knakahara Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.92.2.2 2017/07/18 06:01:37 knakahara Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/xcall.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

static kmutex_t crypto_q_mtx;
static kmutex_t crypto_ret_q_mtx;

/* below are kludges for residual code written to FreeBSD interfaces */
#define SWI_CRYPTO 17
#define register_swi(lvl, fn)  \
        softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, (void (*)(void *))fn, NULL)
#define unregister_swi(lvl, fn)  softint_disestablish(softintr_cookie)
#define setsoftcrypto(x)			\
        do {					\
                kpreempt_disable();		\
                softint_schedule(x);		\
                kpreempt_enable();		\
        } while (0)

int crypto_ret_q_check(struct cryptop *);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static kmutex_t crypto_drv_mtx;
/* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
static struct cryptocap *crypto_drivers;
static int crypto_drivers_num;
static void *softintr_cookie;
static int crypto_exit_flag;

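/* softint handle used to run completion callbacks; see cryptoret_softint() */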
static void *crypto_ret_si;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
        TAILQ_HEAD_INITIALIZER(crp_q);
static TAILQ_HEAD(,cryptkop) crp_kq =
        TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static TAILQ_HEAD(crprethead, cryptop) crp_ret_q =	/* callback queues */
        TAILQ_HEAD_INITIALIZER(crp_ret_q);
static TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq =
        TAILQ_HEAD_INITIALIZER(crp_ret_kq);

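/*
 * Helper macros to define and maintain the return queues' length and
 * drop counters, with an optional length limit (<= 0 means unbounded).
 */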
#define DEFINIT_CRYPTO_Q_LEN(name)		\
        static int crypto_##name##_len = 0

#define DEFINIT_CRYPTO_Q_DROPS(name)		\
        static int crypto_##name##_drops = 0

#define DEFINIT_CRYPTO_Q_MAXLEN(name, defval)	\
        static int crypto_##name##_maxlen = defval

#define CRYPTO_Q_INC(name)			\
do {						\
        crypto_##name##_len++;			\
} while(0);

#define CRYPTO_Q_DEC(name)			\
do {						\
        crypto_##name##_len--;			\
} while(0);

#define CRYPTO_Q_INC_DROPS(name)		\
do {						\
        crypto_##name##_drops++;		\
} while(0);

#define CRYPTO_Q_IS_FULL(name)					\
        (crypto_##name##_maxlen > 0				\
            && (crypto_##name##_len > crypto_##name##_maxlen))

/*
 * current queue length.
 */
DEFINIT_CRYPTO_Q_LEN(crp_ret_q);
DEFINIT_CRYPTO_Q_LEN(crp_ret_kq);

/*
 * queue dropped count.
 */
DEFINIT_CRYPTO_Q_DROPS(crp_ret_q);
DEFINIT_CRYPTO_Q_DROPS(crp_ret_kq);

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif
/*
 * queue length limit.
 * default value is 0. <=0 means unlimited.
 */
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_q, CRYPTO_RET_Q_MAXLEN);
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_kq, CRYPTO_RET_KQ_MAXLEN);

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
        int error;

        error = sysctl_lookup(SYSCTLFN_CALL(rnode));
        if (error || newp == NULL)
                return error;

        return 0;
}

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
        int error;

        error = sysctl_lookup(SYSCTLFN_CALL(rnode));
        if (error || newp == NULL)
                return error;

        return 0;
}

/*
 * need to make percpu?
 */
static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
        int error;

        error = sysctl_lookup(SYSCTLFN_CALL(rnode));
        if (error || newp == NULL)
                return error;

        return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD)/pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int crypto_usercrypto = 1;	/* userland may open /dev/crypto */
int crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int crypto_devallowsoft = 1;	/* only use hardware crypto */

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
        const struct sysctlnode *ocnode;
        const struct sysctlnode *retqnode, *retkqnode;

        sysctl_createv(clog, 0, NULL, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                CTLTYPE_INT, "usercrypto",
                SYSCTL_DESCR("Enable/disable user-mode access to "
                    "crypto support"),
                NULL, 0, &crypto_usercrypto, 0,
                CTL_KERN, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                CTLTYPE_INT, "userasymcrypto",
                SYSCTL_DESCR("Enable/disable user-mode access to "
                    "asymmetric crypto support"),
                NULL, 0, &crypto_userasymcrypto, 0,
                CTL_KERN, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                CTLTYPE_INT, "cryptodevallowsoft",
                SYSCTL_DESCR("Enable/disable use of software "
                    "asymmetric crypto support"),
                NULL, 0, &crypto_devallowsoft, 0,
                CTL_KERN, CTL_CREATE, CTL_EOL);

        sysctl_createv(clog, 0, NULL, &ocnode,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "opencrypto",
                SYSCTL_DESCR("opencrypto related entries"),
                NULL, 0, NULL, 0,
                CTL_CREATE, CTL_EOL);

        sysctl_createv(clog, 0, &ocnode, &retqnode,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "crypto_ret_q",
                SYSCTL_DESCR("crypto_ret_q related entries"),
                NULL, 0, NULL, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READONLY,
                CTLTYPE_INT, "len",
                SYSCTL_DESCR("Current queue length"),
                sysctl_opencrypto_q_len, 0,
                (void *)&crypto_crp_ret_q_len, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READONLY,
                CTLTYPE_INT, "drops",
                SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
                sysctl_opencrypto_q_drops, 0,
                (void *)&crypto_crp_ret_q_drops, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                CTLTYPE_INT, "maxlen",
                SYSCTL_DESCR("Maximum allowed queue length"),
                sysctl_opencrypto_q_maxlen, 0,
                (void *)&crypto_crp_ret_q_maxlen, 0,
                CTL_CREATE, CTL_EOL);

        sysctl_createv(clog, 0, &ocnode, &retkqnode,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "crypto_ret_kq",
                SYSCTL_DESCR("crypto_ret_kq related entries"),
                NULL, 0, NULL, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retkqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READONLY,
                CTLTYPE_INT, "len",
                SYSCTL_DESCR("Current queue length"),
                sysctl_opencrypto_q_len, 0,
                (void *)&crypto_crp_ret_kq_len, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retkqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READONLY,
                CTLTYPE_INT, "drops",
                SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
                sysctl_opencrypto_q_drops, 0,
                (void *)&crypto_crp_ret_kq_drops, 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &retkqnode, NULL,
                CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                CTLTYPE_INT, "maxlen",
                SYSCTL_DESCR("Maximum allowed queue length"),
                sysctl_opencrypto_q_maxlen, 0,
                (void *)&crypto_crp_ret_kq_maxlen, 0,
                CTL_CREATE, CTL_EOL);
}

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) which call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static void cryptointr(void);		/* swi thread to dispatch ops */
static void cryptoret_softint(void *);	/* softint to do callbacks */
static int crypto_destroy(bool);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

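/*
 * One-time initialization: locks, request pools, the driver table, and
 * the two software interrupts (request dispatch and return-queue
 * callbacks).  Called via crypto_init().
 */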
static int
crypto_init0(void)
{

        mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&crypto_q_mtx, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&crypto_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
        pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
            0, "cryptop", NULL, IPL_NET);
        pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
            0, "cryptodesc", NULL, IPL_NET);
        pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
            0, "cryptkop", NULL, IPL_NET);

        crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
            sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
        if (crypto_drivers == NULL) {
                printf("crypto_init: cannot malloc driver table\n");
                return ENOMEM;
        }
        crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

        softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
        if (softintr_cookie == NULL) {
                printf("crypto_init: cannot establish request queue handler\n");
                return crypto_destroy(false);
        }

        crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
            &cryptoret_softint, NULL);
        if (crypto_ret_si == NULL) {
                printf("crypto_init: cannot establish ret queue handler\n");
                return crypto_destroy(false);
        }

        sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

        return 0;
}

int
crypto_init(void)
{
        static ONCE_DECL(crypto_init_once);

        return RUN_ONCE(&crypto_init_once, crypto_init0);
}

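/*
 * Tear the framework down.  When exit_kthread is true (module unload),
 * refuse with EBUSY while requests or sessions are outstanding, and make
 * sure cryptoret_softint() can no longer run before freeing anything.
 */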
static int
crypto_destroy(bool exit_kthread)
{
        int i;

        if (exit_kthread) {
                struct cryptocap *cap = NULL;
                uint64_t where;

                /* if we have any in-progress requests, don't unload */
                mutex_enter(&crypto_q_mtx);
                if (!TAILQ_EMPTY(&crp_q) || !TAILQ_EMPTY(&crp_kq)) {
                        mutex_exit(&crypto_q_mtx);
                        return EBUSY;
                }
                mutex_exit(&crypto_q_mtx);
                /* FIXME:
                 * prohibit enqueueing to crp_q and crp_kq after this point.
                 */

                mutex_enter(&crypto_drv_mtx);
                for (i = 0; i < crypto_drivers_num; i++) {
                        cap = crypto_checkdriver(i);
                        if (cap == NULL)
                                continue;
                        if (cap->cc_sessions != 0) {
                                mutex_exit(&crypto_drv_mtx);
                                return EBUSY;
                        }
                }
                mutex_exit(&crypto_drv_mtx);
                /* FIXME:
                 * prohibit touching crypto_drivers[] and its elements after
                 * this point.
                 */

                /*
                 * Ensure cryptoret_softint() is never scheduled and then wait
                 * for the last softint_execute().
                 */
                mutex_spin_enter(&crypto_ret_q_mtx);
                crypto_exit_flag = 1;
                mutex_spin_exit(&crypto_ret_q_mtx);
                where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
                xc_wait(where);
        }

        if (sysctl_opencrypto_clog != NULL)
                sysctl_teardown(&sysctl_opencrypto_clog);

        if (crypto_ret_si != NULL)
                softint_disestablish(crypto_ret_si);

        if (softintr_cookie != NULL)
                unregister_swi(SWI_CRYPTO, cryptointr);

        mutex_enter(&crypto_drv_mtx);
        if (crypto_drivers != NULL)
                free(crypto_drivers, M_CRYPTO_DATA);
        mutex_exit(&crypto_drv_mtx);

        pool_destroy(&cryptop_pool);
        pool_destroy(&cryptodesc_pool);
        pool_destroy(&cryptkop_pool);

        mutex_destroy(&crypto_ret_q_mtx);
        mutex_destroy(&crypto_q_mtx);
        mutex_destroy(&crypto_drv_mtx);

        return 0;
}

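/*
 * Check whether a driver supports every algorithm in a cryptoini chain.
 */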
static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
        struct cryptoini *cr;

        for (cr = cri; cr; cr = cr->cri_next)
                if (cap->cc_alg[cr->cri_alg] == 0) {
                        DPRINTF("alg %d not supported\n", cr->cri_alg);
                        return false;
                }

        return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
        u_int32_t hid;
        int accept;
        struct cryptocap *cap, *best;

        best = NULL;
        /*
         * hard == 0 can use both hardware and software drivers.
         * We prefer hardware drivers to software drivers, so search
         * the hardware drivers first.
         */
        if (hard >= 0)
                accept = CRYPTO_ACCEPT_HARDWARE;
        else
                accept = CRYPTO_ACCEPT_SOFTWARE;
again:
        for (hid = 0; hid < crypto_drivers_num; hid++) {
                cap = crypto_checkdriver(hid);
                if (cap == NULL)
                        continue;

                crypto_driver_lock(cap);

                /*
                 * If it's not initialized or has remaining sessions
                 * referencing it, skip.
                 */
                if (cap->cc_newsession == NULL ||
                    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
                        crypto_driver_unlock(cap);
                        continue;
                }

                /* Hardware required -- ignore software drivers. */
                if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
                    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
                        crypto_driver_unlock(cap);
                        continue;
                }
                /* Software required -- ignore hardware drivers. */
                if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
                    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
                        crypto_driver_unlock(cap);
                        continue;
                }

                /* See if all the algorithms are supported. */
                if (crypto_driver_suitable(cap, cri)) {
                        if (best == NULL) {
                                /* keep holding crypto_driver_lock(cap) */
                                best = cap;
                                continue;
                        } else if (cap->cc_sessions < best->cc_sessions) {
                                crypto_driver_unlock(best);
                                /* keep holding crypto_driver_lock(cap) */
                                best = cap;
                                continue;
                        }
                }

                crypto_driver_unlock(cap);
        }
        if (best == NULL && hard == 0
            && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
                accept = CRYPTO_ACCEPT_SOFTWARE;
                goto again;
        }

        return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
        struct cryptocap *cap;
        int err = EINVAL;

        mutex_enter(&crypto_drv_mtx);

        cap = crypto_select_driver_lock(cri, hard);
        if (cap != NULL) {
                u_int32_t hid, lid;

                hid = cap - crypto_drivers;
                /*
                 * Can't do everything in one session.
                 *
                 * XXX Fix this. We need to inject a "virtual" session layer right
                 * XXX about here.
                 */

                /* Call the driver initialization routine. */
                lid = hid;	/* Pass the driver ID. */
                crypto_driver_unlock(cap);
                err = cap->cc_newsession(cap->cc_arg, &lid, cri);
                crypto_driver_lock(cap);
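                /*
                 * Compose the 64-bit session ID: driver ID (hid) in the
                 * upper 32 bits, driver-local session ID (lid) in the
                 * lower 32 bits.
                 */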
                if (err == 0) {
                        (*sid) = hid;
                        (*sid) <<= 32;
                        (*sid) |= (lid & 0xffffffff);
                        (cap->cc_sessions)++;
                } else {
                        DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
                            hid, err);
                }
                crypto_driver_unlock(cap);
        }

        mutex_exit(&crypto_drv_mtx);

        return err;
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
        struct cryptocap *cap;
        int err = 0;

        /* Determine two IDs. */
        cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
        if (cap == NULL)
                return ENOENT;

        if (cap->cc_sessions)
                (cap->cc_sessions)--;

        /* Call the driver cleanup routine, if available. */
        if (cap->cc_freesession)
                err = cap->cc_freesession(cap->cc_arg, sid);
        else
                err = 0;

        /*
         * If this was the last session of a driver marked as invalid,
         * make the entry available for reuse.
         */
        if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
                crypto_driver_clear(cap);

        crypto_driver_unlock(cap);
        return err;
}

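/*
 * A driver slot counts as in use once it has a process callback, is
 * pending cleanup, or has sessions (cc_sessions doubles as the
 * "reserved" mark set by crypto_get_driverid()).
 */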
static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

        return cap->cc_process != NULL ||
            (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
            cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
        struct cryptocap *newdrv;
        struct cryptocap *cap = NULL;
        int i;

        (void)crypto_init();		/* XXX oh, this is foul! */

        mutex_enter(&crypto_drv_mtx);
        for (i = 0; i < crypto_drivers_num; i++) {
                cap = crypto_checkdriver_uninit(i);
                if (cap == NULL || crypto_checkdriver_initialized(cap)) {
                        /* slot unusable; don't let cap point at it */
                        cap = NULL;
                        continue;
                }
                break;
        }

        /* Out of entries, allocate some more. */
        if (cap == NULL) {
                /* Be careful about wrap-around. */
                if (2 * crypto_drivers_num <= crypto_drivers_num) {
                        mutex_exit(&crypto_drv_mtx);
                        printf("crypto: driver count wraparound!\n");
                        return -1;
                }

                newdrv = malloc(2 * crypto_drivers_num *
                    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
                if (newdrv == NULL) {
                        mutex_exit(&crypto_drv_mtx);
                        printf("crypto: no space to expand driver table!\n");
                        return -1;
                }

                memcpy(newdrv, crypto_drivers,
                    crypto_drivers_num * sizeof(struct cryptocap));

                crypto_drivers_num *= 2;

                free(crypto_drivers, M_CRYPTO_DATA);
                crypto_drivers = newdrv;

                cap = crypto_checkdriver_uninit(i);
                KASSERT(cap != NULL);
        }

        /* NB: state is zero'd on free */
        cap->cc_sessions = 1;	/* Mark */
        cap->cc_flags = flags;
        mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

        if (bootverbose)
                printf("crypto: assign driver %u, flags %u\n", i, flags);

        mutex_exit(&crypto_drv_mtx);

        return i;
}

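/*
 * Look up a driver by ID and return it with its per-driver lock held,
 * or NULL if the ID is out of range.
 */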
static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
        struct cryptocap *cap;

        KASSERT(crypto_drivers != NULL);

        if (hid >= crypto_drivers_num)
                return NULL;

        cap = &crypto_drivers[hid];
        mutex_enter(&cap->cc_lock);
        return cap;
}

/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated
 * - crypto_drivers[hid] may not be initialized
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

        KASSERT(mutex_owned(&crypto_drv_mtx));

        if (crypto_drivers == NULL)
                return NULL;

        return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Return the driver for hid, or NULL if it is out of range or its slot
 * has not been initialized yet.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

        KASSERT(mutex_owned(&crypto_drv_mtx));

        if (crypto_drivers == NULL || hid >= crypto_drivers_num)
                return NULL;

        struct cryptocap *cap = &crypto_drivers[hid];
        return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

        KASSERT(cap != NULL);

        mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

        KASSERT(cap != NULL);

        mutex_exit(&cap->cc_lock);
}

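/* Reset a driver slot to its unregistered state.  Caller holds cc_lock. */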
static void
crypto_driver_clear(struct cryptocap *cap)
{

        if (cap == NULL)
                return;

        KASSERT(mutex_owned(&cap->cc_lock));

        cap->cc_sessions = 0;
        memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
        memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
        memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
        cap->cc_flags = 0;
        cap->cc_qblocked = 0;
        cap->cc_kqblocked = 0;

        cap->cc_arg = NULL;
        cap->cc_newsession = NULL;
        cap->cc_process = NULL;
        cap->cc_freesession = NULL;
        cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
        struct cryptocap *cap;
        int err;

        mutex_enter(&crypto_drv_mtx);

        cap = crypto_checkdriver_lock(driverid);
        if (cap != NULL &&
            (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                if (bootverbose) {
                        printf("crypto: driver %u registers key alg %u "
                            "flags %u\n",
                            driverid,
                            kalg,
                            flags
                        );
                }

                if (cap->cc_kprocess == NULL) {
                        cap->cc_karg = karg;
                        cap->cc_kprocess = kprocess;
                }
                err = 0;
        } else
                err = EINVAL;

        /* drop the per-driver lock taken by crypto_checkdriver_lock() */
        if (cap != NULL)
                crypto_driver_unlock(cap);
        mutex_exit(&crypto_drv_mtx);
        return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
        struct cryptocap *cap;
        int err;

        cap = crypto_checkdriver_lock(driverid);
        if (cap == NULL)
                return EINVAL;

        /* NB: algorithms are in the range [1..max] */
        if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                cap->cc_max_op_len[alg] = maxoplen;
                if (bootverbose) {
                        printf("crypto: driver %u registers alg %u "
                            "flags %u maxoplen %u\n",
                            driverid,
                            alg,
                            flags,
                            maxoplen
                        );
                }

                if (cap->cc_process == NULL) {
                        cap->cc_arg = arg;
                        cap->cc_newsession = newses;
                        cap->cc_process = process;
                        cap->cc_freesession = freeses;
                        cap->cc_sessions = 0;	/* Unmark */
                }
                err = 0;
        } else
                err = EINVAL;

        crypto_driver_unlock(cap);

        return err;
}

static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
        int i;
        u_int32_t ses;
        bool lastalg = true;

        KASSERT(cap != NULL);
        KASSERT(mutex_owned(&cap->cc_lock));

        if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
                return EINVAL;

        if (!all && cap->cc_alg[alg] == 0)
                return EINVAL;

        cap->cc_alg[alg] = 0;
        cap->cc_max_op_len[alg] = 0;

        if (all) {
                if (alg != CRYPTO_ALGORITHM_MAX)
                        lastalg = false;
        } else {
                /* Was this the last algorithm ? */
                for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
                        if (cap->cc_alg[i] != 0) {
                                lastalg = false;
                                break;
                        }
        }
        if (lastalg) {
                ses = cap->cc_sessions;
                crypto_driver_clear(cap);
                if (ses != 0) {
                        /*
                         * If there are pending sessions, just mark as invalid.
                         */
                        cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
                        cap->cc_sessions = ses;
                }
        }

        return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
        int err;
        struct cryptocap *cap;

        cap = crypto_checkdriver_lock(driverid);
        err = crypto_unregister_locked(cap, alg, false);
        crypto_driver_unlock(cap);

        return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
        int err, i;
        struct cryptocap *cap;

        cap = crypto_checkdriver_lock(driverid);
        for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
                err = crypto_unregister_locked(cap, i, true);
                if (err)
                        break;
        }
        crypto_driver_unlock(cap);

        return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptkop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
        struct cryptocap *cap;
        int needwakeup = 0;

        cap = crypto_checkdriver_lock(driverid);
        if (cap == NULL)
                return EINVAL;

        if (what & CRYPTO_SYMQ) {
                needwakeup |= cap->cc_qblocked;
                cap->cc_qblocked = 0;
        }
        if (what & CRYPTO_ASYMQ) {
                needwakeup |= cap->cc_kqblocked;
                cap->cc_kqblocked = 0;
        }
        crypto_driver_unlock(cap);
        if (needwakeup)
                setsoftcrypto(softintr_cookie);

        return 0;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the software interrupt thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
        int result;
        struct cryptocap *cap;

        KASSERT(crp != NULL);

        DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

        cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
        if (crypto_timing)
                nanouptime(&crp->crp_tstamp);
#endif

        if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
                int wasempty;
                /*
                 * Caller marked the request as ``ok to delay'';
                 * queue it for the swi thread.  This is desirable
                 * when the operation is low priority and/or suitable
                 * for batching.
                 *
                 * List order does not matter for batched jobs.
                 */
                mutex_enter(&crypto_q_mtx);
                wasempty = TAILQ_EMPTY(&crp_q);
                TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
                mutex_exit(&crypto_q_mtx);
                if (wasempty)
                        setsoftcrypto(softintr_cookie);

                return 0;
        }

        mutex_enter(&crypto_q_mtx);
        cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
        /*
         * TODO:
         * If we could ensure the driver stays valid until it has finished
         * crypto_unregister(), this migration would not be required.
         */
        if (cap == NULL) {
                /*
                 * The driver must have been detached, so this request
                 * will migrate to another driver in cryptointr() later.
                 */
                TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
                result = 0;
                goto out;
        }

        if (cap->cc_qblocked != 0) {
                crypto_driver_unlock(cap);
                /*
                 * The driver is blocked, just queue the op until
                 * it unblocks and the swi thread gets kicked.
                 */
                TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
                result = 0;
                goto out;
        }

        /*
         * Caller marked the request to be processed
         * immediately; dispatch it directly to the
         * driver unless the driver is currently blocked.
         */
        crypto_driver_unlock(cap);
        result = crypto_invoke(crp, 0);
        if (result == ERESTART) {
                /*
                 * The driver ran out of resources, mark the
                 * driver ``blocked'' for cryptop's and put
                 * the op on the queue.
                 */
                crypto_driver_lock(cap);
                cap->cc_qblocked = 1;
                crypto_driver_unlock(cap);
                TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
                cryptostats.cs_blocks++;

                /*
                 * The crp was enqueued to crp_q, so no error has
                 * occurred; this function must not return an error.
                 */
                result = 0;
        }

out:
        mutex_exit(&crypto_q_mtx);
        return result;
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
        struct cryptocap *cap;
        int result;

        KASSERT(krp != NULL);

        cryptostats.cs_kops++;

        mutex_enter(&crypto_q_mtx);
        cap = crypto_checkdriver_lock(krp->krp_hid);
        /*
         * TODO:
         * If we could ensure the driver stays valid until it has finished
         * crypto_unregister(), this migration would not be required.
         */
        if (cap == NULL) {
                TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
                result = 0;
                goto out;
        }

        if (cap->cc_kqblocked != 0) {
                crypto_driver_unlock(cap);
                /*
                 * The driver is blocked, just queue the op until
                 * it unblocks and the swi thread gets kicked.
                 */
                TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
                result = 0;
                goto out;
        }

        crypto_driver_unlock(cap);
        result = crypto_kinvoke(krp, 0);
        if (result == ERESTART) {
                /*
                 * The driver ran out of resources, mark the
                 * driver ``blocked'' for cryptkop's and put
                 * the op on the queue.
                 */
                crypto_driver_lock(cap);
                cap->cc_kqblocked = 1;
                crypto_driver_unlock(cap);
                TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
                cryptostats.cs_kblocks++;

                /*
                 * The krp was enqueued to crp_kq, so no error has
                 * occurred; this function must not return an error.
                 */
                result = 0;
        }

out:
        mutex_exit(&crypto_q_mtx);
        return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
        struct cryptocap *cap = NULL;
        u_int32_t hid;
        int error;

        KASSERT(krp != NULL);

        /* Sanity checks. */
        if (krp->krp_callback == NULL) {
                cv_destroy(&krp->krp_cv);
                crypto_kfreereq(krp);
                return EINVAL;
        }

        mutex_enter(&crypto_drv_mtx);
        for (hid = 0; hid < crypto_drivers_num; hid++) {
                cap = crypto_checkdriver(hid);
                if (cap == NULL)
                        continue;
                crypto_driver_lock(cap);
                if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    crypto_devallowsoft == 0) {
                        crypto_driver_unlock(cap);
                        continue;
                }
                if (cap->cc_kprocess == NULL) {
                        crypto_driver_unlock(cap);
                        continue;
                }
                if ((cap->cc_kalg[krp->krp_op] &
                    CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
                        crypto_driver_unlock(cap);
                        continue;
                }
                break;
        }
        mutex_exit(&crypto_drv_mtx);
        /* If the search loop fell off the end, no driver was selected. */
        if (hid == crypto_drivers_num)
                cap = NULL;
        if (cap != NULL) {
                int (*process)(void *, struct cryptkop *, int);
                void *arg;

                process = cap->cc_kprocess;
                arg = cap->cc_karg;
                krp->krp_hid = hid;
                krp->reqcpu = curcpu();
                crypto_driver_unlock(cap);
                error = (*process)(arg, krp, hint);
        } else {
                error = ENODEV;
        }

        if (error) {
                krp->krp_status = error;
                crypto_kdone(krp);
        }
        return 0;
}

#ifdef CRYPTO_TIMING
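/*
 * Accumulate the time elapsed since *tv into ts (total/min/max/count)
 * and reset *tv to the current time.
 */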
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
        struct timespec now, t;

        nanouptime(&now);
        t.tv_sec = now.tv_sec - tv->tv_sec;
        t.tv_nsec = now.tv_nsec - tv->tv_nsec;
        if (t.tv_nsec < 0) {
                t.tv_sec--;
                t.tv_nsec += 1000000000;
        }
        timespecadd(&ts->acc, &t, &t);
        if (timespeccmp(&t, &ts->min, <))
                ts->min = t;
        if (timespeccmp(&t, &ts->max, >))
                ts->max = t;
        ts->count++;

        *tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
        struct cryptocap *cap;

        KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
        if (crypto_timing)
                crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
        /* Sanity checks. */
        if (crp->crp_callback == NULL) {
                return EINVAL;
        }
        if (crp->crp_desc == NULL) {
                crp->crp_etype = EINVAL;
                crypto_done(crp);
                return 0;
        }

        cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
        if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
                int (*process)(void *, struct cryptop *, int);
                void *arg;

                process = cap->cc_process;
                arg = cap->cc_arg;
                crp->reqcpu = curcpu();

                /*
                 * Invoke the driver to process the request.
                 */
                DPRINTF("calling process for %p\n", crp);
                crypto_driver_unlock(cap);
                return (*process)(arg, crp, hint);
        } else {
                struct cryptodesc *crd;
                u_int64_t nid = 0;

                if (cap != NULL)
                        crypto_driver_unlock(cap);

                /*
                 * Driver has unregistered; migrate the session and return
                 * an error to the caller so they'll resubmit the op.
                 */
                crypto_freesession(crp->crp_sid);

                for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
                        crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

                if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
                        crp->crp_sid = nid;

                crp->crp_etype = EAGAIN;

                crypto_done(crp);
                return 0;
        }
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
        struct cryptodesc *crd;

        if (crp == NULL)
                return;
        DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

        /* sanity check */
        if (crp->crp_flags & CRYPTO_F_ONRETQ) {
                panic("crypto_freereq() freeing crp on RETQ\n");
        }

        while ((crd = crp->crp_desc) != NULL) {
                crp->crp_desc = crd->crd_next;
                pool_put(&cryptodesc_pool, crd);
        }
        pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
        struct cryptodesc *crd;
        struct cryptop *crp;

        /*
         * If crp_ret_q is full, fail the allocation here so that an
         * error callback cannot overflow crp_ret_q.
         */
        if (CRYPTO_Q_IS_FULL(crp_ret_q)) {
                CRYPTO_Q_INC_DROPS(crp_ret_q);
                return NULL;
        }

        crp = pool_get(&cryptop_pool, 0);
        if (crp == NULL) {
                return NULL;
        }
        memset(crp, 0, sizeof(struct cryptop));

        while (num--) {
                crd = pool_get(&cryptodesc_pool, 0);
                if (crd == NULL) {
                        crypto_freereq(crp);
                        return NULL;
                }

                memset(crd, 0, sizeof(struct cryptodesc));
                crd->crd_next = crp->crp_desc;
                crp->crp_desc = crd;
        }

        return crp;
}

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

        if (krp == NULL)
                return;

        DPRINTF("krp %p\n", krp);

        /* sanity check */
        if (krp->krp_flags & CRYPTO_F_ONRETQ) {
                panic("crypto_kfreereq() freeing krp on RETQ\n");
        }

        pool_put(&cryptkop_pool, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
struct cryptkop *
crypto_kgetreq(int num __unused, int prflags)
{
        struct cryptkop *krp;

        /*
         * If crp_ret_kq is full, fail the allocation here so that an
         * error callback cannot overflow crp_ret_kq.
         */
        if (CRYPTO_Q_IS_FULL(crp_ret_kq)) {
                CRYPTO_Q_INC_DROPS(crp_ret_kq);
                return NULL;
        }

        krp = pool_get(&cryptkop_pool, prflags);
        if (krp == NULL) {
                return NULL;
        }
        memset(krp, 0, sizeof(struct cryptkop));

        return krp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{

        KASSERT(crp != NULL);

        if (crp->crp_etype != 0)
                cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
        if (crypto_timing)
                crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
        DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

        /*
         * Normal case; queue the callback for the thread.
         *
         * The return queue is manipulated by the swi thread
         * and, potentially, by crypto device drivers calling
         * back to mark operations completed.  Thus we need
         * to mask both while manipulating the return queue.
         */
        if (crp->crp_flags & CRYPTO_F_CBIMM) {
                /*
                 * Do the callback directly.  This is ok when the
                 * callback routine does very little (e.g. the
                 * /dev/crypto callback method just does a wakeup).
                 */
                crp->crp_flags |= CRYPTO_F_DONE;

#ifdef CRYPTO_TIMING
                if (crypto_timing) {
                        /*
                         * NB: We must copy the timestamp before
                         * doing the callback as the cryptop is
                         * likely to be reclaimed.
                         */
                        struct timespec t = crp->crp_tstamp;
                        crypto_tstat(&cryptostats.cs_cb, &t);
                        crp->crp_callback(crp);
                        crypto_tstat(&cryptostats.cs_finis, &t);
                } else
#endif
                        crp->crp_callback(crp);
        } else {
                crp->crp_flags |= CRYPTO_F_DONE;
#if 0
                if (crp->crp_flags & CRYPTO_F_USER) {
                        /*
                         * TODO:
                         * If crp->crp_flags & CRYPTO_F_USER and the used
                         * encryption driver does all the processing in
                         * the same context, we can skip enqueueing crp_ret_q
                         * and softint_schedule(crypto_ret_si).
                         */
                        DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
                            CRYPTO_SESID2LID(crp->crp_sid), crp);
                } else
#endif
                {
                        int wasempty;

                        mutex_spin_enter(&crypto_ret_q_mtx);
                        wasempty = TAILQ_EMPTY(&crp_ret_q);
                        DPRINTF("lid[%u]: queueing %p\n",
                            CRYPTO_SESID2LID(crp->crp_sid), crp);
                        crp->crp_flags |= CRYPTO_F_ONRETQ;
                        TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
                        CRYPTO_Q_INC(crp_ret_q);
                        if (wasempty && crypto_exit_flag == 0) {
                                DPRINTF("lid[%u]: waking cryptoret, "
                                    "crp %p hit empty queue.\n",
                                    CRYPTO_SESID2LID(crp->crp_sid), crp);
                                softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
                        }
                        mutex_spin_exit(&crypto_ret_q_mtx);
                }
        }
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{

        KASSERT(krp != NULL);

        if (krp->krp_status != 0)
                cryptostats.cs_kerrs++;

        krp->krp_flags |= CRYPTO_F_DONE;

        /*
         * The return queue is manipulated by the swi thread
         * and, potentially, by crypto device drivers calling
         * back to mark operations completed.  Thus we need
         * to mask both while manipulating the return queue.
         */
        if (krp->krp_flags & CRYPTO_F_CBIMM) {
                krp->krp_callback(krp);
        } else {
                int wasempty;

                mutex_spin_enter(&crypto_ret_q_mtx);
                wasempty = TAILQ_EMPTY(&crp_ret_kq);
                krp->krp_flags |= CRYPTO_F_ONRETQ;
                TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
                CRYPTO_Q_INC(crp_ret_kq);
                if (wasempty && crypto_exit_flag == 0)
                        softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
                mutex_spin_exit(&crypto_ret_q_mtx);
        }
}

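/*
 * Report the asymmetric (key) algorithms usable from userland as a
 * bitmask of 1 << CRK_* values, honouring crypto_userasymcrypto and
 * crypto_devallowsoft.
 */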
int
crypto_getfeat(int *featp)
{

        if (crypto_userasymcrypto == 0) {
                *featp = 0;
                return 0;
        }

        mutex_enter(&crypto_drv_mtx);

        int feat = 0;
        for (int hid = 0; hid < crypto_drivers_num; hid++) {
                struct cryptocap *cap;
                cap = crypto_checkdriver(hid);
                if (cap == NULL)
                        continue;

                crypto_driver_lock(cap);

                if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    crypto_devallowsoft == 0)
                        goto unlock;

                if (cap->cc_kprocess == NULL)
                        goto unlock;

                for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
                        if ((cap->cc_kalg[kalg] &
                            CRYPTO_ALG_FLAG_SUPPORTED) != 0)
                                feat |= 1 << kalg;

unlock:         crypto_driver_unlock(cap);
        }

        mutex_exit(&crypto_drv_mtx);
        *featp = feat;
        return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
        struct cryptop *crp, *submit, *cnext;
        struct cryptkop *krp, *knext;
        struct cryptocap *cap;
        int result, hint;

        cryptostats.cs_intrs++;
        mutex_enter(&crypto_q_mtx);
        do {
                /*
                 * Find the first element in the queue that can be
                 * processed and look-ahead to see if multiple ops
                 * are ready for the same driver.
                 */
                submit = NULL;
                hint = 0;
                TAILQ_FOREACH_SAFE(crp, &crp_q, crp_next, cnext) {
                        u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
                        cap = crypto_checkdriver_lock(hid);
                        if (cap == NULL || cap->cc_process == NULL) {
                                if (cap != NULL)
                                        crypto_driver_unlock(cap);
                                /* Op needs to be migrated, process it. */
                                submit = crp;
                                break;
                        }

                        /*
                         * skip blocked crp regardless of CRYPTO_F_BATCH
                         */
                        if (cap->cc_qblocked != 0) {
                                crypto_driver_unlock(cap);
                                continue;
                        }
                        crypto_driver_unlock(cap);

                        /*
                         * skip batched crp until the end of crp_q
                         */
                        if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
                                if (submit == NULL) {
                                        submit = crp;
                                } else {
                                        if (CRYPTO_SESID2HID(submit->crp_sid)
                                            == hid)
                                                hint = CRYPTO_HINT_MORE;
                                }

                                continue;
                        }

                        /*
                         * found the first crp which is neither blocked
                         * nor batched.
                         */
                        submit = crp;
                        /*
                         * batched crp can be processed much later, so
                         * clear the hint.
                         */
                        hint = 0;
                        break;
                }
                if (submit != NULL) {
                        TAILQ_REMOVE(&crp_q, submit, crp_next);
                        result = crypto_invoke(submit, hint);
                        /*
                         * we must hold the mutex here as the TAILQ op
                         * or kinvoke below may need it.  sigh.
                         */
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* validate sid again */
                                cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
                                if (cap == NULL) {
                                        /* migrate again, sigh... */
                                        TAILQ_INSERT_TAIL(&crp_q, submit, crp_next);
                                } else {
                                        cap->cc_qblocked = 1;
                                        crypto_driver_unlock(cap);
                                        TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
                                        cryptostats.cs_blocks++;
                                }
                        }
                }

                /* As above, but for key ops */
                TAILQ_FOREACH_SAFE(krp, &crp_kq, krp_next, knext) {
                        cap = crypto_checkdriver_lock(krp->krp_hid);
                        if (cap == NULL || cap->cc_kprocess == NULL) {
                                if (cap != NULL)
                                        crypto_driver_unlock(cap);
                                /* Op needs to be migrated, process it. */
                                break;
                        }
                        if (!cap->cc_kqblocked) {
                                crypto_driver_unlock(cap);
                                break;
                        }
                        crypto_driver_unlock(cap);
                }
                if (krp != NULL) {
                        TAILQ_REMOVE(&crp_kq, krp, krp_next);
                        result = crypto_kinvoke(krp, 0);
                        /* the next iteration will want the mutex. :-/ */
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptkop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* validate sid again */
                                cap = crypto_checkdriver_lock(krp->krp_hid);
                                if (cap == NULL) {
                                        /* migrate again, sigh... */
                                        TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
                                } else {
                                        cap->cc_kqblocked = 1;
                                        crypto_driver_unlock(cap);
                                        TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
                                        cryptostats.cs_kblocks++;
                                }
                        }
                }
        } while (submit != NULL || krp != NULL);
        mutex_exit(&crypto_q_mtx);
}

/*
 * softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{

        mutex_spin_enter(&crypto_ret_q_mtx);
        for (;;) {
                struct cryptop *crp;
                struct cryptkop *krp;

                crp = TAILQ_FIRST(&crp_ret_q);
                if (crp != NULL) {
                        TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
                        CRYPTO_Q_DEC(crp_ret_q);
                        crp->crp_flags &= ~CRYPTO_F_ONRETQ;
                }
                krp = TAILQ_FIRST(&crp_ret_kq);
                if (krp != NULL) {
                        TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
                        CRYPTO_Q_DEC(crp_ret_kq);
                        krp->krp_flags &= ~CRYPTO_F_ONRETQ;
                }

                /* drop the lock before calling any callbacks. */
                if (crp == NULL && krp == NULL)
                        break;

                mutex_spin_exit(&crypto_ret_q_mtx);
                if (crp != NULL) {
#ifdef CRYPTO_TIMING
                        if (crypto_timing) {
                                /*
                                 * NB: We must copy the timestamp before
                                 * doing the callback as the cryptop is
                                 * likely to be reclaimed.
                                 */
                                struct timespec t = crp->crp_tstamp;
                                crypto_tstat(&cryptostats.cs_cb, &t);
                                crp->crp_callback(crp);
                                crypto_tstat(&cryptostats.cs_finis, &t);
                        } else
#endif
                        {
                                crp->crp_callback(crp);
                        }
                }
                if (krp != NULL)
                        krp->krp_callback(krp);

                mutex_spin_enter(&crypto_ret_q_mtx);
        }
        mutex_spin_exit(&crypto_ret_q_mtx);
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
        int error = 0;

        switch (cmd) {
        case MODULE_CMD_INIT:
#ifdef _MODULE
                error = crypto_init();
#endif
                break;
        case MODULE_CMD_FINI:
#ifdef _MODULE
                error = crypto_destroy(true);
#endif
                break;
        default:
                error = ENOTTY;
        }
        return error;
}