/*	$NetBSD: crypto.c,v 1.78 2017/05/31 02:17:49 knakahara Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.78 2017/05/31 02:17:49 knakahara Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

static kmutex_t crypto_q_mtx;
static kmutex_t crypto_ret_q_mtx;
static kcondvar_t cryptoret_cv;
/* below are kludges for residual code written to FreeBSD interfaces */
#define SWI_CRYPTO 17
#define register_swi(lvl, fn)						\
	softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, (void (*)(void *))fn, NULL)
#define unregister_swi(lvl, fn)	softint_disestablish(softintr_cookie)
#define setsoftcrypto(x)						\
	do {								\
		kpreempt_disable();					\
		softint_schedule(x);					\
		kpreempt_enable();					\
	} while (0)

int crypto_ret_q_check(struct cryptop *);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static kmutex_t crypto_drv_mtx;
/* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
static struct cryptocap *crypto_drivers;
static int crypto_drivers_num;
static void *softintr_cookie;
static int crypto_exit_flag;
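
/*
 * For illustration only (a sketch, not compiled): a driver would
 * typically claim a slot and register its algorithms as below, where
 * the "mydrv_*" callbacks and the "sc" softc pointer are hypothetical
 * names:
 *
 *	int32_t cid = crypto_get_driverid(0);
 *	if (cid < 0)
 *		return;		// driver table full
 *	crypto_register(cid, CRYPTO_3DES_CBC, 0, 0,
 *	    mydrv_newsession, mydrv_freesession, mydrv_process, sc);
 *	// ...one crypto_register()/crypto_kregister() per algorithm...
 *	// and on detach:
 *	crypto_unregister_all(cid);
 */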

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static TAILQ_HEAD(, cryptop) crp_q =		/* request queues */
	TAILQ_HEAD_INITIALIZER(crp_q);
static TAILQ_HEAD(, cryptkop) crp_kq =
	TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static TAILQ_HEAD(crprethead, cryptop) crp_ret_q =	/* callback queues */
	TAILQ_HEAD_INITIALIZER(crp_ret_q);
static TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq =
	TAILQ_HEAD_INITIALIZER(crp_ret_kq);

#define DEFINIT_CRYPTO_Q_LEN(name)		\
	static int crypto_##name##_len = 0

#define DEFINIT_CRYPTO_Q_DROPS(name)		\
	static int crypto_##name##_drops = 0

#define DEFINIT_CRYPTO_Q_MAXLEN(name, defval)	\
	static int crypto_##name##_maxlen = defval

#define CRYPTO_Q_INC(name)			\
	do {					\
		crypto_##name##_len++;		\
	} while (0)

#define CRYPTO_Q_DEC(name)			\
	do {					\
		crypto_##name##_len--;		\
	} while (0)

#define CRYPTO_Q_INC_DROPS(name)		\
	do {					\
		crypto_##name##_drops++;	\
	} while (0)

#define CRYPTO_Q_IS_FULL(name)					\
	(crypto_##name##_maxlen > 0				\
	    && (crypto_##name##_len > crypto_##name##_maxlen))
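
/*
 * For reference, the DEFINIT_CRYPTO_Q_LEN(crp_ret_q) use below expands
 * (by token pasting) to
 *
 *	static int crypto_crp_ret_q_len = 0;
 *
 * which is the variable the sysctl "len" nodes point at; the _DROPS
 * and _MAXLEN counterparts expand analogously.
 */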

/*
 * current queue length.
 */
DEFINIT_CRYPTO_Q_LEN(crp_ret_q);
DEFINIT_CRYPTO_Q_LEN(crp_ret_kq);

/*
 * queue dropped count.
 */
DEFINIT_CRYPTO_Q_DROPS(crp_ret_q);
DEFINIT_CRYPTO_Q_DROPS(crp_ret_kq);

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif
/*
 * queue length limit.
 * default value is 0. <=0 means unlimited.
 */
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_q, CRYPTO_RET_Q_MAXLEN);
DEFINIT_CRYPTO_Q_MAXLEN(crp_ret_kq, CRYPTO_RET_KQ_MAXLEN);

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * TODO:
 * make percpu
 */
static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * need to make percpu?
 */
static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return error;

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */
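
/*
 * A sketch of how these knobs appear from userland: the three
 * sysctl_createv() calls below create integer nodes under CTL_KERN,
 * so (assuming the usual sysctl(8) utility) e.g.
 *
 *	sysctl -w kern.cryptodevallowsoft=-1	# force software transforms
 *	sysctl -w kern.usercrypto=0		# close /dev/crypto to users
 */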

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "opencrypto",
		       SYSCTL_DESCR("opencrypto related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_q",
		       SYSCTL_DESCR("crypto_ret_q related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_q_len, 0,
		       (void *)&crypto_crp_ret_q_len, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_q_drops, 0,
		       (void *)&crypto_crp_ret_q_drops, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_q_maxlen, 0,
		       (void *)&crypto_crp_ret_q_maxlen, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retkqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_kq",
		       SYSCTL_DESCR("crypto_ret_kq related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_q_len, 0,
		       (void *)&crypto_crp_ret_kq_len, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_q_drops, 0,
		       (void *)&crypto_crp_ret_kq_drops, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_q_maxlen, 0,
		       (void *)&crypto_crp_ret_kq_maxlen, 0,
		       CTL_CREATE, CTL_EOL);
}

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static void cryptointr(void);		/* swi thread to dispatch ops */
static void cryptoret(void);		/* kernel thread for callbacks */
static struct lwp *cryptothread;
static int crypto_destroy(bool);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver(u_int32_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

static int
crypto_init0(void)
{
	int error;

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&crypto_q_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&crypto_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
	cv_init(&cryptoret_cv, "crypto_w");
	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
	    0, "cryptop", NULL, IPL_NET);
	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
	    0, "cryptodesc", NULL, IPL_NET);
	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
	    0, "cryptkop", NULL, IPL_NET);

	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    (void (*)(void *))cryptoret, NULL, &cryptothread, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; "
		    "error %d\n", error);
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;

		mutex_spin_enter(&crypto_ret_q_mtx);

		/* if we have any in-progress requests, don't unload */
		if (!TAILQ_EMPTY(&crp_q) || !TAILQ_EMPTY(&crp_kq)) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}

		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0)
				break;
		}
		if (cap != NULL) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}

		/* kick the cryptoret thread and wait for it to exit */
		crypto_exit_flag = 1;
		cv_signal(&cryptoret_cv);

		while (crypto_exit_flag != 0)
			cv_wait(&cryptoret_cv, &crypto_ret_q_mtx);
		mutex_spin_exit(&crypto_ret_q_mtx);
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	unregister_swi(SWI_CRYPTO, cryptointr);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	mutex_exit(&crypto_drv_mtx);

	pool_destroy(&cryptop_pool);
	pool_destroy(&cryptodesc_pool);
	pool_destroy(&cryptkop_pool);

	cv_destroy(&cryptoret_cv);

	mutex_destroy(&crypto_ret_q_mtx);
	mutex_destroy(&crypto_q_mtx);
	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	struct cryptocap *cap;
	u_int32_t hid, lid;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (cap->cc_alg[cr->cri_alg] == 0) {
				DPRINTF("alg %d not supported\n", cr->cri_alg);
				break;
			}

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this.  We need to inject a "virtual"
			 * XXX session layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;	/* Pass the driver ID. */
			err = cap->cc_newsession(cap->cc_arg, &lid, cri);
			if (err == 0) {
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				(cap->cc_sessions)++;
			} else {
				DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
				    hid, err);
			}
			goto done;
			/*break;*/
		}
	}
done:
	mutex_exit(&crypto_drv_mtx);
	return err;
}
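
/*
 * Example (a sketch only, not compiled): a consumer such as /dev/crypto
 * or IPsec builds a cryptoini chain and asks for a session; "key" is a
 * hypothetical key buffer:
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_3DES_CBC;
 *	cri.cri_klen = 192;			// key length in bits
 *	cri.cri_key = key;
 *	if (crypto_newsession(&sid, &cri, 0))	// 0: hardware or software
 *		...handle failure...
 */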

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	int err = 0;

	mutex_enter(&crypto_drv_mtx);

	/* Determine two IDs. */
	cap = crypto_checkdriver(CRYPTO_SESID2HID(sid));
	if (cap == NULL) {
		err = ENOENT;
		goto done;
	}

	if (cap->cc_sessions)
		(cap->cc_sessions)--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		memset(cap, 0, sizeof(struct cryptocap));

done:
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver(i);
		if (cap == NULL)
			continue;
		if (cap->cc_process == NULL &&
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    cap->cc_sessions == 0)
			break;
	}

	/* Out of entries, allocate some more. */
	if (cap == NULL) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;

		cap = crypto_checkdriver(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;	/* Mark */
	cap->cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid, kalg, flags);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid, alg, flags, maxoplen);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;	/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	mutex_exit(&crypto_drv_mtx);
	return err;
}

static int
crypto_unregister_locked(u_int32_t driverid, int alg, bool all)
{
	int i;
	u_int32_t ses;
	struct cryptocap *cap;
	bool lastalg = true;

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	cap = crypto_checkdriver(driverid);
	if (cap == NULL || (!all && cap->cc_alg[alg] == 0))
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm ? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		memset(cap, 0, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;

	mutex_enter(&crypto_drv_mtx);
	err = crypto_unregister_locked(driverid, alg, false);
	mutex_exit(&crypto_drv_mtx);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;

	mutex_enter(&crypto_drv_mtx);
	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(driverid, i, true);
		if (err)
			break;
	}
	mutex_exit(&crypto_drv_mtx);

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	mutex_spin_enter(&crypto_q_mtx);
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		mutex_spin_exit(&crypto_q_mtx);
		return EINVAL;
	}

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	mutex_spin_exit(&crypto_q_mtx);
	if (needwakeup)
		setsoftcrypto(softintr_cookie);

	return 0;
}
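
/*
 * For illustration (a sketch, not compiled): a driver whose process
 * callback returned ERESTART because its command ring was full would,
 * once an interrupt frees a slot, re-enable dispatch with something
 * like the following, where "sc_cid" is a hypothetical softc member
 * holding the id from crypto_get_driverid():
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
 */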

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	int result;
	struct cryptocap *cap;

	KASSERT(crp != NULL);

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);
		if (wasempty)
			setsoftcrypto(softintr_cookie);

		return 0;
	}

	mutex_spin_enter(&crypto_q_mtx);

	cap = crypto_checkdriver(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() completes, this migration would not be needed.
	 */
	if (cap == NULL) {
		/*
		 * The driver must be detached, so this request will migrate
		 * to other drivers in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	/*
	 * TODO:
	 * cap->cc_qblocked should be protected by a spin lock other than
	 * crypto_q_mtx.
	 */
	if (cap->cc_qblocked != 0) {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	mutex_spin_exit(&crypto_q_mtx);
	result = crypto_invoke(crp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		cap->cc_qblocked = 1;
		TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
		cryptostats.cs_blocks++;
		mutex_spin_exit(&crypto_q_mtx);

		/*
		 * The crp was queued on crp_q, so no error has occurred;
		 * therefore this function must not return an error.
		 */
		result = 0;
	}

	return result;
}
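
/*
 * Example (a sketch only, not compiled): submitting a symmetric request
 * against an existing session "sid"; error handling is elided and the
 * callback name is hypothetical:
 *
 *	struct cryptop *crp = crypto_getreq(1);	// one descriptor
 *	struct cryptodesc *crd = crp->crp_desc;
 *
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_buf = buf;
 *	crp->crp_callback = myproto_cb;		// runs from cryptoret
 *	crd->crd_alg = CRYPTO_3DES_CBC;
 *	crd->crd_skip = 0;
 *	crd->crd_len = len;
 *	crd->crd_flags = CRD_F_ENCRYPT;
 *	(void)crypto_dispatch(crp);
 */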

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int result;

	KASSERT(krp != NULL);

	mutex_spin_enter(&crypto_q_mtx);
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() completes, this migration would not be needed.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	if (cap->cc_kqblocked != 0) {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		mutex_spin_exit(&crypto_q_mtx);

		return 0;
	}

	mutex_spin_exit(&crypto_q_mtx);
	result = crypto_kinvoke(krp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		mutex_spin_enter(&crypto_q_mtx);
		cap->cc_kqblocked = 1;
		TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;
		mutex_spin_exit(&crypto_q_mtx);

		/*
		 * The krp was queued on crp_kq, so no error has occurred;
		 * therefore this function must not return an error.
		 */
		result = 0;
	}

	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	struct cryptocap *cap = NULL;
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);

	/* Sanity checks. */
	if (krp->krp_callback == NULL) {
		cv_destroy(&krp->krp_cv);
		crypto_kfreereq(krp);
		return EINVAL;
	}

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;
		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (cap->cc_kprocess == NULL)
			continue;
		if ((cap->cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (cap != NULL) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = cap->cc_kprocess;
		arg = cap->cc_karg;
		mutex_exit(&crypto_drv_mtx);
		krp->krp_hid = hid;
		error = (*process)(arg, krp, hint);
	} else {
		mutex_exit(&crypto_drv_mtx);
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;

	KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp->crp_callback == NULL) {
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	cap = crypto_checkdriver(CRYPTO_SESID2HID(crp->crp_sid));
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = cap->cc_process;
		arg = cap->cc_arg;

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		return (*process)(arg, crp, hint);
	} else {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;

		crypto_done(crp);
		return 0;
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}
	pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	/*
	 * When crp_ret_q is full, refuse allocation here so that an
	 * error callback cannot overflow crp_ret_q.
	 */
	if (CRYPTO_Q_IS_FULL(crp_ret_q)) {
		CRYPTO_Q_INC_DROPS(crp_ret_q);
		return NULL;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently, support one descriptor only.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

	if (krp == NULL)
		return;

	DPRINTF("krp %p\n", krp);

	/* sanity check */
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_kfreereq() freeing krp on RETQ\n");
	}

	pool_put(&cryptkop_pool, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently, support one descriptor only.
 */
struct cryptkop *
crypto_kgetreq(int num __unused, int prflags)
{
	struct cryptkop *krp;

	/*
	 * When crp_ret_kq is full, refuse allocation here so that an
	 * error callback cannot overflow crp_ret_kq.
	 */
	if (CRYPTO_Q_IS_FULL(crp_ret_kq)) {
		CRYPTO_Q_INC_DROPS(crp_ret_kq);
		return NULL;
	}

	krp = pool_get(&cryptkop_pool, prflags);
	if (krp == NULL) {
		return NULL;
	}
	memset(krp, 0, sizeof(struct cryptkop));

	return krp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		mutex_spin_enter(&crypto_ret_q_mtx);
		crp->crp_flags |= CRYPTO_F_DONE;
		mutex_spin_exit(&crypto_ret_q_mtx);

#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		mutex_spin_enter(&crypto_ret_q_mtx);
		crp->crp_flags |= CRYPTO_F_DONE;
#if 0
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * TODO:
			 * If crp->crp_flags & CRYPTO_F_USER and the used
			 * encryption driver does all the processing in
			 * the same context, we can skip enqueueing crp_ret_q
			 * and cv_signal(&cryptoret_cv).
			 */
			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
		} else
#endif
		{
			wasempty = TAILQ_EMPTY(&crp_ret_q);
			DPRINTF("lid[%u]: queueing %p\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
			crp->crp_flags |= CRYPTO_F_ONRETQ;
			TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
			CRYPTO_Q_INC(crp_ret_q);
			if (wasempty) {
				DPRINTF("lid[%u]: waking cryptoret, "
				    "crp %p hit empty queue\n",
				    CRYPTO_SESID2LID(crp->crp_sid), crp);
				cv_signal(&cryptoret_cv);
			}
		}
		mutex_spin_exit(&crypto_ret_q_mtx);
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		mutex_spin_enter(&crypto_ret_q_mtx);
		wasempty = TAILQ_EMPTY(&crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
		CRYPTO_Q_INC(crp_ret_kq);
		if (wasempty)
			cv_signal(&cryptoret_cv);
		mutex_spin_exit(&crypto_ret_q_mtx);
	}
}

int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	if (crypto_userasymcrypto == 0)
		return 0;

	mutex_enter(&crypto_drv_mtx);

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap;
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			continue;
		}
		if (cap->cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((cap->cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	int result, hint;

	cryptostats.cs_intrs++;
	mutex_spin_enter(&crypto_q_mtx);
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, &crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0)
				continue;

			/*
			 * skip batch crp until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * found first crp which is neither blocked nor batch.
			 */
			submit = crp;
			/*
			 * batch crp can be processed much later, so clear hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			mutex_spin_exit(&crypto_q_mtx);
			result = crypto_invoke(submit, hint);
			/* we must take here as the TAILQ op or kinvoke
			   may need this mutex below.  sigh. */
			mutex_spin_enter(&crypto_q_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(&crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, &crp_kq, krp_next, knext) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			mutex_spin_exit(&crypto_q_mtx);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			mutex_spin_enter(&crypto_q_mtx);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	mutex_spin_exit(&crypto_q_mtx);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;

	mutex_spin_enter(&crypto_ret_q_mtx);
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
			CRYPTO_Q_DEC(crp_ret_q);
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
			CRYPTO_Q_DEC(crp_ret_kq);
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop before calling any callbacks. */
		if (crp == NULL && krp == NULL) {

			/* Check for the exit condition. */
			if (crypto_exit_flag != 0) {

				/* Time to die. */
				crypto_exit_flag = 0;
				cv_broadcast(&cryptoret_cv);
				mutex_spin_exit(&crypto_ret_q_mtx);
				kthread_exit(0);
			}

			cryptostats.cs_rets++;
			cv_wait(&cryptoret_cv, &crypto_ret_q_mtx);
			continue;
		}

		mutex_spin_exit(&crypto_ret_q_mtx);

		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&crypto_ret_q_mtx);
	}
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}