/*	$NetBSD: crypto.c,v 1.96 2017/07/26 06:44:01 knakahara Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.96 2017/07/26 06:44:01 knakahara Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/xcall.h>
#include <sys/device.h>
#include <sys/cpu.h>
#include <sys/percpu.h>
#include <sys/kmem.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */
/* below are kludges for residual code written to FreeBSD interfaces */
#define	SWI_CRYPTO 17
#define	register_swi(lvl, fn)  \
	softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, (void (*)(void *))fn, NULL)
#define	unregister_swi(lvl, fn)  softint_disestablish(softintr_cookie)
#define	setsoftcrypto(x)						\
	do {								\
		kpreempt_disable();					\
		softint_schedule(x);					\
		kpreempt_enable();					\
	} while (0)
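
/*
 * Illustrative sketch (not part of the driver logic): how the shims above
 * expand on NetBSD.  "example_handler" and "example_cookie" are hypothetical
 * names used only for this example.
 */
#if 0
static void example_handler(void *);
static void *example_cookie;

static void
example_establish_and_kick(void)
{
	/* register_swi(SWI_CRYPTO, example_handler) becomes roughly: */
	example_cookie = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
	    (void (*)(void *))example_handler, NULL);

	/*
	 * setsoftcrypto(example_cookie): schedule the softint with
	 * preemption disabled, so we stay on the current CPU.
	 */
	kpreempt_disable();
	softint_schedule(example_cookie);
	kpreempt_enable();
}
#endif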

int crypto_ret_q_check(struct cryptop *);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static kmutex_t crypto_drv_mtx;
/* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
static struct cryptocap *crypto_drivers;
static int crypto_drivers_num;
static void *softintr_cookie;

static void *crypto_ret_si;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_q, cryptop);
TAILQ_HEAD(crypto_crp_kq, cryptkop);
struct crypto_crp_qs {
	struct crypto_crp_q crp_q;
	struct crypto_crp_kq crp_kq;
};
static percpu_t *crypto_crp_qs_percpu;

static inline struct crypto_crp_qs *
crypto_get_crp_qs(int *s)
{

	KASSERT(s != NULL);

	*s = splsoftnet();
	return percpu_getref(crypto_crp_qs_percpu);
}

static inline void
crypto_put_crp_qs(int *s)
{

	KASSERT(s != NULL);

	percpu_putref(crypto_crp_qs_percpu);
	splx(*s);
}
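
/*
 * Illustrative sketch (not part of the driver logic): the accessor pair
 * above is used like a lock; crypto_get_crp_qs() raises the SPL and pins
 * this CPU's queues, and crypto_put_crp_qs() undoes both.  "example_crp"
 * is hypothetical.
 */
#if 0
static void
example_enqueue(struct cryptop *example_crp)
{
	struct crypto_crp_qs *qs;
	int s;

	qs = crypto_get_crp_qs(&s);	/* splsoftnet + percpu_getref */
	TAILQ_INSERT_TAIL(&qs->crp_q, example_crp, crp_next);
	crypto_put_crp_qs(&s);		/* percpu_putref + splx */
}
#endif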

static void
crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs_pc = p;
	bool *isbusy = arg;

	if (!TAILQ_EMPTY(&qs_pc->crp_q) || !TAILQ_EMPTY(&qs_pc->crp_kq))
		*isbusy = true;
}

static void
crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs = p;

	TAILQ_INIT(&qs->crp_q);
	TAILQ_INIT(&qs->crp_kq);
}

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_ret_q, cryptop);
TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
struct crypto_crp_ret_qs {
	kmutex_t crp_ret_q_mtx;
	bool crp_ret_q_exit_flag;

	struct crypto_crp_ret_q crp_ret_q;
	int crp_ret_q_len;
	int crp_ret_q_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_q_drops;

	struct crypto_crp_ret_kq crp_ret_kq;
	int crp_ret_kq_len;
	int crp_ret_kq_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_kq_drops;
};
struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;


static inline struct crypto_crp_ret_qs *
crypto_get_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_enter(&qs->crp_ret_q_mtx);
	return qs;
}

static inline void
crypto_put_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_exit(&qs->crp_ret_q_mtx);
}
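
/*
 * Illustrative sketch (not part of the driver logic):
 * crypto_get_crp_ret_qs() returns a CPU's return queues with
 * crp_ret_q_mtx held, and crypto_put_crp_ret_qs() releases it, so any
 * access between the pair is serialized.
 */
#if 0
static int
example_ret_q_len(struct cpu_info *ci)
{
	struct crypto_crp_ret_qs *qs;
	int len;

	qs = crypto_get_crp_ret_qs(ci);		/* mutex_enter */
	len = qs->crp_ret_q_len;
	crypto_put_crp_ret_qs(ci);		/* mutex_exit */

	return len;
}
#endif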

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif

static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_q_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_q_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_q_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_q_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_q_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

static int
sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_kq_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_kq_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_kq_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_kq_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_kq_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is sysctl'able, controlling access to hardware
 * versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                              transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                              requests for non-accelerated transforms
 *                              (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                              are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */
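
/*
 * For example, to force userlevel requests to use software transforms
 * (a sketch; the sysctl names are those created in
 * sysctl_opencrypto_setup() below):
 *
 *	sysctl -w kern.cryptodevallowsoft=-1
 */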

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "opencrypto",
		       SYSCTL_DESCR("opencrypto related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_q",
		       SYSCTL_DESCR("crypto_ret_q related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_q_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_q_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_q_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);


	sysctl_createv(clog, 0, &ocnode, &retkqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_kq",
		       SYSCTL_DESCR("crypto_ret_kq related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_kq_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_kq_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_kq_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
}

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
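
/*
 * A rough sketch of the flow described above:
 *
 *	crypto_dispatch() ---------------------> crypto_invoke()
 *	        |          (immediate)                  |
 *	        | (CRYPTO_F_BATCH or blocked)           v
 *	        v                              driver cc_process()
 *	  percpu crp_q/crp_kq --cryptointr()-->        |
 *	                                               v
 *	  callback <-- cryptoret_softint() <-- percpu crp_ret_q/crp_ret_kq
 *	                                       (filled by crypto_done())
 */
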
static	void cryptointr(void);		/* swi thread to dispatch ops */
static	void cryptoret_softint(void *);	/* kernel thread for callbacks */
static	int crypto_destroy(bool);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static int crypto_init_finalize(device_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

static int
crypto_crp_ret_qs_init(void)
{
	int i, j;

	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
	    KM_NOSLEEP);
	if (crypto_crp_ret_qs_list == NULL) {
		printf("crypto_init: cannot allocate crypto_crp_ret_qs_list\n");
		return ENOMEM;
	}

	for (i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_NOSLEEP);
		if (qs == NULL)
			break;

		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
		qs->crp_ret_q_exit_flag = false;

		TAILQ_INIT(&qs->crp_ret_q);
		qs->crp_ret_q_len = 0;
		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
		qs->crp_ret_q_drops = 0;

		TAILQ_INIT(&qs->crp_ret_kq);
		qs->crp_ret_kq_len = 0;
		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
		qs->crp_ret_kq_drops = 0;

		crypto_crp_ret_qs_list[i] = qs;
	}
	if (i == ncpu)
		return 0;

	for (j = 0; j < i; j++) {
		struct crypto_crp_ret_qs *qs = crypto_crp_ret_qs_list[j];

		mutex_destroy(&qs->crp_ret_q_mtx);
		kmem_free(qs, sizeof(struct crypto_crp_ret_qs));
	}
	kmem_free(crypto_crp_ret_qs_list, sizeof(struct crypto_crp_ret_qs *) * ncpu);

	return ENOMEM;
}

static int
crypto_init0(void)
{
	int error;

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
	    0, "cryptop", NULL, IPL_NET);
	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
	    0, "cryptodesc", NULL, IPL_NET);
	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
	    0, "cryptkop", NULL, IPL_NET);

	crypto_crp_qs_percpu = percpu_alloc(sizeof(struct crypto_crp_qs));
	percpu_foreach(crypto_crp_qs_percpu, crypto_crp_qs_init_pc, NULL);

	error = crypto_crp_ret_qs_init();
	if (error) {
		printf("crypto_init: cannot malloc ret_q list\n");
		return ENOMEM;
	}

	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
	if (softintr_cookie == NULL) {
		printf("crypto_init: cannot establish request queue handler\n");
		return crypto_destroy(false);
	}

	/*
	 * Some encryption devices (such as mvcesa) are attached before
	 * ipi_sysinit().  That causes an assertion in ipi_register() as
	 * crypto_ret_si softint uses SOFTINT_RCPU.
	 */
	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
		printf("crypto_init: cannot register crypto_init_finalize\n");
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

static int
crypto_init_finalize(device_t self __unused)
{

	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
	    &cryptoret_softint, NULL);
	KASSERT(crypto_ret_si != NULL);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		uint64_t where;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/*
		 * FIXME: should prohibit enqueueing to crp_q and crp_kq
		 * from this point on.
		 */

		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/*
		 * FIXME: should prohibit touching crypto_drivers[] and its
		 * elements from this point on.
		 */

		/*
		 * Ensure cryptoret_softint() is never scheduled and then wait
		 * for last softint_execute().
		 */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (softintr_cookie != NULL)
		unregister_swi(SWI_CRYPTO, cryptointr);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_destroy(&cryptop_pool);
	pool_destroy(&cryptodesc_pool);
	pool_destroy(&cryptkop_pool);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
	struct cryptoini *cr;

	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0) {
			DPRINTF("alg %d not supported\n", cr->cri_alg);
			return false;
		}

	return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;

	best = NULL;
	/*
	 * hard == 0 means both hardware and software drivers are usable.
	 * Hardware drivers are preferred over software drivers, so search
	 * the hardware drivers first.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this. We need to inject a "virtual" session layer right
		 * XXX about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			(cap->cc_sessions)++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
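
/*
 * Illustrative sketch (not part of the driver logic): a consumer creating
 * and releasing a session.  The algorithm choice, key material, and key
 * length below are hypothetical.
 */
#if 0
static u_int8_t example_key[8];		/* hypothetical key material */

static int
example_session(void)
{
	struct cryptoini cri;
	u_int64_t sid;
	int error;

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = CRYPTO_DES_CBC;	/* hypothetical algorithm */
	cri.cri_klen = 64;		/* key length, in bits */
	cri.cri_key = example_key;

	error = crypto_newsession(&sid, &cri, 0 /* s/w drivers allowed */);
	if (error)
		return error;

	/*
	 * sid now carries the driver id in its upper 32 bits and the
	 * driver-local session id in its lower 32 bits.
	 */
	return crypto_freesession(sid);
}
#endif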

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	int err = 0;

	/* Determine two IDs. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	if (cap == NULL)
		return ENOENT;

	if (cap->cc_sessions)
		(cap->cc_sessions)--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
	return err;
}

static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

	return cap->cc_process != NULL ||
	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
	    cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver_uninit(i);
		if (cap == NULL || crypto_checkdriver_initialized(cap))
			continue;
		break;
	}

	/* Out of entries, allocate some more. */
	if (cap == NULL) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;

		cap = crypto_checkdriver_uninit(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;	/* Mark */
	cap->cc_flags = flags;
	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}
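
/*
 * Illustrative sketch (not part of the driver logic): how a driver
 * typically claims a slot and registers an algorithm.  The example_*
 * callbacks and the algorithm choice are hypothetical.
 */
#if 0
static int example_newsession(void *, u_int32_t *, struct cryptoini *);
static int example_freesession(void *, u_int64_t);
static int example_process(void *, struct cryptop *, int);

static void
example_attach(void *sc)
{
	int32_t driverid;

	driverid = crypto_get_driverid(0);	/* 0: hardware driver */
	if (driverid < 0)
		return;

	(void)crypto_register(driverid, CRYPTO_DES_CBC, 0 /* maxoplen */,
	    0 /* flags */, example_newsession, example_freesession,
	    example_process, sc);
}
#endif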

static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
	struct cryptocap *cap;

	KASSERT(crypto_drivers != NULL);

	if (hid >= crypto_drivers_num)
		return NULL;

	cap = &crypto_drivers[hid];
	mutex_enter(&cap->cc_lock);
	return cap;
}

/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated yet
 * - crypto_drivers[hid] may not be initialized yet
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL)
		return NULL;

	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Unlike crypto_checkdriver_uninit(), crypto_checkdriver() additionally
 * requires that crypto_drivers[hid] has been initialized, and returns
 * NULL when it has not.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
		return NULL;

	struct cryptocap *cap = &crypto_drivers[hid];
	return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}

static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver_lock(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid,
			    kalg,
			    flags
			);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	/* Don't leak the driver lock taken by crypto_checkdriver_lock(). */
	if (cap != NULL)
		crypto_driver_unlock(cap);

	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	/* NB: algorithms are in the range [1..max] */
	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid,
			    alg,
			    flags,
			    maxoplen
			);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	crypto_driver_unlock(cap);

	return err;
}

static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
	int i;
	u_int32_t ses;
	bool lastalg = true;

	KASSERT(cap != NULL);
	KASSERT(mutex_owned(&cap->cc_lock));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	if (!all && cap->cc_alg[alg] == 0)
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm ? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		crypto_driver_clear(cap);
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	err = crypto_unregister_locked(cap, alg, false);
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(cap, i, true);
		if (err)
			break;
	}
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	crypto_driver_unlock(cap);
	if (needwakeup)
		setsoftcrypto(softintr_cookie);

	return 0;
}
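
/*
 * Illustrative sketch (not part of the driver logic): a driver whose
 * process routine returned ERESTART calls crypto_unblock() once it has
 * resources again, which kicks the swi thread to re-dispatch queued ops.
 */
#if 0
static void
example_resources_available(u_int32_t driverid)
{
	(void)crypto_unblock(driverid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}
#endif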

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;

	KASSERT(crp != NULL);

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty;
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 *
		 * The list order does not matter for batched jobs.
		 */
		crp_qs = crypto_get_crp_qs(&s);
		crp_q = &crp_qs->crp_q;
		wasempty = TAILQ_EMPTY(crp_q);
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		crypto_put_crp_qs(&s);
		crp_q = NULL;
		if (wasempty)
			setsoftcrypto(softintr_cookie);

		return 0;
	}

	crp_qs = crypto_get_crp_qs(&s);
	crp_q = &crp_qs->crp_q;
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we could ensure that the driver remains valid until it has
	 * completed crypto_unregister(), this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must be detached, so this request will migrate
		 * to other drivers in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_qblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	crypto_driver_unlock(cap);
	result = crypto_invoke(crp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_qblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
		cryptostats.cs_blocks++;

		/*
		 * The crp was queued on crp_q, so no error has actually
		 * occurred; this function must not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}
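
/*
 * Illustrative sketch (not part of the driver logic): building and
 * dispatching a request with a single descriptor.  The callback, buffer
 * handling, and algorithm choice are hypothetical, and error handling is
 * abbreviated.
 */
#if 0
static int example_callback(struct cryptop *);

static int
example_submit(u_int64_t sid, struct mbuf *m, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return ENOMEM;

	crp->crp_sid = sid;
	crp->crp_flags = CRYPTO_F_IMBUF;	/* payload is an mbuf chain */
	crp->crp_buf = m;
	crp->crp_ilen = len;
	crp->crp_callback = example_callback;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_DES_CBC;		/* hypothetical algorithm */
	crd->crd_skip = 0;
	crd->crd_len = len;

	return crypto_dispatch(crp);
}
#endif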

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_kq *crp_kq;

	KASSERT(krp != NULL);

	cryptostats.cs_kops++;

	crp_qs = crypto_get_crp_qs(&s);
	crp_kq = &crp_qs->crp_kq;
	cap = crypto_checkdriver_lock(krp->krp_hid);
	/*
	 * TODO:
	 * If we could ensure that the driver remains valid until it has
	 * completed crypto_unregister(), this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_kqblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	crypto_driver_unlock(cap);
	result = crypto_kinvoke(krp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_kqblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;

		/*
		 * The krp was queued on crp_kq, so no error has actually
		 * occurred; this function must not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}
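
/*
 * Illustrative sketch (not part of the driver logic): submitting an
 * asymmetric (key) operation.  The operation and completion handler are
 * hypothetical, and the krp_param[] setup is elided because its layout
 * depends on krp_op.
 */
#if 0
static int
example_ksubmit(void)
{
	struct cryptkop *krp;

	krp = crypto_kgetreq(1, PR_NOWAIT);
	if (krp == NULL)
		return ENOMEM;

	krp->krp_op = CRK_MOD_EXP;		/* hypothetical operation */
	krp->krp_callback = example_kcallback;	/* hypothetical handler */
	/* ... fill in krp->krp_iparams, krp->krp_oparams, krp->krp_param[] ... */

	return crypto_kdispatch(krp);
}
#endif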

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	struct cryptocap *cap = NULL;
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);

	/* Sanity checks. */
	if (krp->krp_callback == NULL) {
		cv_destroy(&krp->krp_cv);
		crypto_kfreereq(krp);
		return EINVAL;
	}

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;
		crypto_driver_lock(cap);
		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		if (cap->cc_kprocess == NULL) {
			crypto_driver_unlock(cap);
			continue;
		}
		if ((cap->cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		break;
	}
	/* If the scan found no suitable driver, don't reuse a stale cap. */
	if (hid == crypto_drivers_num)
		cap = NULL;
	mutex_exit(&crypto_drv_mtx);
	if (cap != NULL) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = cap->cc_kprocess;
		arg = cap->cc_karg;
		krp->krp_hid = hid;
		krp->reqcpu = curcpu();
		crypto_driver_unlock(cap);
		error = (*process)(arg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;

	KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp->crp_callback == NULL) {
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = cap->cc_process;
		arg = cap->cc_arg;
		crp->reqcpu = curcpu();

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		crypto_driver_unlock(cap);
		return (*process)(arg, crp, hint);
	} else {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		if (cap != NULL)
			crypto_driver_unlock(cap);

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;

		crypto_done(crp);
		return 0;
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}
	pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * When crp_ret_q is full, we refuse to allocate a new request here,
	 * so that an error callback cannot overflow crp_ret_q.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_q_maxlen > 0
	    && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
		qs->crp_ret_q_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently, support one descriptor only.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

	if (krp == NULL)
		return;

	DPRINTF("krp %p\n", krp);

	/* sanity check */
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_kfreereq() freeing krp on RETQ\n");
	}

	pool_put(&cryptkop_pool, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently, support one descriptor only.
 */
struct cryptkop *
crypto_kgetreq(int num __unused, int prflags)
{
	struct cryptkop *krp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * When crp_ret_kq is full, we refuse to allocate a new request here,
	 * so that an error callback cannot overflow crp_ret_kq.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_kq_maxlen > 0
	    && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
		qs->crp_ret_kq_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	krp = pool_get(&cryptkop_pool, prflags);
	if (krp == NULL) {
		return NULL;
	}
	memset(krp, 0, sizeof(struct cryptkop));

	return krp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_flags |= CRYPTO_F_DONE;

#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		crp->crp_flags |= CRYPTO_F_DONE;
#if 0
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * TODO:
			 * If crp->crp_flags & CRYPTO_F_USER and the used
			 * encryption driver does all the processing in
			 * the same context, we can skip enqueueing crp_ret_q
			 * and softint_schedule(crypto_ret_si).
			 */
			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
		} else
#endif
		{
			int wasempty;
			struct crypto_crp_ret_qs *qs;
			struct crypto_crp_ret_q *crp_ret_q;

			qs = crypto_get_crp_ret_qs(crp->reqcpu);
			crp_ret_q = &qs->crp_ret_q;
			wasempty = TAILQ_EMPTY(crp_ret_q);
			DPRINTF("lid[%u]: queueing %p\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
			crp->crp_flags |= CRYPTO_F_ONRETQ;
			TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len++;
			if (wasempty && !qs->crp_ret_q_exit_flag) {
				DPRINTF("lid[%u]: waking cryptoret, "
				    "crp %p hit empty queue.\n",
				    CRYPTO_SESID2LID(crp->crp_sid), crp);
				softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
			}
			crypto_put_crp_ret_qs(crp->reqcpu);
		}
	}
}
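
/*
 * Illustrative sketch (not part of the driver logic): the driver side of
 * completion.  A process routine reports its status through crp_etype
 * and hands the op back via crypto_done(); example_do_work() is a
 * hypothetical helper.
 */
#if 0
static int
example_process(void *arg, struct cryptop *crp, int hint)
{
	int error;

	error = example_do_work(arg, crp);
	if (error == ERESTART) {
		/*
		 * Out of resources: returning ERESTART makes the caller
		 * requeue the op and mark this driver blocked until
		 * crypto_unblock() is called.
		 */
		return ERESTART;
	}

	crp->crp_etype = error;		/* 0 on success */
	crypto_done(crp);
	return 0;
}
#endif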

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		int wasempty;
		struct crypto_crp_ret_qs *qs;
		struct crypto_crp_ret_kq *crp_ret_kq;

		qs = crypto_get_crp_ret_qs(krp->reqcpu);
		crp_ret_kq = &qs->crp_ret_kq;

		wasempty = TAILQ_EMPTY(crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
		qs->crp_ret_kq_len++;
		if (wasempty && !qs->crp_ret_q_exit_flag)
			softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
		crypto_put_crp_ret_qs(krp->reqcpu);
	}
}

int
crypto_getfeat(int *featp)
{

	if (crypto_userasymcrypto == 0) {
		*featp = 0;
		return 0;
	}

	mutex_enter(&crypto_drv_mtx);

	int feat = 0;
	for (int hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap;
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			goto unlock;

		if (cap->cc_kprocess == NULL)
			goto unlock;

		for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((cap->cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;

unlock:		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
	int result, hint, s;

	cryptostats.cs_intrs++;
	crp_qs = crypto_get_crp_qs(&s);
	crp_q = &crp_qs->crp_q;
	crp_kq = &crp_qs->crp_kq;
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver_lock(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip a blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0) {
				crypto_driver_unlock(cap);
				continue;
			}
			crypto_driver_unlock(cap);

			/*
			 * skip batched crp's until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * found the first crp which is neither blocked
			 * nor batched.
			 */
			submit = crp;
			/*
			 * a batched crp can be processed much later, so
			 * clear the hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			/* we must re-take the queue mutex here, as the
			   TAILQ op or kinvoke below may need it.  sigh. */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}

/*
 * softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;
	struct crypto_crp_ret_kq *crp_ret_kq;

	qs = crypto_get_crp_ret_qs(curcpu());
	crp_ret_q = &qs->crp_ret_q;
	crp_ret_kq = &qs->crp_ret_kq;
	for (;;) {
		struct cryptop *crp;
		struct cryptkop *krp;

		crp = TAILQ_FIRST(crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len--;
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
			qs->crp_ret_kq_len--;
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop before calling any callbacks. */
		if (crp == NULL && krp == NULL)
			break;

		mutex_spin_exit(&qs->crp_ret_q_mtx);
		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&qs->crp_ret_q_mtx);
	}
	crypto_put_crp_ret_qs(curcpu());
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}