/*	$NetBSD: crypto.c,v 1.22 2008/02/01 04:52:35 tls Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.22 2008/02/01 04:52:35 tls Exp $");

/* XXX FIXME: should be defopt'ed */
#define CRYPTO_TIMING			/* enable cryptop timing stuff */

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

#define	splcrypto	splnet
/* below are kludges to check what's still missing */
#define	SWI_CRYPTO 17
#define	register_swi(lvl, fn) \
	softint_establish(SOFTINT_NET, (void (*)(void*))fn, NULL)
#define	unregister_swi(lvl, fn) softint_disestablish(softintr_cookie)
#define	setsoftcrypto(x) softint_schedule(x)

#define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
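
/*
 * Illustrative note (not from the original source): crypto_newsession()
 * below builds a session ID by packing the driver index (hid) into the
 * upper 32 bits and the driver-local session ID (lid) into the lower
 * 32 bits:
 *
 *	sid = ((u_int64_t)hid << 32) | (lid & 0xffffffff);
 *
 * SESID2HID() simply recovers hid from such an ID.
 */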

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
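/*
 * Sketch of how a driver would use this interface (the names
 * mydrv_newsession, mydrv_freesession, mydrv_process and the softc
 * pointer sc are hypothetical, not defined in this file):
 *
 *	int32_t cid = crypto_get_driverid(0);
 *	if (cid >= 0)
 *		crypto_register(cid, CRYPTO_DES_CBC, 0, 0,
 *		    mydrv_newsession, mydrv_freesession,
 *		    mydrv_process, sc);
 */
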
static	struct cryptocap *crypto_drivers;
static	int crypto_drivers_num;
static	void *softintr_cookie;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static	TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
		TAILQ_HEAD_INITIALIZER(crp_q);
static	TAILQ_HEAD(,cryptkop) crp_kq =
		TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops. We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop). See below
 * for how synchronization is handled.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q =	/* callback queues */
		TAILQ_HEAD_INITIALIZER(crp_ret_q);
static	TAILQ_HEAD(,cryptkop) crp_ret_kq =
		TAILQ_HEAD_INITIALIZER(crp_ret_kq);

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
int crypto_pool_initialized = 0;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * cryptodevallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only hardware-accelerated crypto for userlevel */

SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "usercrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"crypto support"),
	    NULL, 0, &crypto_usercrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "userasymcrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_userasymcrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "cryptodevallowsoft",
	    SYSCTL_DESCR("Enable/disable use of software "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_devallowsoft, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}
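
/*
 * Illustrative usage (an assumption, not from this file: the nodes
 * created above should land under "kern" given the CTL_KERN/CTL_CREATE
 * arguments), e.g. from userland:
 *
 *	sysctl -w kern.usercrypto=0
 *	sysctl -w kern.cryptodevallowsoft=-1
 */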

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch. Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto. This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete. Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0. This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static	void cryptointr(void);		/* swi thread to dispatch ops */
static	void cryptoret(void);		/* kernel thread for callbacks */
static	struct lwp *cryptothread;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static	struct cryptostats cryptostats;
static	int crypto_timing = 0;

static int
crypto_init0(void)
{
	int error;

	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return 0;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
	error = kthread_create(PRI_NONE, 0, NULL, (void (*)(void*))cryptoret,
	    NULL, &cryptothread, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
		    error);
		crypto_destroy();
	}

	return 0;
}

void
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}

static void
crypto_destroy(void)
{
	/* XXX no wait to reclaim zones */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	unregister_swi(SWI_CRYPTO, cryptointr);
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual"
			 * XXX session layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
			    crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
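
/*
 * Illustrative caller (hypothetical: deskey, mackey and the algorithm
 * choices are made up for the example). Two cryptoini records are
 * chained, cipher then MAC, and hard == 0 accepts any driver:
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *	int error;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	memset(&cria, 0, sizeof(cria));
 *	crie.cri_alg = CRYPTO_DES_CBC;
 *	crie.cri_klen = 64;		// key lengths are in bits
 *	crie.cri_key = deskey;
 *	crie.cri_next = &cria;
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = mackey;
 *	error = crypto_newsession(&sid, &crie, 0);
 */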

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
		    crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}

/*
 * Return an unused driver id. Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	crypto_init();

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm. This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n", driverid, kalg, flags);

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Register support for a non-key-related algorithm. This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n", driverid, alg,
			    flags, maxoplen);

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just
				 * mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as
			 * invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Clear blockage on a driver. The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup, err, s;

	s = splcrypto();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		needwakeup = 0;
		if (what & CRYPTO_SYMQ) {
			needwakeup |= cap->cc_qblocked;
			cap->cc_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			needwakeup |= cap->cc_kqblocked;
			cap->cc_kqblocked = 0;
		}
		if (needwakeup) {
			setsoftcrypto(softintr_cookie);
		}
		err = 0;
	} else
		err = EINVAL;
	splx(s);

	return err;
}
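
/*
 * Sketch of the expected driver usage (hypothetical driver code, not
 * part of this file): a driver whose process routine returned ERESTART
 * re-enables its queues once resources free up, typically from its
 * interrupt handler, with
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * which kicks the swi thread if any ops were queued in the meantime.
 */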

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread. This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
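
/*
 * Illustrative submission path (hypothetical caller: mycallback, the
 * mbuf m, sid and deskey are assumptions for the example). Allocate a
 * request with one descriptor, fill it in, and dispatch it; the
 * callback later runs from the cryptoret thread:
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp != NULL) {
 *		struct cryptodesc *crd = crp->crp_desc;
 *
 *		crp->crp_sid = sid;		// from crypto_newsession
 *		crp->crp_ilen = m->m_pkthdr.len;
 *		crp->crp_flags = CRYPTO_F_IMBUF;
 *		crp->crp_buf = (void *)m;
 *		crp->crp_callback = mycallback;
 *		crd->crd_skip = 0;
 *		crd->crd_len = m->m_pkthdr.len;
 *		crd->crd_alg = CRYPTO_DES_CBC;
 *		crd->crd_key = deskey;
 *		crd->crd_klen = 64;
 *		crd->crd_flags = CRD_F_ENCRYPT;
 *		(void)crypto_dispatch(crp);
 *	}
 */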

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptkop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
		    crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	/* accumulate into acc; keep t as this sample for the min/max checks */
	timespecadd(&ts->acc, &t, &ts->acc);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;
	int s;

	if (crp == NULL)
		return;

	s = splcrypto();

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}

	pool_put(&cryptop_pool, crp);
	splx(s);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL, IPL_NET);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL, IPL_NET);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On NetBSD 1.6O, CBIMM does its wakeup_one() before the requestor
	 * has done its tsleep().
	 */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed. Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed. Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}

int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;
	int s;

	s = splcrypto();

	if (crypto_userasymcrypto == 0)
		goto out;

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}
out:
	splx(s);
	*featp = feat;
	return (0);
}
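
/*
 * Illustrative check (hypothetical caller, not part of this file):
 * test the returned bitmask for a specific key operation, e.g.
 * modular exponentiation:
 *
 *	int feat, have_modexp = 0;
 *
 *	if (crypto_getfeat(&feat) == 0)
 *		have_modexp = (feat & (1 << CRK_MOD_EXP)) != 0;
 */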

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint, s;

	printf("crypto softint\n");
	cryptostats.cs_intrs++;
	s = splcrypto();
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for
					 * the same driver or not. We could
					 * keep searching the queue but it
					 * might be better to just use a
					 * per-driver queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags &
					    CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning in case more are queued */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue. It would be
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front. This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue. It would be
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front. This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
	} while (submit != NULL || krp != NULL);
	splx(s);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp
					 * before doing the callback as the
					 * cryptop is likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			(void)tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			cryptostats.cs_rets++;
		}
	}
}