crypto.c revision 1.1 1 /* $NetBSD: crypto.c,v 1.1 2003/07/25 21:12:43 jonathan Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3 /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos (at) cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.1 2003/07/25 21:12:43 jonathan Exp $");
28
29 /* XXX FIXME: should be defopt'ed */
30 #define CRYPTO_TIMING /* enable cryptop timing stuff */
31
32 #include <sys/param.h>
33 #include <sys/reboot.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/pool.h>
38 #include <opencrypto/cryptodev.h>
39 #include <opencrypto/cryptosoft.h> /* swcr_init() */
40 #include <sys/kthread.h>
41
42 #include <opencrypto/xform.h> /* XXX for M_XDATA */
43
44
45 #ifdef __NetBSD__
46 #define splcrypto splnet
47 /* below is kludges to check whats still missing */
48 #define SWI_CRYPTO 17
49 #define register_swi(lvl, fn) \
50 softintr_establish(IPL_SOFTNET, (void (*)(void*))fn, NULL)
51 #define unregister_swi(lvl, fn) softintr_disestablish(softintr_cookie)
52 #define setsoftcrypto(x) softintr_schedule(x)
53 #define nanouptime(tp) microtime((struct timeval*)(tp))
54 #endif
55
56 #define SESID2HID(sid) (((sid) >> 32) & 0xffffffff)
57
58 /*
59 * Crypto drivers register themselves by allocating a slot in the
60 * crypto_drivers table with crypto_get_driverid() and then registering
61 * each algorithm they support with crypto_register() and crypto_kregister().
62 */
63 static struct cryptocap *crypto_drivers = NULL;
64 static int crypto_drivers_num = 0;
65 static void* softintr_cookie;
66
67 /*
68 * There are two queues for crypto requests; one for symmetric (e.g.
69 * cipher) operations and one for asymmetric (e.g. MOD) operations.
70 * See below for how synchronization is handled.
71 */
72 static TAILQ_HEAD(,cryptop) crp_q; /* request queues */
73 static TAILQ_HEAD(,cryptkop) crp_kq;
74
75 /*
76 * There are two queues for processing completed crypto requests; one
77 * for the symmetric and one for the asymmetric ops. We only need one
78 * but have two to avoid type futzing (cryptop vs. cryptkop). See below
79 * for how synchronization is handled.
80 */
81 static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
82 static TAILQ_HEAD(,cryptkop) crp_ret_kq;
83
84 /*
85 * Crypto op and desciptor data structures are allocated
86 * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) .
87 */
88 struct pool cryptop_pool;
89 struct pool cryptodesc_pool;
90 int crypto_pool_initialized = 0;
91
92 #ifdef __NetBSD__
93 void cryptoattach(int); void opencryptoattach(int);
94 static void deferred_crypto_thread(void *arg);
95 #endif
96
97 int crypto_usercrypto = 1; /* userland may open /dev/crypto */
98 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
99 int crypto_devallowsoft = 0; /* only use hardware crypto for asym */
100 #ifdef __FreeBSD__
101 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
102 &crypto_usercrypto, 0,
103 "Enable/disable user-mode access to crypto support");
104 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
105 &crypto_userasymcrypto, 0,
106 "Enable/disable user-mode access to asymmetric crypto support");
107 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
108 &crypto_devallowsoft, 0,
109 "Enable/disable use of software asym crypto support");
110 #endif
111
112 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
113
114 /*
115 * Synchronization: read carefully, this is non-trivial.
116 *
117 * Crypto requests are submitted via crypto_dispatch. Typically
118 * these come in from network protocols at spl0 (output path) or
119 * spl[,soft]net (input path).
120 *
121 * Requests are typically passed on the driver directly, but they
122 * may also be queued for processing by a software interrupt thread,
123 * cryptointr, that runs at splsoftcrypto. This thread dispatches
124 * the requests to crypto drivers (h/w or s/w) who call crypto_done
125 * when a request is complete. Hardware crypto drivers are assumed
126 * to register their IRQ's as network devices so their interrupt handlers
127 * and subsequent "done callbacks" happen at spl[imp,net].
128 *
129 * Completed crypto ops are queued for a separate kernel thread that
130 * handles the callbacks at spl0. This decoupling insures the crypto
131 * driver interrupt service routine is not delayed while the callback
132 * takes place and that callbacks are delivered after a context switch
133 * (as opposed to a software interrupt that clients must block).
134 *
135 * This scheme is not intended for SMP machines.
136 */
137 static void cryptointr(void); /* swi thread to dispatch ops */
138 static void cryptoret(void); /* kernel thread for callbacks*/
139 static struct proc *cryptoproc;
140 static void crypto_destroy(void);
141 static int crypto_invoke(struct cryptop *crp, int hint);
142 static int crypto_kinvoke(struct cryptkop *krp, int hint);
143
144 static struct cryptostats cryptostats;
145 static int crypto_timing = 0;
146
#ifdef __FreeBSD__
/*
 * Export framework statistics and the timing knob via sysctl.
 * BUG FIX: the crypto_stats SYSCTL_STRUCT was declared twice
 * (duplicate OID registration); the second copy is removed.
 * Also: "#endif __FreeBSD__" carried non-standard trailing tokens;
 * the tag now lives in a comment.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
156
157 static int
158 crypto_init(void)
159 {
160 int error;
161
162 #ifdef __FreeBSD__
163
164 cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
165 cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
166 0, 0, 1);
167 if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
168 printf("crypto_init: cannot setup crypto zones\n");
169 return ENOMEM;
170 }
171 #endif
172
173 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
174 crypto_drivers = malloc(crypto_drivers_num *
175 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
176 if (crypto_drivers == NULL) {
177 printf("crypto_init: cannot malloc driver table\n");
178 return ENOMEM;
179 }
180
181 TAILQ_INIT(&crp_q);
182 TAILQ_INIT(&crp_kq);
183
184 TAILQ_INIT(&crp_ret_q);
185 TAILQ_INIT(&crp_ret_kq);
186
187 softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
188 #ifdef __FreeBSD__
189 error = kthread_create((void (*)(void *)) cryptoret, NULL,
190 &cryptoproc, "cryptoret");
191 if (error) {
192 printf("crypto_init: cannot start cryptoret thread; error %d",
193 error);
194 crypto_destroy();
195 }
196 #else
197 /* defer thread creation until after boot */
198 kthread_create( deferred_crypto_thread, NULL);
199 #endif
200 return error;
201 }
202
203 static void
204 crypto_destroy(void)
205 {
206 /* XXX no wait to reclaim zones */
207 if (crypto_drivers != NULL)
208 free(crypto_drivers, M_CRYPTO_DATA);
209 unregister_swi(SWI_CRYPTO, cryptointr);
210 }
211
/*
 * Create a new session supporting every algorithm in the cri list.
 *
 * hard > 0 restricts the search to hardware drivers, hard < 0 to
 * software drivers, hard == 0 accepts either.  On success the 64-bit
 * session id (driver index in the upper 32 bits, driver-local session
 * id in the lower 32 -- see SESID2HID) is stored in *sid and the
 * driver's session count is bumped.
 *
 * Returns 0 on success, EINVAL if no suitable driver exists, or the
 * error from the chosen driver's newsession entry point.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		/* cr == NULL iff the inner loop ran off the end,
		 * i.e. every requested algorithm was supported. */
		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;	/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
				crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Pack driver id and driver-local id. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
287
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 *
 * Decrements the driver's session count, invokes its freesession entry
 * point if present, and recycles the driver slot once the last session
 * of a CRYPTOCAP_F_CLEANUP-marked driver is gone.
 *
 * Returns 0 on success, EINVAL if the driver table is missing, ENOENT
 * for an out-of-range driver id, or the driver's freesession error.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err = 0;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
			crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}
336
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 *
 * A free slot has no process callback, is not pending cleanup, and
 * has no sessions.  If none is free the table is doubled.  The chosen
 * slot is marked with cc_sessions = 1 to reserve it until the driver
 * calls crypto_register() (which resets the count).
 *
 * Returns the driver id, or -1 on table overflow/allocation failure.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		/* Copy the old table; the tail is already zeroed (M_ZERO). */
		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1; /* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
391
392 static struct cryptocap *
393 crypto_checkdriver(u_int32_t hid)
394 {
395 if (crypto_drivers == NULL)
396 return NULL;
397 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
398 }
399
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported a driver.
 *
 * Records the algorithm as supported in cc_kalg; the first
 * registration for a driver also installs its kprocess callback
 * and opaque argument.  Returns 0 on success, EINVAL for a bad
 * driver id or out-of-range algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: "CRK_ALGORITM_MIN" (sic) matches the constant as spelled in
	 * cryptodev.h -- presumably a historical typo there; confirm the
	 * header before renaming. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the callback. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
443
/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 *
 * Records the algorithm and its maximum operand length in the
 * driver's capability entry; the first registration also installs
 * the newsession/freesession/process callbacks and clears the
 * session count that crypto_get_driverid() used as a reservation
 * mark.  Returns 0 on success, EINVAL for a bad driver id or
 * out-of-range algorithm.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* First algorithm registration installs the callbacks. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0; /* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
495
/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 *
 * Only the given algorithm is removed; the slot is cleared (or marked
 * CRYPTOCAP_F_CLEANUP, if sessions remain) only when the last
 * registered algorithm goes away.  Returns 0 on success, EINVAL if
 * the driver/algorithm pair is not registered.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* Clear the whole slot but preserve the session
			 * count across the bzero so lazy cleanup works. */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
541
542 /*
543 * Unregister all algorithms associated with a crypto driver.
544 * If there are pending sessions using it, leave enough information
545 * around so that subsequent calls using those sessions will
546 * correctly detect the driver has been unregistered and reroute
547 * requests.
548 */
549 int
550 crypto_unregister_all(u_int32_t driverid)
551 {
552 int i, err, s = splcrypto();
553 u_int32_t ses;
554 struct cryptocap *cap;
555
556 cap = crypto_checkdriver(driverid);
557 if (cap != NULL) {
558 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
559 cap->cc_alg[i] = 0;
560 cap->cc_max_op_len[i] = 0;
561 }
562 ses = cap->cc_sessions;
563 bzero(cap, sizeof(struct cryptocap));
564 if (ses != 0) {
565 /*
566 * If there are pending sessions, just mark as invalid.
567 */
568 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
569 cap->cc_sessions = ses;
570 }
571 err = 0;
572 } else
573 err = EINVAL;
574
575 splx(s);
576 return err;
577 }
578
579 /*
580 * Clear blockage on a driver. The what parameter indicates whether
581 * the driver is now ready for cryptop's and/or cryptokop's.
582 */
583 int
584 crypto_unblock(u_int32_t driverid, int what)
585 {
586 struct cryptocap *cap;
587 int needwakeup, err, s;
588
589 s = splcrypto();
590 cap = crypto_checkdriver(driverid);
591 if (cap != NULL) {
592 needwakeup = 0;
593 if (what & CRYPTO_SYMQ) {
594 needwakeup |= cap->cc_qblocked;
595 cap->cc_qblocked = 0;
596 }
597 if (what & CRYPTO_ASYMQ) {
598 needwakeup |= cap->cc_kqblocked;
599 cap->cc_kqblocked = 0;
600 }
601 if (needwakeup) {
602 setsoftcrypto(softintr_cookie);
603 }
604 err = 0;
605 } else
606 err = EINVAL;
607 splx(s);
608
609 return err;
610 }
611
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Requests without CRYPTO_F_BATCH are handed to the driver
 * immediately (unless it is blocked); batchable requests are queued
 * on crp_q and the software-interrupt dispatcher is kicked when the
 * queue transitions from empty.  Returns 0, or the non-ERESTART
 * error from crypto_invoke() for immediate dispatch.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		/* Kick the dispatcher only on the empty->non-empty edge. */
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
677
/*
 * Add an asymetric crypto request to a queue,
 * to be processed by the kernel thread.
 *
 * Tries the driver recorded in krp->krp_hid directly; if the driver
 * is unknown or blocked (or runs out of resources), the request is
 * queued on crp_kq for the software-interrupt dispatcher.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
716
/*
 * Dispatch an assymetric crypto request to the appropriate crypto devices.
 *
 * Scans the driver table for the first driver whose cc_kalg supports
 * krp->krp_op (software drivers are skipped unless
 * crypto_devallowsoft is set).  Except for the NULL-argument sanity
 * checks this always returns 0: failures (including ENODEV) are
 * delivered asynchronously via krp->krp_status and crypto_kdone().
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		/* Record the chosen driver for requeueing/unblock. */
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
759
#ifdef CRYPTO_TIMING
/*
 * Fold the time elapsed since *tv into the timing record ts, then
 * reset *tv to "now" so successive pipeline stages can be timed
 * back to back.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	/* t = now - *tv, normalizing a negative nanosecond field. */
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	/*
	 * NOTE(review): NetBSD's 3-arg timespecadd() stores acc+t into
	 * its third argument (t here), so ts->acc itself is never
	 * updated.  The FreeBSD original used the 2-arg form that
	 * accumulates into acc -- this looks like a porting slip;
	 * confirm the intended destination before changing.
	 */
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif
783
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 *
 * If the session's driver has unregistered, the descriptor chain is
 * re-linked into a cryptoini list, a replacement session is opened,
 * and the op completes with EAGAIN so the caller resubmits.  Otherwise
 * the driver's process callback runs the request; its return value
 * (possibly ERESTART) is passed back to the dispatcher.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver pending cleanup: drop our session reference now. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		/* Link the per-descriptor CRD_INI records into the
		 * cryptoini chain crypto_newsession() expects. */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;	/* caller must resubmit */
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
843
844 /*
845 * Release a set of crypto descriptors.
846 */
847 void
848 crypto_freereq(struct cryptop *crp)
849 {
850 struct cryptodesc *crd;
851 int s;
852
853 if (crp == NULL)
854 return;
855
856 s = splcrypto();
857
858 while ((crd = crp->crp_desc) != NULL) {
859 crp->crp_desc = crd->crd_next;
860 pool_put(&cryptodesc_pool, crd);
861 }
862
863 pool_put(&cryptop_pool, crp);
864 splx(s);
865 }
866
/*
 * Acquire a set of crypto descriptors.
 *
 * Allocates one zeroed struct cryptop with `num' zeroed descriptors
 * linked onto crp->crp_desc.  The backing pools are created lazily on
 * first use.  pool_get() is called with flags 0 (no waiting), so NULL
 * is returned on any allocation failure, with partial allocations
 * released via crypto_freereq().
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	/* First caller sets up the pools. */
	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			/* Free the op and any descriptors linked so far. */
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		/* Prepend: descriptors end up in reverse allocation order. */
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
910
/*
 * Invoke the callback on behalf of the driver.
 *
 * Normally the completed op is queued on crp_ret_q for the cryptoret
 * kernel thread, which runs the callback at base spl.  On non-NetBSD
 * builds, CRYPTO_F_CBIMM requests run their callback directly here
 * (the NetBSD path disables that -- see the race note below).
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
	 * has done its tsleep().
	 */
#ifndef __NetBSD__
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else
#endif /* __NetBSD__ */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed.  Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Wake cryptoret only on the empty->non-empty edge. */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
968
/*
 * Invoke the callback on behalf of the driver.
 * (Key-operation flavor: the completed krp is queued on crp_ret_kq
 * for the cryptoret kernel thread.)
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		/* NB: cryptoret tsleeps on &crp_ret_q for BOTH return
		 * queues, so waking that channel here is intentional. */
		wakeup_one(&crp_ret_q);
	splx(s);
}
992
993 int
994 crypto_getfeat(int *featp)
995 {
996 int hid, kalg, feat = 0;
997 int s;
998
999 s = splcrypto();
1000
1001 if (crypto_userasymcrypto == 0)
1002 goto out;
1003
1004 for (hid = 0; hid < crypto_drivers_num; hid++) {
1005 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1006 !crypto_devallowsoft) {
1007 continue;
1008 }
1009 if (crypto_drivers[hid].cc_kprocess == NULL)
1010 continue;
1011 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1012 if ((crypto_drivers[hid].cc_kalg[kalg] &
1013 CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1014 feat |= 1 << kalg;
1015 }
1016 out:
1017 splx(s);
1018 *featp = feat;
1019 return (0);
1020 }
1021
1022 /*
1023 * Software interrupt thread to dispatch crypto requests.
1024 */
1025 static void
1026 cryptointr(void)
1027 {
1028 struct cryptop *crp, *submit;
1029 struct cryptkop *krp;
1030 struct cryptocap *cap;
1031 int result, hint, s;
1032
1033 printf("crypto softint\n");
1034 cryptostats.cs_intrs++;
1035 s = splcrypto();
1036 do {
1037 /*
1038 * Find the first element in the queue that can be
1039 * processed and look-ahead to see if multiple ops
1040 * are ready for the same driver.
1041 */
1042 submit = NULL;
1043 hint = 0;
1044 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1045 u_int32_t hid = SESID2HID(crp->crp_sid);
1046 cap = crypto_checkdriver(hid);
1047 if (cap == NULL || cap->cc_process == NULL) {
1048 /* Op needs to be migrated, process it. */
1049 if (submit == NULL)
1050 submit = crp;
1051 break;
1052 }
1053 if (!cap->cc_qblocked) {
1054 if (submit != NULL) {
1055 /*
1056 * We stop on finding another op,
1057 * regardless whether its for the same
1058 * driver or not. We could keep
1059 * searching the queue but it might be
1060 * better to just use a per-driver
1061 * queue instead.
1062 */
1063 if (SESID2HID(submit->crp_sid) == hid)
1064 hint = CRYPTO_HINT_MORE;
1065 break;
1066 } else {
1067 submit = crp;
1068 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1069 break;
1070 /* keep scanning for more are q'd */
1071 }
1072 }
1073 }
1074 if (submit != NULL) {
1075 TAILQ_REMOVE(&crp_q, submit, crp_next);
1076 result = crypto_invoke(submit, hint);
1077 if (result == ERESTART) {
1078 /*
1079 * The driver ran out of resources, mark the
1080 * driver ``blocked'' for cryptop's and put
1081 * the request back in the queue. It would
1082 * best to put the request back where we got
1083 * it but that's hard so for now we put it
1084 * at the front. This should be ok; putting
1085 * it at the end does not work.
1086 */
1087 /* XXX validate sid again? */
1088 crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1089 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1090 cryptostats.cs_blocks++;
1091 }
1092 }
1093
1094 /* As above, but for key ops */
1095 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1096 cap = crypto_checkdriver(krp->krp_hid);
1097 if (cap == NULL || cap->cc_kprocess == NULL) {
1098 /* Op needs to be migrated, process it. */
1099 break;
1100 }
1101 if (!cap->cc_kqblocked)
1102 break;
1103 }
1104 if (krp != NULL) {
1105 TAILQ_REMOVE(&crp_kq, krp, krp_next);
1106 result = crypto_kinvoke(krp, 0);
1107 if (result == ERESTART) {
1108 /*
1109 * The driver ran out of resources, mark the
1110 * driver ``blocked'' for cryptkop's and put
1111 * the request back in the queue. It would
1112 * best to put the request back where we got
1113 * it but that's hard so for now we put it
1114 * at the front. This should be ok; putting
1115 * it at the end does not work.
1116 */
1117 /* XXX validate sid again? */
1118 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1119 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1120 cryptostats.cs_kblocks++;
1121 }
1122 }
1123 } while (submit != NULL || krp != NULL);
1124 splx(s);
1125 }
1126
/*
 * Kernel thread to do callbacks.
 *
 * Pulls completed requests off crp_ret_q and crp_ret_kq (one of each
 * per iteration), drops to base spl to run the requestor callbacks,
 * and sleeps on &crp_ret_q when both queues are empty.  Both
 * crypto_done() and crypto_kdone() wake that channel.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			/* NB: counts wakeups, not callbacks delivered. */
			cryptostats.cs_rets++;
		}
	}
}
1173
/*
 * Deferred hook (queued by crypto_init() via kthread_create()) that
 * starts the cryptoret callback kernel thread once thread creation
 * is possible; on failure the framework is torn down.
 * NOTE(review): the failure message still says "crypto_init:" --
 * carried over from the inline FreeBSD path.
 */
static void
deferred_crypto_thread(void *arg)
{
	int error;

	error = kthread_create1((void (*)(void*)) cryptoret, NULL,
	    &cryptoproc, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
		    error);
		crypto_destroy();
	}

}
1189
/*
 * Pseudo-device attach routine: performs one-time initialization of
 * the crypto framework and the software crypto driver.  The argument
 * n is the config(8) instance count and is unused.
 */
void
opencryptoattach(int n)
{
	/* XXX in absence of FreeBSD mod_init(), call init hooks here */
	/* BUG FIX: removed leftover debug printf("cryptoattach\n"). */
	crypto_init();
	swcr_init();
}
1198
#ifdef __FreeBSD__
/*
 * Initialization code, both for static and dynamic loading.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;	/* default for unhandled event types */

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	return error;
}

static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};

MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
/* BUG FIX: "#endif __FreeBSD__" had non-standard tokens after #endif
 * (a compiler diagnostic under -Wpedantic); tag moved into a comment. */
#endif /* __FreeBSD__ */
1231
1232
1233