cryptodev.c revision 1.47 1 /* $NetBSD: cryptodev.c,v 1.47 2009/04/04 10:12:52 ad Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.4.2.4 2003/06/03 00:09:02 sam Exp $ */
3 /* $OpenBSD: cryptodev.c,v 1.53 2002/07/10 22:21:30 mickey Exp $ */
4
5 /*-
6 * Copyright (c) 2008 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Coyote Point Systems, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2001 Theo de Raadt
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * Effort sponsored in part by the Defense Advanced Research Projects
61 * Agency (DARPA) and Air Force Research Laboratory, Air Force
62 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
63 *
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: cryptodev.c,v 1.47 2009/04/04 10:12:52 ad Exp $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kmem.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/pool.h>
75 #include <sys/sysctl.h>
76 #include <sys/file.h>
77 #include <sys/filedesc.h>
78 #include <sys/errno.h>
79 #include <sys/md5.h>
80 #include <sys/sha1.h>
81 #include <sys/conf.h>
82 #include <sys/device.h>
83 #include <sys/kauth.h>
84 #include <sys/select.h>
85 #include <sys/poll.h>
86 #include <sys/atomic.h>
87
88 #include "opt_ocf.h"
89 #include <opencrypto/cryptodev.h>
90 #include <opencrypto/ocryptodev.h>
91 #include <opencrypto/xform.h>
92
/*
 * Per-session state for one crypto session opened through /dev/crypto.
 * A csession is owned by exactly one fcrypt context and is linked on its
 * csessions list.  Lookup key is `ses' (per-fd id); `sid' is the opaque
 * session id handed back by the opencrypto framework.
 */
struct csession {
	TAILQ_ENTRY(csession) next;	/* linkage on fcrypt::csessions */
	u_int64_t	sid;		/* framework session id */
	u_int32_t	ses;		/* per-fd session id (never 0) */

	u_int32_t	cipher;		/* note: shares name space in crd_alg */
	struct enc_xform *txform;	/* cipher transform, NULL if none */
	u_int32_t	mac;		/* note: shares name space in crd_alg */
	struct auth_hash *thash;	/* hash/MAC transform, NULL if none */
	u_int32_t	comp_alg;	/* note: shares name space in crd_alg */
	struct comp_algo *tcomp;	/* compression transform, NULL if none */

	void *		key;		/* cipher key (M_XDATA), freed in csefree */
	int		keylen;		/* cipher key length in bytes */
	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];	/* staging for user IV */

	void *		mackey;		/* MAC key (M_XDATA), freed in csefree */
	int		mackeylen;	/* MAC key length in bytes */
	u_char		tmp_mac[CRYPTO_MAX_MAC_LEN];	/* staging for MAC result */

	struct iovec	iovec[1];	/* user requests never have more */
	struct uio	uio;		/* scratch uio for synchronous ops */
	int		error;		/* status posted by the callback */
};
117
/*
 * Per-open-file (cloned fd) context.  Holds this descriptor's sessions
 * and the return queues on which completed asynchronous symmetric (mq)
 * and asymmetric (mkq) requests wait to be collected via CIOCNCRYPTRET*.
 * Protected by crypto_mtx except where noted.
 */
struct fcrypt {
	TAILQ_HEAD(csessionlist, csession) csessions;	/* open sessions */
	TAILQ_HEAD(crprethead, cryptop) crp_ret_mq;	/* finished sym ops */
	TAILQ_HEAD(krprethead, cryptkop) crp_ret_mkq;	/* finished asym ops */
	int		sesn;		/* next per-fd session id; starts at 1 */
	struct selinfo	sinfo;		/* select/poll bookkeeping */
	u_int32_t	requestid;	/* next async request id */
};
126
/* For our fixed-size allocations */
static struct pool fcrpl;	/* pool of struct fcrypt */
static struct pool csepl;	/* pool of struct csession */

/* Declaration of master device (fd-cloning/ctxt-allocating) entrypoints */
static int	cryptoopen(dev_t dev, int flag, int mode, struct lwp *l);
static int	cryptoread(dev_t dev, struct uio *uio, int ioflag);
static int	cryptowrite(dev_t dev, struct uio *uio, int ioflag);
static int	cryptoselect(dev_t dev, int rw, struct lwp *l);

/* Declaration of cloned-device (per-ctxt) entrypoints */
static int	cryptof_read(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	cryptof_write(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	cryptof_ioctl(struct file *, u_long, void *);
static int	cryptof_close(struct file *);
static int	cryptof_poll(struct file *, int);

/* fileops installed on every cloned /dev/crypto descriptor */
static const struct fileops cryptofops = {
	.fo_read = cryptof_read,
	.fo_write = cryptof_write,
	.fo_ioctl = cryptof_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = cryptof_poll,
	.fo_stat = fbadop_stat,
	.fo_close = cryptof_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_drain = fnullop_drain,
};

/* exported for the backward-compatibility module (ocryptodev) */
struct	csession *cryptodev_csefind(struct fcrypt *, u_int);
/* session-list helpers; all must be called with crypto_mtx held */
static struct	csession *csefind(struct fcrypt *, u_int);
static int	csedelete(struct fcrypt *, struct csession *);
static struct	csession *cseadd(struct fcrypt *, struct csession *);
static struct	csession *csecreate(struct fcrypt *, u_int64_t, void *,
    u_int64_t, void *, u_int64_t, u_int32_t, u_int32_t, u_int32_t,
    struct enc_xform *, struct auth_hash *, struct comp_algo *);
static int	csefree(struct csession *);

static int	cryptodev_key(struct crypt_kop *);
static int	cryptodev_mkey(struct fcrypt *, struct crypt_n_kop *, int);
static int	cryptodev_msessionfin(struct fcrypt *, int, u_int32_t *);

/* completion callbacks: _cb for synchronous waiters, _mcb for async queues */
static int	cryptodev_cb(void *);
static int	cryptodevkey_cb(void *);

static int	cryptodev_mcb(void *);
static int	cryptodevkey_mcb(void *);

static int	cryptodev_getmstatus(struct fcrypt *, struct crypt_result *,
		    int);
static int	cryptodev_getstatus(struct fcrypt *, struct crypt_result *);

extern int	ocryptof_ioctl(struct file *, u_long, void *);

/*
 * sysctl-able control variables for /dev/crypto now defined in crypto.c:
 * crypto_usercrypto, crypto_userasmcrypto, crypto_devallowsoft.
 */
187
/* ARGSUSED */
/* read(2) on a cloned crypto fd is not supported; all I/O is via ioctl. */
int
cryptof_read(file_t *fp, off_t *poff,
    struct uio *uio, kauth_cred_t cred, int flags)
{
	return EIO;
}
195
/* ARGSUSED */
/* write(2) on a cloned crypto fd is not supported; all I/O is via ioctl. */
int
cryptof_write(file_t *fp, off_t *poff,
    struct uio *uio, kauth_cred_t cred, int flags)
{
	return EIO;
}
203
204 /* ARGSUSED */
205 int
206 cryptof_ioctl(struct file *fp, u_long cmd, void *data)
207 {
208 struct fcrypt *fcr = fp->f_data;
209 struct csession *cse;
210 struct session_op *sop;
211 struct session_n_op *snop;
212 struct crypt_op *cop;
213 struct crypt_mop *mop;
214 struct crypt_mkop *mkop;
215 struct crypt_n_op *cnop;
216 struct crypt_n_kop *knop;
217 struct crypt_sgop *sgop;
218 struct crypt_sfop *sfop;
219 struct cryptret *crypt_ret;
220 struct crypt_result *crypt_res;
221 u_int32_t ses;
222 u_int32_t *sesid;
223 int error = 0;
224 size_t count;
225
226 /* backwards compatibility */
227 file_t *criofp;
228 struct fcrypt *criofcr;
229 int criofd;
230
231 switch (cmd) {
232 case CRIOGET: /* XXX deprecated, remove after 5.0 */
233 if ((error = fd_allocfile(&criofp, &criofd)) != 0)
234 return error;
235 criofcr = pool_get(&fcrpl, PR_WAITOK);
236 mutex_spin_enter(&crypto_mtx);
237 TAILQ_INIT(&criofcr->csessions);
238 TAILQ_INIT(&criofcr->crp_ret_mq);
239 TAILQ_INIT(&criofcr->crp_ret_mkq);
240 selinit(&criofcr->sinfo);
241
242 /*
243 * Don't ever return session 0, to allow detection of
244 * failed creation attempts with multi-create ioctl.
245 */
246 criofcr->sesn = 1;
247 criofcr->requestid = 1;
248 mutex_spin_exit(&crypto_mtx);
249 (void)fd_clone(criofp, criofd, (FREAD|FWRITE),
250 &cryptofops, criofcr);
251 *(u_int32_t *)data = criofd;
252 return error;
253 break;
254 case CIOCGSESSION:
255 sop = (struct session_op *)data;
256 error = cryptodev_session(fcr, sop);
257 break;
258 case CIOCNGSESSION:
259 sgop = (struct crypt_sgop *)data;
260 snop = kmem_alloc((sgop->count *
261 sizeof(struct session_n_op)), KM_SLEEP);
262 error = copyin(sgop->sessions, snop, sgop->count *
263 sizeof(struct session_n_op));
264 if (error) {
265 goto mbail;
266 }
267
268 error = cryptodev_msession(fcr, snop, sgop->count);
269 if (error) {
270 goto mbail;
271 }
272
273 error = copyout(snop, sgop->sessions, sgop->count *
274 sizeof(struct session_n_op));
275 mbail:
276 kmem_free(snop, sgop->count * sizeof(struct session_n_op));
277 break;
278 case CIOCFSESSION:
279 mutex_spin_enter(&crypto_mtx);
280 ses = *(u_int32_t *)data;
281 cse = csefind(fcr, ses);
282 if (cse == NULL)
283 return EINVAL;
284 csedelete(fcr, cse);
285 error = csefree(cse);
286 mutex_spin_exit(&crypto_mtx);
287 break;
288 case CIOCNFSESSION:
289 sfop = (struct crypt_sfop *)data;
290 sesid = kmem_alloc((sfop->count * sizeof(u_int32_t)),
291 KM_SLEEP);
292 error = copyin(sfop->sesid, sesid,
293 (sfop->count * sizeof(u_int32_t)));
294 if (!error) {
295 error = cryptodev_msessionfin(fcr, sfop->count, sesid);
296 }
297 kmem_free(sesid, (sfop->count * sizeof(u_int32_t)));
298 break;
299 case CIOCCRYPT:
300 mutex_spin_enter(&crypto_mtx);
301 cop = (struct crypt_op *)data;
302 cse = csefind(fcr, cop->ses);
303 mutex_spin_exit(&crypto_mtx);
304 if (cse == NULL) {
305 DPRINTF(("csefind failed\n"));
306 return EINVAL;
307 }
308 error = cryptodev_op(cse, cop, curlwp);
309 DPRINTF(("cryptodev_op error = %d\n", error));
310 break;
311 case CIOCNCRYPTM:
312 mop = (struct crypt_mop *)data;
313 cnop = kmem_alloc((mop->count * sizeof(struct crypt_n_op)),
314 KM_SLEEP);
315 error = copyin(mop->reqs, cnop,
316 (mop->count * sizeof(struct crypt_n_op)));
317 if(!error) {
318 error = cryptodev_mop(fcr, cnop, mop->count, curlwp);
319 if (!error) {
320 error = copyout(cnop, mop->reqs,
321 (mop->count * sizeof(struct crypt_n_op)));
322 }
323 }
324 kmem_free(cnop, (mop->count * sizeof(struct crypt_n_op)));
325 break;
326 case CIOCKEY:
327 error = cryptodev_key((struct crypt_kop *)data);
328 DPRINTF(("cryptodev_key error = %d\n", error));
329 break;
330 case CIOCNFKEYM:
331 mkop = (struct crypt_mkop *)data;
332 knop = kmem_alloc((mkop->count * sizeof(struct crypt_n_kop)),
333 KM_SLEEP);
334 error = copyin(mkop->reqs, knop,
335 (mkop->count * sizeof(struct crypt_n_kop)));
336 if (!error) {
337 error = cryptodev_mkey(fcr, knop, mkop->count);
338 if (!error)
339 error = copyout(knop, mkop->reqs,
340 (mkop->count * sizeof(struct crypt_n_kop)));
341 }
342 kmem_free(knop, (mkop->count * sizeof(struct crypt_n_kop)));
343 break;
344 case CIOCASYMFEAT:
345 error = crypto_getfeat((int *)data);
346 break;
347 case CIOCNCRYPTRETM:
348 crypt_ret = (struct cryptret *)data;
349 count = crypt_ret->count;
350 crypt_res = kmem_alloc((count * sizeof(struct crypt_result)),
351 KM_SLEEP);
352 error = copyin(crypt_ret->results, crypt_res,
353 (count * sizeof(struct crypt_result)));
354 if (error)
355 goto reterr;
356 crypt_ret->count = cryptodev_getmstatus(fcr, crypt_res,
357 crypt_ret->count);
358 /* sanity check count */
359 if (crypt_ret->count > count) {
360 printf("%s.%d: error returned count %zd > original "
361 " count %zd\n",
362 __FILE__, __LINE__, crypt_ret->count, count);
363 crypt_ret->count = count;
364
365 }
366 error = copyout(crypt_res, crypt_ret->results,
367 (crypt_ret->count * sizeof(struct crypt_result)));
368 reterr:
369 kmem_free(crypt_res, (count * sizeof(struct crypt_result)));
370 break;
371 case CIOCNCRYPTRET:
372 error = cryptodev_getstatus(fcr, (struct crypt_result *)data);
373 break;
374 default:
375 /* Check for backward compatible commands */
376 error = ocryptof_ioctl(fp, cmd, data);
377 }
378 return error;
379 }
380
381 int
382 cryptodev_op(struct csession *cse, struct crypt_op *cop, struct lwp *l)
383 {
384 struct cryptop *crp = NULL;
385 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
386 int error;
387 int iov_len = cop->len;
388 int flags=0;
389 int dst_len; /* copyout size */
390
391 if (cop->len > 256*1024-4)
392 return E2BIG;
393
394 if (cse->txform) {
395 if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
396 return EINVAL;
397 }
398
399 DPRINTF(("cryptodev_op[%d]: iov_len %d\n", (uint32_t)cse->sid, iov_len));
400 if ((cse->tcomp) && cop->dst_len) {
401 if (iov_len < cop->dst_len) {
402 /* Need larger iov to deal with decompress */
403 iov_len = cop->dst_len;
404 }
405 DPRINTF(("cryptodev_op: iov_len -> %d for decompress\n", iov_len));
406 }
407
408 (void)memset(&cse->uio, 0, sizeof(cse->uio));
409 cse->uio.uio_iovcnt = 1;
410 cse->uio.uio_resid = 0;
411 cse->uio.uio_rw = UIO_WRITE;
412 cse->uio.uio_iov = cse->iovec;
413 UIO_SETUP_SYSSPACE(&cse->uio);
414 memset(&cse->iovec, 0, sizeof(cse->iovec));
415
416 /* the iov needs to be big enough to handle the uncompressed
417 * data.... */
418 cse->uio.uio_iov[0].iov_len = iov_len;
419 cse->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
420 cse->uio.uio_resid = cse->uio.uio_iov[0].iov_len;
421 DPRINTF(("cryptodev_op[%d]: uio.iov_base %p malloced %d bytes\n",
422 (uint32_t)cse->sid, cse->uio.uio_iov[0].iov_base, iov_len));
423
424 crp = crypto_getreq((cse->tcomp != NULL) + (cse->txform != NULL) + (cse->thash != NULL));
425 if (crp == NULL) {
426 error = ENOMEM;
427 goto bail;
428 }
429 DPRINTF(("cryptodev_op[%d]: crp %p\n", (uint32_t)cse->sid, crp));
430
431 /* crds are always ordered tcomp, thash, then txform */
432 /* with optional missing links */
433
434 /* XXX: If we're going to compress then hash or encrypt, we need
435 * to be able to pass on the new size of the data.
436 */
437
438 if (cse->tcomp) {
439 crdc = crp->crp_desc;
440 }
441
442 if (cse->thash) {
443 crda = crdc ? crdc->crd_next : crp->crp_desc;
444 if (cse->txform && crda)
445 crde = crda->crd_next;
446 } else {
447 if (cse->txform) {
448 crde = crdc ? crdc->crd_next : crp->crp_desc;
449 } else if (!cse->tcomp) {
450 error = EINVAL;
451 goto bail;
452 }
453 }
454
455 DPRINTF(("ocf[%d]: iov_len %d, cop->len %d\n",
456 (uint32_t)cse->sid,
457 cse->uio.uio_iov[0].iov_len,
458 cop->len));
459
460 if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
461 {
462 printf("copyin failed %s %d \n", (char *)cop->src, error);
463 goto bail;
464 }
465
466 if (crdc) {
467 switch (cop->op) {
468 case COP_COMP:
469 crdc->crd_flags |= CRD_F_COMP;
470 break;
471 case COP_DECOMP:
472 crdc->crd_flags &= ~CRD_F_COMP;
473 break;
474 default:
475 break;
476 }
477 /* more data to follow? */
478 if (cop->flags & COP_F_MORE) {
479 flags |= CRYPTO_F_MORE;
480 }
481 crdc->crd_len = cop->len;
482 crdc->crd_inject = 0;
483
484 crdc->crd_alg = cse->comp_alg;
485 crdc->crd_key = NULL;
486 crdc->crd_klen = 0;
487 DPRINTF(("cryptodev_op[%d]: crdc setup for comp_alg %d.\n",
488 (uint32_t)cse->sid, crdc->crd_alg));
489 }
490
491 if (crda) {
492 crda->crd_skip = 0;
493 crda->crd_len = cop->len;
494 crda->crd_inject = 0; /* ??? */
495
496 crda->crd_alg = cse->mac;
497 crda->crd_key = cse->mackey;
498 crda->crd_klen = cse->mackeylen * 8;
499 DPRINTF(("cryptodev_op: crda setup for mac %d.\n", crda->crd_alg));
500 }
501
502 if (crde) {
503 switch (cop->op) {
504 case COP_ENCRYPT:
505 crde->crd_flags |= CRD_F_ENCRYPT;
506 break;
507 case COP_DECRYPT:
508 crde->crd_flags &= ~CRD_F_ENCRYPT;
509 break;
510 default:
511 break;
512 }
513 crde->crd_len = cop->len;
514 crde->crd_inject = 0;
515
516 crde->crd_alg = cse->cipher;
517 crde->crd_key = cse->key;
518 crde->crd_klen = cse->keylen * 8;
519 DPRINTF(("cryptodev_op: crde setup for cipher %d.\n", crde->crd_alg));
520 }
521
522
523 crp->crp_ilen = cop->len;
524 /* The reqest is flagged as CRYPTO_F_USER as long as it is running
525 * in the user IOCTL thread. This flag lets us skip using the retq for
526 * the request if it completes immediately. If the request ends up being
527 * delayed or is not completed immediately the flag is removed.
528 */
529 crp->crp_flags = CRYPTO_F_IOV | (cop->flags & COP_F_BATCH) | CRYPTO_F_USER |
530 flags;
531 crp->crp_buf = (void *)&cse->uio;
532 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
533 crp->crp_sid = cse->sid;
534 crp->crp_opaque = (void *)cse;
535
536 if (cop->iv) {
537 if (crde == NULL) {
538 error = EINVAL;
539 goto bail;
540 }
541 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
542 error = EINVAL;
543 goto bail;
544 }
545 if ((error = copyin(cop->iv, cse->tmp_iv,
546 cse->txform->blocksize)))
547 goto bail;
548 (void)memcpy(crde->crd_iv, cse->tmp_iv, cse->txform->blocksize);
549 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
550 crde->crd_skip = 0;
551 } else if (crde) {
552 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
553 crde->crd_skip = 0;
554 } else {
555 crde->crd_flags |= CRD_F_IV_PRESENT;
556 crde->crd_skip = cse->txform->blocksize;
557 crde->crd_len -= cse->txform->blocksize;
558 }
559 }
560
561 if (cop->mac) {
562 if (crda == NULL) {
563 error = EINVAL;
564 goto bail;
565 }
566 crp->crp_mac=cse->tmp_mac;
567 }
568
569 /*
570 * XXX there was a comment here which said that we went to
571 * XXX splcrypto() but needed to only if CRYPTO_F_CBIMM,
572 * XXX disabled on NetBSD since 1.6O due to a race condition.
573 * XXX But crypto_dispatch went to splcrypto() itself! (And
574 * XXX now takes the crypto_mtx mutex itself). We do, however,
575 *
576 * XXX need to hold the mutex across the call to cv_wait().
577 * XXX (should we arrange for crypto_dispatch to return to
578 * XXX us with it held? it seems quite ugly to do so.)
579 */
580 #ifdef notyet
581 eagain:
582 #endif
583 error = crypto_dispatch(crp);
584 mutex_spin_enter(&crypto_mtx);
585
586 /*
587 * If the request was going to be completed by the
588 * ioctl thread then it would have been done by now.
589 * Remove the F_USER flag it so crypto_done() is not confused
590 * if the crypto device calls it after this point.
591 */
592 crp->crp_flags &= ~(CRYPTO_F_USER);
593
594 switch (error) {
595 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
596 case EAGAIN:
597 mutex_spin_exit(&crypto_mtx);
598 goto eagain;
599 break;
600 #endif
601 case 0:
602 break;
603 default:
604 DPRINTF(("cryptodev_op: not waiting, error.\n"));
605 mutex_spin_exit(&crypto_mtx);
606 goto bail;
607 }
608
609 while (!(crp->crp_flags & CRYPTO_F_DONE)) {
610 DPRINTF(("cryptodev_op[%d]: sleeping on cv %08x for crp %08x\n",
611 (uint32_t)cse->sid, (uint32_t)&crp->crp_cv,
612 (uint32_t)crp));
613 cv_wait(&crp->crp_cv, &crypto_mtx); /* XXX cv_wait_sig? */
614 }
615 if (crp->crp_flags & CRYPTO_F_ONRETQ) {
616 /* XXX this should never happen now with the CRYPTO_F_USER flag
617 * changes.
618 */
619 DPRINTF(("cryptodev_op: DONE, not woken by cryptoret.\n"));
620 (void)crypto_ret_q_remove(crp);
621 }
622 mutex_spin_exit(&crypto_mtx);
623
624 if (crp->crp_etype != 0) {
625 DPRINTF(("cryptodev_op: crp_etype %d\n", crp->crp_etype));
626 error = crp->crp_etype;
627 goto bail;
628 }
629
630 if (cse->error) {
631 DPRINTF(("cryptodev_op: cse->error %d\n", cse->error));
632 error = cse->error;
633 goto bail;
634 }
635
636 dst_len = crp->crp_ilen;
637 /* let the user know how much data was returned */
638 if (crp->crp_olen) {
639 dst_len = cop->dst_len = crp->crp_olen;
640 }
641 crp->len = dst_len;
642
643 if (cop->dst) {
644 DPRINTF(("cryptodev_op: copyout %d bytes to %p\n", dst_len, cop->dst));
645 }
646 if (cop->dst &&
647 (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, dst_len)))
648 {
649 DPRINTF(("cryptodev_op: copyout error %d\n", error));
650 goto bail;
651 }
652
653 if (cop->mac &&
654 (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) {
655 DPRINTF(("cryptodev_op: mac copyout error %d\n", error));
656 goto bail;
657 }
658
659
660 bail:
661 if (crp) {
662 crypto_freereq(crp);
663 }
664 if (cse->uio.uio_iov[0].iov_base) {
665 kmem_free(cse->uio.uio_iov[0].iov_base,iov_len);
666 }
667
668 return error;
669 }
670
671 static int
672 cryptodev_cb(void *op)
673 {
674 struct cryptop *crp = (struct cryptop *) op;
675 struct csession *cse = (struct csession *)crp->crp_opaque;
676 int error = 0;
677
678 mutex_spin_enter(&crypto_mtx);
679 cse->error = crp->crp_etype;
680 if (crp->crp_etype == EAGAIN) {
681 /* always drop mutex to call dispatch routine */
682 mutex_spin_exit(&crypto_mtx);
683 error = crypto_dispatch(crp);
684 mutex_spin_enter(&crypto_mtx);
685 }
686 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
687 cv_signal(&crp->crp_cv);
688 }
689 mutex_spin_exit(&crypto_mtx);
690 return 0;
691 }
692
693 static int
694 cryptodev_mcb(void *op)
695 {
696 struct cryptop *crp = (struct cryptop *) op;
697 struct csession *cse = (struct csession *)crp->crp_opaque;
698 int error=0;
699
700 mutex_spin_enter(&crypto_mtx);
701 cse->error = crp->crp_etype;
702 if (crp->crp_etype == EAGAIN) {
703 mutex_spin_exit(&crypto_mtx);
704 error = crypto_dispatch(crp);
705 mutex_spin_enter(&crypto_mtx);
706 }
707 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
708 cv_signal(&crp->crp_cv);
709 }
710
711 TAILQ_INSERT_TAIL(&crp->fcrp->crp_ret_mq, crp, crp_next);
712 selnotify(&crp->fcrp->sinfo, 0, 0);
713 mutex_spin_exit(&crypto_mtx);
714 return 0;
715 }
716
717 static int
718 cryptodevkey_cb(void *op)
719 {
720 struct cryptkop *krp = op;
721
722 mutex_spin_enter(&crypto_mtx);
723 cv_signal(&krp->krp_cv);
724 mutex_spin_exit(&crypto_mtx);
725 return 0;
726 }
727
728 static int
729 cryptodevkey_mcb(void *op)
730 {
731 struct cryptkop *krp = op;
732
733 mutex_spin_enter(&crypto_mtx);
734 cv_signal(&krp->krp_cv);
735 TAILQ_INSERT_TAIL(&krp->fcrp->crp_ret_mkq, krp, krp_next);
736 selnotify(&krp->fcrp->sinfo, 0, 0);
737 mutex_spin_exit(&crypto_mtx);
738 return 0;
739 }
740
741 static int
742 cryptodev_key(struct crypt_kop *kop)
743 {
744 struct cryptkop *krp = NULL;
745 int error = EINVAL;
746 int in, out, size, i;
747
748 if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM)
749 return EFBIG;
750
751 in = kop->crk_iparams;
752 out = kop->crk_oparams;
753 switch (kop->crk_op) {
754 case CRK_MOD_EXP:
755 if (in == 3 && out == 1)
756 break;
757 return EINVAL;
758 case CRK_MOD_EXP_CRT:
759 if (in == 6 && out == 1)
760 break;
761 return EINVAL;
762 case CRK_DSA_SIGN:
763 if (in == 5 && out == 2)
764 break;
765 return EINVAL;
766 case CRK_DSA_VERIFY:
767 if (in == 7 && out == 0)
768 break;
769 return EINVAL;
770 case CRK_DH_COMPUTE_KEY:
771 if (in == 3 && out == 1)
772 break;
773 return EINVAL;
774 case CRK_MOD_ADD:
775 if (in == 3 && out == 1)
776 break;
777 return EINVAL;
778 case CRK_MOD_ADDINV:
779 if (in == 2 && out == 1)
780 break;
781 return EINVAL;
782 case CRK_MOD_SUB:
783 if (in == 3 && out == 1)
784 break;
785 return EINVAL;
786 case CRK_MOD_MULT:
787 if (in == 3 && out == 1)
788 break;
789 return EINVAL;
790 case CRK_MOD_MULTINV:
791 if (in == 2 && out == 1)
792 break;
793 return EINVAL;
794 case CRK_MOD:
795 if (in == 2 && out == 1)
796 break;
797 return EINVAL;
798 default:
799 return EINVAL;
800 }
801
802 krp = pool_get(&cryptkop_pool, PR_WAITOK);
803 (void)memset(krp, 0, sizeof *krp);
804 cv_init(&krp->krp_cv, "crykdev");
805 krp->krp_op = kop->crk_op;
806 krp->krp_status = kop->crk_status;
807 krp->krp_iparams = kop->crk_iparams;
808 krp->krp_oparams = kop->crk_oparams;
809 krp->krp_status = 0;
810 krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
811
812 for (i = 0; i < CRK_MAXPARAM; i++)
813 krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
814 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
815 size = (krp->krp_param[i].crp_nbits + 7) / 8;
816 if (size == 0)
817 continue;
818 krp->krp_param[i].crp_p = kmem_alloc(size, KM_SLEEP);
819 if (i >= krp->krp_iparams)
820 continue;
821 error = copyin(kop->crk_param[i].crp_p,
822 krp->krp_param[i].crp_p, size);
823 if (error)
824 goto fail;
825 }
826
827 error = crypto_kdispatch(krp);
828 if (error != 0) {
829 goto fail;
830 }
831
832 mutex_spin_enter(&crypto_mtx);
833 while (!(krp->krp_flags & CRYPTO_F_DONE)) {
834 cv_wait(&krp->krp_cv, &crypto_mtx); /* XXX cv_wait_sig? */
835 }
836 if (krp->krp_flags & CRYPTO_F_ONRETQ) {
837 DPRINTF(("cryptodev_key: DONE early, not via cryptoret.\n"));
838 (void)crypto_ret_kq_remove(krp);
839 }
840 mutex_spin_exit(&crypto_mtx);
841
842 if (krp->krp_status != 0) {
843 DPRINTF(("cryptodev_key: krp->krp_status 0x%08x\n",
844 krp->krp_status));
845 error = krp->krp_status;
846 goto fail;
847 }
848
849 for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams;
850 i++) {
851 size = (krp->krp_param[i].crp_nbits + 7) / 8;
852 if (size == 0)
853 continue;
854 error = copyout(krp->krp_param[i].crp_p,
855 kop->crk_param[i].crp_p, size);
856 if (error) {
857 DPRINTF(("cryptodev_key: copyout oparam %d failed, "
858 "error=%d\n", i-krp->krp_iparams, error));
859 goto fail;
860 }
861 }
862
863 fail:
864 kop->crk_status = krp->krp_status;
865 for (i = 0; i < CRK_MAXPARAM; i++) {
866 struct crparam *kp = &(krp->krp_param[i]);
867 if (krp->krp_param[i].crp_p) {
868 size = (kp->crp_nbits + 7) / 8;
869 KASSERT(size > 0);
870 (void)memset(kp->crp_p, 0, size);
871 kmem_free(kp->crp_p, size);
872 }
873 }
874 cv_destroy(&krp->krp_cv);
875 pool_put(&cryptkop_pool, krp);
876 DPRINTF(("cryptodev_key: error=0x%08x\n", error));
877 return error;
878 }
879
/* ARGSUSED */
/*
 * close(2) on a cloned crypto fd: tear down every session owned by
 * this context, then release the context itself.
 */
static int
cryptof_close(struct file *fp)
{
	struct fcrypt *fcr = fp->f_data;
	struct csession *cse;

	mutex_spin_enter(&crypto_mtx);
	/* free all sessions still open on this descriptor */
	while ((cse = TAILQ_FIRST(&fcr->csessions))) {
		TAILQ_REMOVE(&fcr->csessions, cse, next);
		(void)csefree(cse);
	}
	/*
	 * NOTE(review): seldestroy() is called with the spin mutex held
	 * here -- confirm that is permitted; cryptoopen() uses the same
	 * pattern for selinit().
	 */
	seldestroy(&fcr->sinfo);
	fp->f_data = NULL;
	mutex_spin_exit(&crypto_mtx);

	pool_put(&fcrpl, fcr);
	return 0;
}
899
900 /* needed for compatibility module */
901 struct csession *cryptodev_csefind(struct fcrypt *fcr, u_int ses)
902 {
903 return csefind(fcr, ses);
904 }
905
906 /* csefind: call with crypto_mtx held. */
907 static struct csession *
908 csefind(struct fcrypt *fcr, u_int ses)
909 {
910 struct csession *cse, *cnext, *ret = NULL;
911
912 KASSERT(mutex_owned(&crypto_mtx));
913 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext)
914 if (cse->ses == ses)
915 ret = cse;
916
917 return ret;
918 }
919
920 /* csedelete: call with crypto_mtx held. */
921 static int
922 csedelete(struct fcrypt *fcr, struct csession *cse_del)
923 {
924 struct csession *cse, *cnext;
925 int ret = 0;
926
927 KASSERT(mutex_owned(&crypto_mtx));
928 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext) {
929 if (cse == cse_del) {
930 TAILQ_REMOVE(&fcr->csessions, cse, next);
931 ret = 1;
932 }
933 }
934 return ret;
935 }
936
937 /* cseadd: call with crypto_mtx held. */
938 static struct csession *
939 cseadd(struct fcrypt *fcr, struct csession *cse)
940 {
941 KASSERT(mutex_owned(&crypto_mtx));
942 /* don't let session ID wrap! */
943 if (fcr->sesn + 1 == 0) return NULL;
944 TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
945 cse->ses = fcr->sesn++;
946 return cse;
947 }
948
949 /* csecreate: call with crypto_mtx held. */
950 static struct csession *
951 csecreate(struct fcrypt *fcr, u_int64_t sid, void *key, u_int64_t keylen,
952 void *mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
953 u_int32_t comp_alg, struct enc_xform *txform, struct auth_hash *thash,
954 struct comp_algo *tcomp)
955 {
956 struct csession *cse;
957
958 KASSERT(mutex_owned(&crypto_mtx));
959 cse = pool_get(&csepl, PR_NOWAIT);
960 if (cse == NULL)
961 return NULL;
962 cse->key = key;
963 cse->keylen = keylen/8;
964 cse->mackey = mackey;
965 cse->mackeylen = mackeylen/8;
966 cse->sid = sid;
967 cse->cipher = cipher;
968 cse->mac = mac;
969 cse->comp_alg = comp_alg;
970 cse->txform = txform;
971 cse->thash = thash;
972 cse->tcomp = tcomp;
973 cse->error = 0;
974 if (cseadd(fcr, cse))
975 return cse;
976 else {
977 pool_put(&csepl, cse);
978 return NULL;
979 }
980 }
981
982 /* csefree: call with crypto_mtx held. */
983 static int
984 csefree(struct csession *cse)
985 {
986 int error;
987
988 KASSERT(mutex_owned(&crypto_mtx));
989 error = crypto_freesession(cse->sid);
990 if (cse->key)
991 free(cse->key, M_XDATA);
992 if (cse->mackey)
993 free(cse->mackey, M_XDATA);
994 pool_put(&csepl, cse);
995 return error;
996 }
997
/*
 * open(2) on the master /dev/crypto node: allocate a fresh fcrypt
 * context and clone a new descriptor bound to cryptofops.
 * Fails with ENXIO when userland crypto is administratively disabled.
 */
static int
cryptoopen(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	file_t *fp;
        struct fcrypt *fcr;
        int fd, error;

	if (crypto_usercrypto == 0)
		return ENXIO;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	fcr = pool_get(&fcrpl, PR_WAITOK);
	/*
	 * NOTE(review): selinit() under the spin mutex -- same pattern
	 * as the CRIOGET path in cryptof_ioctl(); confirm this is
	 * permitted for spin mutexes.
	 */
	mutex_spin_enter(&crypto_mtx);
	TAILQ_INIT(&fcr->csessions);
	TAILQ_INIT(&fcr->crp_ret_mq);
	TAILQ_INIT(&fcr->crp_ret_mkq);
	selinit(&fcr->sinfo);
	/*
	 * Don't ever return session 0, to allow detection of
	 * failed creation attempts with multi-create ioctl.
	 */
	fcr->sesn = 1;
	fcr->requestid = 1;
	mutex_spin_exit(&crypto_mtx);
	return fd_clone(fp, fd, flag, &cryptofops, fcr);
}
1027
/* read(2) on the master device is not supported. */
static int
cryptoread(dev_t dev, struct uio *uio, int ioflag)
{
	return EIO;
}
1033
/* write(2) on the master device is not supported. */
static int
cryptowrite(dev_t dev, struct uio *uio, int ioflag)
{
	return EIO;
}
1039
/* poll/select on the master device: never reports readiness. */
int
cryptoselect(dev_t dev, int rw, struct lwp *l)
{
	return 0;
}
1045
/*static*/
/*
 * Character-device switch for the master /dev/crypto node.  Only open
 * (which clones a per-context fd) and the stub read/write/select
 * entries are meaningful; everything else is a no-op placeholder.
 */
struct cdevsw crypto_cdevsw = {
	/* open */	cryptoopen,
	/* close */	noclose,
	/* read */	cryptoread,
	/* write */	cryptowrite,
	/* ioctl */	noioctl,
	/* ttstop?*/	nostop,
	/* ??*/		notty,
	/* poll */	cryptoselect /*nopoll*/,
	/* mmap */	nommap,
	/* kqfilter */	nokqfilter,
	/* type */	D_OTHER,
};
1060
1061 int
1062 cryptodev_mop(struct fcrypt *fcr,
1063 struct crypt_n_op * cnop,
1064 int count, struct lwp *l)
1065 {
1066 struct cryptop *crp = NULL;
1067 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
1068 int req, error=0;
1069 struct csession *cse;
1070 int flags=0;
1071 int iov_len;
1072
1073 for (req = 0; req < count; req++) {
1074 mutex_spin_enter(&crypto_mtx);
1075 cse = csefind(fcr, cnop[req].ses);
1076 if (cse == NULL) {
1077 DPRINTF(("csefind failed\n"));
1078 cnop[req].status = EINVAL;
1079 mutex_spin_exit(&crypto_mtx);
1080 continue;
1081 }
1082 mutex_spin_exit(&crypto_mtx);
1083
1084 if (cnop[req].len > 256*1024-4) {
1085 DPRINTF(("length failed\n"));
1086 cnop[req].status = EINVAL;
1087 continue;
1088 }
1089 if (cse->txform) {
1090 if (cnop[req].len == 0 ||
1091 (cnop[req].len % cse->txform->blocksize) != 0) {
1092 cnop[req].status = EINVAL;
1093 continue;
1094 }
1095 }
1096
1097 crp = crypto_getreq((cse->txform != NULL) +
1098 (cse->thash != NULL) +
1099 (cse->tcomp != NULL));
1100 if (crp == NULL) {
1101 cnop[req].status = ENOMEM;
1102 goto bail;
1103 }
1104
1105 iov_len = cnop[req].len;
1106 /* got a compression/decompression max size? */
1107 if ((cse->tcomp) && cnop[req].dst_len) {
1108 if (iov_len < cnop[req].dst_len) {
1109 /* Need larger iov to deal with decompress */
1110 iov_len = cnop[req].dst_len;
1111 }
1112 DPRINTF(("cryptodev_mop: iov_len -> %d for decompress\n", iov_len));
1113 }
1114
1115 (void)memset(&crp->uio, 0, sizeof(crp->uio));
1116 crp->uio.uio_iovcnt = 1;
1117 crp->uio.uio_resid = 0;
1118 crp->uio.uio_rw = UIO_WRITE;
1119 crp->uio.uio_iov = crp->iovec;
1120 UIO_SETUP_SYSSPACE(&crp->uio);
1121 memset(&crp->iovec, 0, sizeof(crp->iovec));
1122 crp->uio.uio_iov[0].iov_len = iov_len;
1123 DPRINTF(("cryptodev_mop: kmem_alloc(%d) for iov \n", iov_len));
1124 crp->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
1125 crp->uio.uio_resid = crp->uio.uio_iov[0].iov_len;
1126
1127 if (cse->tcomp) {
1128 crdc = crp->crp_desc;
1129 }
1130
1131 if (cse->thash) {
1132 crda = crdc ? crdc->crd_next : crp->crp_desc;
1133 if (cse->txform && crda)
1134 crde = crda->crd_next;
1135 } else {
1136 if (cse->txform) {
1137 crde = crdc ? crdc->crd_next : crp->crp_desc;
1138 } else if (!cse->tcomp) {
1139 error = EINVAL;
1140 goto bail;
1141 }
1142 }
1143
1144 if ((copyin(cnop[req].src,
1145 crp->uio.uio_iov[0].iov_base, cnop[req].len))) {
1146 cnop[req].status = EINVAL;
1147 goto bail;
1148 }
1149
1150 if (crdc) {
1151 switch (cnop[req].op) {
1152 case COP_COMP:
1153 crdc->crd_flags |= CRD_F_COMP;
1154 break;
1155 case COP_DECOMP:
1156 crdc->crd_flags &= ~CRD_F_COMP;
1157 break;
1158 default:
1159 break;
1160 }
1161 /* more data to follow? */
1162 if (cnop[req].flags & COP_F_MORE) {
1163 flags |= CRYPTO_F_MORE;
1164 }
1165 crdc->crd_len = cnop[req].len;
1166 crdc->crd_inject = 0;
1167
1168 crdc->crd_alg = cse->comp_alg;
1169 crdc->crd_key = NULL;
1170 crdc->crd_klen = 0;
1171 DPRINTF(("cryptodev_mop[%d]: crdc setup for comp_alg %d"
1172 " len %d.\n",
1173 (uint32_t)cse->sid, crdc->crd_alg,
1174 crdc->crd_len));
1175 }
1176
1177 if (crda) {
1178 crda->crd_skip = 0;
1179 crda->crd_len = cnop[req].len;
1180 crda->crd_inject = 0; /* ??? */
1181
1182 crda->crd_alg = cse->mac;
1183 crda->crd_key = cse->mackey;
1184 crda->crd_klen = cse->mackeylen * 8;
1185 }
1186
1187 if (crde) {
1188 if (cnop[req].op == COP_ENCRYPT)
1189 crde->crd_flags |= CRD_F_ENCRYPT;
1190 else
1191 crde->crd_flags &= ~CRD_F_ENCRYPT;
1192 crde->crd_len = cnop[req].len;
1193 crde->crd_inject = 0;
1194
1195 crde->crd_alg = cse->cipher;
1196 #ifdef notyet /* XXX must notify h/w driver new key, drain */
1197 if(cnop[req].key && cnop[req].keylen) {
1198 crde->crd_key = malloc(cnop[req].keylen,
1199 M_XDATA, M_WAITOK);
1200 if((error = copyin(cnop[req].key,
1201 crde->crd_key, cnop[req].keylen))) {
1202 cnop[req].status = EINVAL;
1203 goto bail;
1204 }
1205 crde->crd_klen = cnop[req].keylen * 8;
1206 } else { ... }
1207 #endif
1208 crde->crd_key = cse->key;
1209 crde->crd_klen = cse->keylen * 8;
1210 }
1211
1212 crp->crp_ilen = cnop[req].len;
1213 crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM |
1214 (cnop[req].flags & COP_F_BATCH) | flags;
1215 crp->crp_buf = (void *)&crp->uio;
1216 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_mcb;
1217 crp->crp_sid = cse->sid;
1218 crp->crp_opaque = (void *)cse;
1219 crp->fcrp = fcr;
1220 crp->dst = cnop[req].dst;
1221 crp->len = cnop[req].len; /* input len, iov may be larger */
1222 crp->mac = cnop[req].mac;
1223 DPRINTF(("cryptodev_mop: iov_base %p dst %p len %d mac %p\n",
1224 crp->uio.uio_iov[0].iov_base, crp->dst, crp->len,
1225 crp->mac));
1226
1227 if (cnop[req].iv) {
1228 if (crde == NULL) {
1229 cnop[req].status = EINVAL;
1230 goto bail;
1231 }
1232 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1233 cnop[req].status = EINVAL;
1234 goto bail;
1235 }
1236 if ((error = copyin(cnop[req].iv, crp->tmp_iv,
1237 cse->txform->blocksize))) {
1238 cnop[req].status = EINVAL;
1239 goto bail;
1240 }
1241 (void)memcpy(crde->crd_iv, crp->tmp_iv,
1242 cse->txform->blocksize);
1243 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
1244 crde->crd_skip = 0;
1245 } else if (crde) {
1246 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1247 crde->crd_skip = 0;
1248 } else {
1249 crde->crd_flags |= CRD_F_IV_PRESENT;
1250 crde->crd_skip = cse->txform->blocksize;
1251 crde->crd_len -= cse->txform->blocksize;
1252 }
1253 }
1254
1255 if (cnop[req].mac) {
1256 if (crda == NULL) {
1257 cnop[req].status = EINVAL;
1258 goto bail;
1259 }
1260 crp->crp_mac=cse->tmp_mac;
1261 }
1262 cnop[req].reqid = atomic_inc_32_nv(&(fcr->requestid));
1263 crp->crp_reqid = cnop[req].reqid;
1264 crp->crp_usropaque = cnop[req].opaque;
1265 #ifdef notyet
1266 eagain:
1267 #endif
1268 cnop[req].status = crypto_dispatch(crp);
1269 mutex_spin_enter(&crypto_mtx); /* XXX why mutex? */
1270
1271 switch (cnop[req].status) {
1272 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
1273 case EAGAIN:
1274 mutex_spin_exit(&crypto_mtx);
1275 goto eagain;
1276 break;
1277 #endif
1278 case 0:
1279 break;
1280 default:
1281 DPRINTF(("cryptodev_op: not waiting, error.\n"));
1282 mutex_spin_exit(&crypto_mtx);
1283 goto bail;
1284 }
1285
1286 mutex_spin_exit(&crypto_mtx);
1287 bail:
1288 if (cnop[req].status) {
1289 if (crp) {
1290 if (crp->uio.uio_iov[0].iov_base) {
1291 kmem_free(crp->uio.uio_iov[0].iov_base,
1292 crp->uio.uio_iov[0].iov_len);
1293 }
1294 crypto_freereq(crp);
1295 }
1296 error = 0;
1297 }
1298 }
1299 return error;
1300 }
1301
1302 static int
1303 cryptodev_mkey(struct fcrypt *fcr, struct crypt_n_kop *kop, int count)
1304 {
1305 struct cryptkop *krp = NULL;
1306 int error = EINVAL;
1307 int in, out, size, i, req;
1308
1309 for (req = 0; req < count; req++) {
1310 if (kop[req].crk_iparams + kop[req].crk_oparams > CRK_MAXPARAM)
1311 return EFBIG;
1312
1313 in = kop[req].crk_iparams;
1314 out = kop[req].crk_oparams;
1315 switch (kop[req].crk_op) {
1316 case CRK_MOD_EXP:
1317 if (in == 3 && out == 1)
1318 break;
1319 kop[req].crk_status = EINVAL;
1320 continue;
1321 case CRK_MOD_EXP_CRT:
1322 if (in == 6 && out == 1)
1323 break;
1324 kop[req].crk_status = EINVAL;
1325 continue;
1326 case CRK_DSA_SIGN:
1327 if (in == 5 && out == 2)
1328 break;
1329 kop[req].crk_status = EINVAL;
1330 continue;
1331 case CRK_DSA_VERIFY:
1332 if (in == 7 && out == 0)
1333 break;
1334 kop[req].crk_status = EINVAL;
1335 continue;
1336 case CRK_DH_COMPUTE_KEY:
1337 if (in == 3 && out == 1)
1338 break;
1339 kop[req].crk_status = EINVAL;
1340 continue;
1341 case CRK_MOD_ADD:
1342 if (in == 3 && out == 1)
1343 break;
1344 kop[req].crk_status = EINVAL;
1345 continue;
1346 case CRK_MOD_ADDINV:
1347 if (in == 2 && out == 1)
1348 break;
1349 kop[req].crk_status = EINVAL;
1350 continue;
1351 case CRK_MOD_SUB:
1352 if (in == 3 && out == 1)
1353 break;
1354 kop[req].crk_status = EINVAL;
1355 continue;
1356 case CRK_MOD_MULT:
1357 if (in == 3 && out == 1)
1358 break;
1359 kop[req].crk_status = EINVAL;
1360 continue;
1361 case CRK_MOD_MULTINV:
1362 if (in == 2 && out == 1)
1363 break;
1364 kop[req].crk_status = EINVAL;
1365 continue;
1366 case CRK_MOD:
1367 if (in == 2 && out == 1)
1368 break;
1369 kop[req].crk_status = EINVAL;
1370 continue;
1371 default:
1372 kop[req].crk_status = EINVAL;
1373 continue;
1374 }
1375
1376 krp = pool_get(&cryptkop_pool, PR_WAITOK);
1377 (void)memset(krp, 0, sizeof *krp);
1378 cv_init(&krp->krp_cv, "crykdev");
1379 krp->krp_op = kop[req].crk_op;
1380 krp->krp_status = kop[req].crk_status;
1381 krp->krp_iparams = kop[req].crk_iparams;
1382 krp->krp_oparams = kop[req].crk_oparams;
1383 krp->krp_status = 0;
1384 krp->krp_callback =
1385 (int (*) (struct cryptkop *)) cryptodevkey_mcb;
1386 (void)memcpy(krp->crk_param, kop[req].crk_param,
1387 sizeof(kop[req].crk_param));
1388
1389 krp->krp_flags = CRYPTO_F_CBIMM;
1390
1391 for (i = 0; i < CRK_MAXPARAM; i++)
1392 krp->krp_param[i].crp_nbits =
1393 kop[req].crk_param[i].crp_nbits;
1394 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
1395 size = (krp->krp_param[i].crp_nbits + 7) / 8;
1396 if (size == 0)
1397 continue;
1398 krp->krp_param[i].crp_p =
1399 kmem_alloc(size, KM_SLEEP);
1400 if (i >= krp->krp_iparams)
1401 continue;
1402 kop[req].crk_status =
1403 copyin(kop[req].crk_param[i].crp_p,
1404 krp->krp_param[i].crp_p, size);
1405 if (kop[req].crk_status)
1406 goto fail;
1407 }
1408 krp->fcrp = fcr;
1409
1410 kop[req].crk_reqid = atomic_inc_32_nv(&(fcr->requestid));
1411 krp->krp_reqid = kop[req].crk_reqid;
1412 krp->krp_usropaque = kop[req].crk_opaque;
1413
1414 kop[req].crk_status = crypto_kdispatch(krp);
1415 if (kop[req].crk_status != 0) {
1416 goto fail;
1417 }
1418
1419 fail:
1420 if(kop[req].crk_status) {
1421 if (krp) {
1422 kop[req].crk_status = krp->krp_status;
1423 for (i = 0; i < CRK_MAXPARAM; i++) {
1424 struct crparam *kp =
1425 &(krp->krp_param[i]);
1426 if (kp->crp_p) {
1427 size = (kp->crp_nbits + 7) / 8;
1428 KASSERT(size > 0);
1429 memset(kp->crp_p, 0, size);
1430 kmem_free(kp->crp_p, size);
1431 }
1432 }
1433 cv_destroy(&krp->krp_cv);
1434 pool_put(&cryptkop_pool, krp);
1435 }
1436 }
1437 error = 0;
1438 }
1439 DPRINTF(("cryptodev_key: error=0x%08x\n", error));
1440 return error;
1441 }
1442
1443 int
1444 cryptodev_session(struct fcrypt *fcr, struct session_op *sop)
1445 {
1446 struct cryptoini cria, crie;
1447 struct cryptoini cric; /* compressor */
1448 struct cryptoini *crihead = NULL;
1449 struct enc_xform *txform = NULL;
1450 struct auth_hash *thash = NULL;
1451 struct comp_algo *tcomp = NULL;
1452 struct csession *cse;
1453 u_int64_t sid;
1454 int error = 0;
1455
1456 DPRINTF(("cryptodev_session() cipher=%d, mac=%d\n", sop->cipher, sop->mac));
1457
1458 /* XXX there must be a way to not embed the list of xforms here */
1459 switch (sop->cipher) {
1460 case 0:
1461 break;
1462 case CRYPTO_DES_CBC:
1463 txform = &enc_xform_des;
1464 break;
1465 case CRYPTO_3DES_CBC:
1466 txform = &enc_xform_3des;
1467 break;
1468 case CRYPTO_BLF_CBC:
1469 txform = &enc_xform_blf;
1470 break;
1471 case CRYPTO_CAST_CBC:
1472 txform = &enc_xform_cast5;
1473 case CRYPTO_SKIPJACK_CBC:
1474 txform = &enc_xform_skipjack;
1475 break;
1476 case CRYPTO_AES_CBC:
1477 txform = &enc_xform_rijndael128;
1478 break;
1479 case CRYPTO_NULL_CBC:
1480 txform = &enc_xform_null;
1481 break;
1482 case CRYPTO_ARC4:
1483 txform = &enc_xform_arc4;
1484 break;
1485 default:
1486 DPRINTF(("Invalid cipher %d\n", sop->cipher));
1487 return EINVAL;
1488 }
1489
1490 switch (sop->comp_alg) {
1491 case 0:
1492 break;
1493 case CRYPTO_DEFLATE_COMP:
1494 tcomp = &comp_algo_deflate;
1495 break;
1496 case CRYPTO_GZIP_COMP:
1497 tcomp = &comp_algo_gzip;
1498 DPRINTF(("cryptodev_session() tcomp for GZIP\n"));
1499 break;
1500 default:
1501 DPRINTF(("Invalid compression alg %d\n", sop->comp_alg));
1502 return EINVAL;
1503 }
1504
1505 switch (sop->mac) {
1506 case 0:
1507 break;
1508 case CRYPTO_MD5_HMAC:
1509 thash = &auth_hash_hmac_md5;
1510 break;
1511 case CRYPTO_SHA1_HMAC:
1512 thash = &auth_hash_hmac_sha1;
1513 break;
1514 case CRYPTO_MD5_HMAC_96:
1515 thash = &auth_hash_hmac_md5_96;
1516 break;
1517 case CRYPTO_SHA1_HMAC_96:
1518 thash = &auth_hash_hmac_sha1_96;
1519 break;
1520 case CRYPTO_SHA2_HMAC:
1521 /* XXX switching on key length seems questionable */
1522 if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) {
1523 thash = &auth_hash_hmac_sha2_256;
1524 } else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) {
1525 thash = &auth_hash_hmac_sha2_384;
1526 } else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) {
1527 thash = &auth_hash_hmac_sha2_512;
1528 } else {
1529 DPRINTF(("Invalid mackeylen %d\n", sop->mackeylen));
1530 return EINVAL;
1531 }
1532 break;
1533 case CRYPTO_RIPEMD160_HMAC:
1534 thash = &auth_hash_hmac_ripemd_160;
1535 break;
1536 case CRYPTO_RIPEMD160_HMAC_96:
1537 thash = &auth_hash_hmac_ripemd_160_96;
1538 break;
1539 case CRYPTO_MD5:
1540 thash = &auth_hash_md5;
1541 break;
1542 case CRYPTO_SHA1:
1543 thash = &auth_hash_sha1;
1544 break;
1545 case CRYPTO_NULL_HMAC:
1546 thash = &auth_hash_null;
1547 break;
1548 default:
1549 DPRINTF(("Invalid mac %d\n", sop->mac));
1550 return EINVAL;
1551 }
1552
1553 memset(&crie, 0, sizeof(crie));
1554 memset(&cria, 0, sizeof(cria));
1555 memset(&cric, 0, sizeof(cric));
1556
1557 if (tcomp) {
1558 cric.cri_alg = tcomp->type;
1559 cric.cri_klen = 0;
1560 DPRINTF(("tcomp->type = %d\n", tcomp->type));
1561
1562 crihead = &cric;
1563 if (thash) {
1564 cric.cri_next = &cria;
1565 } else if (txform) {
1566 cric.cri_next = &crie;
1567 }
1568 }
1569
1570 if (txform) {
1571 crie.cri_alg = txform->type;
1572 crie.cri_klen = sop->keylen * 8;
1573 if (sop->keylen > txform->maxkey ||
1574 sop->keylen < txform->minkey) {
1575 DPRINTF(("keylen %d not in [%d,%d]\n",
1576 sop->keylen, txform->minkey, txform->maxkey));
1577 error = EINVAL;
1578 goto bail;
1579 }
1580
1581 crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK);
1582 if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8)))
1583 goto bail;
1584 if (!crihead) {
1585 crihead = &crie;
1586 }
1587 }
1588
1589 if (thash) {
1590 cria.cri_alg = thash->type;
1591 cria.cri_klen = sop->mackeylen * 8;
1592 if (sop->mackeylen != thash->keysize) {
1593 DPRINTF(("mackeylen %d != keysize %d\n",
1594 sop->mackeylen, thash->keysize));
1595 error = EINVAL;
1596 goto bail;
1597 }
1598 if (cria.cri_klen) {
1599 cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA,
1600 M_WAITOK);
1601 if ((error = copyin(sop->mackey, cria.cri_key,
1602 cria.cri_klen / 8))) {
1603 goto bail;
1604 }
1605 }
1606 if (txform)
1607 cria.cri_next = &crie; /* XXX forces enc then hash? */
1608 if (!crihead) {
1609 crihead = &cria;
1610 }
1611 }
1612
1613 /* crypto_newsession requires that we hold the mutex. */
1614 mutex_spin_enter(&crypto_mtx);
1615 error = crypto_newsession(&sid, crihead, crypto_devallowsoft);
1616 if (!error) {
1617 DPRINTF(("cyrptodev_session: got session %d\n", (uint32_t)sid));
1618 cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
1619 cria.cri_key, cria.cri_klen, (txform ? sop->cipher : 0), sop->mac,
1620 (tcomp ? sop->comp_alg : 0), txform, thash, tcomp);
1621 if (cse != NULL) {
1622 sop->ses = cse->ses;
1623 } else {
1624 DPRINTF(("csecreate failed\n"));
1625 crypto_freesession(sid);
1626 error = EINVAL;
1627 }
1628 } else {
1629 DPRINTF(("SIOCSESSION violates kernel parameters %d\n",
1630 error));
1631 }
1632 mutex_spin_exit(&crypto_mtx);
1633 bail:
1634 if (error) {
1635 if (crie.cri_key) {
1636 memset(crie.cri_key, 0, crie.cri_klen / 8);
1637 free(crie.cri_key, M_XDATA);
1638 }
1639 if (cria.cri_key) {
1640 memset(cria.cri_key, 0, cria.cri_klen / 8);
1641 free(cria.cri_key, M_XDATA);
1642 }
1643 }
1644 return error;
1645 }
1646
1647 int
1648 cryptodev_msession(struct fcrypt *fcr, struct session_n_op *sn_ops,
1649 int count)
1650 {
1651 int i;
1652
1653 for (i = 0; i < count; i++, sn_ops++) {
1654 struct session_op s_op;
1655 s_op.cipher = sn_ops->cipher;
1656 s_op.mac = sn_ops->mac;
1657 s_op.keylen = sn_ops->keylen;
1658 s_op.key = sn_ops->key;
1659 s_op.mackeylen = sn_ops->mackeylen;
1660 s_op.mackey = sn_ops->mackey;
1661
1662 sn_ops->status = cryptodev_session(fcr, &s_op);
1663 sn_ops->ses = s_op.ses;
1664 }
1665
1666 return 0;
1667 }
1668
1669 static int
1670 cryptodev_msessionfin(struct fcrypt *fcr, int count, u_int32_t *sesid)
1671 {
1672 struct csession *cse;
1673 int req, error = 0;
1674
1675 mutex_spin_enter(&crypto_mtx);
1676 for(req = 0; req < count; req++) {
1677 cse = csefind(fcr, sesid[req]);
1678 if (cse == NULL)
1679 continue;
1680 csedelete(fcr, cse);
1681 error = csefree(cse);
1682 }
1683 mutex_spin_exit(&crypto_mtx);
1684 return 0;
1685 }
1686
1687 /*
1688 * collect as many completed requests as are availble, or count completed
1689 * requests whichever is less.
1690 * return the number of requests.
1691 */
1692 static int
1693 cryptodev_getmstatus(struct fcrypt *fcr, struct crypt_result *crypt_res,
1694 int count)
1695 {
1696 struct cryptop *crp = NULL;
1697 struct cryptkop *krp = NULL;
1698 struct csession *cse;
1699 int i, size, req = 0;
1700 int completed=0;
1701
1702 /* On queue so nobody else can grab them
1703 * and copyout can be delayed-- no locking */
1704 TAILQ_HEAD(, cryptop) crp_delfree_q =
1705 TAILQ_HEAD_INITIALIZER(crp_delfree_q);
1706 TAILQ_HEAD(, cryptkop) krp_delfree_q =
1707 TAILQ_HEAD_INITIALIZER(krp_delfree_q);
1708
1709 /* at this point we do not know which response user is requesting for
1710 * (symmetric or asymmetric) so we copyout one from each i.e if the
1711 * count is 2 then 1 from symmetric and 1 from asymmetric queue and
1712 * if 3 then 2 symmetric and 1 asymmetric and so on */
1713
1714 /* pull off a list of requests while protected from changes */
1715 mutex_spin_enter(&crypto_mtx);
1716 while (req < count) {
1717 crp = TAILQ_FIRST(&fcr->crp_ret_mq);
1718 if (crp) {
1719 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1720 TAILQ_INSERT_TAIL(&crp_delfree_q, crp, crp_next);
1721 cse = (struct csession *)crp->crp_opaque;
1722
1723 /* see if the session is still valid */
1724 cse = csefind(fcr, cse->ses);
1725 if (cse != NULL) {
1726 crypt_res[req].status = 0;
1727 } else {
1728 DPRINTF(("csefind failed\n"));
1729 crypt_res[req].status = EINVAL;
1730 }
1731 req++;
1732 }
1733 if(req < count) {
1734 crypt_res[req].status = 0;
1735 krp = TAILQ_FIRST(&fcr->crp_ret_mkq);
1736 if (krp) {
1737 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1738 TAILQ_INSERT_TAIL(&krp_delfree_q, krp, krp_next);
1739 req++;
1740 }
1741 }
1742 }
1743 mutex_spin_exit(&crypto_mtx);
1744
1745 /* now do all the work outside the mutex */
1746 for(req=0; req < count ;) {
1747 crp = TAILQ_FIRST(&crp_delfree_q);
1748 if (crp) {
1749 if (crypt_res[req].status != 0) {
1750 /* csefind failed during collection */
1751 goto bail;
1752 }
1753 cse = (struct csession *)crp->crp_opaque;
1754 crypt_res[req].reqid = crp->crp_reqid;
1755 crypt_res[req].opaque = crp->crp_usropaque;
1756 completed++;
1757
1758 if (crp->crp_etype != 0) {
1759 crypt_res[req].status = crp->crp_etype;
1760 goto bail;
1761 }
1762
1763 if (cse->error) {
1764 crypt_res[req].status = cse->error;
1765 goto bail;
1766 }
1767
1768 if (crp->dst && (crypt_res[req].status =
1769 copyout(crp->uio.uio_iov[0].iov_base, crp->dst,
1770 crp->len)))
1771 goto bail;
1772
1773 if (crp->mac && (crypt_res[req].status =
1774 copyout(crp->crp_mac, crp->mac,
1775 cse->thash->authsize)))
1776 goto bail;
1777
1778 bail:
1779 TAILQ_REMOVE(&crp_delfree_q, crp, crp_next);
1780 kmem_free(crp->uio.uio_iov[0].iov_base,
1781 crp->uio.uio_iov[0].iov_len);
1782 crypto_freereq(crp);
1783 req++;
1784 }
1785
1786 if (req < count) {
1787 krp = TAILQ_FIRST(&krp_delfree_q);
1788 if (krp) {
1789 crypt_res[req].reqid = krp->krp_reqid;
1790 crypt_res[req].opaque = krp->krp_usropaque;
1791 completed++;
1792 if (krp->krp_status != 0) {
1793 DPRINTF(("cryptodev_key: "
1794 "krp->krp_status 0x%08x\n",
1795 krp->krp_status));
1796 crypt_res[req].status = krp->krp_status;
1797 goto fail;
1798 }
1799
1800 for (i = krp->krp_iparams; i < krp->krp_iparams
1801 + krp->krp_oparams; i++) {
1802 size = (krp->krp_param[i].crp_nbits
1803 + 7) / 8;
1804 if (size == 0)
1805 continue;
1806 crypt_res[req].status = copyout
1807 (krp->krp_param[i].crp_p,
1808 krp->crk_param[i].crp_p, size);
1809 if (crypt_res[req].status) {
1810 DPRINTF(("cryptodev_key: "
1811 "copyout oparam %d failed, "
1812 "error=%d\n",
1813 i - krp->krp_iparams,
1814 crypt_res[req].status));
1815 goto fail;
1816 }
1817 }
1818 fail:
1819 TAILQ_REMOVE(&krp_delfree_q, krp, krp_next);
1820 /* not sure what to do for this */
1821 /* kop[req].crk_status = krp->krp_status; */
1822 for (i = 0; i < CRK_MAXPARAM; i++) {
1823 struct crparam *kp = &(krp->krp_param[i]);
1824 if (kp->crp_p) {
1825 size = (kp->crp_nbits + 7) / 8;
1826 KASSERT(size > 0);
1827 (void)memset(kp->crp_p, 0, size);
1828 kmem_free(kp->crp_p, size);
1829 }
1830 }
1831 cv_destroy(&krp->krp_cv);
1832 pool_put(&cryptkop_pool, krp);
1833 req++;
1834 }
1835 }
1836 }
1837
1838 return completed;
1839 }
1840
/*
 * Look up a single completed request by crypt_res->reqid, checking
 * the symmetric queue first and then the asymmetric queue.  On a
 * match, copy results out to userland, release the request, and
 * return 0; otherwise return EINPROGRESS.
 */
static int
cryptodev_getstatus (struct fcrypt *fcr, struct crypt_result *crypt_res)
{
	struct cryptop *crp = NULL, *cnext;
	struct cryptkop *krp = NULL, *knext;
	struct csession *cse;
	int i, size, req = 0;

	mutex_spin_enter(&crypto_mtx);
	/* Here we dont know for which request the user is requesting the
	 * response so checking in both the queues */
	TAILQ_FOREACH_SAFE(crp, &fcr->crp_ret_mq, crp_next, cnext) {
		if(crp && (crp->crp_reqid == crypt_res->reqid)) {
			cse = (struct csession *)crp->crp_opaque;
			crypt_res->opaque = crp->crp_usropaque;
			/* Validate the session is still alive. */
			cse = csefind(fcr, cse->ses);
			if (cse == NULL) {
				DPRINTF(("csefind failed\n"));
				crypt_res->status = EINVAL;
				goto bail;
			}

			if (crp->crp_etype != 0) {
				crypt_res->status = crp->crp_etype;
				goto bail;
			}

			if (cse->error) {
				crypt_res->status = cse->error;
				goto bail;
			}

			/* NOTE(review): these copyouts run with the spin
			 * mutex still held, and the iov payload buffer is
			 * not kmem_free'd here (unlike getmstatus) --
			 * confirm both against crypto_freereq(). */
			if (crp->dst && (crypt_res->status =
			    copyout(crp->uio.uio_iov[0].iov_base,
			    crp->dst, crp->len)))
				goto bail;

			if (crp->mac && (crypt_res->status =
			    copyout(crp->crp_mac, crp->mac,
			    cse->thash->authsize)))
				goto bail;
bail:
			TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);

			mutex_spin_exit(&crypto_mtx);
			crypto_freereq(crp);
			return 0;
		}
	}

	TAILQ_FOREACH_SAFE(krp, &fcr->crp_ret_mkq, krp_next, knext) {
		if(krp && (krp->krp_reqid == crypt_res->reqid)) {
			crypt_res[req].opaque = krp->krp_usropaque;
			if (krp->krp_status != 0) {
				DPRINTF(("cryptodev_key: "
				    "krp->krp_status 0x%08x\n",
				    krp->krp_status));
				crypt_res[req].status = krp->krp_status;
				goto fail;
			}

			/* Copy out only the output parameters. */
			for (i = krp->krp_iparams; i < krp->krp_iparams +
			    krp->krp_oparams; i++) {
				size = (krp->krp_param[i].crp_nbits + 7) / 8;
				if (size == 0)
					continue;
				crypt_res[req].status = copyout(
				    krp->krp_param[i].crp_p,
				    krp->crk_param[i].crp_p, size);
				if (crypt_res[req].status) {
					DPRINTF(("cryptodev_key: copyout oparam"
					    "%d failed, error=%d\n",
					    i - krp->krp_iparams,
					    crypt_res[req].status));
					goto fail;
				}
			}
fail:
			/* Drop the lock, then scrub and free the request. */
			TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
			mutex_spin_exit(&crypto_mtx);
			/* not sure what to do for this */
			/* kop[req].crk_status = krp->krp_status; */
			for (i = 0; i < CRK_MAXPARAM; i++) {
				struct crparam *kp = &(krp->krp_param[i]);
				if (kp->crp_p) {
					size = (kp->crp_nbits + 7) / 8;
					KASSERT(size > 0);
					memset(kp->crp_p, 0, size);
					kmem_free(kp->crp_p, size);
				}
			}
			cv_destroy(&krp->krp_cv);
			pool_put(&cryptkop_pool, krp);
			return 0;
		}
	}
	/* No completion matched the requested id. */
	mutex_spin_exit(&crypto_mtx);
	return EINPROGRESS;
}
1940
1941 static int
1942 cryptof_poll(struct file *fp, int events)
1943 {
1944 struct fcrypt *fcr = (struct fcrypt *)fp->f_data;
1945 int revents = 0;
1946
1947 if (!(events & (POLLIN | POLLRDNORM))) {
1948 /* only support read and POLLIN */
1949 return 0;
1950 }
1951
1952 mutex_spin_enter(&crypto_mtx);
1953 if (TAILQ_EMPTY(&fcr->crp_ret_mq) && TAILQ_EMPTY(&fcr->crp_ret_mkq)) {
1954 /* no completed requests pending, save the poll for later */
1955 selrecord(curlwp, &fcr->sinfo);
1956 } else {
1957 /* let the app(s) know that there are completed requests */
1958 revents = events & (POLLIN | POLLRDNORM);
1959 }
1960 mutex_spin_exit(&crypto_mtx);
1961
1962 return revents;
1963 }
1964
1965 /*
1966 * Pseudo-device initialization routine for /dev/crypto
1967 */
1968 void cryptoattach(int);
1969
1970 void
1971 cryptoattach(int num)
1972 {
1973 pool_init(&fcrpl, sizeof(struct fcrypt), 0, 0, 0, "fcrpl",
1974 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
1975 pool_init(&csepl, sizeof(struct csession), 0, 0, 0, "csepl",
1976 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
1977
1978 /*
1979 * Preallocate space for 64 users, with 5 sessions each.
1980 * (consider that a TLS protocol session requires at least
1981 * 3DES, MD5, and SHA1 (both hashes are used in the PRF) for
1982 * the negotiation, plus HMAC_SHA1 for the actual SSL records,
1983 * consuming one session here for each algorithm.
1984 */
1985 pool_prime(&fcrpl, 64);
1986 pool_prime(&csepl, 64 * 5);
1987 }
1988