1 /*	$NetBSD: cryptodev.c,v 1.54 2011/02/18 19:56:01 drochner Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.4.2.4 2003/06/03 00:09:02 sam Exp $ */
3 /* $OpenBSD: cryptodev.c,v 1.53 2002/07/10 22:21:30 mickey Exp $ */
4
5 /*-
6 * Copyright (c) 2008 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Coyote Point Systems, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2001 Theo de Raadt
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * Effort sponsored in part by the Defense Advanced Research Projects
61 * Agency (DARPA) and Air Force Research Laboratory, Air Force
62 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
63 *
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: cryptodev.c,v 1.54 2011/02/18 19:56:01 drochner Exp $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kmem.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/pool.h>
75 #include <sys/sysctl.h>
76 #include <sys/file.h>
77 #include <sys/filedesc.h>
78 #include <sys/errno.h>
79 #include <sys/md5.h>
80 #include <sys/sha1.h>
81 #include <sys/conf.h>
82 #include <sys/device.h>
83 #include <sys/kauth.h>
84 #include <sys/select.h>
85 #include <sys/poll.h>
86 #include <sys/atomic.h>
87 #include <sys/stat.h>
88
89 #include "opt_ocf.h"
90 #include <opencrypto/cryptodev.h>
91 #include <opencrypto/ocryptodev.h>
92 #include <opencrypto/xform.h>
93
94 struct csession {
95 TAILQ_ENTRY(csession) next;
96 u_int64_t sid;
97 u_int32_t ses;
98
99 u_int32_t cipher; /* note: shares name space in crd_alg */
100 const struct enc_xform *txform;
101 u_int32_t mac; /* note: shares name space in crd_alg */
102 const struct auth_hash *thash;
103 u_int32_t comp_alg; /* note: shares name space in crd_alg */
104 const struct comp_algo *tcomp;
105
106 void * key;
107 int keylen;
108 u_char tmp_iv[EALG_MAX_BLOCK_LEN];
109
110 void * mackey;
111 int mackeylen;
112 u_char tmp_mac[CRYPTO_MAX_MAC_LEN];
113
114 struct iovec iovec[1]; /* user requests never have more */
115 struct uio uio;
116 int error;
117 };
118
119 struct fcrypt {
120 TAILQ_HEAD(csessionlist, csession) csessions;
121 TAILQ_HEAD(crprethead, cryptop) crp_ret_mq;
122 TAILQ_HEAD(krprethead, cryptkop) crp_ret_mkq;
123 int sesn;
124 struct selinfo sinfo;
125 u_int32_t requestid;
126 struct timespec atime;
127 struct timespec mtime;
128 struct timespec btime;
129 };
130
131 /* For our fixed-size allocations */
132 static struct pool fcrpl;
133 static struct pool csepl;
134
135 /* Declaration of master device (fd-cloning/ctxt-allocating) entrypoints */
136 static int cryptoopen(dev_t dev, int flag, int mode, struct lwp *l);
137 static int cryptoread(dev_t dev, struct uio *uio, int ioflag);
138 static int cryptowrite(dev_t dev, struct uio *uio, int ioflag);
139 static int cryptoselect(dev_t dev, int rw, struct lwp *l);
140
141 /* Declaration of cloned-device (per-ctxt) entrypoints */
142 static int cryptof_read(struct file *, off_t *, struct uio *,
143 kauth_cred_t, int);
144 static int cryptof_write(struct file *, off_t *, struct uio *,
145 kauth_cred_t, int);
146 static int cryptof_ioctl(struct file *, u_long, void *);
147 static int cryptof_close(struct file *);
148 static int cryptof_poll(struct file *, int);
149 static int cryptof_stat(struct file *, struct stat *);
150
151 static const struct fileops cryptofops = {
152 .fo_read = cryptof_read,
153 .fo_write = cryptof_write,
154 .fo_ioctl = cryptof_ioctl,
155 .fo_fcntl = fnullop_fcntl,
156 .fo_poll = cryptof_poll,
157 .fo_stat = cryptof_stat,
158 .fo_close = cryptof_close,
159 .fo_kqfilter = fnullop_kqfilter,
160 .fo_restart = fnullop_restart,
161 };
162
163 struct csession *cryptodev_csefind(struct fcrypt *, u_int);
164 static struct csession *csefind(struct fcrypt *, u_int);
165 static int csedelete(struct fcrypt *, struct csession *);
166 static struct csession *cseadd(struct fcrypt *, struct csession *);
167 static struct csession *csecreate(struct fcrypt *, u_int64_t, void *,
168 u_int64_t, void *, u_int64_t, u_int32_t, u_int32_t, u_int32_t,
169 const struct enc_xform *, const struct auth_hash *,
170 const struct comp_algo *);
171 static int csefree(struct csession *);
172
173 static int cryptodev_key(struct crypt_kop *);
174 static int cryptodev_mkey(struct fcrypt *, struct crypt_n_kop *, int);
175 static int cryptodev_msessionfin(struct fcrypt *, int, u_int32_t *);
176
177 static int cryptodev_cb(void *);
178 static int cryptodevkey_cb(void *);
179
180 static int cryptodev_mcb(void *);
181 static int cryptodevkey_mcb(void *);
182
183 static int cryptodev_getmstatus(struct fcrypt *, struct crypt_result *,
184 int);
185 static int cryptodev_getstatus(struct fcrypt *, struct crypt_result *);
186
187 extern int ocryptof_ioctl(struct file *, u_long, void *);
188
189 /*
190 * sysctl-able control variables for /dev/crypto now defined in crypto.c:
191 * crypto_usercrypto, crypto_userasmcrypto, crypto_devallowsoft.
192 */
193
194 /* ARGSUSED */
195 int
196 cryptof_read(file_t *fp, off_t *poff,
197 struct uio *uio, kauth_cred_t cred, int flags)
198 {
199 return EIO;
200 }
201
202 /* ARGSUSED */
203 int
204 cryptof_write(file_t *fp, off_t *poff,
205 struct uio *uio, kauth_cred_t cred, int flags)
206 {
207 return EIO;
208 }
209
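/*
 * Illustrative only: a minimal userland sketch of the synchronous ioctl
 * flow handled below (open /dev/crypto, CIOCGSESSION, CIOCCRYPT,
 * CIOCFSESSION).  It assumes the userland cryptodev header is available
 * (e.g. <crypto/cryptodev.h>), that key, iv and buf are caller-supplied
 * buffers, and it omits all headers and error handling; it is a sketch,
 * not part of this driver.
 *
 *	int fd = open("/dev/crypto", O_RDWR);
 *
 *	struct session_op sop;
 *	memset(&sop, 0, sizeof(sop));
 *	sop.cipher = CRYPTO_AES_CBC;
 *	sop.keylen = 16;			(key length in bytes)
 *	sop.key = key;
 *	ioctl(fd, CIOCGSESSION, &sop);		(sop.ses is the session id)
 *
 *	struct crypt_op cop;
 *	memset(&cop, 0, sizeof(cop));
 *	cop.ses = sop.ses;
 *	cop.op = COP_ENCRYPT;
 *	cop.len = sizeof(buf);			(multiple of the block size)
 *	cop.src = buf;
 *	cop.dst = buf;
 *	cop.iv = iv;				(one block, 16 bytes for AES)
 *	ioctl(fd, CIOCCRYPT, &cop);
 *
 *	ioctl(fd, CIOCFSESSION, &sop.ses);
 *	close(fd);
 */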
210 /* ARGSUSED */
211 int
212 cryptof_ioctl(struct file *fp, u_long cmd, void *data)
213 {
214 struct fcrypt *fcr = fp->f_data;
215 struct csession *cse;
216 struct session_op *sop;
217 struct session_n_op *snop;
218 struct crypt_op *cop;
219 struct crypt_mop *mop;
220 struct crypt_mkop *mkop;
221 struct crypt_n_op *cnop;
222 struct crypt_n_kop *knop;
223 struct crypt_sgop *sgop;
224 struct crypt_sfop *sfop;
225 struct cryptret *crypt_ret;
226 struct crypt_result *crypt_res;
227 u_int32_t ses;
228 u_int32_t *sesid;
229 int error = 0;
230 size_t count;
231
232 /* backwards compatibility */
233 file_t *criofp;
234 struct fcrypt *criofcr;
235 int criofd;
236
237 mutex_spin_enter(&crypto_mtx);
238 getnanotime(&fcr->atime);
239 mutex_spin_exit(&crypto_mtx);
240
241 switch (cmd) {
242 case CRIOGET: /* XXX deprecated, remove after 5.0 */
243 if ((error = fd_allocfile(&criofp, &criofd)) != 0)
244 return error;
245 criofcr = pool_get(&fcrpl, PR_WAITOK);
246 mutex_spin_enter(&crypto_mtx);
247 TAILQ_INIT(&criofcr->csessions);
248 TAILQ_INIT(&criofcr->crp_ret_mq);
249 TAILQ_INIT(&criofcr->crp_ret_mkq);
250 selinit(&criofcr->sinfo);
251
252 /*
253 * Don't ever return session 0, to allow detection of
254 * failed creation attempts with multi-create ioctl.
255 */
256 criofcr->sesn = 1;
257 criofcr->requestid = 1;
258 mutex_spin_exit(&crypto_mtx);
259 (void)fd_clone(criofp, criofd, (FREAD|FWRITE),
260 &cryptofops, criofcr);
261 *(u_int32_t *)data = criofd;
262 return error;
263 break;
264 case CIOCGSESSION:
265 sop = (struct session_op *)data;
266 error = cryptodev_session(fcr, sop);
267 break;
268 case CIOCNGSESSION:
269 sgop = (struct crypt_sgop *)data;
270 snop = kmem_alloc((sgop->count *
271 sizeof(struct session_n_op)), KM_SLEEP);
272 error = copyin(sgop->sessions, snop, sgop->count *
273 sizeof(struct session_n_op));
274 if (error) {
275 goto mbail;
276 }
277
278 mutex_spin_enter(&crypto_mtx);
279 fcr->mtime = fcr->atime;
280 mutex_spin_exit(&crypto_mtx);
281 error = cryptodev_msession(fcr, snop, sgop->count);
282 if (error) {
283 goto mbail;
284 }
285
286 error = copyout(snop, sgop->sessions, sgop->count *
287 sizeof(struct session_n_op));
288 mbail:
289 kmem_free(snop, sgop->count * sizeof(struct session_n_op));
290 break;
291 case CIOCFSESSION:
292 mutex_spin_enter(&crypto_mtx);
293 fcr->mtime = fcr->atime;
294 ses = *(u_int32_t *)data;
295 cse = csefind(fcr, ses);
296 		if (cse == NULL) {
			mutex_spin_exit(&crypto_mtx);
297 			return EINVAL;
		}
298 csedelete(fcr, cse);
299 error = csefree(cse);
300 mutex_spin_exit(&crypto_mtx);
301 break;
302 case CIOCNFSESSION:
303 mutex_spin_enter(&crypto_mtx);
304 fcr->mtime = fcr->atime;
305 mutex_spin_exit(&crypto_mtx);
306 sfop = (struct crypt_sfop *)data;
307 sesid = kmem_alloc((sfop->count * sizeof(u_int32_t)),
308 KM_SLEEP);
309 error = copyin(sfop->sesid, sesid,
310 (sfop->count * sizeof(u_int32_t)));
311 if (!error) {
312 error = cryptodev_msessionfin(fcr, sfop->count, sesid);
313 }
314 kmem_free(sesid, (sfop->count * sizeof(u_int32_t)));
315 break;
316 case CIOCCRYPT:
317 mutex_spin_enter(&crypto_mtx);
318 fcr->mtime = fcr->atime;
319 cop = (struct crypt_op *)data;
320 cse = csefind(fcr, cop->ses);
321 mutex_spin_exit(&crypto_mtx);
322 if (cse == NULL) {
323 DPRINTF(("csefind failed\n"));
324 return EINVAL;
325 }
326 error = cryptodev_op(cse, cop, curlwp);
327 DPRINTF(("cryptodev_op error = %d\n", error));
328 break;
329 case CIOCNCRYPTM:
330 mutex_spin_enter(&crypto_mtx);
331 fcr->mtime = fcr->atime;
332 mutex_spin_exit(&crypto_mtx);
333 mop = (struct crypt_mop *)data;
334 cnop = kmem_alloc((mop->count * sizeof(struct crypt_n_op)),
335 KM_SLEEP);
336 error = copyin(mop->reqs, cnop,
337 (mop->count * sizeof(struct crypt_n_op)));
338 if(!error) {
339 error = cryptodev_mop(fcr, cnop, mop->count, curlwp);
340 if (!error) {
341 error = copyout(cnop, mop->reqs,
342 (mop->count * sizeof(struct crypt_n_op)));
343 }
344 }
345 kmem_free(cnop, (mop->count * sizeof(struct crypt_n_op)));
346 break;
347 case CIOCKEY:
348 error = cryptodev_key((struct crypt_kop *)data);
349 DPRINTF(("cryptodev_key error = %d\n", error));
350 break;
351 case CIOCNFKEYM:
352 mutex_spin_enter(&crypto_mtx);
353 fcr->mtime = fcr->atime;
354 mutex_spin_exit(&crypto_mtx);
355 mkop = (struct crypt_mkop *)data;
356 knop = kmem_alloc((mkop->count * sizeof(struct crypt_n_kop)),
357 KM_SLEEP);
358 error = copyin(mkop->reqs, knop,
359 (mkop->count * sizeof(struct crypt_n_kop)));
360 if (!error) {
361 error = cryptodev_mkey(fcr, knop, mkop->count);
362 if (!error)
363 error = copyout(knop, mkop->reqs,
364 (mkop->count * sizeof(struct crypt_n_kop)));
365 }
366 kmem_free(knop, (mkop->count * sizeof(struct crypt_n_kop)));
367 break;
368 case CIOCASYMFEAT:
369 error = crypto_getfeat((int *)data);
370 break;
371 case CIOCNCRYPTRETM:
372 mutex_spin_enter(&crypto_mtx);
373 fcr->mtime = fcr->atime;
374 mutex_spin_exit(&crypto_mtx);
375 crypt_ret = (struct cryptret *)data;
376 count = crypt_ret->count;
377 crypt_res = kmem_alloc((count * sizeof(struct crypt_result)),
378 KM_SLEEP);
379 error = copyin(crypt_ret->results, crypt_res,
380 (count * sizeof(struct crypt_result)));
381 if (error)
382 goto reterr;
383 crypt_ret->count = cryptodev_getmstatus(fcr, crypt_res,
384 crypt_ret->count);
385 /* sanity check count */
386 if (crypt_ret->count > count) {
387 printf("%s.%d: error returned count %zd > original "
388 " count %zd\n",
389 __FILE__, __LINE__, crypt_ret->count, count);
390 crypt_ret->count = count;
391
392 }
393 error = copyout(crypt_res, crypt_ret->results,
394 (crypt_ret->count * sizeof(struct crypt_result)));
395 reterr:
396 kmem_free(crypt_res, (count * sizeof(struct crypt_result)));
397 break;
398 case CIOCNCRYPTRET:
399 error = cryptodev_getstatus(fcr, (struct crypt_result *)data);
400 break;
401 default:
402 /* Check for backward compatible commands */
403 error = ocryptof_ioctl(fp, cmd, data);
404 }
405 return error;
406 }
407
408 int
409 cryptodev_op(struct csession *cse, struct crypt_op *cop, struct lwp *l)
410 {
411 struct cryptop *crp = NULL;
412 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
413 int error;
414 int iov_len = cop->len;
415 int flags=0;
416 int dst_len; /* copyout size */
417
418 if (cop->len > 256*1024-4)
419 return E2BIG;
420
421 if (cse->txform) {
422 if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
423 return EINVAL;
424 }
425
426 DPRINTF(("cryptodev_op[%u]: iov_len %d\n",
427 CRYPTO_SESID2LID(cse->sid), iov_len));
428 if ((cse->tcomp) && cop->dst_len) {
429 if (iov_len < cop->dst_len) {
430 /* Need larger iov to deal with decompress */
431 iov_len = cop->dst_len;
432 }
433 DPRINTF(("cryptodev_op: iov_len -> %d for decompress\n", iov_len));
434 }
435
436 (void)memset(&cse->uio, 0, sizeof(cse->uio));
437 cse->uio.uio_iovcnt = 1;
438 cse->uio.uio_resid = 0;
439 cse->uio.uio_rw = UIO_WRITE;
440 cse->uio.uio_iov = cse->iovec;
441 UIO_SETUP_SYSSPACE(&cse->uio);
442 memset(&cse->iovec, 0, sizeof(cse->iovec));
443
444 	/* The iov needs to be big enough to handle the
445 	 * uncompressed data. */
446 cse->uio.uio_iov[0].iov_len = iov_len;
447 cse->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
448 cse->uio.uio_resid = cse->uio.uio_iov[0].iov_len;
449 DPRINTF(("cryptodev_op[%u]: uio.iov_base %p malloced %d bytes\n",
450 CRYPTO_SESID2LID(cse->sid),
451 cse->uio.uio_iov[0].iov_base, iov_len));
452
453 crp = crypto_getreq((cse->tcomp != NULL) + (cse->txform != NULL) + (cse->thash != NULL));
454 if (crp == NULL) {
455 error = ENOMEM;
456 goto bail;
457 }
458 DPRINTF(("cryptodev_op[%u]: crp %p\n",
459 CRYPTO_SESID2LID(cse->sid), crp));
460
461 /* crds are always ordered tcomp, thash, then txform */
462 /* with optional missing links */
463
464 /* XXX: If we're going to compress then hash or encrypt, we need
465 * to be able to pass on the new size of the data.
466 */
467
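	/*
	 * For example, the descriptor chain built below (crp_desc first)
	 * looks like, depending on which transforms the session has:
	 *	comp + hash + cipher:	crdc -> crda -> crde
	 *	hash + cipher:		crda -> crde
	 *	comp + cipher:		crdc -> crde
	 *	cipher only:		crde
	 */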
468 if (cse->tcomp) {
469 crdc = crp->crp_desc;
470 }
471
472 if (cse->thash) {
473 crda = crdc ? crdc->crd_next : crp->crp_desc;
474 if (cse->txform && crda)
475 crde = crda->crd_next;
476 } else {
477 if (cse->txform) {
478 crde = crdc ? crdc->crd_next : crp->crp_desc;
479 } else if (!cse->tcomp) {
480 error = EINVAL;
481 goto bail;
482 }
483 }
484
485 DPRINTF(("ocf[%u]: iov_len %zu, cop->len %u\n",
486 CRYPTO_SESID2LID(cse->sid),
487 cse->uio.uio_iov[0].iov_len,
488 cop->len));
489
490 if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
491 {
492 printf("copyin failed %s %d \n", (char *)cop->src, error);
493 goto bail;
494 }
495
496 if (crdc) {
497 switch (cop->op) {
498 case COP_COMP:
499 crdc->crd_flags |= CRD_F_COMP;
500 break;
501 case COP_DECOMP:
502 crdc->crd_flags &= ~CRD_F_COMP;
503 break;
504 default:
505 break;
506 }
507 /* more data to follow? */
508 if (cop->flags & COP_F_MORE) {
509 flags |= CRYPTO_F_MORE;
510 }
511 crdc->crd_len = cop->len;
512 crdc->crd_inject = 0;
513
514 crdc->crd_alg = cse->comp_alg;
515 crdc->crd_key = NULL;
516 crdc->crd_klen = 0;
517 DPRINTF(("cryptodev_op[%u]: crdc setup for comp_alg %d.\n",
518 CRYPTO_SESID2LID(cse->sid), crdc->crd_alg));
519 }
520
521 if (crda) {
522 crda->crd_skip = 0;
523 crda->crd_len = cop->len;
524 crda->crd_inject = 0; /* ??? */
525
526 crda->crd_alg = cse->mac;
527 crda->crd_key = cse->mackey;
528 crda->crd_klen = cse->mackeylen * 8;
529 DPRINTF(("cryptodev_op: crda setup for mac %d.\n", crda->crd_alg));
530 }
531
532 if (crde) {
533 switch (cop->op) {
534 case COP_ENCRYPT:
535 crde->crd_flags |= CRD_F_ENCRYPT;
536 break;
537 case COP_DECRYPT:
538 crde->crd_flags &= ~CRD_F_ENCRYPT;
539 break;
540 default:
541 break;
542 }
543 crde->crd_len = cop->len;
544 crde->crd_inject = 0;
545
546 crde->crd_alg = cse->cipher;
547 crde->crd_key = cse->key;
548 crde->crd_klen = cse->keylen * 8;
549 DPRINTF(("cryptodev_op: crde setup for cipher %d.\n", crde->crd_alg));
550 }
551
552
553 crp->crp_ilen = cop->len;
554 	/* The request is flagged as CRYPTO_F_USER as long as it is running
555 	 * in the user IOCTL thread.  This flag lets us skip using the retq
556 	 * for the request if it completes immediately.  If the request ends
557 	 * up being delayed or is not completed immediately, the flag is removed.
558 	 */
559 crp->crp_flags = CRYPTO_F_IOV | (cop->flags & COP_F_BATCH) | CRYPTO_F_USER |
560 flags;
561 crp->crp_buf = (void *)&cse->uio;
562 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
563 crp->crp_sid = cse->sid;
564 crp->crp_opaque = (void *)cse;
565
566 if (cop->iv) {
567 if (crde == NULL) {
568 error = EINVAL;
569 goto bail;
570 }
571 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
572 error = EINVAL;
573 goto bail;
574 }
575 if ((error = copyin(cop->iv, cse->tmp_iv,
576 cse->txform->blocksize)))
577 goto bail;
578 (void)memcpy(crde->crd_iv, cse->tmp_iv, cse->txform->blocksize);
579 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
580 crde->crd_skip = 0;
581 } else if (crde) {
582 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
583 crde->crd_skip = 0;
584 } else {
585 crde->crd_flags |= CRD_F_IV_PRESENT;
586 crde->crd_skip = cse->txform->blocksize;
587 crde->crd_len -= cse->txform->blocksize;
588 }
589 }
590
591 if (cop->mac) {
592 if (crda == NULL) {
593 error = EINVAL;
594 goto bail;
595 }
596 crp->crp_mac=cse->tmp_mac;
597 }
598
599 /*
600 * XXX there was a comment here which said that we went to
601 * XXX splcrypto() but needed to only if CRYPTO_F_CBIMM,
602 * XXX disabled on NetBSD since 1.6O due to a race condition.
603 * XXX But crypto_dispatch went to splcrypto() itself! (And
604 * XXX now takes the crypto_mtx mutex itself). We do, however,
605 * XXX need to hold the mutex across the call to cv_wait().
606 * XXX (should we arrange for crypto_dispatch to return to
607 * XXX us with it held? it seems quite ugly to do so.)
608 */
609 #ifdef notyet
610 eagain:
611 #endif
612 error = crypto_dispatch(crp);
613 mutex_spin_enter(&crypto_mtx);
614
615 /*
616 * If the request was going to be completed by the
617 * ioctl thread then it would have been done by now.
618 * Remove the F_USER flag so crypto_done() is not confused
619 * if the crypto device calls it after this point.
620 */
621 crp->crp_flags &= ~(CRYPTO_F_USER);
622
623 switch (error) {
624 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
625 case EAGAIN:
626 mutex_spin_exit(&crypto_mtx);
627 goto eagain;
628 break;
629 #endif
630 case 0:
631 break;
632 default:
633 DPRINTF(("cryptodev_op: not waiting, error.\n"));
634 mutex_spin_exit(&crypto_mtx);
635 goto bail;
636 }
637
638 while (!(crp->crp_flags & CRYPTO_F_DONE)) {
639 DPRINTF(("cryptodev_op[%d]: sleeping on cv %p for crp %p\n",
640 (uint32_t)cse->sid, &crp->crp_cv, crp));
641 cv_wait(&crp->crp_cv, &crypto_mtx); /* XXX cv_wait_sig? */
642 }
643 if (crp->crp_flags & CRYPTO_F_ONRETQ) {
644 /* XXX this should never happen now with the CRYPTO_F_USER flag
645 * changes.
646 */
647 DPRINTF(("cryptodev_op: DONE, not woken by cryptoret.\n"));
648 (void)crypto_ret_q_remove(crp);
649 }
650 mutex_spin_exit(&crypto_mtx);
651
652 if (crp->crp_etype != 0) {
653 DPRINTF(("cryptodev_op: crp_etype %d\n", crp->crp_etype));
654 error = crp->crp_etype;
655 goto bail;
656 }
657
658 if (cse->error) {
659 DPRINTF(("cryptodev_op: cse->error %d\n", cse->error));
660 error = cse->error;
661 goto bail;
662 }
663
664 dst_len = crp->crp_ilen;
665 /* let the user know how much data was returned */
666 if (crp->crp_olen) {
667 dst_len = cop->dst_len = crp->crp_olen;
668 }
669 crp->len = dst_len;
670
671 if (cop->dst) {
672 DPRINTF(("cryptodev_op: copyout %d bytes to %p\n", dst_len, cop->dst));
673 }
674 if (cop->dst &&
675 (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, dst_len)))
676 {
677 DPRINTF(("cryptodev_op: copyout error %d\n", error));
678 goto bail;
679 }
680
681 if (cop->mac &&
682 (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) {
683 DPRINTF(("cryptodev_op: mac copyout error %d\n", error));
684 goto bail;
685 }
686
687
688 bail:
689 if (crp) {
690 crypto_freereq(crp);
691 }
692 if (cse->uio.uio_iov[0].iov_base) {
693 kmem_free(cse->uio.uio_iov[0].iov_base,iov_len);
694 }
695
696 return error;
697 }
698
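/*
 * Completion callback for synchronous requests submitted by
 * cryptodev_op(): record the completion status in the session and wake
 * the ioctl thread sleeping on crp_cv; an EAGAIN status causes the
 * request to be redispatched.
 */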
699 static int
700 cryptodev_cb(void *op)
701 {
702 struct cryptop *crp = (struct cryptop *) op;
703 struct csession *cse = (struct csession *)crp->crp_opaque;
704 int error = 0;
705
706 mutex_spin_enter(&crypto_mtx);
707 cse->error = crp->crp_etype;
708 if (crp->crp_etype == EAGAIN) {
709 /* always drop mutex to call dispatch routine */
710 mutex_spin_exit(&crypto_mtx);
711 error = crypto_dispatch(crp);
712 mutex_spin_enter(&crypto_mtx);
713 }
714 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
715 cv_signal(&crp->crp_cv);
716 }
717 mutex_spin_exit(&crypto_mtx);
718 return 0;
719 }
720
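/*
 * Completion callback for asynchronous requests submitted by
 * cryptodev_mop(): in addition to the above, queue the finished request
 * on the owning fd's return queue and notify any pollers.
 */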
721 static int
722 cryptodev_mcb(void *op)
723 {
724 struct cryptop *crp = (struct cryptop *) op;
725 struct csession *cse = (struct csession *)crp->crp_opaque;
726 int error=0;
727
728 mutex_spin_enter(&crypto_mtx);
729 cse->error = crp->crp_etype;
730 if (crp->crp_etype == EAGAIN) {
731 mutex_spin_exit(&crypto_mtx);
732 error = crypto_dispatch(crp);
733 mutex_spin_enter(&crypto_mtx);
734 }
735 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
736 cv_signal(&crp->crp_cv);
737 }
738
739 TAILQ_INSERT_TAIL(&crp->fcrp->crp_ret_mq, crp, crp_next);
740 selnotify(&crp->fcrp->sinfo, 0, 0);
741 mutex_spin_exit(&crypto_mtx);
742 return 0;
743 }
744
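/* Completion callback for synchronous key operations (cryptodev_key). */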
745 static int
746 cryptodevkey_cb(void *op)
747 {
748 struct cryptkop *krp = op;
749
750 mutex_spin_enter(&crypto_mtx);
751 cv_signal(&krp->krp_cv);
752 mutex_spin_exit(&crypto_mtx);
753 return 0;
754 }
755
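/*
 * Completion callback for asynchronous key operations (cryptodev_mkey):
 * queue the finished request on the owning fd's key return queue and
 * notify any pollers.
 */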
756 static int
757 cryptodevkey_mcb(void *op)
758 {
759 struct cryptkop *krp = op;
760
761 mutex_spin_enter(&crypto_mtx);
762 cv_signal(&krp->krp_cv);
763 TAILQ_INSERT_TAIL(&krp->fcrp->crp_ret_mkq, krp, krp_next);
764 selnotify(&krp->fcrp->sinfo, 0, 0);
765 mutex_spin_exit(&crypto_mtx);
766 return 0;
767 }
768
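/*
 * Handle a synchronous asymmetric-key operation (CIOCKEY): validate the
 * parameter counts for the requested operation, copy in the input
 * parameters, dispatch, wait for completion, and copy out the results.
 */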
769 static int
770 cryptodev_key(struct crypt_kop *kop)
771 {
772 struct cryptkop *krp = NULL;
773 int error = EINVAL;
774 int in, out, size, i;
775
776 if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM)
777 return EFBIG;
778
779 in = kop->crk_iparams;
780 out = kop->crk_oparams;
781 switch (kop->crk_op) {
782 case CRK_MOD_EXP:
783 if (in == 3 && out == 1)
784 break;
785 return EINVAL;
786 case CRK_MOD_EXP_CRT:
787 if (in == 6 && out == 1)
788 break;
789 return EINVAL;
790 case CRK_DSA_SIGN:
791 if (in == 5 && out == 2)
792 break;
793 return EINVAL;
794 case CRK_DSA_VERIFY:
795 if (in == 7 && out == 0)
796 break;
797 return EINVAL;
798 case CRK_DH_COMPUTE_KEY:
799 if (in == 3 && out == 1)
800 break;
801 return EINVAL;
802 case CRK_MOD_ADD:
803 if (in == 3 && out == 1)
804 break;
805 return EINVAL;
806 case CRK_MOD_ADDINV:
807 if (in == 2 && out == 1)
808 break;
809 return EINVAL;
810 case CRK_MOD_SUB:
811 if (in == 3 && out == 1)
812 break;
813 return EINVAL;
814 case CRK_MOD_MULT:
815 if (in == 3 && out == 1)
816 break;
817 return EINVAL;
818 case CRK_MOD_MULTINV:
819 if (in == 2 && out == 1)
820 break;
821 return EINVAL;
822 case CRK_MOD:
823 if (in == 2 && out == 1)
824 break;
825 return EINVAL;
826 default:
827 return EINVAL;
828 }
829
830 krp = pool_get(&cryptkop_pool, PR_WAITOK);
831 (void)memset(krp, 0, sizeof *krp);
832 cv_init(&krp->krp_cv, "crykdev");
833 krp->krp_op = kop->crk_op;
834 krp->krp_status = kop->crk_status;
835 krp->krp_iparams = kop->crk_iparams;
836 krp->krp_oparams = kop->crk_oparams;
837 krp->krp_status = 0;
838 krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
839
840 for (i = 0; i < CRK_MAXPARAM; i++)
841 krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
842 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
843 size = (krp->krp_param[i].crp_nbits + 7) / 8;
844 if (size == 0)
845 continue;
846 krp->krp_param[i].crp_p = kmem_alloc(size, KM_SLEEP);
847 if (i >= krp->krp_iparams)
848 continue;
849 error = copyin(kop->crk_param[i].crp_p,
850 krp->krp_param[i].crp_p, size);
851 if (error)
852 goto fail;
853 }
854
855 error = crypto_kdispatch(krp);
856 if (error != 0) {
857 goto fail;
858 }
859
860 mutex_spin_enter(&crypto_mtx);
861 while (!(krp->krp_flags & CRYPTO_F_DONE)) {
862 cv_wait(&krp->krp_cv, &crypto_mtx); /* XXX cv_wait_sig? */
863 }
864 if (krp->krp_flags & CRYPTO_F_ONRETQ) {
865 DPRINTF(("cryptodev_key: DONE early, not via cryptoret.\n"));
866 (void)crypto_ret_kq_remove(krp);
867 }
868 mutex_spin_exit(&crypto_mtx);
869
870 if (krp->krp_status != 0) {
871 DPRINTF(("cryptodev_key: krp->krp_status 0x%08x\n",
872 krp->krp_status));
873 error = krp->krp_status;
874 goto fail;
875 }
876
877 for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams;
878 i++) {
879 size = (krp->krp_param[i].crp_nbits + 7) / 8;
880 if (size == 0)
881 continue;
882 error = copyout(krp->krp_param[i].crp_p,
883 kop->crk_param[i].crp_p, size);
884 if (error) {
885 DPRINTF(("cryptodev_key: copyout oparam %d failed, "
886 "error=%d\n", i-krp->krp_iparams, error));
887 goto fail;
888 }
889 }
890
891 fail:
892 kop->crk_status = krp->krp_status;
893 for (i = 0; i < CRK_MAXPARAM; i++) {
894 struct crparam *kp = &(krp->krp_param[i]);
895 if (krp->krp_param[i].crp_p) {
896 size = (kp->crp_nbits + 7) / 8;
897 KASSERT(size > 0);
898 (void)memset(kp->crp_p, 0, size);
899 kmem_free(kp->crp_p, size);
900 }
901 }
902 cv_destroy(&krp->krp_cv);
903 pool_put(&cryptkop_pool, krp);
904 DPRINTF(("cryptodev_key: error=0x%08x\n", error));
905 return error;
906 }
907
908 /* ARGSUSED */
909 static int
910 cryptof_close(struct file *fp)
911 {
912 struct fcrypt *fcr = fp->f_data;
913 struct csession *cse;
914
915 mutex_spin_enter(&crypto_mtx);
916 while ((cse = TAILQ_FIRST(&fcr->csessions))) {
917 TAILQ_REMOVE(&fcr->csessions, cse, next);
918 (void)csefree(cse);
919 }
920 seldestroy(&fcr->sinfo);
921 fp->f_data = NULL;
922 mutex_spin_exit(&crypto_mtx);
923
924 pool_put(&fcrpl, fcr);
925 return 0;
926 }
927
928 /* needed for compatibility module */
929 struct csession *cryptodev_csefind(struct fcrypt *fcr, u_int ses)
930 {
931 return csefind(fcr, ses);
932 }
933
934 /* csefind: call with crypto_mtx held. */
935 static struct csession *
936 csefind(struct fcrypt *fcr, u_int ses)
937 {
938 struct csession *cse, *cnext, *ret = NULL;
939
940 KASSERT(mutex_owned(&crypto_mtx));
941 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext)
942 if (cse->ses == ses)
943 ret = cse;
944
945 return ret;
946 }
947
948 /* csedelete: call with crypto_mtx held. */
949 static int
950 csedelete(struct fcrypt *fcr, struct csession *cse_del)
951 {
952 struct csession *cse, *cnext;
953 int ret = 0;
954
955 KASSERT(mutex_owned(&crypto_mtx));
956 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext) {
957 if (cse == cse_del) {
958 TAILQ_REMOVE(&fcr->csessions, cse, next);
959 ret = 1;
960 }
961 }
962 return ret;
963 }
964
965 /* cseadd: call with crypto_mtx held. */
966 static struct csession *
967 cseadd(struct fcrypt *fcr, struct csession *cse)
968 {
969 KASSERT(mutex_owned(&crypto_mtx));
970 /* don't let session ID wrap! */
971 if (fcr->sesn + 1 == 0) return NULL;
972 TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
973 cse->ses = fcr->sesn++;
974 return cse;
975 }
976
977 /* csecreate: call with crypto_mtx held. */
978 static struct csession *
979 csecreate(struct fcrypt *fcr, u_int64_t sid, void *key, u_int64_t keylen,
980 void *mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
981 u_int32_t comp_alg, const struct enc_xform *txform,
982 const struct auth_hash *thash, const struct comp_algo *tcomp)
983 {
984 struct csession *cse;
985
986 KASSERT(mutex_owned(&crypto_mtx));
987 cse = pool_get(&csepl, PR_NOWAIT);
988 if (cse == NULL)
989 return NULL;
990 cse->key = key;
991 cse->keylen = keylen/8;
992 cse->mackey = mackey;
993 cse->mackeylen = mackeylen/8;
994 cse->sid = sid;
995 cse->cipher = cipher;
996 cse->mac = mac;
997 cse->comp_alg = comp_alg;
998 cse->txform = txform;
999 cse->thash = thash;
1000 cse->tcomp = tcomp;
1001 cse->error = 0;
1002 if (cseadd(fcr, cse))
1003 return cse;
1004 else {
1005 pool_put(&csepl, cse);
1006 return NULL;
1007 }
1008 }
1009
1010 /* csefree: call with crypto_mtx held. */
1011 static int
1012 csefree(struct csession *cse)
1013 {
1014 int error;
1015
1016 KASSERT(mutex_owned(&crypto_mtx));
1017 error = crypto_freesession(cse->sid);
1018 if (cse->key)
1019 free(cse->key, M_XDATA);
1020 if (cse->mackey)
1021 free(cse->mackey, M_XDATA);
1022 pool_put(&csepl, cse);
1023 return error;
1024 }
1025
1026 static int
1027 cryptoopen(dev_t dev, int flag, int mode,
1028 struct lwp *l)
1029 {
1030 file_t *fp;
1031 struct fcrypt *fcr;
1032 int fd, error;
1033
1034 if (crypto_usercrypto == 0)
1035 return ENXIO;
1036
1037 if ((error = fd_allocfile(&fp, &fd)) != 0)
1038 return error;
1039
1040 fcr = pool_get(&fcrpl, PR_WAITOK);
1041 getnanotime(&fcr->btime);
1042 fcr->atime = fcr->mtime = fcr->btime;
1043 mutex_spin_enter(&crypto_mtx);
1044 TAILQ_INIT(&fcr->csessions);
1045 TAILQ_INIT(&fcr->crp_ret_mq);
1046 TAILQ_INIT(&fcr->crp_ret_mkq);
1047 selinit(&fcr->sinfo);
1048 /*
1049 * Don't ever return session 0, to allow detection of
1050 * failed creation attempts with multi-create ioctl.
1051 */
1052 fcr->sesn = 1;
1053 fcr->requestid = 1;
1054 mutex_spin_exit(&crypto_mtx);
1055 return fd_clone(fp, fd, flag, &cryptofops, fcr);
1056 }
1057
1058 static int
1059 cryptoread(dev_t dev, struct uio *uio, int ioflag)
1060 {
1061 return EIO;
1062 }
1063
1064 static int
1065 cryptowrite(dev_t dev, struct uio *uio, int ioflag)
1066 {
1067 return EIO;
1068 }
1069
1070 int
1071 cryptoselect(dev_t dev, int rw, struct lwp *l)
1072 {
1073 return 0;
1074 }
1075
1076 /*static*/
1077 struct cdevsw crypto_cdevsw = {
1078 /* open */ cryptoopen,
1079 /* close */ noclose,
1080 /* read */ cryptoread,
1081 /* write */ cryptowrite,
1082 /* ioctl */ noioctl,
1083 	/* stop */	nostop,
1084 	/* tty */	notty,
1085 /* poll */ cryptoselect /*nopoll*/,
1086 /* mmap */ nommap,
1087 /* kqfilter */ nokqfilter,
1088 /* type */ D_OTHER,
1089 };
1090
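/*
 * Illustrative only: a hedged userland sketch of the asynchronous
 * multi-request interface implemented by cryptodev_mop() and friends:
 * submit a batch with CIOCNCRYPTM, wait with poll(2), then collect the
 * results with CIOCNCRYPTRETM.  Structure and field names are taken from
 * the handlers in this file; fd and session setup are assumed to have
 * been done as for CIOCCRYPT, and error handling is omitted.
 *
 *	struct crypt_n_op cnop[2];	(fill in .ses, .op, .len, .src,
 *					 .dst, .iv as for CIOCCRYPT)
 *	struct crypt_mop mop;
 *	mop.count = 2;
 *	mop.reqs = cnop;
 *	ioctl(fd, CIOCNCRYPTM, &mop);	(cnop[i].reqid identifies each request)
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);		(returns when completed requests are ready)
 *
 *	struct crypt_result res[2];
 *	struct cryptret ret;
 *	ret.count = 2;
 *	ret.results = res;
 *	ioctl(fd, CIOCNCRYPTRETM, &ret);	(ret.count = number collected;
 *						 res[i].reqid/.status per request)
 */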
1091 int
1092 cryptodev_mop(struct fcrypt *fcr,
1093 struct crypt_n_op * cnop,
1094 int count, struct lwp *l)
1095 {
1096 struct cryptop *crp = NULL;
1097 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
1098 int req, error=0;
1099 struct csession *cse;
1100 int flags=0;
1101 int iov_len;
1102
1103 for (req = 0; req < count; req++) {
1104 mutex_spin_enter(&crypto_mtx);
1105 cse = csefind(fcr, cnop[req].ses);
1106 if (cse == NULL) {
1107 DPRINTF(("csefind failed\n"));
1108 cnop[req].status = EINVAL;
1109 mutex_spin_exit(&crypto_mtx);
1110 continue;
1111 }
1112 mutex_spin_exit(&crypto_mtx);
1113
1114 if (cnop[req].len > 256*1024-4) {
1115 DPRINTF(("length failed\n"));
1116 cnop[req].status = EINVAL;
1117 continue;
1118 }
1119 if (cse->txform) {
1120 if (cnop[req].len == 0 ||
1121 (cnop[req].len % cse->txform->blocksize) != 0) {
1122 cnop[req].status = EINVAL;
1123 continue;
1124 }
1125 }
1126
1127 crp = crypto_getreq((cse->txform != NULL) +
1128 (cse->thash != NULL) +
1129 (cse->tcomp != NULL));
1130 if (crp == NULL) {
1131 cnop[req].status = ENOMEM;
1132 goto bail;
1133 }
1134
1135 iov_len = cnop[req].len;
1136 /* got a compression/decompression max size? */
1137 if ((cse->tcomp) && cnop[req].dst_len) {
1138 if (iov_len < cnop[req].dst_len) {
1139 /* Need larger iov to deal with decompress */
1140 iov_len = cnop[req].dst_len;
1141 }
1142 DPRINTF(("cryptodev_mop: iov_len -> %d for decompress\n", iov_len));
1143 }
1144
1145 (void)memset(&crp->uio, 0, sizeof(crp->uio));
1146 crp->uio.uio_iovcnt = 1;
1147 crp->uio.uio_resid = 0;
1148 crp->uio.uio_rw = UIO_WRITE;
1149 crp->uio.uio_iov = crp->iovec;
1150 UIO_SETUP_SYSSPACE(&crp->uio);
1151 memset(&crp->iovec, 0, sizeof(crp->iovec));
1152 crp->uio.uio_iov[0].iov_len = iov_len;
1153 DPRINTF(("cryptodev_mop: kmem_alloc(%d) for iov \n", iov_len));
1154 crp->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
1155 crp->uio.uio_resid = crp->uio.uio_iov[0].iov_len;
1156
1157 if (cse->tcomp) {
1158 crdc = crp->crp_desc;
1159 }
1160
1161 if (cse->thash) {
1162 crda = crdc ? crdc->crd_next : crp->crp_desc;
1163 if (cse->txform && crda)
1164 crde = crda->crd_next;
1165 } else {
1166 if (cse->txform) {
1167 crde = crdc ? crdc->crd_next : crp->crp_desc;
1168 } else if (!cse->tcomp) {
1169 error = EINVAL;
1170 goto bail;
1171 }
1172 }
1173
1174 if ((copyin(cnop[req].src,
1175 crp->uio.uio_iov[0].iov_base, cnop[req].len))) {
1176 cnop[req].status = EINVAL;
1177 goto bail;
1178 }
1179
1180 if (crdc) {
1181 switch (cnop[req].op) {
1182 case COP_COMP:
1183 crdc->crd_flags |= CRD_F_COMP;
1184 break;
1185 case COP_DECOMP:
1186 crdc->crd_flags &= ~CRD_F_COMP;
1187 break;
1188 default:
1189 break;
1190 }
1191 /* more data to follow? */
1192 if (cnop[req].flags & COP_F_MORE) {
1193 flags |= CRYPTO_F_MORE;
1194 }
1195 crdc->crd_len = cnop[req].len;
1196 crdc->crd_inject = 0;
1197
1198 crdc->crd_alg = cse->comp_alg;
1199 crdc->crd_key = NULL;
1200 crdc->crd_klen = 0;
1201 DPRINTF(("cryptodev_mop[%d]: crdc setup for comp_alg %d"
1202 " len %d.\n",
1203 (uint32_t)cse->sid, crdc->crd_alg,
1204 crdc->crd_len));
1205 }
1206
1207 if (crda) {
1208 crda->crd_skip = 0;
1209 crda->crd_len = cnop[req].len;
1210 crda->crd_inject = 0; /* ??? */
1211
1212 crda->crd_alg = cse->mac;
1213 crda->crd_key = cse->mackey;
1214 crda->crd_klen = cse->mackeylen * 8;
1215 }
1216
1217 if (crde) {
1218 if (cnop[req].op == COP_ENCRYPT)
1219 crde->crd_flags |= CRD_F_ENCRYPT;
1220 else
1221 crde->crd_flags &= ~CRD_F_ENCRYPT;
1222 crde->crd_len = cnop[req].len;
1223 crde->crd_inject = 0;
1224
1225 crde->crd_alg = cse->cipher;
1226 #ifdef notyet /* XXX must notify h/w driver new key, drain */
1227 if(cnop[req].key && cnop[req].keylen) {
1228 crde->crd_key = malloc(cnop[req].keylen,
1229 M_XDATA, M_WAITOK);
1230 if((error = copyin(cnop[req].key,
1231 crde->crd_key, cnop[req].keylen))) {
1232 cnop[req].status = EINVAL;
1233 goto bail;
1234 }
1235 crde->crd_klen = cnop[req].keylen * 8;
1236 } else { ... }
1237 #endif
1238 crde->crd_key = cse->key;
1239 crde->crd_klen = cse->keylen * 8;
1240 }
1241
1242 crp->crp_ilen = cnop[req].len;
1243 crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM |
1244 (cnop[req].flags & COP_F_BATCH) | flags;
1245 crp->crp_buf = (void *)&crp->uio;
1246 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_mcb;
1247 crp->crp_sid = cse->sid;
1248 crp->crp_opaque = (void *)cse;
1249 crp->fcrp = fcr;
1250 crp->dst = cnop[req].dst;
1251 crp->len = cnop[req].len; /* input len, iov may be larger */
1252 crp->mac = cnop[req].mac;
1253 DPRINTF(("cryptodev_mop: iov_base %p dst %p len %d mac %p\n",
1254 crp->uio.uio_iov[0].iov_base, crp->dst, crp->len,
1255 crp->mac));
1256
1257 if (cnop[req].iv) {
1258 if (crde == NULL) {
1259 cnop[req].status = EINVAL;
1260 goto bail;
1261 }
1262 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1263 cnop[req].status = EINVAL;
1264 goto bail;
1265 }
1266 if ((error = copyin(cnop[req].iv, crp->tmp_iv,
1267 cse->txform->blocksize))) {
1268 cnop[req].status = EINVAL;
1269 goto bail;
1270 }
1271 (void)memcpy(crde->crd_iv, crp->tmp_iv,
1272 cse->txform->blocksize);
1273 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
1274 crde->crd_skip = 0;
1275 } else if (crde) {
1276 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1277 crde->crd_skip = 0;
1278 } else {
1279 crde->crd_flags |= CRD_F_IV_PRESENT;
1280 crde->crd_skip = cse->txform->blocksize;
1281 crde->crd_len -= cse->txform->blocksize;
1282 }
1283 }
1284
1285 if (cnop[req].mac) {
1286 if (crda == NULL) {
1287 cnop[req].status = EINVAL;
1288 goto bail;
1289 }
1290 crp->crp_mac=cse->tmp_mac;
1291 }
1292 cnop[req].reqid = atomic_inc_32_nv(&(fcr->requestid));
1293 crp->crp_reqid = cnop[req].reqid;
1294 crp->crp_usropaque = cnop[req].opaque;
1295 #ifdef notyet
1296 eagain:
1297 #endif
1298 cnop[req].status = crypto_dispatch(crp);
1299 mutex_spin_enter(&crypto_mtx); /* XXX why mutex? */
1300
1301 switch (cnop[req].status) {
1302 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
1303 case EAGAIN:
1304 mutex_spin_exit(&crypto_mtx);
1305 goto eagain;
1306 break;
1307 #endif
1308 case 0:
1309 break;
1310 default:
1311 DPRINTF(("cryptodev_op: not waiting, error.\n"));
1312 mutex_spin_exit(&crypto_mtx);
1313 goto bail;
1314 }
1315
1316 mutex_spin_exit(&crypto_mtx);
1317 bail:
1318 if (cnop[req].status) {
1319 if (crp) {
1320 if (crp->uio.uio_iov[0].iov_base) {
1321 kmem_free(crp->uio.uio_iov[0].iov_base,
1322 crp->uio.uio_iov[0].iov_len);
1323 }
1324 crypto_freereq(crp);
1325 }
1326 error = 0;
1327 }
1328 }
1329 return error;
1330 }
1331
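/*
 * Asymmetric-key analogue of cryptodev_mop() (CIOCNFKEYM): validate and
 * dispatch a batch of key operations with CRYPTO_F_CBIMM set; the
 * results are collected later via the per-fd key return queue.
 */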
1332 static int
1333 cryptodev_mkey(struct fcrypt *fcr, struct crypt_n_kop *kop, int count)
1334 {
1335 struct cryptkop *krp = NULL;
1336 int error = EINVAL;
1337 int in, out, size, i, req;
1338
1339 for (req = 0; req < count; req++) {
1340 if (kop[req].crk_iparams + kop[req].crk_oparams > CRK_MAXPARAM)
1341 return EFBIG;
1342
1343 in = kop[req].crk_iparams;
1344 out = kop[req].crk_oparams;
1345 switch (kop[req].crk_op) {
1346 case CRK_MOD_EXP:
1347 if (in == 3 && out == 1)
1348 break;
1349 kop[req].crk_status = EINVAL;
1350 continue;
1351 case CRK_MOD_EXP_CRT:
1352 if (in == 6 && out == 1)
1353 break;
1354 kop[req].crk_status = EINVAL;
1355 continue;
1356 case CRK_DSA_SIGN:
1357 if (in == 5 && out == 2)
1358 break;
1359 kop[req].crk_status = EINVAL;
1360 continue;
1361 case CRK_DSA_VERIFY:
1362 if (in == 7 && out == 0)
1363 break;
1364 kop[req].crk_status = EINVAL;
1365 continue;
1366 case CRK_DH_COMPUTE_KEY:
1367 if (in == 3 && out == 1)
1368 break;
1369 kop[req].crk_status = EINVAL;
1370 continue;
1371 case CRK_MOD_ADD:
1372 if (in == 3 && out == 1)
1373 break;
1374 kop[req].crk_status = EINVAL;
1375 continue;
1376 case CRK_MOD_ADDINV:
1377 if (in == 2 && out == 1)
1378 break;
1379 kop[req].crk_status = EINVAL;
1380 continue;
1381 case CRK_MOD_SUB:
1382 if (in == 3 && out == 1)
1383 break;
1384 kop[req].crk_status = EINVAL;
1385 continue;
1386 case CRK_MOD_MULT:
1387 if (in == 3 && out == 1)
1388 break;
1389 kop[req].crk_status = EINVAL;
1390 continue;
1391 case CRK_MOD_MULTINV:
1392 if (in == 2 && out == 1)
1393 break;
1394 kop[req].crk_status = EINVAL;
1395 continue;
1396 case CRK_MOD:
1397 if (in == 2 && out == 1)
1398 break;
1399 kop[req].crk_status = EINVAL;
1400 continue;
1401 default:
1402 kop[req].crk_status = EINVAL;
1403 continue;
1404 }
1405
1406 krp = pool_get(&cryptkop_pool, PR_WAITOK);
1407 (void)memset(krp, 0, sizeof *krp);
1408 cv_init(&krp->krp_cv, "crykdev");
1409 krp->krp_op = kop[req].crk_op;
1410 krp->krp_status = kop[req].crk_status;
1411 krp->krp_iparams = kop[req].crk_iparams;
1412 krp->krp_oparams = kop[req].crk_oparams;
1413 krp->krp_status = 0;
1414 krp->krp_callback =
1415 (int (*) (struct cryptkop *)) cryptodevkey_mcb;
1416 (void)memcpy(krp->crk_param, kop[req].crk_param,
1417 sizeof(kop[req].crk_param));
1418
1419 krp->krp_flags = CRYPTO_F_CBIMM;
1420
1421 for (i = 0; i < CRK_MAXPARAM; i++)
1422 krp->krp_param[i].crp_nbits =
1423 kop[req].crk_param[i].crp_nbits;
1424 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
1425 size = (krp->krp_param[i].crp_nbits + 7) / 8;
1426 if (size == 0)
1427 continue;
1428 krp->krp_param[i].crp_p =
1429 kmem_alloc(size, KM_SLEEP);
1430 if (i >= krp->krp_iparams)
1431 continue;
1432 kop[req].crk_status =
1433 copyin(kop[req].crk_param[i].crp_p,
1434 krp->krp_param[i].crp_p, size);
1435 if (kop[req].crk_status)
1436 goto fail;
1437 }
1438 krp->fcrp = fcr;
1439
1440 kop[req].crk_reqid = atomic_inc_32_nv(&(fcr->requestid));
1441 krp->krp_reqid = kop[req].crk_reqid;
1442 krp->krp_usropaque = kop[req].crk_opaque;
1443
1444 kop[req].crk_status = crypto_kdispatch(krp);
1445 if (kop[req].crk_status != 0) {
1446 goto fail;
1447 }
1448
1449 fail:
1450 if(kop[req].crk_status) {
1451 if (krp) {
1452 kop[req].crk_status = krp->krp_status;
1453 for (i = 0; i < CRK_MAXPARAM; i++) {
1454 struct crparam *kp =
1455 &(krp->krp_param[i]);
1456 if (kp->crp_p) {
1457 size = (kp->crp_nbits + 7) / 8;
1458 KASSERT(size > 0);
1459 memset(kp->crp_p, 0, size);
1460 kmem_free(kp->crp_p, size);
1461 }
1462 }
1463 cv_destroy(&krp->krp_cv);
1464 pool_put(&cryptkop_pool, krp);
1465 }
1466 }
1467 error = 0;
1468 }
1469 DPRINTF(("cryptodev_key: error=0x%08x\n", error));
1470 return error;
1471 }
1472
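/*
 * Create a symmetric session (CIOCGSESSION): map the requested cipher,
 * MAC and compression algorithms to their transforms, copy in the keys,
 * and register the new session with the opencrypto framework.
 */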
1473 int
1474 cryptodev_session(struct fcrypt *fcr, struct session_op *sop)
1475 {
1476 struct cryptoini cria, crie;
1477 struct cryptoini cric; /* compressor */
1478 struct cryptoini *crihead = NULL;
1479 const struct enc_xform *txform = NULL;
1480 const struct auth_hash *thash = NULL;
1481 const struct comp_algo *tcomp = NULL;
1482 struct csession *cse;
1483 u_int64_t sid;
1484 int error = 0;
1485
1486 DPRINTF(("cryptodev_session() cipher=%d, mac=%d\n", sop->cipher, sop->mac));
1487
1488 /* XXX there must be a way to not embed the list of xforms here */
1489 switch (sop->cipher) {
1490 case 0:
1491 break;
1492 case CRYPTO_DES_CBC:
1493 txform = &enc_xform_des;
1494 break;
1495 case CRYPTO_3DES_CBC:
1496 txform = &enc_xform_3des;
1497 break;
1498 case CRYPTO_BLF_CBC:
1499 txform = &enc_xform_blf;
1500 break;
1501 case CRYPTO_CAST_CBC:
1502 txform = &enc_xform_cast5;
1503 break;
1504 case CRYPTO_SKIPJACK_CBC:
1505 txform = &enc_xform_skipjack;
1506 break;
1507 case CRYPTO_AES_CBC:
1508 txform = &enc_xform_rijndael128;
1509 break;
1510 case CRYPTO_NULL_CBC:
1511 txform = &enc_xform_null;
1512 break;
1513 case CRYPTO_ARC4:
1514 txform = &enc_xform_arc4;
1515 break;
1516 default:
1517 DPRINTF(("Invalid cipher %d\n", sop->cipher));
1518 return EINVAL;
1519 }
1520
1521 switch (sop->comp_alg) {
1522 case 0:
1523 break;
1524 case CRYPTO_DEFLATE_COMP:
1525 tcomp = &comp_algo_deflate;
1526 break;
1527 case CRYPTO_GZIP_COMP:
1528 tcomp = &comp_algo_gzip;
1529 DPRINTF(("cryptodev_session() tcomp for GZIP\n"));
1530 break;
1531 default:
1532 DPRINTF(("Invalid compression alg %d\n", sop->comp_alg));
1533 return EINVAL;
1534 }
1535
1536 switch (sop->mac) {
1537 case 0:
1538 break;
1539 case CRYPTO_MD5_HMAC:
1540 thash = &auth_hash_hmac_md5;
1541 break;
1542 case CRYPTO_SHA1_HMAC:
1543 thash = &auth_hash_hmac_sha1;
1544 break;
1545 case CRYPTO_MD5_HMAC_96:
1546 thash = &auth_hash_hmac_md5_96;
1547 break;
1548 case CRYPTO_SHA1_HMAC_96:
1549 thash = &auth_hash_hmac_sha1_96;
1550 break;
1551 case CRYPTO_SHA2_HMAC:
1552 /* XXX switching on key length seems questionable */
1553 if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) {
1554 thash = &auth_hash_hmac_sha2_256;
1555 } else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) {
1556 thash = &auth_hash_hmac_sha2_384;
1557 } else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) {
1558 thash = &auth_hash_hmac_sha2_512;
1559 } else {
1560 DPRINTF(("Invalid mackeylen %d\n", sop->mackeylen));
1561 return EINVAL;
1562 }
1563 break;
1564 case CRYPTO_RIPEMD160_HMAC:
1565 thash = &auth_hash_hmac_ripemd_160;
1566 break;
1567 case CRYPTO_RIPEMD160_HMAC_96:
1568 thash = &auth_hash_hmac_ripemd_160_96;
1569 break;
1570 case CRYPTO_MD5:
1571 thash = &auth_hash_md5;
1572 break;
1573 case CRYPTO_SHA1:
1574 thash = &auth_hash_sha1;
1575 break;
1576 case CRYPTO_NULL_HMAC:
1577 thash = &auth_hash_null;
1578 break;
1579 default:
1580 DPRINTF(("Invalid mac %d\n", sop->mac));
1581 return EINVAL;
1582 }
1583
1584 memset(&crie, 0, sizeof(crie));
1585 memset(&cria, 0, sizeof(cria));
1586 memset(&cric, 0, sizeof(cric));
1587
1588 if (tcomp) {
1589 cric.cri_alg = tcomp->type;
1590 cric.cri_klen = 0;
1591 DPRINTF(("tcomp->type = %d\n", tcomp->type));
1592
1593 crihead = &cric;
1594 if (thash) {
1595 cric.cri_next = &cria;
1596 } else if (txform) {
1597 cric.cri_next = &crie;
1598 }
1599 }
1600
1601 if (txform) {
1602 crie.cri_alg = txform->type;
1603 crie.cri_klen = sop->keylen * 8;
1604 if (sop->keylen > txform->maxkey ||
1605 sop->keylen < txform->minkey) {
1606 DPRINTF(("keylen %d not in [%d,%d]\n",
1607 sop->keylen, txform->minkey, txform->maxkey));
1608 error = EINVAL;
1609 goto bail;
1610 }
1611
1612 crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK);
1613 if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8)))
1614 goto bail;
1615 if (!crihead) {
1616 crihead = &crie;
1617 }
1618 }
1619
1620 if (thash) {
1621 cria.cri_alg = thash->type;
1622 cria.cri_klen = sop->mackeylen * 8;
1623 if (sop->mackeylen != thash->keysize) {
1624 DPRINTF(("mackeylen %d != keysize %d\n",
1625 sop->mackeylen, thash->keysize));
1626 error = EINVAL;
1627 goto bail;
1628 }
1629 if (cria.cri_klen) {
1630 cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA,
1631 M_WAITOK);
1632 if ((error = copyin(sop->mackey, cria.cri_key,
1633 cria.cri_klen / 8))) {
1634 goto bail;
1635 }
1636 }
1637 if (txform)
1638 cria.cri_next = &crie; /* XXX forces enc then hash? */
1639 if (!crihead) {
1640 crihead = &cria;
1641 }
1642 }
1643
1644 /* crypto_newsession requires that we hold the mutex. */
1645 mutex_spin_enter(&crypto_mtx);
1646 error = crypto_newsession(&sid, crihead, crypto_devallowsoft);
1647 if (!error) {
1648 DPRINTF(("cyrptodev_session: got session %d\n", (uint32_t)sid));
1649 cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
1650 cria.cri_key, cria.cri_klen, (txform ? sop->cipher : 0), sop->mac,
1651 (tcomp ? sop->comp_alg : 0), txform, thash, tcomp);
1652 if (cse != NULL) {
1653 sop->ses = cse->ses;
1654 } else {
1655 DPRINTF(("csecreate failed\n"));
1656 crypto_freesession(sid);
1657 error = EINVAL;
1658 }
1659 } else {
1660 DPRINTF(("SIOCSESSION violates kernel parameters %d\n",
1661 error));
1662 }
1663 mutex_spin_exit(&crypto_mtx);
1664 bail:
1665 if (error) {
1666 if (crie.cri_key) {
1667 memset(crie.cri_key, 0, crie.cri_klen / 8);
1668 free(crie.cri_key, M_XDATA);
1669 }
1670 if (cria.cri_key) {
1671 memset(cria.cri_key, 0, cria.cri_klen / 8);
1672 free(cria.cri_key, M_XDATA);
1673 }
1674 }
1675 return error;
1676 }
1677
1678 int
1679 cryptodev_msession(struct fcrypt *fcr, struct session_n_op *sn_ops,
1680 int count)
1681 {
1682 int i;
1683
1684 for (i = 0; i < count; i++, sn_ops++) {
1685 struct session_op s_op;
1686 s_op.cipher = sn_ops->cipher;
1687 s_op.mac = sn_ops->mac;
1688 s_op.keylen = sn_ops->keylen;
1689 s_op.key = sn_ops->key;
1690 s_op.mackeylen = sn_ops->mackeylen;
1691 s_op.mackey = sn_ops->mackey;
1692
1693 sn_ops->status = cryptodev_session(fcr, &s_op);
1694 sn_ops->ses = s_op.ses;
1695 }
1696
1697 return 0;
1698 }
1699
1700 static int
1701 cryptodev_msessionfin(struct fcrypt *fcr, int count, u_int32_t *sesid)
1702 {
1703 struct csession *cse;
1704 int req, error = 0;
1705
1706 mutex_spin_enter(&crypto_mtx);
1707 for(req = 0; req < count; req++) {
1708 cse = csefind(fcr, sesid[req]);
1709 if (cse == NULL)
1710 continue;
1711 csedelete(fcr, cse);
1712 error = csefree(cse);
1713 }
1714 mutex_spin_exit(&crypto_mtx);
1715 return 0;
1716 }
1717
1718 /*
1719  * Collect as many completed requests as are available, up to a
1720  * maximum of count requests.
1721  * Return the number of requests collected.
1722  */
1723 static int
1724 cryptodev_getmstatus(struct fcrypt *fcr, struct crypt_result *crypt_res,
1725 int count)
1726 {
1727 struct cryptop *crp = NULL;
1728 struct cryptkop *krp = NULL;
1729 struct csession *cse;
1730 int i, size, req = 0;
1731 int completed=0;
1732
1733 	/* Move them onto local queues so nobody else can grab them and
1734 	 * the copyout can be done later without holding the lock. */
1735 TAILQ_HEAD(, cryptop) crp_delfree_q =
1736 TAILQ_HEAD_INITIALIZER(crp_delfree_q);
1737 TAILQ_HEAD(, cryptkop) krp_delfree_q =
1738 TAILQ_HEAD_INITIALIZER(krp_delfree_q);
1739
1740 	/* At this point we do not know which responses the user is asking
1741 	 * for (symmetric or asymmetric), so we take one from each queue in
1742 	 * turn: if the count is 2, collect 1 symmetric and 1 asymmetric;
1743 	 * if it is 3, collect 2 symmetric and 1 asymmetric, and so on. */
1744
1745 /* pull off a list of requests while protected from changes */
1746 mutex_spin_enter(&crypto_mtx);
1747 while (req < count) {
1748 crp = TAILQ_FIRST(&fcr->crp_ret_mq);
1749 if (crp) {
1750 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1751 TAILQ_INSERT_TAIL(&crp_delfree_q, crp, crp_next);
1752 cse = (struct csession *)crp->crp_opaque;
1753
1754 /* see if the session is still valid */
1755 cse = csefind(fcr, cse->ses);
1756 if (cse != NULL) {
1757 crypt_res[req].status = 0;
1758 } else {
1759 DPRINTF(("csefind failed\n"));
1760 crypt_res[req].status = EINVAL;
1761 }
1762 req++;
1763 }
1764 if(req < count) {
1765 crypt_res[req].status = 0;
1766 krp = TAILQ_FIRST(&fcr->crp_ret_mkq);
1767 if (krp) {
1768 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1769 TAILQ_INSERT_TAIL(&krp_delfree_q, krp, krp_next);
1770 req++;
1771 }
1772 }
1773 }
1774 mutex_spin_exit(&crypto_mtx);
1775
1776 /* now do all the work outside the mutex */
1777 for(req=0; req < count ;) {
1778 crp = TAILQ_FIRST(&crp_delfree_q);
1779 if (crp) {
1780 if (crypt_res[req].status != 0) {
1781 /* csefind failed during collection */
1782 goto bail;
1783 }
1784 cse = (struct csession *)crp->crp_opaque;
1785 crypt_res[req].reqid = crp->crp_reqid;
1786 crypt_res[req].opaque = crp->crp_usropaque;
1787 completed++;
1788
1789 if (crp->crp_etype != 0) {
1790 crypt_res[req].status = crp->crp_etype;
1791 goto bail;
1792 }
1793
1794 if (cse->error) {
1795 crypt_res[req].status = cse->error;
1796 goto bail;
1797 }
1798
1799 if (crp->dst && (crypt_res[req].status =
1800 copyout(crp->uio.uio_iov[0].iov_base, crp->dst,
1801 crp->len)))
1802 goto bail;
1803
1804 if (crp->mac && (crypt_res[req].status =
1805 copyout(crp->crp_mac, crp->mac,
1806 cse->thash->authsize)))
1807 goto bail;
1808
1809 bail:
1810 TAILQ_REMOVE(&crp_delfree_q, crp, crp_next);
1811 kmem_free(crp->uio.uio_iov[0].iov_base,
1812 crp->uio.uio_iov[0].iov_len);
1813 crypto_freereq(crp);
1814 req++;
1815 }
1816
1817 if (req < count) {
1818 krp = TAILQ_FIRST(&krp_delfree_q);
1819 if (krp) {
1820 crypt_res[req].reqid = krp->krp_reqid;
1821 crypt_res[req].opaque = krp->krp_usropaque;
1822 completed++;
1823 if (krp->krp_status != 0) {
1824 DPRINTF(("cryptodev_key: "
1825 "krp->krp_status 0x%08x\n",
1826 krp->krp_status));
1827 crypt_res[req].status = krp->krp_status;
1828 goto fail;
1829 }
1830
1831 for (i = krp->krp_iparams; i < krp->krp_iparams
1832 + krp->krp_oparams; i++) {
1833 size = (krp->krp_param[i].crp_nbits
1834 + 7) / 8;
1835 if (size == 0)
1836 continue;
1837 crypt_res[req].status = copyout
1838 (krp->krp_param[i].crp_p,
1839 krp->crk_param[i].crp_p, size);
1840 if (crypt_res[req].status) {
1841 DPRINTF(("cryptodev_key: "
1842 "copyout oparam %d failed, "
1843 "error=%d\n",
1844 i - krp->krp_iparams,
1845 crypt_res[req].status));
1846 goto fail;
1847 }
1848 }
1849 fail:
1850 TAILQ_REMOVE(&krp_delfree_q, krp, krp_next);
1851 /* not sure what to do for this */
1852 /* kop[req].crk_status = krp->krp_status; */
1853 for (i = 0; i < CRK_MAXPARAM; i++) {
1854 struct crparam *kp = &(krp->krp_param[i]);
1855 if (kp->crp_p) {
1856 size = (kp->crp_nbits + 7) / 8;
1857 KASSERT(size > 0);
1858 (void)memset(kp->crp_p, 0, size);
1859 kmem_free(kp->crp_p, size);
1860 }
1861 }
1862 cv_destroy(&krp->krp_cv);
1863 pool_put(&cryptkop_pool, krp);
1864 req++;
1865 }
1866 }
1867 }
1868
1869 return completed;
1870 }
1871
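/*
 * Return the result of the single completed request identified by
 * crypt_res->reqid (CIOCNCRYPTRET), searching both the symmetric and
 * asymmetric return queues; EINPROGRESS if it has not completed yet.
 */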
1872 static int
1873 cryptodev_getstatus (struct fcrypt *fcr, struct crypt_result *crypt_res)
1874 {
1875 struct cryptop *crp = NULL, *cnext;
1876 struct cryptkop *krp = NULL, *knext;
1877 struct csession *cse;
1878 int i, size, req = 0;
1879
1880 mutex_spin_enter(&crypto_mtx);
1881 	/* Here we do not know whether the request was symmetric or
1882 	 * asymmetric, so check both return queues. */
1883 TAILQ_FOREACH_SAFE(crp, &fcr->crp_ret_mq, crp_next, cnext) {
1884 if(crp && (crp->crp_reqid == crypt_res->reqid)) {
1885 cse = (struct csession *)crp->crp_opaque;
1886 crypt_res->opaque = crp->crp_usropaque;
1887 cse = csefind(fcr, cse->ses);
1888 if (cse == NULL) {
1889 DPRINTF(("csefind failed\n"));
1890 crypt_res->status = EINVAL;
1891 goto bail;
1892 }
1893
1894 if (crp->crp_etype != 0) {
1895 crypt_res->status = crp->crp_etype;
1896 goto bail;
1897 }
1898
1899 if (cse->error) {
1900 crypt_res->status = cse->error;
1901 goto bail;
1902 }
1903
1904 if (crp->dst && (crypt_res->status =
1905 copyout(crp->uio.uio_iov[0].iov_base,
1906 crp->dst, crp->len)))
1907 goto bail;
1908
1909 if (crp->mac && (crypt_res->status =
1910 copyout(crp->crp_mac, crp->mac,
1911 cse->thash->authsize)))
1912 goto bail;
1913 bail:
1914 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1915
1916 mutex_spin_exit(&crypto_mtx);
1917 crypto_freereq(crp);
1918 return 0;
1919 }
1920 }
1921
1922 TAILQ_FOREACH_SAFE(krp, &fcr->crp_ret_mkq, krp_next, knext) {
1923 if(krp && (krp->krp_reqid == crypt_res->reqid)) {
1924 crypt_res[req].opaque = krp->krp_usropaque;
1925 if (krp->krp_status != 0) {
1926 DPRINTF(("cryptodev_key: "
1927 "krp->krp_status 0x%08x\n",
1928 krp->krp_status));
1929 crypt_res[req].status = krp->krp_status;
1930 goto fail;
1931 }
1932
1933 for (i = krp->krp_iparams; i < krp->krp_iparams +
1934 krp->krp_oparams; i++) {
1935 size = (krp->krp_param[i].crp_nbits + 7) / 8;
1936 if (size == 0)
1937 continue;
1938 crypt_res[req].status = copyout(
1939 krp->krp_param[i].crp_p,
1940 krp->crk_param[i].crp_p, size);
1941 if (crypt_res[req].status) {
1942 DPRINTF(("cryptodev_key: copyout oparam"
1943 "%d failed, error=%d\n",
1944 i - krp->krp_iparams,
1945 crypt_res[req].status));
1946 goto fail;
1947 }
1948 }
1949 fail:
1950 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1951 mutex_spin_exit(&crypto_mtx);
1952 /* not sure what to do for this */
1953 /* kop[req].crk_status = krp->krp_status; */
1954 for (i = 0; i < CRK_MAXPARAM; i++) {
1955 struct crparam *kp = &(krp->krp_param[i]);
1956 if (kp->crp_p) {
1957 size = (kp->crp_nbits + 7) / 8;
1958 KASSERT(size > 0);
1959 memset(kp->crp_p, 0, size);
1960 kmem_free(kp->crp_p, size);
1961 }
1962 }
1963 cv_destroy(&krp->krp_cv);
1964 pool_put(&cryptkop_pool, krp);
1965 return 0;
1966 }
1967 }
1968 mutex_spin_exit(&crypto_mtx);
1969 return EINPROGRESS;
1970 }
1971
1972 static int
1973 cryptof_stat(struct file *fp, struct stat *st)
1974 {
1975 struct fcrypt *fcr = fp->f_data;
1976
1977 	(void)memset(st, 0, sizeof(*st));
1978
1979 mutex_spin_enter(&crypto_mtx);
1980 st->st_dev = makedev(cdevsw_lookup_major(&crypto_cdevsw), fcr->sesn);
1981 st->st_atimespec = fcr->atime;
1982 st->st_mtimespec = fcr->mtime;
1983 st->st_ctimespec = st->st_birthtimespec = fcr->btime;
1984 st->st_uid = kauth_cred_geteuid(fp->f_cred);
1985 st->st_gid = kauth_cred_getegid(fp->f_cred);
1986 mutex_spin_exit(&crypto_mtx);
1987
1988 return 0;
1989 }
1990
1991 static int
1992 cryptof_poll(struct file *fp, int events)
1993 {
1994 struct fcrypt *fcr = (struct fcrypt *)fp->f_data;
1995 int revents = 0;
1996
1997 if (!(events & (POLLIN | POLLRDNORM))) {
1998 /* only support read and POLLIN */
1999 return 0;
2000 }
2001
2002 mutex_spin_enter(&crypto_mtx);
2003 if (TAILQ_EMPTY(&fcr->crp_ret_mq) && TAILQ_EMPTY(&fcr->crp_ret_mkq)) {
2004 /* no completed requests pending, save the poll for later */
2005 selrecord(curlwp, &fcr->sinfo);
2006 } else {
2007 /* let the app(s) know that there are completed requests */
2008 revents = events & (POLLIN | POLLRDNORM);
2009 }
2010 mutex_spin_exit(&crypto_mtx);
2011
2012 return revents;
2013 }
2014
2015 /*
2016 * Pseudo-device initialization routine for /dev/crypto
2017 */
2018 void cryptoattach(int);
2019
2020 void
2021 cryptoattach(int num)
2022 {
2023 pool_init(&fcrpl, sizeof(struct fcrypt), 0, 0, 0, "fcrpl",
2024 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
2025 pool_init(&csepl, sizeof(struct csession), 0, 0, 0, "csepl",
2026 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
2027
2028 	/*
2029 	 * Preallocate space for 64 users, with 5 sessions each.
2030 	 * (Consider that a TLS protocol session requires at least
2031 	 * 3DES, MD5, and SHA1 (both hashes are used in the PRF) for
2032 	 * the negotiation, plus HMAC_SHA1 for the actual SSL records,
2033 	 * consuming one session here for each algorithm.)
2034 	 */
2035 pool_prime(&fcrpl, 64);
2036 pool_prime(&csepl, 64 * 5);
2037 }
2038