/*	$NetBSD: ubsec.c,v 1.13 2007/07/09 21:00:58 ad Exp $	*/
/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */
/*	$OpenBSD: ubsec.c,v 1.127 2003/06/04 14:04:58 jason Exp $	*/

/*
 * Copyright (c) 2000 Jason L. Wright (jason (at) thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt (at) openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik (at) ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#undef UBSEC_DEBUG

/*
 * uBsec 5[56]01, bcm580xx, bcm582x hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/endian.h>
#ifdef __NetBSD__
#define letoh16 htole16
#define letoh32 htole32
#define UBSEC_NO_RNG	/* until statistically tested */
#endif
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#ifdef __OpenBSD__
#include <dev/rndvar.h>
#include <sys/md5k.h>
#else
#include <sys/rnd.h>
#include <sys/md5.h>
#endif
#include <sys/sha1.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/ubsecreg.h>
#include <dev/pci/ubsecvar.h>

/*
 * Prototypes and count for the pci_device structure
 */
static	int  ubsec_probe(struct device *, struct cfdata *, void *);
static	void ubsec_attach(struct device *, struct device *, void *);
static	void ubsec_reset_board(struct ubsec_softc *);
static	void ubsec_init_board(struct ubsec_softc *);
static	void ubsec_init_pciregs(struct pci_attach_args *pa);
static	void ubsec_cleanchip(struct ubsec_softc *);
static	void ubsec_totalreset(struct ubsec_softc *);
static	int  ubsec_free_q(struct ubsec_softc *, struct ubsec_q *);

#ifdef __OpenBSD__
struct cfattach ubsec_ca = {
	sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
};

struct cfdriver ubsec_cd = {
	0, "ubsec", DV_DULL
};
#else
CFATTACH_DECL(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
    NULL, NULL);
extern struct cfdriver ubsec_cd;
#endif

/* patchable */
#ifdef UBSEC_DEBUG
extern int ubsec_debug;
int ubsec_debug = 1;
#endif

static	int  ubsec_intr(void *);
static	int  ubsec_newsession(void *, u_int32_t *, struct cryptoini *);
static	int  ubsec_freesession(void *, u_int64_t);
static	int  ubsec_process(void *, struct cryptop *, int hint);
static	void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void ubsec_feed(struct ubsec_softc *);
static	void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	void ubsec_feed2(struct ubsec_softc *);
#ifndef UBSEC_NO_RNG
static	void ubsec_rng(void *);
#endif /* UBSEC_NO_RNG */
static	int  ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
    struct ubsec_dma_alloc *, int);
static	void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int  ubsec_dmamap_aligned(bus_dmamap_t);

static	int  ubsec_kprocess(void *, struct cryptkop *, int);
static	int  ubsec_kprocess_modexp_sw(struct ubsec_softc *,
    struct cryptkop *, int);
static	int  ubsec_kprocess_modexp_hw(struct ubsec_softc *,
    struct cryptkop *, int);
static	int  ubsec_kprocess_rsapriv(struct ubsec_softc *,
    struct cryptkop *, int);
static	void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int  ubsec_ksigbits(struct crparam *);
static	void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

#ifdef UBSEC_DEBUG
static	void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void ubsec_dump_mcr(struct ubsec_mcr *);
static	void ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *);
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#ifndef HTOLE32
#define	HTOLE32(x) (x) = htole32(x)
#endif
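/*
 * Note on the byte-order macros above: the chip parses its control
 * structures little-endian.  SWAP32 converts a 32-bit word from network
 * (big-endian) order to chip order in place; HTOLE32 does the same from
 * host order.
 */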

struct ubsec_stats ubsecstats;

/*
 * ubsec_maxbatch controls the number of crypto ops to voluntarily
 * collect into one submission to the hardware. This batching happens
 * when ops are dispatched from the crypto subsystem with a hint that
 * more are to follow immediately. These ops must also not be marked
 * with a ``no delay'' flag.
 */
static	int ubsec_maxbatch = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxbatch, CTLFLAG_RW, &ubsec_maxbatch,
    0, "Broadcom driver: max ops to batch w/o interrupt");
#endif

/*
 * ubsec_maxaggr controls the number of crypto ops to submit to the
 * hardware as a unit. This aggregation reduces the number of interrupts
 * to the host at the expense of increased latency (for all but the last
 * operation). For network traffic setting this to one yields the highest
 * performance but at the expense of more interrupt processing.
 */
static	int ubsec_maxaggr = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxaggr, CTLFLAG_RW, &ubsec_maxaggr,
    0, "Broadcom driver: max ops to aggregate under one interrupt");
#endif

static const struct ubsec_product {
	pci_vendor_id_t		ubsec_vendor;
	pci_product_id_t	ubsec_product;
	int			ubsec_flags;
	int			ubsec_statmask;
	const char		*ubsec_name;
} ubsec_products[] = {
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5501,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5501"
	},
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5601,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5601"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5801,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5801"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5802,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5802"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5805,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5805"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5820,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5820"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_SCA1K,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Sun Crypto Accelerator 1000"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821 (Sun)"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5822,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5822"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5823,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5823"
	},

	{ 0,			0,
	  0,
	  0,
	  NULL
	}
};

static const struct ubsec_product *
ubsec_lookup(const struct pci_attach_args *pa)
{
	const struct ubsec_product *up;

	for (up = ubsec_products; up->ubsec_name != NULL; up++) {
		if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor &&
		    PCI_PRODUCT(pa->pa_id) == up->ubsec_product)
			return (up);
	}
	return (NULL);
}

static int
ubsec_probe(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (ubsec_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
ubsec_attach(struct device *parent, struct device *self, void *aux)
{
	struct ubsec_softc *sc = (struct ubsec_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct ubsec_product *up;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;

	up = ubsec_lookup(pa);
	if (up == NULL) {
		printf("\n");
		panic("ubsec_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", up->ubsec_name,
	    PCI_REVISION(pa->pa_class));

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	sc->sc_flags = up->ubsec_flags;
	sc->sc_statmask = up->ubsec_statmask;

	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, BS_BAR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error("%s: can't find mem space",
		    sc->sc_dv.dv_xname);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt",
		    sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error("%s: couldn't get crypto driver id\n",
		    sc->sc_dv.dv_xname);
		pci_intr_disestablish(pc, sc->sc_ih);
		return;
	}

	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			aprint_error("%s: can't allocate queue buffers\n",
			    sc->sc_dv.dv_xname);
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			aprint_error("%s: can't allocate dma buffers\n",
			    sc->sc_dv.dv_xname);
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef __OpenBSD__
		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_init(&sc->sc_rngto, 0);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
skip_rng:
		if (sc->sc_rnghz)
			aprint_normal("%s: random number generator enabled\n",
			    sc->sc_dv.dv_xname);
		else
			aprint_error("%s: WARNING: random number generator "
			    "disabled\n", sc->sc_dv.dv_xname);
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
}

/*
 * UBSEC Interrupt routine
 */
static int
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		return (0);
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags &
			    htole16(UBS_MCR_DONE)) == 0)
				break;

			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1 + npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too, they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if (q->q_stacked_mcr[i])
					ubsec_callback(sc,
					    q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has
		 * been a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("%s: dmaerr %s@%08x\n", sc->sc_dv.dv_xname,
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {	/* XXX check high watermark */
		int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("%s: wakeup crypto (%x)\n", sc->sc_dv.dv_xname,
			    sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wkeup;
		crypto_unblock(sc->sc_cid, wkeup);
	}
	return (1);
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 * OpenBSD comments:
 *   It is assumed that the caller set splnet()
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;
#ifdef UBSEC_DEBUG
	static int max;
#endif /* UBSEC_DEBUG */

	npkts = sc->sc_nqueue;
	if (npkts > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = npkts;
	if (npkts < 2)
		goto feed1;

	/*
	 * Decide how many ops to combine in a single MCR. We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure. Otherwise we clamp
	 * based on the tunable parameter ubsec_maxaggr. Note that
	 * aggregation can happen in two ways: either by batching ops
	 * from above or because the h/w backs up and throttles us.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts > ubsec_maxaggr)
		npkts = ubsec_maxaggr;
	if (npkts > ubsecstats.hst_maxbatch)
		ubsecstats.hst_maxbatch = npkts;
	if (npkts < 2)
		goto feed1;
	ubsecstats.hst_totbatch += npkts - 1;

	if ((stat = READ_REG(sc, BS_STAT)) &
	    (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else {
			ubsecstats.hst_mcr1full++;
		}
		return;
	}

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		printf("%s: new max aggregate %d\n", sc->sc_dv.dv_xname, max);
	}
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next);
		--sc->sc_nqueue;

		v = ((void *)&q2->q_dma->d_dma->d_mcr);
		v = (char *)v + (sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i],
		    sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
		if ((stat = READ_REG(sc, BS_STAT)) &
		    (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			} else {
				ubsecstats.hst_mcr1full++;
			}
			break;
		}

		q = SIMPLEQ_FIRST(&sc->sc_queue);

		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("feed: q->chip %p %08x stat %08x\n",
			    q, (u_int32_t)q->q_dma->d_alloc.dma_paddr,
			    stat);
#endif /* UBSEC_DEBUG */
		q = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
		--sc->sc_nqueue;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
		sc->sc_nqchip++;
	}
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
}

/*
 * Allocate a new 'session' and return an encoded session id. 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/);

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)malloc((sesn + 1) *
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* get an IV, network byte order */
#ifdef __NetBSD__
		rnd_extract_data(ses->ses_iv,
		    sizeof(ses->ses_iv), RND_EXTRACT_ANY);
#else
		get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
#endif

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			bcopy(encini->cri_key, &ses->ses_deskey[0], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[2], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[4], 8);
		} else
			bcopy(encini->cri_key, ses->ses_deskey, 24);

		SWAP32(ses->ses_deskey[0]);
		SWAP32(ses->ses_deskey[1]);
		SWAP32(ses->ses_deskey[2]);
		SWAP32(ses->ses_deskey[3]);
		SWAP32(ses->ses_deskey[4]);
		SWAP32(ses->ses_deskey[5]);
	}

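	/*
	 * Precompute the HMAC inner and outer digest midstates: hash one
	 * block of (key XOR ipad) and one block of (key XOR opad) and save
	 * the resulting chaining values.  The chip resumes both hashes
	 * from these midstates for every packet on this session.
	 */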
	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hminner,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hmouter,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

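	/* Encode our device unit and the session index into the SID. */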
	*sidp = UBSEC_SID(device_unit(&sc->sc_dv), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc;
	int session;
	u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/);

	session = UBSEC_SESSION(sid);
	if (session >= sc->sc_nsessions)
		return (EINVAL);

	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

#ifdef __FreeBSD__	/* Ugly gratuitous changes to bus_dma */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
    int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER
	    /*, ("Too many DMA segments returned when mapping operand")*/);
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
		    (u_int)mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof(seg[0]));
}
#endif

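/*
 * Accept a symmetric crypto request: grab a free queue entry, build the
 * packet context and the input/output scatter/gather lists, then enqueue
 * the MCR and poke the chip unless the caller hints that more requests
 * follow immediately.
 */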
static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
#ifdef __OpenBSD__
	int card;
#endif
	int err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	s = splnet();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		splx(s);
		return (ERESTART);
	}

	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next);
	splx(s);

	dmap = q->q_dma;	/* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&ctx, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (void *)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (void *)ctx.pc_iv);
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, "
			    "cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd) ? enccrd->crd_len :
		    maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	/* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */
	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
	    0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(q->q_src_map);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
		bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_map->dm_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
		    htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			/* XXX: ``what the heck's that'' 0xfff0? */
			if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
			    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
			    &q->q_dst_map) != 0) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst_m = q->q_src_m;
				q->q_dst_map = q->q_src_map;
			} else {
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_map->dm_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
					/* XXX FIXME: m_dup_pkthdr */
					if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				if (len == MHLEN)
					/* XXX was M_DUP_PKTHDR */
					M_COPY_PKTHDR(m, q->q_src_m);
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
							err = sc->sc_nqueue ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				/* XXX again, what the heck is that 0xfff0? */
				if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
				    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
				    &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_map->dm_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(
					    dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk,
					    d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)
		    ((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		memcpy((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx), &ctx,
		    sizeof(struct ubsec_pktctx));

	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize;
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch)
		ubsec_feed(sc);
	splx(s);
	return (0);

errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		s = splnet();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		splx(s);
	}
#if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */
	if (err == EINVAL)
		ubsecstats.hst_invalid++;
	else
		ubsecstats.hst_nomem++;
#endif
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

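/*
 * Completion handler for an MCR1 (packet) operation: sync and tear down
 * the DMA maps, hand the new destination chain back to the caller if we
 * had to realign, save the tail of the ciphertext as the session's next
 * IV, copy out the 12-byte truncated HMAC, and recycle the queue entry.
 */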
static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
	    dmap->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (void *)q->q_dst_m;
	}

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			}
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    (void *)dmap->d_dma->d_macbuf);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((void *)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		break;
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

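/*
 * Copy bytes from one mbuf chain to another, skipping the region
 * [hoffset, toffset): the header and trailer are copied, while the
 * bytes in between are left for the chip to fill in with its output.
 */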
static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	char *dptr, *sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}

/*
 * feed the key generator, must be called at splnet() or higher.
 */
static void
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
		    q->q_mcr.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
		    q->q_ctx.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		q = SIMPLEQ_FIRST(&sc->sc_queue2);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
}

/*
 * Callback for handling random numbers
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
	    q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGSHA1:
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
		u_int32_t *p;
		int i;

		bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
		    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		p = (u_int32_t *)rng->rng_buf.dma_vaddr;
#ifndef __NetBSD__
		for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++)
			add_true_randomness(letoh32(*p));
		rng->rng_used = 0;
#else
		/* XXX NetBSD rnd subsystem too weak */
		i = 0; (void)i;	/* shut off gcc warnings */
#endif
#ifdef __OpenBSD__
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
		    0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
		    0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
		    0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
		    0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				bzero(krp->krp_param[krp->krp_iparams].crp_p,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
				    + 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0,
		    rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0,
		    rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
		bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
		bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		printf("%s: unknown ctx op: %x\n", sc->sc_dv.dv_xname,
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
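/*
 * Periodic RNG harvest: queue an RNGSHA1 operation on MCR2 that fills
 * rng_buf with UBSEC_RNG_BUFSIZ 32-bit words; the completion path in
 * ubsec_callback2() feeds the words to the entropy pool (on OpenBSD)
 * and reschedules us every sc_rnghz ticks.
 */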
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;
	int s;

	s = splnet();
	if (rng->rng_used) {
		splx(s);
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) *
	    UBSEC_RNG_BUFSIZ)) & UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1);
	rng->rng_q.q_type = UBS_CTXOP_RNGSHA1;

	bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
	    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
	rng->rng_used = 1;
	ubsec_feed2(sc);
	ubsecstats.hst_rng++;
	splx(s);

	return;

out:
	/*
	 * Something weird happened, generate our own callback.
	 */
	sc->sc_nqueue2--;
	splx(s);
#ifdef __OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
}
#endif /* UBSEC_NO_RNG */

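/*
 * Allocate 'size' bytes of DMA-safe memory in a single segment, map it
 * into kernel virtual space, and load it so that both dma_vaddr and
 * dma_paddr are valid.  ubsec_dma_free() undoes the four setup steps
 * in reverse order.
 */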
static int
ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size,
    struct ubsec_dma_alloc *dma, int mapflags)
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	dma->dma_map = NULL;
	return (r);
}

static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/*
	 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)).
	 * anyone got hw docs?
	 */
	if (sc->sc_flags & UBS_FLAGS_KEY)
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	u_int32_t misc;

	/*
	 * Set the cache line size to 1, forcing the BCM58xx chip to do
	 * burst reads/writes only; cache line reads/writes are too slow.
	 */
	misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG);
	misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT))
	    | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT);
	pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc);
}

1912 /*
1913 * Clean up after a chip crash.
1914  * It is assumed that the caller is in splnet().
1915 */
1916 static void
1917 ubsec_cleanchip(struct ubsec_softc *sc)
1918 {
1919 struct ubsec_q *q;
1920
1921 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
1922 q = SIMPLEQ_FIRST(&sc->sc_qchip);
1923 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);
1924 ubsec_free_q(sc, q);
1925 }
1926 sc->sc_nqchip = 0;
1927 }
1928
1929 /*
1930  * Free a ubsec_q.
1931  * It is assumed that the caller is within splnet().
1932 */
1933 static int
1934 ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
1935 {
1936 struct ubsec_q *q2;
1937 struct cryptop *crp;
1938 int npkts;
1939 int i;
1940
1941 npkts = q->q_nstacked_mcrs;
1942
1943 for (i = 0; i < npkts; i++) {
1944 if (q->q_stacked_mcr[i]) {
1945 q2 = q->q_stacked_mcr[i];
1946
1947 if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
1948 m_freem(q2->q_dst_m);
1949
1950 crp = (struct cryptop *)q2->q_crp;
1951
1952 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
1953
1954 crp->crp_etype = EFAULT;
1955 crypto_done(crp);
1956 } else {
1957 break;
1958 }
1959 }
1960
1961 /*
1962 * Free header MCR
1963 */
1964 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
1965 m_freem(q->q_dst_m);
1966
1967 crp = (struct cryptop *)q->q_crp;
1968
1969 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
1970
1971 crp->crp_etype = EFAULT;
1972 crypto_done(crp);
1973 return (0);
1974 }
1975
1976 /*
1977 * Routine to reset the chip and clean up.
1978  * It is assumed that the caller is in splnet().
1979 */
1980 static void
1981 ubsec_totalreset(struct ubsec_softc *sc)
1982 {
1983 ubsec_reset_board(sc);
1984 ubsec_init_board(sc);
1985 ubsec_cleanchip(sc);
1986 }
1987
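/*
 * Check whether a DMA map can be handed to the chip as-is: every
 * segment must start on a 4-byte boundary, and every segment except
 * the last must also be a multiple of 4 bytes long (a misaligned
 * buffer would hang the chip, see the DIAGNOSTIC checks below).
 */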
1988 static int
1989 ubsec_dmamap_aligned(bus_dmamap_t map)
1990 {
1991 int i;
1992
1993 for (i = 0; i < map->dm_nsegs; i++) {
1994 if (map->dm_segs[i].ds_addr & 3)
1995 return (0);
1996 if ((i != (map->dm_nsegs - 1)) &&
1997 (map->dm_segs[i].ds_len & 3))
1998 return (0);
1999 }
2000 return (1);
2001 }
2002
2003 #ifdef __OpenBSD__
2004 struct ubsec_softc *
2005 ubsec_kfind(struct cryptkop *krp)
2006 {
2007 struct ubsec_softc *sc;
2008 int i;
2009
2010 for (i = 0; i < ubsec_cd.cd_ndevs; i++) {
2011 sc = ubsec_cd.cd_devs[i];
2012 if (sc == NULL)
2013 continue;
2014 if (sc->sc_cid == krp->krp_hid)
2015 return (sc);
2016 }
2017 return (NULL);
2018 }
2019 #endif
2020
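/*
 * Free the DMA resources attached to a finished (or failed) key
 * operation, according to its context type.
 */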
2021 static void
2022 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
2023 {
2024 switch (q->q_type) {
2025 case UBS_CTXOP_MODEXP: {
2026 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
2027
2028 ubsec_dma_free(sc, &me->me_q.q_mcr);
2029 ubsec_dma_free(sc, &me->me_q.q_ctx);
2030 ubsec_dma_free(sc, &me->me_M);
2031 ubsec_dma_free(sc, &me->me_E);
2032 ubsec_dma_free(sc, &me->me_C);
2033 ubsec_dma_free(sc, &me->me_epb);
2034 free(me, M_DEVBUF);
2035 break;
2036 }
2037 case UBS_CTXOP_RSAPRIV: {
2038 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
2039
2040 ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2041 ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
2042 ubsec_dma_free(sc, &rp->rpr_msgin);
2043 ubsec_dma_free(sc, &rp->rpr_msgout);
2044 free(rp, M_DEVBUF);
2045 break;
2046 }
2047 default:
2048 printf("%s: invalid kfree 0x%x\n", sc->sc_dv.dv_xname,
2049 q->q_type);
2050 break;
2051 }
2052 }
2053
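/*
 * Dispatch a key operation: first reap any finished requests from the
 * free list, then hand the new request to the modexp or RSA-CRT path.
 */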
2054 static int
2055 ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
2056 {
2057 struct ubsec_softc *sc;
2058 int r;
2059
2060 if (krp == NULL || krp->krp_callback == NULL)
2061 return (EINVAL);
2062 #ifdef __OpenBSD__
2063 if ((sc = ubsec_kfind(krp)) == NULL)
2064 return (EINVAL);
2065 #else
2066 sc = arg;
2067 KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/);
2068 #endif
2069
2070 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
2071 struct ubsec_q2 *q;
2072
2073 q = SIMPLEQ_FIRST(&sc->sc_q2free);
2074 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next);
2075 ubsec_kfree(sc, q);
2076 }
2077
2078 switch (krp->krp_op) {
2079 case CRK_MOD_EXP:
2080 if (sc->sc_flags & UBS_FLAGS_HWNORM)
2081 r = ubsec_kprocess_modexp_hw(sc, krp, hint);
2082 else
2083 r = ubsec_kprocess_modexp_sw(sc, krp, hint);
2084 break;
2085 case CRK_MOD_EXP_CRT:
2086 r = ubsec_kprocess_rsapriv(sc, krp, hint);
2087 break;
2088 default:
2089 printf("%s: kprocess: invalid op 0x%x\n",
2090 sc->sc_dv.dv_xname, krp->krp_op);
2091 krp->krp_status = EOPNOTSUPP;
2092 crypto_kdone(krp);
2093 r = 0;
2094 }
2095 return (r);
2096 }
2097
2098 /*
2099 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
2100 */
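/*
 * "Software normalization": the chip operates on fixed operand widths
 * (512/768/1024 bits, plus 1536/2048 with UBS_FLAGS_BIGKEY), so M, E
 * and N are shifted up by (normbits - nbits) bits with ubsec_kshift_r
 * before being handed to the chip; the completion path is then
 * expected to shift the result back down by the same amount.  For
 * example, a 1000-bit modulus is padded to normbits = 1024 and the
 * operands are shifted up by 24 bits.
 */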
2101 static int
2102 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp,
2103 int hint)
2104 {
2105 struct ubsec_q2_modexp *me;
2106 struct ubsec_mcr *mcr;
2107 struct ubsec_ctx_modexp *ctx;
2108 struct ubsec_pktbuf *epb;
2109 int s, err = 0;
2110 u_int nbits, normbits, mbits, shiftbits, ebits;
2111
2112 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2113 if (me == NULL) {
2114 err = ENOMEM;
2115 goto errout;
2116 }
2117 bzero(me, sizeof *me);
2118 me->me_krp = krp;
2119 me->me_q.q_type = UBS_CTXOP_MODEXP;
2120
2121 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2122 if (nbits <= 512)
2123 normbits = 512;
2124 else if (nbits <= 768)
2125 normbits = 768;
2126 else if (nbits <= 1024)
2127 normbits = 1024;
2128 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2129 normbits = 1536;
2130 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2131 normbits = 2048;
2132 else {
2133 err = E2BIG;
2134 goto errout;
2135 }
2136
2137 shiftbits = normbits - nbits;
2138
2139 me->me_modbits = nbits;
2140 me->me_shiftbits = shiftbits;
2141 me->me_normbits = normbits;
2142
2143 /* Sanity check: result bits must be >= true modulus bits. */
2144 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2145 err = ERANGE;
2146 goto errout;
2147 }
2148
2149 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2150 &me->me_q.q_mcr, 0)) {
2151 err = ENOMEM;
2152 goto errout;
2153 }
2154 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2155
2156 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2157 &me->me_q.q_ctx, 0)) {
2158 err = ENOMEM;
2159 goto errout;
2160 }
2161
2162 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2163 if (mbits > nbits) {
2164 err = E2BIG;
2165 goto errout;
2166 }
2167 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2168 err = ENOMEM;
2169 goto errout;
2170 }
2171 ubsec_kshift_r(shiftbits,
2172 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
2173 me->me_M.dma_vaddr, normbits);
2174
2175 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2176 err = ENOMEM;
2177 goto errout;
2178 }
2179 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2180
2181 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2182 if (ebits > nbits) {
2183 err = E2BIG;
2184 goto errout;
2185 }
2186 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2187 err = ENOMEM;
2188 goto errout;
2189 }
2190 ubsec_kshift_r(shiftbits,
2191 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
2192 me->me_E.dma_vaddr, normbits);
2193
2194 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2195 &me->me_epb, 0)) {
2196 err = ENOMEM;
2197 goto errout;
2198 }
2199 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2200 epb->pb_addr = htole32(me->me_E.dma_paddr);
2201 epb->pb_next = 0;
2202 epb->pb_len = htole32(normbits / 8);
2203
2204 #ifdef UBSEC_DEBUG
2205 if (ubsec_debug) {
2206 printf("Epb ");
2207 ubsec_dump_pb(epb);
2208 }
2209 #endif
2210
2211 mcr->mcr_pkts = htole16(1);
2212 mcr->mcr_flags = 0;
2213 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2214 mcr->mcr_reserved = 0;
2215 mcr->mcr_pktlen = 0;
2216
2217 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2218 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2219 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2220
2221 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2222 mcr->mcr_opktbuf.pb_next = 0;
2223 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2224
2225 #ifdef DIAGNOSTIC
2226 /* Misaligned output buffer will hang the chip. */
2227 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2228 panic("%s: modexp invalid addr 0x%x",
2229 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_addr));
2230 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2231 panic("%s: modexp invalid len 0x%x",
2232 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_len));
2233 #endif
2234
2235 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2236 bzero(ctx, sizeof(*ctx));
2237 ubsec_kshift_r(shiftbits,
2238 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
2239 ctx->me_N, normbits);
2240 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2241 ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2242 ctx->me_E_len = htole16(nbits);
2243 ctx->me_N_len = htole16(nbits);
2244
2245 #ifdef UBSEC_DEBUG
2246 if (ubsec_debug) {
2247 ubsec_dump_mcr(mcr);
2248 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2249 }
2250 #endif
2251
2252 /*
2253  * ubsec_feed2 will sync the MCR and context; we just need to sync
2254 * everything else.
2255 */
2256 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
2257 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2258 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
2259 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2260 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
2261 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2262 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
2263 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2264
2265 /* Enqueue and we're done... */
2266 s = splnet();
2267 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2268 ubsec_feed2(sc);
2269 ubsecstats.hst_modexp++;
2270 splx(s);
2271
2272 return (0);
2273
2274 errout:
2275 if (me != NULL) {
2276 if (me->me_q.q_mcr.dma_map != NULL)
2277 ubsec_dma_free(sc, &me->me_q.q_mcr);
2278 if (me->me_q.q_ctx.dma_map != NULL) {
2279 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2280 ubsec_dma_free(sc, &me->me_q.q_ctx);
2281 }
2282 if (me->me_M.dma_map != NULL) {
2283 bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2284 ubsec_dma_free(sc, &me->me_M);
2285 }
2286 if (me->me_E.dma_map != NULL) {
2287 bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2288 ubsec_dma_free(sc, &me->me_E);
2289 }
2290 if (me->me_C.dma_map != NULL) {
2291 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2292 ubsec_dma_free(sc, &me->me_C);
2293 }
2294 if (me->me_epb.dma_map != NULL)
2295 ubsec_dma_free(sc, &me->me_epb);
2296 free(me, M_DEVBUF);
2297 }
2298 krp->krp_status = err;
2299 crypto_kdone(krp);
2300 return (0);
2301 }
2302
2303 /*
2304 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
2305 */
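/*
 * With hardware normalization (UBS_FLAGS_HWNORM, which clears
 * BS_CTRL_SWNORM in ubsec_init_board) the operands are copied in
 * unshifted and the chip does the padding itself, so no
 * ubsec_kshift_r/ubsec_kshift_l round trip is needed.
 */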
2306 static int
2307 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp,
2308 int hint)
2309 {
2310 struct ubsec_q2_modexp *me;
2311 struct ubsec_mcr *mcr;
2312 struct ubsec_ctx_modexp *ctx;
2313 struct ubsec_pktbuf *epb;
2314 int s, err = 0;
2315 u_int nbits, normbits, mbits, shiftbits, ebits;
2316
2317 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2318 if (me == NULL) {
2319 err = ENOMEM;
2320 goto errout;
2321 }
2322 bzero(me, sizeof *me);
2323 me->me_krp = krp;
2324 me->me_q.q_type = UBS_CTXOP_MODEXP;
2325
2326 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2327 if (nbits <= 512)
2328 normbits = 512;
2329 else if (nbits <= 768)
2330 normbits = 768;
2331 else if (nbits <= 1024)
2332 normbits = 1024;
2333 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2334 normbits = 1536;
2335 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2336 normbits = 2048;
2337 else {
2338 err = E2BIG;
2339 goto errout;
2340 }
2341
2342 shiftbits = normbits - nbits;
2343
2344 /* XXX ??? */
2345 me->me_modbits = nbits;
2346 me->me_shiftbits = shiftbits;
2347 me->me_normbits = normbits;
2348
2349 /* Sanity check: result bits must be >= true modulus bits. */
2350 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2351 err = ERANGE;
2352 goto errout;
2353 }
2354
2355 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2356 &me->me_q.q_mcr, 0)) {
2357 err = ENOMEM;
2358 goto errout;
2359 }
2360 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2361
2362 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2363 &me->me_q.q_ctx, 0)) {
2364 err = ENOMEM;
2365 goto errout;
2366 }
2367
2368 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2369 if (mbits > nbits) {
2370 err = E2BIG;
2371 goto errout;
2372 }
2373 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2374 err = ENOMEM;
2375 goto errout;
2376 }
2377 bzero(me->me_M.dma_vaddr, normbits / 8);
2378 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
2379 me->me_M.dma_vaddr, (mbits + 7) / 8);
2380
2381 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2382 err = ENOMEM;
2383 goto errout;
2384 }
2385 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2386
2387 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2388 if (ebits > nbits) {
2389 err = E2BIG;
2390 goto errout;
2391 }
2392 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2393 err = ENOMEM;
2394 goto errout;
2395 }
2396 bzero(me->me_E.dma_vaddr, normbits / 8);
2397 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
2398 me->me_E.dma_vaddr, (ebits + 7) / 8);
2399
2400 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2401 &me->me_epb, 0)) {
2402 err = ENOMEM;
2403 goto errout;
2404 }
2405 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2406 epb->pb_addr = htole32(me->me_E.dma_paddr);
2407 epb->pb_next = 0;
2408 epb->pb_len = htole32((ebits + 7) / 8);
2409
2410 #ifdef UBSEC_DEBUG
2411 if (ubsec_debug) {
2412 printf("Epb ");
2413 ubsec_dump_pb(epb);
2414 }
2415 #endif
2416
2417 mcr->mcr_pkts = htole16(1);
2418 mcr->mcr_flags = 0;
2419 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2420 mcr->mcr_reserved = 0;
2421 mcr->mcr_pktlen = 0;
2422
2423 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2424 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2425 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2426
2427 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2428 mcr->mcr_opktbuf.pb_next = 0;
2429 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2430
2431 #ifdef DIAGNOSTIC
2432 /* Misaligned output buffer will hang the chip. */
2433 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2434 panic("%s: modexp invalid addr 0x%x",
2435 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_addr));
2436 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2437 panic("%s: modexp invalid len 0x%x",
2438 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_len));
2439 #endif
2440
2441 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2442 bzero(ctx, sizeof(*ctx));
2443 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N,
2444 (nbits + 7) / 8);
2445 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2446 ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2447 ctx->me_E_len = htole16(ebits);
2448 ctx->me_N_len = htole16(nbits);
2449
2450 #ifdef UBSEC_DEBUG
2451 if (ubsec_debug) {
2452 ubsec_dump_mcr(mcr);
2453 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2454 }
2455 #endif
2456
2457 /*
2458  * ubsec_feed2 will sync the MCR and context; we just need to sync
2459 * everything else.
2460 */
2461 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
2462 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2463 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
2464 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2465 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
2466 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2467 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
2468 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2469
2470 /* Enqueue and we're done... */
2471 s = splnet();
2472 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2473 ubsec_feed2(sc);
2474 splx(s);
2475
2476 return (0);
2477
2478 errout:
2479 if (me != NULL) {
2480 if (me->me_q.q_mcr.dma_map != NULL)
2481 ubsec_dma_free(sc, &me->me_q.q_mcr);
2482 if (me->me_q.q_ctx.dma_map != NULL) {
2483 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2484 ubsec_dma_free(sc, &me->me_q.q_ctx);
2485 }
2486 if (me->me_M.dma_map != NULL) {
2487 bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2488 ubsec_dma_free(sc, &me->me_M);
2489 }
2490 if (me->me_E.dma_map != NULL) {
2491 bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2492 ubsec_dma_free(sc, &me->me_E);
2493 }
2494 if (me->me_C.dma_map != NULL) {
2495 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2496 ubsec_dma_free(sc, &me->me_C);
2497 }
2498 if (me->me_epb.dma_map != NULL)
2499 ubsec_dma_free(sc, &me->me_epb);
2500 free(me, M_DEVBUF);
2501 }
2502 krp->krp_status = err;
2503 crypto_kdone(krp);
2504 return (0);
2505 }
2506
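/*
 * Start an RSA private-key operation using the CRT form of the key:
 * the command context carries p, q, dp, dq and pinv, each padded to
 * padlen bits, and the message buffers are 2 * padlen bits wide.
 */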
2507 static int
2508 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp,
2509 int hint)
2510 {
2511 struct ubsec_q2_rsapriv *rp = NULL;
2512 struct ubsec_mcr *mcr;
2513 struct ubsec_ctx_rsapriv *ctx;
2514 int s, err = 0;
2515 u_int padlen, msglen;
2516
2517 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
2518 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
2519 if (msglen > padlen)
2520 padlen = msglen;
2521
2522 if (padlen <= 256)
2523 padlen = 256;
2524 else if (padlen <= 384)
2525 padlen = 384;
2526 else if (padlen <= 512)
2527 padlen = 512;
2528 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
2529 padlen = 768;
2530 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
2531 padlen = 1024;
2532 else {
2533 err = E2BIG;
2534 goto errout;
2535 }
2536
2537 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
2538 err = E2BIG;
2539 goto errout;
2540 }
2541
2542 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
2543 err = E2BIG;
2544 goto errout;
2545 }
2546
2547 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
2548 err = E2BIG;
2549 goto errout;
2550 }
2551
2552 rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT);
2553 if (rp == NULL)
2554 return (ENOMEM);
2555 bzero(rp, sizeof *rp);
2556 rp->rpr_krp = krp;
2557 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;
2558
2559 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2560 &rp->rpr_q.q_mcr, 0)) {
2561 err = ENOMEM;
2562 goto errout;
2563 }
2564 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;
2565
2566 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
2567 &rp->rpr_q.q_ctx, 0)) {
2568 err = ENOMEM;
2569 goto errout;
2570 }
2571 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
2572 bzero(ctx, sizeof *ctx);
2573
2574 /* Copy in p */
2575 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
2576 &ctx->rpr_buf[0 * (padlen / 8)],
2577 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);
2578
2579 /* Copy in q */
2580 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
2581 &ctx->rpr_buf[1 * (padlen / 8)],
2582 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);
2583
2584 /* Copy in dp */
2585 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
2586 &ctx->rpr_buf[2 * (padlen / 8)],
2587 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);
2588
2589 /* Copy in dq */
2590 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
2591 &ctx->rpr_buf[3 * (padlen / 8)],
2592 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);
2593
2594 /* Copy in pinv */
2595 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
2596 &ctx->rpr_buf[4 * (padlen / 8)],
2597 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);
2598
2599 msglen = padlen * 2;
2600
2601 /* Copy in input message (aligned buffer/length). */
2602 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
2603 /* Is this likely? */
2604 err = E2BIG;
2605 goto errout;
2606 }
2607 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
2608 err = ENOMEM;
2609 goto errout;
2610 }
2611 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8);
2612 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
2613 rp->rpr_msgin.dma_vaddr,
2614 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);
2615
2616 /* Prepare space for output message (aligned buffer/length). */
2617 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
2618 /* Is this likely? */
2619 err = E2BIG;
2620 goto errout;
2621 }
2622 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
2623 err = ENOMEM;
2624 goto errout;
2625 }
2626 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8);
2627
2628 mcr->mcr_pkts = htole16(1);
2629 mcr->mcr_flags = 0;
2630 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
2631 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
2632 mcr->mcr_ipktbuf.pb_next = 0;
2633 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
2634 mcr->mcr_reserved = 0;
2635 mcr->mcr_pktlen = htole16(msglen);
2636 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
2637 mcr->mcr_opktbuf.pb_next = 0;
2638 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);
2639
2640 #ifdef DIAGNOSTIC
2641 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
2642 panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)",
2643 sc->sc_dv.dv_xname, (u_long) rp->rpr_msgin.dma_paddr,
2644 (u_long) rp->rpr_msgin.dma_size);
2645 }
2646 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
2647 panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)",
2648 sc->sc_dv.dv_xname, (u_long) rp->rpr_msgout.dma_paddr,
2649 (u_long) rp->rpr_msgout.dma_size);
2650 }
2651 #endif
2652
2653 ctx->rpr_len = htole16((sizeof(u_int16_t) * 4) + (5 * (padlen / 8)));
2654 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
2655 ctx->rpr_q_len = htole16(padlen);
2656 ctx->rpr_p_len = htole16(padlen);
2657
2658 /*
2659  * ubsec_feed2 will sync the MCR and context; we just need to sync
2660 * everything else.
2661 */
2662 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map,
2663 0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2664 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map,
2665 0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2666
2667 /* Enqueue and we're done... */
2668 s = splnet();
2669 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
2670 ubsec_feed2(sc);
2671 ubsecstats.hst_modexpcrt++;
2672 splx(s);
2673 return (0);
2674
2675 errout:
2676 if (rp != NULL) {
2677 if (rp->rpr_q.q_mcr.dma_map != NULL)
2678 ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2679 if (rp->rpr_msgin.dma_map != NULL) {
2680 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
2681 ubsec_dma_free(sc, &rp->rpr_msgin);
2682 }
2683 if (rp->rpr_msgout.dma_map != NULL) {
2684 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
2685 ubsec_dma_free(sc, &rp->rpr_msgout);
2686 }
2687 free(rp, M_DEVBUF);
2688 }
2689 krp->krp_status = err;
2690 crypto_kdone(krp);
2691 return (0);
2692 }
2693
2694 #ifdef UBSEC_DEBUG
2695 static void
2696 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
2697 {
2698 printf("addr 0x%x (len 0x%x) next 0x%x\n",
2699 letoh32(pb->pb_addr), letoh32(pb->pb_len), letoh32(pb->pb_next));
2700 }
2701
2702 static void
2703 ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c)
2704 {
2705 printf("CTX (0x%x):\n", letoh16(c->ctx_len));
2706 switch (letoh16(c->ctx_op)) {
2707 case UBS_CTXOP_RNGBYPASS:
2708 case UBS_CTXOP_RNGSHA1:
2709 break;
2710 case UBS_CTXOP_MODEXP:
2711 {
2712 struct ubsec_ctx_modexp *cx = (void *)c;
2713 int i, len;
2714
2715 printf(" Elen %u, Nlen %u\n",
2716 letoh16(cx->me_E_len), letoh16(cx->me_N_len));
2717 len = (letoh16(cx->me_N_len) + 7) / 8;
2718 for (i = 0; i < len; i++)
2719 printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
2720 printf("\n");
2721 break;
2722 }
2723 default:
2724 printf("unknown context: %x\n", letoh16(c->ctx_op));
2725 }
2726 printf("END CTX\n");
2727 }
2728
2729 static void
2730 ubsec_dump_mcr(struct ubsec_mcr *mcr)
2731 {
2732 volatile struct ubsec_mcr_add *ma;
2733 int i;
2734
2735 printf("MCR:\n");
2736 printf(" pkts: %u, flags 0x%x\n",
2737 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
2738 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
2739 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
2740 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
2741 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
2742 letoh16(ma->mcr_reserved));
2743 printf(" %d: ipkt ", i);
2744 ubsec_dump_pb(&ma->mcr_ipktbuf);
2745 printf(" %d: opkt ", i);
2746 ubsec_dump_pb(&ma->mcr_opktbuf);
2747 ma++;
2748 }
2749 printf("END MCR\n");
2750 }
2751 #endif /* UBSEC_DEBUG */
2752
2753 /*
2754 * Return the number of significant bits of a big number.
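 * The value is stored least-significant byte first, so the scan runs
 * from the last byte down.  E.g. the two-byte value { 0x00, 0x05 }
 * (i.e. 0x0500) has its highest set bit at position 10, so this
 * returns 11.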
2755 */
2756 static int
2757 ubsec_ksigbits(struct crparam *cr)
2758 {
2759 u_int plen = (cr->crp_nbits + 7) / 8;
2760 int i, sig = plen * 8;
2761 u_int8_t c, *p = cr->crp_p;
2762
2763 for (i = plen - 1; i >= 0; i--) {
2764 c = p[i];
2765 if (c != 0) {
2766 while ((c & 0x80) == 0) {
2767 sig--;
2768 c <<= 1;
2769 }
2770 break;
2771 }
2772 sig -= 8;
2773 }
2774 return (sig);
2775 }
2776
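/*
 * Shift a least-significant-byte-first operand up by shiftbits while
 * widening it from srcbits to dstbits, zero-filling the vacated
 * low-order bits: whole bytes are moved first, then the remaining
 * 0..7 bits.  A worked example: shifting the one-byte value { 0x05 }
 * up by 4 bits into a two-byte buffer gives { 0x50, 0x00 }, i.e.
 * 0x0050 == 0x5 << 4.
 */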
2777 static void
2778 ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits,
2779 u_int8_t *dst, u_int dstbits)
2780 {
2781 u_int slen, dlen;
2782 int i, si, di, n;
2783
2784 slen = (srcbits + 7) / 8;
2785 dlen = (dstbits + 7) / 8;
2786
2787 for (i = 0; i < slen; i++)
2788 dst[i] = src[i];
2789 for (i = 0; i < dlen - slen; i++)
2790 dst[slen + i] = 0;
2791
2792 n = shiftbits / 8;
2793 if (n != 0) {
2794 si = dlen - n - 1;
2795 di = dlen - 1;
2796 while (si >= 0)
2797 dst[di--] = dst[si--];
2798 while (di >= 0)
2799 dst[di--] = 0;
2800 }
2801
2802 n = shiftbits % 8;
2803 if (n != 0) {
2804 for (i = dlen - 1; i > 0; i--)
2805 dst[i] = (dst[i] << n) |
2806 (dst[i - 1] >> (8 - n));
2807 dst[0] = dst[0] << n;
2808 }
2809 }
2810
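/*
 * The inverse of ubsec_kshift_r: shift the operand down by shiftbits,
 * discarding the low-order bits.  Continuing the example above,
 * shifting { 0x50, 0x00 } down by 4 bits gives back { 0x05, 0x00 }.
 */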
2811 static void
2812 ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits,
2813 u_int8_t *dst, u_int dstbits)
2814 {
2815 int slen, dlen, i, n;
2816
2817 slen = (srcbits + 7) / 8;
2818 dlen = (dstbits + 7) / 8;
2819
2820 n = shiftbits / 8;
2821 for (i = 0; i < slen; i++)
2822 dst[i] = src[i + n];
2823 for (i = 0; i < dlen - slen; i++)
2824 dst[slen + i] = 0;
2825
2826 n = shiftbits % 8;
2827 if (n != 0) {
2828 for (i = 0; i < (dlen - 1); i++)
2829 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
2830 dst[dlen - 1] = dst[dlen - 1] >> n;
2831 }
2832 }
2833