/*	$NetBSD: hifn7751.c,v 1.16 2003/08/28 01:53:06 thorpej Exp $	*/
2 /* $FreeBSD: hifn7751.c,v 1.5.2.6 2003/07/02 17:04:50 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.139 2003/03/13 20:08:06 jason Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 *
12 * This driver is based on a previous driver by Invertex, for which they
13 * requested: Please send any comments, feedback, bug-fixes, or feature
14 * requests to software (at) invertex.com.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. The name of the author may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * Effort sponsored in part by the Defense Advanced Research Projects
40 * Agency (DARPA) and Air Force Research Laboratory, Air Force
41 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 *
43 */
44
45 /*
46 * Driver for the Hifn 7751 encryption processor.
47 */
48
49 #include <sys/cdefs.h>
50 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.16 2003/08/28 01:53:06 thorpej Exp $");
51
52 #include "rnd.h"
53 #include "opencrypto.h"
54
55 #if NRND == 0 || NOPENCRYPTO == 0
56 #error hifn7751 requires rnd and opencrypto pseudo-devices
57 #endif
58
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/errno.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/mbuf.h>
67 #include <sys/device.h>
68
69 #include <uvm/uvm_extern.h>
70
71
72 #ifdef __OpenBSD__
73 #include <crypto/crypto.h>
74 #include <dev/rndvar.h>
75 #else
76 #include <opencrypto/cryptodev.h>
77 #include <sys/rnd.h>
78 #endif
79
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcidevs.h>
83
84 #include <dev/pci/hifn7751reg.h>
85 #include <dev/pci/hifn7751var.h>
86
87 #undef HIFN_DEBUG
88
89 #ifdef __NetBSD__
90 #define HIFN_NO_RNG /* until statistically tested */
91 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
92 #endif
93
94 #ifdef HIFN_DEBUG
95 extern int hifn_debug; /* patchable */
96 int hifn_debug = 1;
97 #endif
98
99 #ifdef __OpenBSD__
100 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
101 #endif
102
103 /*
104 * Prototypes and count for the pci_device structure
105 */
106 #ifdef __OpenBSD__
107 int hifn_probe((struct device *, void *, void *);
108 #else
109 int hifn_probe(struct device *, struct cfdata *, void *);
110 #endif
111 void hifn_attach(struct device *, struct device *, void *);
112
113 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
114 hifn_probe, hifn_attach, NULL, NULL);
115
116 #ifdef __OpenBSD__
117 struct cfdriver hifn_cd = {
118 0, "hifn", DV_DULL
119 };
120 #endif
121
122 void hifn_reset_board(struct hifn_softc *, int);
123 void hifn_reset_puc(struct hifn_softc *);
124 void hifn_puc_wait(struct hifn_softc *);
125 const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
126 void hifn_set_retry(struct hifn_softc *);
127 void hifn_init_dma(struct hifn_softc *);
128 void hifn_init_pci_registers(struct hifn_softc *);
129 int hifn_sramsize(struct hifn_softc *);
130 int hifn_dramsize(struct hifn_softc *);
131 int hifn_ramtype(struct hifn_softc *);
132 void hifn_sessions(struct hifn_softc *);
133 int hifn_intr(void *);
134 u_int hifn_write_command(struct hifn_command *, u_int8_t *);
135 u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
136 int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
137 int hifn_freesession(void*, u_int64_t);
138 int hifn_process(void*, struct cryptop *, int);
139 void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
140 int hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 struct cryptop*, int);
142 int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 int hifn_dmamap_aligned(bus_dmamap_t);
145 int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
146 int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
147 int hifn_init_pubrng(struct hifn_softc *);
148 #ifndef HIFN_NO_RNG
149 static void hifn_rng(void *);
150 #endif
151 void hifn_tick(void *);
152 void hifn_abort(struct hifn_softc *);
153 void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
154 void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
155 u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
156 #ifdef HAVE_CRYPTO_LZS
157 int hifn_compression(struct hifn_softc *, struct cryptop *,
158 struct hifn_command *);
159 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
160 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
161 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
162 u_int8_t *);
163 #endif /* HAVE_CRYPTO_LZS */
164
165
166 #ifdef notyet
167 int hifn_compression(struct hifn_softc *, struct cryptop *,
168 struct hifn_command *);
169 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
170 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
171 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
172 u_int8_t *);
173 #endif
174
175 struct hifn_stats hifnstats;
176
/*
 * Table of supported boards: PCI vendor/product pair, per-chip feature
 * flags (HIFN_IS_7811, HIFN_HAS_RNG, ...) copied into sc_flags at
 * attach time, and the name printed in the attach banner.  The table
 * is terminated by a NULL-name sentinel entry.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hi/Fn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hi/Fn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hi/Fn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hi/Fn 7951",
	},

	/* Sentinel: end of table. */
	{ 0,			0,
	  0,
	  NULL
	}
};
213
214 static const struct hifn_product *
215 hifn_lookup(const struct pci_attach_args *pa)
216 {
217 const struct hifn_product *hp;
218
219 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
220 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
221 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
222 return (hp);
223 }
224 return (NULL);
225 }
226
227 int
228 hifn_probe(parent, match, aux)
229 struct device *parent;
230 #ifdef __OpenBSD__
231 void *match;
232 #else
233 struct cfdata *match;
234 #endif
235 void *aux;
236 {
237 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
238
239 if (hifn_lookup(pa) != NULL)
240 return (1);
241
242 return (0);
243 }
244
245 void
246 hifn_attach(parent, self, aux)
247 struct device *parent, *self;
248 void *aux;
249 {
250 struct hifn_softc *sc = (struct hifn_softc *)self;
251 struct pci_attach_args *pa = aux;
252 const struct hifn_product *hp;
253 pci_chipset_tag_t pc = pa->pa_pc;
254 pci_intr_handle_t ih;
255 const char *intrstr = NULL;
256 const char *hifncap;
257 char rbase;
258 bus_size_t iosize0, iosize1;
259 u_int32_t cmd;
260 u_int16_t ena;
261 bus_dma_segment_t seg;
262 bus_dmamap_t dmamap;
263 int rseg;
264 caddr_t kva;
265
266 hp = hifn_lookup(pa);
267 if (hp == NULL) {
268 printf("\n");
269 panic("hifn_attach: impossible");
270 }
271
272 aprint_naive(": Crypto processor\n");
273 aprint_normal(": %s, rev. %d\n", hp->hifn_name,
274 PCI_REVISION(pa->pa_class));
275
276 sc->sc_pci_pc = pa->pa_pc;
277 sc->sc_pci_tag = pa->pa_tag;
278
279 sc->sc_flags = hp->hifn_flags;
280
281 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
282 cmd |= PCI_COMMAND_MASTER_ENABLE;
283 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
284 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
285
286 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
287 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
288 aprint_error("%s: can't map mem space %d\n",
289 sc->sc_dv.dv_xname, 0);
290 return;
291 }
292
293 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
294 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
295 aprint_error("%s: can't find mem space %d\n",
296 sc->sc_dv.dv_xname, 1);
297 goto fail_io0;
298 }
299
300 hifn_set_retry(sc);
301
302 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
303 sc->sc_waw_lastgroup = -1;
304 sc->sc_waw_lastreg = 1;
305 }
306
307 sc->sc_dmat = pa->pa_dmat;
308 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
309 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
310 aprint_error("%s: can't alloc DMA buffer\n",
311 sc->sc_dv.dv_xname);
312 goto fail_io1;
313 }
314 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
315 BUS_DMA_NOWAIT)) {
316 aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
317 sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
318 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
319 goto fail_io1;
320 }
321 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
322 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
323 aprint_error("%s: can't create DMA map\n",
324 sc->sc_dv.dv_xname);
325 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
326 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
327 goto fail_io1;
328 }
329 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
330 NULL, BUS_DMA_NOWAIT)) {
331 aprint_error("%s: can't load DMA map\n",
332 sc->sc_dv.dv_xname);
333 bus_dmamap_destroy(sc->sc_dmat, dmamap);
334 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
335 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
336 goto fail_io1;
337 }
338 sc->sc_dmamap = dmamap;
339 sc->sc_dma = (struct hifn_dma *)kva;
340 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
341
342 hifn_reset_board(sc, 0);
343
344 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
345 aprint_error("%s: crypto enabling failed\n",
346 sc->sc_dv.dv_xname);
347 goto fail_mem;
348 }
349 hifn_reset_puc(sc);
350
351 hifn_init_dma(sc);
352 hifn_init_pci_registers(sc);
353
354 if (hifn_ramtype(sc))
355 goto fail_mem;
356
357 if (sc->sc_drammodel == 0)
358 hifn_sramsize(sc);
359 else
360 hifn_dramsize(sc);
361
362 /*
363 * Workaround for NetSec 7751 rev A: half ram size because two
364 * of the address lines were left floating
365 */
366 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
367 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
368 PCI_REVISION(pa->pa_class) == 0x61)
369 sc->sc_ramsize >>= 1;
370
371 if (pci_intr_map(pa, &ih)) {
372 aprint_error("%s: couldn't map interrupt\n",
373 sc->sc_dv.dv_xname);
374 goto fail_mem;
375 }
376 intrstr = pci_intr_string(pc, ih);
377 #ifdef __OpenBSD__
378 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
379 self->dv_xname);
380 #else
381 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
382 #endif
383 if (sc->sc_ih == NULL) {
384 aprint_error("%s: couldn't establish interrupt\n",
385 sc->sc_dv.dv_xname);
386 if (intrstr != NULL)
387 aprint_normal(" at %s", intrstr);
388 aprint_normal("\n");
389 goto fail_mem;
390 }
391
392 hifn_sessions(sc);
393
394 rseg = sc->sc_ramsize / 1024;
395 rbase = 'K';
396 if (sc->sc_ramsize >= (1024 * 1024)) {
397 rbase = 'M';
398 rseg /= 1024;
399 }
400 aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
401 sc->sc_dv.dv_xname, hifncap, rseg, rbase,
402 sc->sc_drammodel ? 'd' : 's', intrstr);
403
404 sc->sc_cid = crypto_get_driverid(0);
405 if (sc->sc_cid < 0) {
406 aprint_error("%s: couldn't get crypto driver id\n",
407 sc->sc_dv.dv_xname);
408 goto fail_intr;
409 }
410
411 WRITE_REG_0(sc, HIFN_0_PUCNFG,
412 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
413 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
414
415 switch (ena) {
416 case HIFN_PUSTAT_ENA_2:
417 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
418 hifn_newsession, hifn_freesession, hifn_process, sc);
419 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
420 hifn_newsession, hifn_freesession, hifn_process, sc);
421 /*FALLTHROUGH*/
422 case HIFN_PUSTAT_ENA_1:
423 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
424 hifn_newsession, hifn_freesession, hifn_process, sc);
425 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
426 hifn_newsession, hifn_freesession, hifn_process, sc);
427 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
428 hifn_newsession, hifn_freesession, hifn_process, sc);
429 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
430 hifn_newsession, hifn_freesession, hifn_process, sc);
431 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
432 hifn_newsession, hifn_freesession, hifn_process, sc);
433 break;
434 }
435
436 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
437 sc->sc_dmamap->dm_mapsize,
438 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
439
440 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
441 hifn_init_pubrng(sc);
442
443 #ifdef __OpenBSD__
444 timeout_set(&sc->sc_tickto, hifn_tick, sc);
445 timeout_add(&sc->sc_tickto, hz);
446 #else
447 callout_init(&sc->sc_tickto);
448 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
449 #endif
450 return;
451
452 fail_intr:
453 pci_intr_disestablish(pc, sc->sc_ih);
454 fail_mem:
455 bus_dmamap_unload(sc->sc_dmat, dmamap);
456 bus_dmamap_destroy(sc->sc_dmat, dmamap);
457 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
458 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
459
460 /* Turn off DMA polling */
461 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
462 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
463
464 fail_io1:
465 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
466 fail_io0:
467 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
468 }
469
/*
 * Initialize the public-key and RNG units, as flagged in sc_flags.
 * Returns 1 if the public key engine fails to come out of reset,
 * 0 otherwise.
 */
int
hifn_init_pubrng(sc)
	struct hifn_softc *sc;
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the self-clearing reset bit. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/*
			 * 7811: disable the RNG if it was already on,
			 * program the default config, then re-enable.
			 */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read is discarded; poll ~100 times per second. */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifndef HIFN_NO_RNG
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif	/* !__OpenBSD__ */
#endif	/* HIFN_NO_RNG */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
538
#ifndef HIFN_NO_RNG
/*
 * Periodic RNG harvester, rescheduled via sc_rngto at sc_rnghz ticks.
 * The entire body is compiled out on NetBSD (and HIFN_NO_RNG keeps the
 * function out of the NetBSD build anyway until the output has been
 * statistically tested); note that with the body ifdef'd away the
 * callout would never be rescheduled, so this only runs elsewhere.
 */
static void
hifn_rng(vsc)
	void *vsc;
{
#ifndef __NetBSD__
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to five word-pairs from the 7811 RNG FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: give up and never reschedule. */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* Discard the first batch after enabling. */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* Discard the first word after enabling. */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			add_true_randomness(num1);
	}

#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
#endif /*!__NetBSD__*/
}
#endif
590
591 void
592 hifn_puc_wait(sc)
593 struct hifn_softc *sc;
594 {
595 int i;
596
597 for (i = 5000; i > 0; i--) {
598 DELAY(1);
599 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
600 break;
601 }
602 if (!i)
603 printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
604 }
605
/*
 * Reset the processing unit.
 *
 * Writing PUCTRL with only DMAENA set kicks off the unit's reset
 * sequence; hifn_puc_wait() then polls for the self-clearing
 * HIFN_PUCTRL_RESET bit.
 */
void
hifn_reset_puc(sc)
	struct hifn_softc *sc;
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
617
/*
 * Zero the low 16 bits of the chip's HIFN_TRDY_TIMEOUT PCI config
 * register, preserving the upper half.  (Presumably this disables the
 * TRDY/retry timeout counters so the chip keeps retrying on the bus —
 * confirm against the 7751 datasheet.)
 */
void
hifn_set_retry(sc)
	struct hifn_softc *sc;
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
628
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
/*
 * Reset the board ("full" additionally drops MSTRESET).  The sequence
 * (disable polling, wait, reset DMA, clear the shared DMA area, bring
 * the DMA unit back up, wait for the PU) is order-sensitive.
 */
void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* Clear the host-visible descriptor rings and buffers. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
682
/*
 * One-bit-at-a-time LFSR step used to derive the crypto unlock
 * signature: advance "a" by "cnt" steps, feeding back the parity of
 * the bits selected by the 0x80080125 tap mask.
 */
u_int32_t
hifn_next_signature(u_int32_t a, unsigned int cnt)
{
	u_int32_t parity, tap;
	unsigned int step;

	for (step = 0; step < cnt; step++) {
		/* Parity over the tapped bits of the current state. */
		parity = 0;
		for (tap = a & 0x80080125; tap != 0; tap >>= 1)
			parity ^= tap & 1;

		/* Shift left, feeding the parity into bit 0. */
		a = parity ^ (a << 1);
	}

	return a;
}
706
/*
 * Per-board 13-byte unlock key material: each card_id byte is fed
 * through hifn_next_signature() by hifn_enable_crypto() to derive the
 * unlock sequence.  All currently known boards use the all-zero key.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
744
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock sequence is issued to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
/*
 * Unlock the chip's crypto engine if it is not already unlocked.
 * Returns a human-readable capability string for the attach banner,
 * or NULL on failure.  CAUTION: issuing the unlock sequence to an
 * already-unlocked chip disables it until the next hard reset, hence
 * the early "report" bail-out below.
 */
const char *
hifn_enable_crypto(sc, pciid)
	struct hifn_softc *sc;
	pcireg_t pciid;
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find this board's unlock key material. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save registers we clobber during the unlock sequence. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/*
	 * Unlock dance: put the DMA unit into unlock mode, read the
	 * secret seed, then write the 13-step derived signature sequence.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
848
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
void
hifn_init_pci_registers(sc)
	struct hifn_softc *sc;
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * write all 4 ring address registers: the bus addresses of the
	 * command/source/dest/result rings inside the shared DMA area.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * acknowledge (write-1-to-clear) every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
	        HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
	        HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Build the interrupt-enable mask for hifn_intr(). */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
	        HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
	    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
	    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
	    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
914
915 /*
916 * The maximum number of sessions supported by the card
917 * is dependent on the amount of context ram, which
918 * encryption algorithms are enabled, and how compression
919 * is configured. This should be configured before this
920 * routine is called.
921 */
922 void
923 hifn_sessions(sc)
924 struct hifn_softc *sc;
925 {
926 u_int32_t pucnfg;
927 int ctxsize;
928
929 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
930
931 if (pucnfg & HIFN_PUCNFG_COMPSING) {
932 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
933 ctxsize = 128;
934 else
935 ctxsize = 512;
936 sc->sc_maxses = 1 +
937 ((sc->sc_ramsize - 32768) / ctxsize);
938 }
939 else
940 sc->sc_maxses = sc->sc_ramsize / 16384;
941
942 if (sc->sc_maxses > 2048)
943 sc->sc_maxses = 2048;
944 }
945
946 /*
947 * Determine ram type (sram or dram). Board should be just out of a reset
948 * state when this is called.
949 */
950 int
951 hifn_ramtype(sc)
952 struct hifn_softc *sc;
953 {
954 u_int8_t data[8], dataexpect[8];
955 int i;
956
957 for (i = 0; i < sizeof(data); i++)
958 data[i] = dataexpect[i] = 0x55;
959 if (hifn_writeramaddr(sc, 0, data))
960 return (-1);
961 if (hifn_readramaddr(sc, 0, data))
962 return (-1);
963 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
964 sc->sc_drammodel = 1;
965 return (0);
966 }
967
968 for (i = 0; i < sizeof(data); i++)
969 data[i] = dataexpect[i] = 0xaa;
970 if (hifn_writeramaddr(sc, 0, data))
971 return (-1);
972 if (hifn_readramaddr(sc, 0, data))
973 return (-1);
974 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
975 sc->sc_drammodel = 1;
976 return (0);
977 }
978
979 return (0);
980 }
981
982 #define HIFN_SRAM_MAX (32 << 20)
983 #define HIFN_SRAM_STEP_SIZE 16384
984 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
985
/*
 * Probe the amount of attached SRAM.  Every 16KB step up to 32MB is
 * tagged (highest address first) with its own index, then verified in
 * ascending order; sc_ramsize grows by one step for each address that
 * reads back correctly.  Address-line aliasing makes a high write land
 * on a low step, which then fails verification and ends the scan.
 */
int
hifn_sramsize(sc)
	struct hifn_softc *sc;
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Filler bytes; the first sizeof(i) bytes get overwritten below. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Tag each step with its index, descending. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/*
		 * NOTE(review): raw bcopy of the index is host-endian
		 * dependent, but the same encoding is used for the
		 * compare below, so the probe is self-consistent.
		 */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify ascending; stop on the first mismatch or read error. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1016
1017 /*
1018 * XXX For dram boards, one should really try all of the
1019 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1020 * is already set up correctly.
1021 */
1022 int
1023 hifn_dramsize(sc)
1024 struct hifn_softc *sc;
1025 {
1026 u_int32_t cnfg;
1027
1028 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1029 HIFN_PUCNFG_DRAMMASK;
1030 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1031 return (0);
1032 }
1033
/*
 * Grab one free slot in each of the four descriptor rings (command,
 * source, destination, result), returning the indices through the out
 * parameters.  When a ring's producer index reaches the end, the
 * trailing JUMP descriptor is (re)armed to wrap the chip back to slot
 * 0 before the index restarts.  The "k" cleanup indices are advanced
 * in lock-step with the producer indices.
 */
void
hifn_alloc_slot(sc, cmdp, srcp, dstp, resp)
	struct hifn_softc *sc;
	int *cmdp, *srcp, *dstp, *resp;
{
	struct hifn_dma *dma = sc->sc_dma;

	/* Command ring. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* Source ring. */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* Destination ring. */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* Result ring. */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1081
1082 int
1083 hifn_writeramaddr(sc, addr, data)
1084 struct hifn_softc *sc;
1085 int addr;
1086 u_int8_t *data;
1087 {
1088 struct hifn_dma *dma = sc->sc_dma;
1089 struct hifn_base_command wc;
1090 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1091 int r, cmdi, resi, srci, dsti;
1092
1093 wc.masks = htole16(3 << 13);
1094 wc.session_num = htole16(addr >> 14);
1095 wc.total_source_count = htole16(8);
1096 wc.total_dest_count = htole16(addr & 0x3fff);
1097
1098 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1099
1100 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1101 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1102 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1103
1104 /* build write command */
1105 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1106 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1107 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1108
1109 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1110 + offsetof(struct hifn_dma, test_src));
1111 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1112 + offsetof(struct hifn_dma, test_dst));
1113
1114 dma->cmdr[cmdi].l = htole32(16 | masks);
1115 dma->srcr[srci].l = htole32(8 | masks);
1116 dma->dstr[dsti].l = htole32(4 | masks);
1117 dma->resr[resi].l = htole32(4 | masks);
1118
1119 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1120 0, sc->sc_dmamap->dm_mapsize,
1121 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1122
1123 for (r = 10000; r >= 0; r--) {
1124 DELAY(10);
1125 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1126 0, sc->sc_dmamap->dm_mapsize,
1127 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1128 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1129 break;
1130 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1131 0, sc->sc_dmamap->dm_mapsize,
1132 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1133 }
1134 if (r == 0) {
1135 printf("%s: writeramaddr -- "
1136 "result[%d](addr %d) still valid\n",
1137 sc->sc_dv.dv_xname, resi, addr);
1138 r = -1;
1139 return (-1);
1140 } else
1141 r = 0;
1142
1143 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1144 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1145 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1146
1147 return (r);
1148 }
1149
1150 int
1151 hifn_readramaddr(sc, addr, data)
1152 struct hifn_softc *sc;
1153 int addr;
1154 u_int8_t *data;
1155 {
1156 struct hifn_dma *dma = sc->sc_dma;
1157 struct hifn_base_command rc;
1158 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1159 int r, cmdi, srci, dsti, resi;
1160
1161 rc.masks = htole16(2 << 13);
1162 rc.session_num = htole16(addr >> 14);
1163 rc.total_source_count = htole16(addr & 0x3fff);
1164 rc.total_dest_count = htole16(8);
1165
1166 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1167
1168 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1169 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1170 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1171
1172 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1173 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1174
1175 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1176 offsetof(struct hifn_dma, test_src));
1177 dma->test_src = 0;
1178 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1179 offsetof(struct hifn_dma, test_dst));
1180 dma->test_dst = 0;
1181 dma->cmdr[cmdi].l = htole32(8 | masks);
1182 dma->srcr[srci].l = htole32(8 | masks);
1183 dma->dstr[dsti].l = htole32(8 | masks);
1184 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1185
1186 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1187 0, sc->sc_dmamap->dm_mapsize,
1188 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1189
1190 for (r = 10000; r >= 0; r--) {
1191 DELAY(10);
1192 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1193 0, sc->sc_dmamap->dm_mapsize,
1194 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1195 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1196 break;
1197 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1198 0, sc->sc_dmamap->dm_mapsize,
1199 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1200 }
1201 if (r == 0) {
1202 printf("%s: readramaddr -- "
1203 "result[%d](addr %d) still valid\n",
1204 sc->sc_dv.dv_xname, resi, addr);
1205 r = -1;
1206 } else {
1207 r = 0;
1208 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1209 }
1210
1211 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1212 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1213 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1214
1215 return (r);
1216 }
1217
1218 /*
1219 * Initialize the descriptor rings.
1220 */
void
hifn_init_dma(sc)
	struct hifn_softc *sc;
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	/*
	 * Command and result descriptors always point at the driver's
	 * fixed per-slot buffers, so their .p fields are set once here;
	 * src/dst descriptor .p fields are filled per-request.
	 */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * The extra descriptor at the end of each ring is a JUMP back
	 * to slot 0, making the rings circular from the device's view.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	/* Reset in-use counts (u), allocation (i) and cleanup (k) indices. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1255
1256 /*
1257 * Writes out the raw command buffer space. Returns the
1258 * command buffer size.
1259 */
1260 u_int
1261 hifn_write_command(cmd, buf)
1262 struct hifn_command *cmd;
1263 u_int8_t *buf;
1264 {
1265 u_int8_t *buf_pos;
1266 struct hifn_base_command *base_cmd;
1267 struct hifn_mac_command *mac_cmd;
1268 struct hifn_crypt_command *cry_cmd;
1269 struct hifn_comp_command *comp_cmd;
1270 int using_mac, using_crypt, using_comp, len;
1271 u_int32_t dlen, slen;
1272
1273 buf_pos = buf;
1274 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1275 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1276 using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;
1277
1278 base_cmd = (struct hifn_base_command *)buf_pos;
1279 base_cmd->masks = htole16(cmd->base_masks);
1280 slen = cmd->src_map->dm_mapsize;
1281 if (cmd->sloplen)
1282 dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
1283 sizeof(u_int32_t);
1284 else
1285 dlen = cmd->dst_map->dm_mapsize;
1286 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1287 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1288 dlen >>= 16;
1289 slen >>= 16;
1290 base_cmd->session_num = htole16(cmd->session_num |
1291 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1292 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1293 buf_pos += sizeof(struct hifn_base_command);
1294
1295 if (using_comp) {
1296 comp_cmd = (struct hifn_comp_command *)buf_pos;
1297 dlen = cmd->compcrd->crd_len;
1298 comp_cmd->source_count = htole16(dlen & 0xffff);
1299 dlen >>= 16;
1300 comp_cmd->masks = htole16(cmd->comp_masks |
1301 ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
1302 comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
1303 comp_cmd->reserved = 0;
1304 buf_pos += sizeof(struct hifn_comp_command);
1305 }
1306
1307 if (using_mac) {
1308 mac_cmd = (struct hifn_mac_command *)buf_pos;
1309 dlen = cmd->maccrd->crd_len;
1310 mac_cmd->source_count = htole16(dlen & 0xffff);
1311 dlen >>= 16;
1312 mac_cmd->masks = htole16(cmd->mac_masks |
1313 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1314 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1315 mac_cmd->reserved = 0;
1316 buf_pos += sizeof(struct hifn_mac_command);
1317 }
1318
1319 if (using_crypt) {
1320 cry_cmd = (struct hifn_crypt_command *)buf_pos;
1321 dlen = cmd->enccrd->crd_len;
1322 cry_cmd->source_count = htole16(dlen & 0xffff);
1323 dlen >>= 16;
1324 cry_cmd->masks = htole16(cmd->cry_masks |
1325 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1326 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1327 cry_cmd->reserved = 0;
1328 buf_pos += sizeof(struct hifn_crypt_command);
1329 }
1330
1331 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1332 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1333 buf_pos += HIFN_MAC_KEY_LENGTH;
1334 }
1335
1336 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1337 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1338 case HIFN_CRYPT_CMD_ALG_3DES:
1339 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1340 buf_pos += HIFN_3DES_KEY_LENGTH;
1341 break;
1342 case HIFN_CRYPT_CMD_ALG_DES:
1343 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1344 buf_pos += cmd->cklen;
1345 break;
1346 case HIFN_CRYPT_CMD_ALG_RC4:
1347 len = 256;
1348 do {
1349 int clen;
1350
1351 clen = MIN(cmd->cklen, len);
1352 bcopy(cmd->ck, buf_pos, clen);
1353 len -= clen;
1354 buf_pos += clen;
1355 } while (len > 0);
1356 bzero(buf_pos, 4);
1357 buf_pos += 4;
1358 break;
1359 }
1360 }
1361
1362 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1363 bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH);
1364 buf_pos += HIFN_IV_LENGTH;
1365 }
1366
1367 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
1368 HIFN_BASE_CMD_COMP)) == 0) {
1369 bzero(buf_pos, 8);
1370 buf_pos += 8;
1371 }
1372
1373 return (buf_pos - buf);
1374 }
1375
1376 int
1377 hifn_dmamap_aligned(map)
1378 bus_dmamap_t map;
1379 {
1380 int i;
1381
1382 for (i = 0; i < map->dm_nsegs; i++) {
1383 if (map->dm_segs[i].ds_addr & 3)
1384 return (0);
1385 if ((i != (map->dm_nsegs - 1)) &&
1386 (map->dm_segs[i].ds_len & 3))
1387 return (0);
1388 }
1389 return (1);
1390 }
1391
/*
 * Fill the destination descriptor ring from cmd->dst_map.  All
 * segments except the final one are queued directly; the final
 * descriptor either covers the last segment outright (no slop) or is
 * redirected at the per-request "slop" word in the shared DMA area so
 * a non-word-multiple tail lands there.  Updates dma->dsti/dstu and
 * returns the new ring index.
 */
int
hifn_dmamap_load_dst(sc, cmd)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* Queue all but the final segment. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* Re-arm the jump descriptor when wrapping the ring. */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Last segment is word-aligned: point at it directly. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/*
		 * Tail is not a word multiple: the LAST descriptor gets
		 * one 32-bit word in the shared slop array instead, and
		 * the aligned part of the final segment (if any) gets
		 * its own descriptor first.
		 */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Write the final (LAST-flagged) descriptor. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1465
/*
 * Fill the source descriptor ring from cmd->src_map, one descriptor
 * per DMA segment, flagging the final one HIFN_D_LAST.  Updates
 * dma->srci/srcu and returns the new ring index.
 */
int
hifn_dmamap_load_src(sc, cmd)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		/* Only the final segment carries the LAST flag. */
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Re-arm the jump descriptor when wrapping the ring. */
		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1499
/*
 * Dispatch one crypto request to the device: map the source buffer,
 * arrange a destination buffer (reusing the source when it is already
 * word-aligned, otherwise building a fresh mbuf chain), write the
 * command descriptor, then queue source, result and destination
 * descriptors, enabling each DMA engine on first use.  Returns 0 on
 * success or an errno; on error all maps/mbufs acquired so far are
 * released via the goto cleanup chain at the bottom.
 */
int
hifn_crypto(
    struct hifn_softc *sc,
    struct hifn_command *cmd,
    struct cryptop *crp,
    int hint)

{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source: mbuf chain or uio; anything else is invalid. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/*
		 * Source is word-aligned: operate in place, sharing the
		 * map.  sloplen records the sub-word tail, which the
		 * dst ring will redirect into the slop area.
		 */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/*
		 * Unaligned source: iovecs can't be realigned, but for
		 * mbufs we build a fresh chain of the same total length
		 * to use as the destination.
		 */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			/* Upgrade to a cluster for larger payloads. */
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Chain additional mbufs until totlen is covered. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination buffer: create and load its own map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring bookkeeping is protected by splnet from here on. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Command ring: wrap via jump descriptor if needed. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember which command owns this result slot for hifn_intr(). */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* The slop word for this request shares the result slot index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Re-arm the idle countdown consumed by hifn_tick(). */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1767
/*
 * Periodic (once per second) housekeeping: once sc_active (re-armed to
 * 5 by hifn_crypto) has counted down to zero, disable any DMA engine
 * whose ring is empty and clear its LED, then reschedule ourselves.
 */
void
hifn_tick(vsc)
	void *vsc;
{
	struct hifn_softc *sc = vsc;
	int s;

	s = splnet();
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		/* Accumulate disable bits for each idle engine. */
		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED0);
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED1);
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED2);
		}
		/* One register write covers all engines being disabled. */
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	}
	else
		sc->sc_active--;
	splx(s);
	/* Reschedule ourselves one second out. */
#ifdef __OpenBSD__
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
}
1811
/*
 * Interrupt handler: acknowledge the DMA status bits, handle abort /
 * overrun conditions, then drain completed entries from the result,
 * source and command rings.  Returns 0 if the interrupt was not ours,
 * 1 otherwise.
 */
int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Ack only the bits we are interested in. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any engine abort means a full reset via hifn_abort(). */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/*
	 * Result ring: walk from the cleanup index (resk) until we hit
	 * a descriptor the device still owns (VALID set).  Index
	 * HIFN_D_RES_RSIZE is the jump slot -- it carries no command
	 * and does not count against resu.
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			/* MAC result starts 12 bytes into the result buf. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Source ring: retire consumed descriptors the same way. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring: retire consumed command descriptors. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
1947
1948 /*
1949 * Allocate a new 'session' and return an encoded session id. 'sidp'
1950 * contains our registration id, and should contain an encoded session
1951 * id on successful allocation.
1952 */
int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/*
	 * Walk the requested algorithm chain; at most one MAC, one
	 * cipher and one compressor per session.  CBC ciphers get a
	 * fresh random IV stashed in the session now.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
#ifdef	__NetBSD__
			rnd_extract_data(sc->sc_sessions[i].hs_iv,
			    HIFN_IV_LENGTH, RND_EXTRACT_ANY);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LSZ
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* Encode device unit + session index into the returned sid. */
	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2022
2023 /*
2024 * Deallocate a session.
2025 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2026 * XXX to blow away any keys already stored there.
2027 */
2028 int
2029 hifn_freesession(void *arg, u_int64_t tid)
2030 {
2031 struct hifn_softc *sc = arg;
2032 int session;
2033 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2034
2035 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2036 if (sc == NULL)
2037 return (EINVAL);
2038
2039 session = HIFN_SESSION(sid);
2040 if (session >= sc->sc_maxses)
2041 return (EINVAL);
2042
2043 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2044 return (0);
2045 }
2046
2047 int
2048 hifn_process(void *arg, struct cryptop *crp, int hint)
2049 {
2050 struct hifn_softc *sc = arg;
2051 struct hifn_command *cmd = NULL;
2052 int session, err;
2053 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2054
2055 if (crp == NULL || crp->crp_callback == NULL) {
2056 hifnstats.hst_invalid++;
2057 return (EINVAL);
2058 }
2059 session = HIFN_SESSION(crp->crp_sid);
2060
2061 if (sc == NULL || session >= sc->sc_maxses) {
2062 err = EINVAL;
2063 goto errout;
2064 }
2065
2066 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2067 M_DEVBUF, M_NOWAIT|M_ZERO);
2068 if (cmd == NULL) {
2069 hifnstats.hst_nomem++;
2070 err = ENOMEM;
2071 goto errout;
2072 }
2073
2074 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2075 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2076 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2077 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2078 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2079 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2080 } else {
2081 err = EINVAL;
2082 goto errout; /* XXX we don't handle contiguous buffers! */
2083 }
2084
2085 crd1 = crp->crp_desc;
2086 if (crd1 == NULL) {
2087 err = EINVAL;
2088 goto errout;
2089 }
2090 crd2 = crd1->crd_next;
2091
2092 if (crd2 == NULL) {
2093 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2094 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2095 crd1->crd_alg == CRYPTO_SHA1 ||
2096 crd1->crd_alg == CRYPTO_MD5) {
2097 maccrd = crd1;
2098 enccrd = NULL;
2099 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2100 crd1->crd_alg == CRYPTO_3DES_CBC ||
2101 crd1->crd_alg == CRYPTO_ARC4) {
2102 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2103 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2104 maccrd = NULL;
2105 enccrd = crd1;
2106 #ifdef HAVE_CRYPTO_LSZ
2107 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2108 return (hifn_compression(sc, crp, cmd));
2109 #endif
2110 } else {
2111 err = EINVAL;
2112 goto errout;
2113 }
2114 } else {
2115 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2116 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2117 crd1->crd_alg == CRYPTO_MD5 ||
2118 crd1->crd_alg == CRYPTO_SHA1) &&
2119 (crd2->crd_alg == CRYPTO_DES_CBC ||
2120 crd2->crd_alg == CRYPTO_3DES_CBC ||
2121 crd2->crd_alg == CRYPTO_ARC4) &&
2122 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2123 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2124 maccrd = crd1;
2125 enccrd = crd2;
2126 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2127 crd1->crd_alg == CRYPTO_ARC4 ||
2128 crd1->crd_alg == CRYPTO_3DES_CBC) &&
2129 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2130 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2131 crd2->crd_alg == CRYPTO_MD5 ||
2132 crd2->crd_alg == CRYPTO_SHA1) &&
2133 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2134 enccrd = crd1;
2135 maccrd = crd2;
2136 } else {
2137 /*
2138 * We cannot order the 7751 as requested
2139 */
2140 err = EINVAL;
2141 goto errout;
2142 }
2143 }
2144
2145 if (enccrd) {
2146 cmd->enccrd = enccrd;
2147 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2148 switch (enccrd->crd_alg) {
2149 case CRYPTO_ARC4:
2150 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2151 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2152 != sc->sc_sessions[session].hs_prev_op)
2153 sc->sc_sessions[session].hs_state =
2154 HS_STATE_USED;
2155 break;
2156 case CRYPTO_DES_CBC:
2157 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2158 HIFN_CRYPT_CMD_MODE_CBC |
2159 HIFN_CRYPT_CMD_NEW_IV;
2160 break;
2161 case CRYPTO_3DES_CBC:
2162 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2163 HIFN_CRYPT_CMD_MODE_CBC |
2164 HIFN_CRYPT_CMD_NEW_IV;
2165 break;
2166 default:
2167 err = EINVAL;
2168 goto errout;
2169 }
2170 if (enccrd->crd_alg != CRYPTO_ARC4) {
2171 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2172 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2173 bcopy(enccrd->crd_iv, cmd->iv,
2174 HIFN_IV_LENGTH);
2175 else
2176 bcopy(sc->sc_sessions[session].hs_iv,
2177 cmd->iv, HIFN_IV_LENGTH);
2178
2179 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2180 == 0) {
2181 if (crp->crp_flags & CRYPTO_F_IMBUF)
2182 m_copyback(cmd->srcu.src_m,
2183 enccrd->crd_inject,
2184 HIFN_IV_LENGTH, cmd->iv);
2185 else if (crp->crp_flags & CRYPTO_F_IOV)
2186 cuio_copyback(cmd->srcu.src_io,
2187 enccrd->crd_inject,
2188 HIFN_IV_LENGTH, cmd->iv);
2189 }
2190 } else {
2191 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2192 bcopy(enccrd->crd_iv, cmd->iv,
2193 HIFN_IV_LENGTH);
2194 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2195 m_copydata(cmd->srcu.src_m,
2196 enccrd->crd_inject,
2197 HIFN_IV_LENGTH, cmd->iv);
2198 else if (crp->crp_flags & CRYPTO_F_IOV)
2199 cuio_copydata(cmd->srcu.src_io,
2200 enccrd->crd_inject,
2201 HIFN_IV_LENGTH, cmd->iv);
2202 }
2203 }
2204
2205 cmd->ck = enccrd->crd_key;
2206 cmd->cklen = enccrd->crd_klen >> 3;
2207
2208 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2209 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2210 }
2211
2212 if (maccrd) {
2213 cmd->maccrd = maccrd;
2214 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2215
2216 switch (maccrd->crd_alg) {
2217 case CRYPTO_MD5:
2218 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2219 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2220 HIFN_MAC_CMD_POS_IPSEC;
2221 break;
2222 case CRYPTO_MD5_HMAC:
2223 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2224 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2225 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2226 break;
2227 case CRYPTO_SHA1:
2228 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2229 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2230 HIFN_MAC_CMD_POS_IPSEC;
2231 break;
2232 case CRYPTO_SHA1_HMAC:
2233 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2234 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2235 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2236 break;
2237 }
2238
2239 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2240 maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2241 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2242 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2243 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2244 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2245 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2246 }
2247 }
2248
2249 cmd->crp = crp;
2250 cmd->session_num = session;
2251 cmd->softc = sc;
2252
2253 err = hifn_crypto(sc, cmd, crp, hint);
2254 if (err == 0) {
2255 if (enccrd)
2256 sc->sc_sessions[session].hs_prev_op =
2257 enccrd->crd_flags & CRD_F_ENCRYPT;
2258 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2259 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2260 return 0;
2261 } else if (err == ERESTART) {
2262 /*
2263 * There weren't enough resources to dispatch the request
2264 * to the part. Notify the caller so they'll requeue this
2265 * request and resubmit it again soon.
2266 */
2267 #ifdef HIFN_DEBUG
2268 if (hifn_debug)
2269 printf(sc->sc_dv.dv_xname, "requeue request\n");
2270 #endif
2271 free(cmd, M_DEVBUF);
2272 sc->sc_needwakeup |= CRYPTO_SYMQ;
2273 return (err);
2274 }
2275
2276 errout:
2277 if (cmd != NULL)
2278 free(cmd, M_DEVBUF);
2279 if (err == EINVAL)
2280 hifnstats.hst_invalid++;
2281 else
2282 hifnstats.hst_nomem++;
2283 crp->crp_etype = err;
2284 crypto_done(crp);
2285 return (0);
2286 }
2287
/*
 * Abort every request still outstanding on the result ring after a
 * DMA failure: salvage the ones the chip already completed, fail the
 * rest with ENOMEM, then reset and reinitialize the board.
 */
void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the "resu" outstanding slots starting at ring index "resk". */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				/* Skip result header to the MAC bytes.
				 * NOTE(review): 12 presumably covers the
				 * fixed result headers — confirm against
				 * the hifn result structs. */
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/*
			 * Descriptor still VALID: the chip never finished
			 * this one.  Sync the maps back to the CPU before
			 * touching the buffers.
			 */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * Output went to a separate mbuf chain: free the
			 * source and hand the destination back to the crp.
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* etype is always ENOMEM above, so this completes. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2368
/*
 * Completion handler for a symmetric crypto request: sync the DMA
 * maps, fix up the destination mbuf chain, recover the slop bytes,
 * advance the destination ring, save the next IV for CBC, copy the
 * MAC result back to the caller, and finally release everything and
 * complete the crp.
 */
void
hifn_callback(sc, cmd, resbuf)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
	u_int8_t *resbuf;
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u;

	/* Hand the buffers back from the device to the CPU. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * Separate destination chain: trim it to the
			 * source length and substitute it for crp_buf,
			 * then release the source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/*
	 * The tail bytes that didn't fill a whole descriptor went to
	 * the per-ring slop area; copy them back into place.
	 */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reap completed destination descriptors (stop at first VALID). */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* The dst ring has an extra jump slot, hence RSIZE + 1. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, remember the last ciphertext block as the
	 * IV for the next request on this session.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	/* Copy the MAC result out of the device result buffer. */
	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest length: full hash, or 12-byte HMAC trunc. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	/* Release DMA resources and complete the request. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2502
2503 #ifdef HAVE_CRYPTO_LSZ
2504
2505 int
2506 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2507 struct hifn_command *cmd)
2508 {
2509 struct cryptodesc *crd = crp->crp_desc;
2510 int s, err = 0;
2511
2512 cmd->compcrd = crd;
2513 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2514
2515 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2516 /*
2517 * XXX can only handle mbufs right now since we can
2518 * XXX dynamically resize them.
2519 */
2520 err = EINVAL;
2521 return (ENOMEM);
2522 }
2523
2524 if ((crd->crd_flags & CRD_F_COMP) == 0)
2525 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2526 if (crd->crd_alg == CRYPTO_LZS_COMP)
2527 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2528 HIFN_COMP_CMD_CLEARHIST;
2529
2530 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2531 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2532 err = ENOMEM;
2533 goto fail;
2534 }
2535
2536 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2537 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2538 err = ENOMEM;
2539 goto fail;
2540 }
2541
2542 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2543 int len;
2544
2545 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2546 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2547 err = ENOMEM;
2548 goto fail;
2549 }
2550
2551 len = cmd->src_map->dm_mapsize / MCLBYTES;
2552 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2553 len++;
2554 len *= MCLBYTES;
2555
2556 if ((crd->crd_flags & CRD_F_COMP) == 0)
2557 len *= 4;
2558
2559 if (len > HIFN_MAX_DMALEN)
2560 len = HIFN_MAX_DMALEN;
2561
2562 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2563 if (cmd->dstu.dst_m == NULL) {
2564 err = ENOMEM;
2565 goto fail;
2566 }
2567
2568 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2569 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2570 err = ENOMEM;
2571 goto fail;
2572 }
2573 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2574 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2575 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2576 err = ENOMEM;
2577 goto fail;
2578 }
2579 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2580 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2581 err = ENOMEM;
2582 goto fail;
2583 }
2584 }
2585
2586 if (cmd->src_map == cmd->dst_map)
2587 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2588 0, cmd->src_map->dm_mapsize,
2589 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2590 else {
2591 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2592 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2593 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2594 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2595 }
2596
2597 cmd->crp = crp;
2598 /*
2599 * Always use session 0. The modes of compression we use are
2600 * stateless and there is always at least one compression
2601 * context, zero.
2602 */
2603 cmd->session_num = 0;
2604 cmd->softc = sc;
2605
2606 s = splnet();
2607 err = hifn_compress_enter(sc, cmd);
2608 splx(s);
2609
2610 if (err != 0)
2611 goto fail;
2612 return (0);
2613
2614 fail:
2615 if (cmd->dst_map != NULL) {
2616 if (cmd->dst_map->dm_nsegs > 0)
2617 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2618 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2619 }
2620 if (cmd->src_map != NULL) {
2621 if (cmd->src_map->dm_nsegs > 0)
2622 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2623 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2624 }
2625 free(cmd, M_DEVBUF);
2626 if (err == EINVAL)
2627 hifnstats.hst_invalid++;
2628 else
2629 hifnstats.hst_nomem++;
2630 crp->crp_etype = err;
2631 crypto_done(crp);
2632 return (0);
2633 }
2634
2635 /*
2636 * must be called at splnet()
2637 */
2638 int
2639 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2640 {
2641 struct hifn_dma *dma = sc->sc_dma;
2642 int cmdi, resi;
2643 u_int32_t cmdlen;
2644
2645 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2646 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2647 return (ENOMEM);
2648
2649 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2650 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2651 return (ENOMEM);
2652
2653 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2654 dma->cmdi = 0;
2655 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2656 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2657 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2658 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2659 }
2660 cmdi = dma->cmdi++;
2661 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2662 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2663
2664 /* .p for command/result already set */
2665 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2666 HIFN_D_MASKDONEIRQ);
2667 HIFN_CMDR_SYNC(sc, cmdi,
2668 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2669 dma->cmdu++;
2670 if (sc->sc_c_busy == 0) {
2671 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2672 sc->sc_c_busy = 1;
2673 SET_LED(sc, HIFN_MIPSRST_LED0);
2674 }
2675
2676 /*
2677 * We don't worry about missing an interrupt (which a "command wait"
2678 * interrupt salvages us from), unless there is more than one command
2679 * in the queue.
2680 */
2681 if (dma->cmdu > 1) {
2682 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2683 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2684 }
2685
2686 hifnstats.hst_ipackets++;
2687 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2688
2689 hifn_dmamap_load_src(sc, cmd);
2690 if (sc->sc_s_busy == 0) {
2691 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2692 sc->sc_s_busy = 1;
2693 SET_LED(sc, HIFN_MIPSRST_LED1);
2694 }
2695
2696 /*
2697 * Unlike other descriptors, we don't mask done interrupt from
2698 * result descriptor.
2699 */
2700 if (dma->resi == HIFN_D_RES_RSIZE) {
2701 dma->resi = 0;
2702 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2703 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2704 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2705 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2706 }
2707 resi = dma->resi++;
2708 dma->hifn_commands[resi] = cmd;
2709 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2710 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2711 HIFN_D_VALID | HIFN_D_LAST);
2712 HIFN_RESR_SYNC(sc, resi,
2713 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2714 dma->resu++;
2715 if (sc->sc_r_busy == 0) {
2716 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2717 sc->sc_r_busy = 1;
2718 SET_LED(sc, HIFN_MIPSRST_LED2);
2719 }
2720
2721 if (cmd->sloplen)
2722 cmd->slopidx = resi;
2723
2724 hifn_dmamap_load_dst(sc, cmd);
2725
2726 if (sc->sc_d_busy == 0) {
2727 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2728 sc->sc_d_busy = 1;
2729 }
2730 sc->sc_active = 5;
2731 cmd->cmd_callback = hifn_callback_comp;
2732 return (0);
2733 }
2734
2735 void
2736 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2737 u_int8_t *resbuf)
2738 {
2739 struct hifn_base_result baseres;
2740 struct cryptop *crp = cmd->crp;
2741 struct hifn_dma *dma = sc->sc_dma;
2742 struct mbuf *m;
2743 int err = 0, i, u;
2744 u_int32_t olen;
2745 bus_size_t dstsize;
2746
2747 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2748 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2749 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2750 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2751
2752 dstsize = cmd->dst_map->dm_mapsize;
2753 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2754
2755 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2756
2757 i = dma->dstk; u = dma->dstu;
2758 while (u != 0) {
2759 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2760 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2761 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2762 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2763 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2764 offsetof(struct hifn_dma, dstr[i]),
2765 sizeof(struct hifn_desc),
2766 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2767 break;
2768 }
2769 if (++i == (HIFN_D_DST_RSIZE + 1))
2770 i = 0;
2771 else
2772 u--;
2773 }
2774 dma->dstk = i; dma->dstu = u;
2775
2776 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2777 bus_size_t xlen;
2778
2779 xlen = dstsize;
2780
2781 m_freem(cmd->dstu.dst_m);
2782
2783 if (xlen == HIFN_MAX_DMALEN) {
2784 /* We've done all we can. */
2785 err = E2BIG;
2786 goto out;
2787 }
2788
2789 xlen += MCLBYTES;
2790
2791 if (xlen > HIFN_MAX_DMALEN)
2792 xlen = HIFN_MAX_DMALEN;
2793
2794 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2795 cmd->srcu.src_m);
2796 if (cmd->dstu.dst_m == NULL) {
2797 err = ENOMEM;
2798 goto out;
2799 }
2800 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2801 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2802 err = ENOMEM;
2803 goto out;
2804 }
2805
2806 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2807 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2808 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2809 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2810
2811 /* already at splnet... */
2812 err = hifn_compress_enter(sc, cmd);
2813 if (err != 0)
2814 goto out;
2815 return;
2816 }
2817
2818 olen = dstsize - (letoh16(baseres.dst_cnt) |
2819 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2820 HIFN_BASE_RES_DSTLEN_S) << 16));
2821
2822 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2823
2824 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2825 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2826 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2827
2828 m = cmd->dstu.dst_m;
2829 if (m->m_flags & M_PKTHDR)
2830 m->m_pkthdr.len = olen;
2831 crp->crp_buf = (caddr_t)m;
2832 for (; m != NULL; m = m->m_next) {
2833 if (olen >= m->m_len)
2834 olen -= m->m_len;
2835 else {
2836 m->m_len = olen;
2837 olen = 0;
2838 }
2839 }
2840
2841 m_freem(cmd->srcu.src_m);
2842 free(cmd, M_DEVBUF);
2843 crp->crp_etype = 0;
2844 crypto_done(crp);
2845 return;
2846
2847 out:
2848 if (cmd->dst_map != NULL) {
2849 if (cmd->src_map->dm_nsegs != 0)
2850 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2851 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2852 }
2853 if (cmd->src_map != NULL) {
2854 if (cmd->src_map->dm_nsegs != 0)
2855 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2856 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2857 }
2858 if (cmd->dstu.dst_m != NULL)
2859 m_freem(cmd->dstu.dst_m);
2860 free(cmd, M_DEVBUF);
2861 crp->crp_etype = err;
2862 crypto_done(crp);
2863 }
2864
2865 struct mbuf *
2866 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2867 {
2868 int len;
2869 struct mbuf *m, *m0, *mlast;
2870
2871 if (mtemplate->m_flags & M_PKTHDR) {
2872 len = MHLEN;
2873 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2874 } else {
2875 len = MLEN;
2876 MGET(m0, M_DONTWAIT, MT_DATA);
2877 }
2878 if (m0 == NULL)
2879 return (NULL);
2880 if (len == MHLEN)
2881 M_DUP_PKTHDR(m0, mtemplate);
2882 MCLGET(m0, M_DONTWAIT);
2883 if (!(m0->m_flags & M_EXT))
2884 m_freem(m0);
2885 len = MCLBYTES;
2886
2887 totlen -= len;
2888 m0->m_pkthdr.len = m0->m_len = len;
2889 mlast = m0;
2890
2891 while (totlen > 0) {
2892 MGET(m, M_DONTWAIT, MT_DATA);
2893 if (m == NULL) {
2894 m_freem(m0);
2895 return (NULL);
2896 }
2897 MCLGET(m, M_DONTWAIT);
2898 if (!(m->m_flags & M_EXT)) {
2899 m_freem(m0);
2900 return (NULL);
2901 }
2902 len = MCLBYTES;
2903 m->m_len = len;
2904 if (m0->m_flags & M_PKTHDR)
2905 m0->m_pkthdr.len += len;
2906 totlen -= len;
2907
2908 mlast->m_next = m;
2909 mlast = m;
2910 }
2911
2912 return (m0);
2913 }
2914 #endif /* HAVE_CRYPTO_LSZ */
2915
2916 void
2917 hifn_write_4(sc, reggrp, reg, val)
2918 struct hifn_softc *sc;
2919 int reggrp;
2920 bus_size_t reg;
2921 u_int32_t val;
2922 {
2923 /*
2924 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2925 * and Group 1 registers; avoid conditions that could create
2926 * burst writes by doing a read in between the writes.
2927 */
2928 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2929 if (sc->sc_waw_lastgroup == reggrp &&
2930 sc->sc_waw_lastreg == reg - 4) {
2931 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2932 }
2933 sc->sc_waw_lastgroup = reggrp;
2934 sc->sc_waw_lastreg = reg;
2935 }
2936 if (reggrp == 0)
2937 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2938 else
2939 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2940
2941 }
2942
2943 u_int32_t
2944 hifn_read_4(sc, reggrp, reg)
2945 struct hifn_softc *sc;
2946 int reggrp;
2947 bus_size_t reg;
2948 {
2949 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2950 sc->sc_waw_lastgroup = -1;
2951 sc->sc_waw_lastreg = 1;
2952 }
2953 if (reggrp == 0)
2954 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
2955 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
2956 }
2957