/*	$NetBSD: hifn7751.c,v 1.20 2003/11/16 00:22:09 jonathan Exp $	*/
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.20 2003/11/16 00:22:09 jonathan Exp $");
52
53 #include "rnd.h"
54 #include "opencrypto.h"
55
56 #if NRND == 0 || NOPENCRYPTO == 0
57 #error hifn7751 requires rnd and opencrypto pseudo-devices
58 #endif
59
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/proc.h>
64 #include <sys/errno.h>
65 #include <sys/malloc.h>
66 #include <sys/kernel.h>
67 #include <sys/mbuf.h>
68 #include <sys/device.h>
69
70 #include <uvm/uvm_extern.h>
71
72
73 #ifdef __OpenBSD__
74 #include <crypto/crypto.h>
75 #include <dev/rndvar.h>
76 #else
77 #include <opencrypto/cryptodev.h>
78 #include <sys/rnd.h>
79 #endif
80
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcidevs.h>
84
85 #include <dev/pci/hifn7751reg.h>
86 #include <dev/pci/hifn7751var.h>
87
/* Debug output is compiled out by default. */
#undef HIFN_DEBUG

#ifdef __NetBSD__
/* Disable feeding the RNG to the entropy pool until its output is vetted. */
#define HIFN_NO_RNG			/* until statistically tested */
#define M_DUP_PKTHDR M_COPY_PKTHDR	/* XXX */
#endif

#ifdef HIFN_DEBUG
extern int hifn_debug;		/* patchable */
int hifn_debug = 1;
#endif

#ifdef __OpenBSD__
#define HAVE_CRYPTO_LZS		/* OpenBSD OCF supports CRYPTO_COMP_LZS */
#endif
103
104 /*
105 * Prototypes and count for the pci_device structure
106 */
107 #ifdef __OpenBSD__
108 int hifn_probe((struct device *, void *, void *);
109 #else
110 int hifn_probe(struct device *, struct cfdata *, void *);
111 #endif
112 void hifn_attach(struct device *, struct device *, void *);
113
114 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
115 hifn_probe, hifn_attach, NULL, NULL);
116
117 #ifdef __OpenBSD__
118 struct cfdriver hifn_cd = {
119 0, "hifn", DV_DULL
120 };
121 #endif
122
123 void hifn_reset_board(struct hifn_softc *, int);
124 void hifn_reset_puc(struct hifn_softc *);
125 void hifn_puc_wait(struct hifn_softc *);
126 const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
127 void hifn_set_retry(struct hifn_softc *);
128 void hifn_init_dma(struct hifn_softc *);
129 void hifn_init_pci_registers(struct hifn_softc *);
130 int hifn_sramsize(struct hifn_softc *);
131 int hifn_dramsize(struct hifn_softc *);
132 int hifn_ramtype(struct hifn_softc *);
133 void hifn_sessions(struct hifn_softc *);
134 int hifn_intr(void *);
135 u_int hifn_write_command(struct hifn_command *, u_int8_t *);
136 u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
137 int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
138 int hifn_freesession(void*, u_int64_t);
139 int hifn_process(void*, struct cryptop *, int);
140 void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
141 int hifn_crypto(struct hifn_softc *, struct hifn_command *,
142 struct cryptop*, int);
143 int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
144 int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
145 int hifn_dmamap_aligned(bus_dmamap_t);
146 int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
147 int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
148 int hifn_init_pubrng(struct hifn_softc *);
149 #ifndef HIFN_NO_RNG
150 static void hifn_rng(void *);
151 #endif
152 void hifn_tick(void *);
153 void hifn_abort(struct hifn_softc *);
154 void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
155 void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
156 u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
157 #ifdef HAVE_CRYPTO_LZS
158 int hifn_compression(struct hifn_softc *, struct cryptop *,
159 struct hifn_command *);
160 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
161 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
162 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
163 u_int8_t *);
164 #endif /* HAVE_CRYPTO_LZS */
165
166
167 #ifdef notyet
168 int hifn_compression(struct hifn_softc *, struct cryptop *,
169 struct hifn_command *);
170 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
171 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
172 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
173 u_int8_t *);
174 #endif
175
/* Global driver statistics, exported for userland inspection. */
struct hifn_stats hifnstats;

/*
 * Table of supported boards: PCI vendor/product pair, per-chip feature
 * flags (HIFN_* from hifn7751var.h), and the name printed at attach.
 * Terminated by a NULL-name sentinel entry.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
224
225 static const struct hifn_product *
226 hifn_lookup(const struct pci_attach_args *pa)
227 {
228 const struct hifn_product *hp;
229
230 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
231 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
232 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
233 return (hp);
234 }
235 return (NULL);
236 }
237
238 int
239 hifn_probe(struct device *parent, struct cfdata *match, void *aux)
240 {
241 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
242
243 if (hifn_lookup(pa) != NULL)
244 return (1);
245
246 return (0);
247 }
248
/*
 * Autoconf attach: map both register BARs, allocate/map/load the shared
 * DMA descriptor area, unlock the crypto engine, size on-board RAM,
 * hook the interrupt, register with opencrypto, and start the tick and
 * (optionally) RNG callouts.  On failure, tears down in reverse order
 * via the goto-unwind chain at the bottom.
 */
void
hifn_attach(struct device *parent, struct device *self, void *aux)
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	caddr_t kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* probe matched, so lookup cannot fail */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering so the chip can DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0: "group 0" registers; BAR1: "group 1" (DMA) registers. */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error("%s: can't map mem space %d\n",
		    sc->sc_dv.dv_xname, 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error("%s: can't find mem space %d\n",
		    sc->sc_dv.dv_xname, 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		/* force the first write-4 to miss the write-merge cache */
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate one physically contiguous page-aligned chunk for all
	 * four descriptor rings plus bounce buffers (struct hifn_dma),
	 * map it into KVA, and load it into a single-segment DMA map.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't alloc DMA buffer\n",
		    sc->sc_dv.dv_xname);
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
		    sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error("%s: can't create DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't load DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error("%s: crypto enabling failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		goto fail_mem;
	}

	/* Compute sc_maxses from context ram; needs PUCNFG set up above. */
	hifn_sessions(sc);

	/* rseg is reused here as a scratch for the RAM-size report. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
	    sc->sc_dv.dv_xname, hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error("%s: couldn't get crypto driver id\n",
		    sc->sc_dv.dv_xname);
		goto fail_intr;
	}

	/* Re-read the enable level to decide what to register. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		/* "strong crypto" level: 3DES/ARC4 (and AES on 7955/7956) */
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		/* base level: hashes and single DES */
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

	/* Error unwind: release resources in reverse order of acquisition. */
fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
477
/*
 * Bring up the public-key unit and the hardware RNG, as advertised by
 * sc_flags.  Returns 0 on success, 1 if the public-key engine failed to
 * come out of reset.
 */
int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	/* The 7811 has no PUB_RESET register; skip the reset dance there. */
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First sample after enable is discarded by hifn_rng(). */
		sc->sc_rngfirst = 1;
		/* Harvest roughly 100 times a second (at least once a tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifndef	HIFN_NO_RNG
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif	/* !__OpenBSD__ */
#endif	/* HIFN_NO_RNG */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
545
#ifndef HIFN_NO_RNG
/*
 * Periodic callout: harvest random words from the chip's RNG and feed
 * them to the entropy pool, then reschedule.  On NetBSD the entire body
 * is compiled out (and HIFN_NO_RNG removes the function altogether) —
 * the RNG output has not been statistically vetted; see the defines at
 * the top of the file.
 */
static void
hifn_rng(void *vsc)
{
#ifndef __NetBSD__
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs per invocation from the 7811 FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: stop harvesting permanently
				 * (no reschedule below the return). */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;	/* discard first read */
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;	/* discard first read */
		else
			add_true_randomness(num1);
	}

	/* Reschedule ourselves sc_rnghz ticks from now. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
#endif /*!__NetBSD__*/
}
#endif
596
597 void
598 hifn_puc_wait(struct hifn_softc *sc)
599 {
600 int i;
601
602 for (i = 5000; i > 0; i--) {
603 DELAY(1);
604 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
605 break;
606 }
607 if (!i)
608 printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
609 }
610
/*
 * Reset the processing unit.
 */
void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit — writing DMAENA alone clears the RESET
	 * bit in PUCTRL; then wait for the unit to come back. */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
621
/*
 * Clear the low 16 bits of the PCI TRDY/retry timeout config register
 * so the chip never gives up on bus retries (a known chip workaround —
 * NOTE(review): exact field semantics per Hifn datasheet, not visible
 * here).
 */
void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;	/* zero retry-timeout / TRDY-timeout bytes */
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
631
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * "full" additionally drops MSTRESET/DMARESET for a deeper DMA reset.
 */
void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* Descriptor rings are stale after a reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	/* Reset clobbers the retry-timeout workaround; reapply it. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to 1s for the 7811's context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
685
/*
 * Advance the unlock-signature LFSR by "cnt" steps.  Each step computes
 * the parity of the bits of "a" selected by the tap mask 0x80080125 and
 * shifts it into bit 0 while the rest of "a" shifts left one (the high
 * bit falls off — well-defined, "a" is unsigned).  Used by
 * hifn_enable_crypto() to derive the unlock sequence.
 *
 * Fix: loop index was a signed int compared against the unsigned "cnt"
 * (implicit conversion, and UB for cnt > INT_MAX); use u_int.
 */
u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift left, feeding the parity into bit 0 */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
707
/*
 * Per-board 13-byte unlock offset tables, consumed by
 * hifn_enable_crypto() (one signature step per byte).  All entries here
 * are all-zero keys; the table still gates which vendor/product pairs
 * the unlock sequence will be attempted on.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
755
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock sequence is written to enable it.  The check is important,
 * as enabling crypto twice will lock the board.  Returns a string
 * describing the enabled capability level, or NULL on failure.
 */
const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find the per-board unlock offset table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save registers we clobber; restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Put the DMA unit into unlock mode and prime the secret regs. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Write the 13-step signature sequence derived from card_id. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the (possibly upgraded) enable level. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
857
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers (bus addresses of the rings
	 * inside the single shared struct hifn_dma segment) */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register — ack every pending condition and disable
	 * all four ring engines until work is queued */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Interrupt sources: result-done plus all abort/overrun errors. */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		/* 795x: no external RAM config bits; program the PLL. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	/* Enable descriptor polling with the configured frequency/scalar. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
929
930 /*
931 * The maximum number of sessions supported by the card
932 * is dependent on the amount of context ram, which
933 * encryption algorithms are enabled, and how compression
934 * is configured. This should be configured before this
935 * routine is called.
936 */
937 void
938 hifn_sessions(struct hifn_softc *sc)
939 {
940 u_int32_t pucnfg;
941 int ctxsize;
942
943 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
944
945 if (pucnfg & HIFN_PUCNFG_COMPSING) {
946 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
947 ctxsize = 128;
948 else
949 ctxsize = 512;
950 /*
951 * 7955/7956 has internal context memory of 32K
952 */
953 if (sc->sc_flags & HIFN_IS_7956)
954 sc->sc_maxses = 32768 / ctxsize;
955 else
956 sc->sc_maxses = 1 +
957 ((sc->sc_ramsize - 32768) / ctxsize);
958 }
959 else
960 sc->sc_maxses = sc->sc_ramsize / 16384;
961
962 if (sc->sc_maxses > 2048)
963 sc->sc_maxses = 2048;
964 }
965
/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.  Writes a known 8-byte pattern to address 0
 * and reads it back: if either pattern does not survive, the part is
 * assumed to have DRAM (sc_drammodel = 1).  Returns -1 only on a
 * descriptor-level write/read failure, 0 otherwise.
 */
int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/* Pass 1: 0x55 pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Pass 2: inverted (0xaa) pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns survived: SRAM (sc_drammodel stays 0). */
	return (0);
}
1000
#define	HIFN_SRAM_MAX		(32 << 20)	/* probe up to 32MB */
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size the on-board SRAM: tag every 16K step with its index (written in
 * descending order so aliased addresses are overwritten by lower tags),
 * then read back ascending; sc_ramsize ends up just past the last step
 * that reads back its own tag.  Always returns 0.
 */
int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Fill the tail bytes (beyond the copied index) with a pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Write tags high-to-low; on an aliasing (smaller) part, higher
	 * addresses get clobbered by the later, lower writes. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/* native-endian copy of the index; matched by the identical
		 * bcopy in the verify loop, so endianness cancels out */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify low-to-high; stop growing sc_ramsize on first mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1034
1035 /*
1036 * XXX For dram boards, one should really try all of the
1037 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1038 * is already set up correctly.
1039 */
1040 int
1041 hifn_dramsize(struct hifn_softc *sc)
1042 {
1043 u_int32_t cnfg;
1044
1045 if (sc->sc_flags & HIFN_IS_7956) {
1046 /*
1047 * 7955/7956 have a fixed internal ram of only 32K.
1048 */
1049 sc->sc_ramsize = 32768;
1050 } else {
1051 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1052 HIFN_PUCNFG_DRAMMASK;
1053 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1054 }
1055 return (0);
1056 }
1057
/*
 * Allocate one slot in each of the four descriptor rings (command,
 * source, destination, result), returning the indices through the out
 * parameters.  When a ring's producer index reaches the end, the last
 * descriptor is turned into a JUMP back to slot 0 (with the done
 * interrupt masked) and the index wraps.  Caller is expected to hold
 * whatever synchronization the rings require.
 */
void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		/* wrap: last slot becomes a jump descriptor back to 0 */
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1104
1105 int
1106 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1107 {
1108 struct hifn_dma *dma = sc->sc_dma;
1109 struct hifn_base_command wc;
1110 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1111 int r, cmdi, resi, srci, dsti;
1112
1113 wc.masks = htole16(3 << 13);
1114 wc.session_num = htole16(addr >> 14);
1115 wc.total_source_count = htole16(8);
1116 wc.total_dest_count = htole16(addr & 0x3fff);
1117
1118 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1119
1120 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1121 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1122 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1123
1124 /* build write command */
1125 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1126 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1127 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1128
1129 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1130 + offsetof(struct hifn_dma, test_src));
1131 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1132 + offsetof(struct hifn_dma, test_dst));
1133
1134 dma->cmdr[cmdi].l = htole32(16 | masks);
1135 dma->srcr[srci].l = htole32(8 | masks);
1136 dma->dstr[dsti].l = htole32(4 | masks);
1137 dma->resr[resi].l = htole32(4 | masks);
1138
1139 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1140 0, sc->sc_dmamap->dm_mapsize,
1141 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1142
1143 for (r = 10000; r >= 0; r--) {
1144 DELAY(10);
1145 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1146 0, sc->sc_dmamap->dm_mapsize,
1147 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1148 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1149 break;
1150 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1151 0, sc->sc_dmamap->dm_mapsize,
1152 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1153 }
1154 if (r == 0) {
1155 printf("%s: writeramaddr -- "
1156 "result[%d](addr %d) still valid\n",
1157 sc->sc_dv.dv_xname, resi, addr);
1158 r = -1;
1159 return (-1);
1160 } else
1161 r = 0;
1162
1163 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1164 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1165 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1166
1167 return (r);
1168 }
1169
1170 int
1171 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1172 {
1173 struct hifn_dma *dma = sc->sc_dma;
1174 struct hifn_base_command rc;
1175 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1176 int r, cmdi, srci, dsti, resi;
1177
1178 rc.masks = htole16(2 << 13);
1179 rc.session_num = htole16(addr >> 14);
1180 rc.total_source_count = htole16(addr & 0x3fff);
1181 rc.total_dest_count = htole16(8);
1182
1183 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1184
1185 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1186 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1187 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1188
1189 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1190 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1191
1192 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1193 offsetof(struct hifn_dma, test_src));
1194 dma->test_src = 0;
1195 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1196 offsetof(struct hifn_dma, test_dst));
1197 dma->test_dst = 0;
1198 dma->cmdr[cmdi].l = htole32(8 | masks);
1199 dma->srcr[srci].l = htole32(8 | masks);
1200 dma->dstr[dsti].l = htole32(8 | masks);
1201 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1202
1203 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1204 0, sc->sc_dmamap->dm_mapsize,
1205 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1206
1207 for (r = 10000; r >= 0; r--) {
1208 DELAY(10);
1209 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1210 0, sc->sc_dmamap->dm_mapsize,
1211 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1212 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1213 break;
1214 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1215 0, sc->sc_dmamap->dm_mapsize,
1216 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1217 }
1218 if (r == 0) {
1219 printf("%s: readramaddr -- "
1220 "result[%d](addr %d) still valid\n",
1221 sc->sc_dv.dv_xname, resi, addr);
1222 r = -1;
1223 } else {
1224 r = 0;
1225 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1226 }
1227
1228 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1229 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1230 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1231
1232 return (r);
1233 }
1234
1235 /*
1236 * Initialize the descriptor rings.
1237 */
1238 void
1239 hifn_init_dma(struct hifn_softc *sc)
1240 {
1241 struct hifn_dma *dma = sc->sc_dma;
1242 int i;
1243
1244 hifn_set_retry(sc);
1245
1246 /* initialize static pointer values */
1247 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1248 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1249 offsetof(struct hifn_dma, command_bufs[i][0]));
1250 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1251 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1252 offsetof(struct hifn_dma, result_bufs[i][0]));
1253
1254 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1255 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1256 offsetof(struct hifn_dma, cmdr[0]));
1257 dma->srcr[HIFN_D_SRC_RSIZE].p =
1258 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1259 offsetof(struct hifn_dma, srcr[0]));
1260 dma->dstr[HIFN_D_DST_RSIZE].p =
1261 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1262 offsetof(struct hifn_dma, dstr[0]));
1263 dma->resr[HIFN_D_RES_RSIZE].p =
1264 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1265 offsetof(struct hifn_dma, resr[0]));
1266
1267 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1268 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1269 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1270 }
1271
1272 /*
1273 * Writes out the raw command buffer space. Returns the
1274 * command buffer size.
1275 */
u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	/*
	 * Serialize cmd into the raw command buffer at buf: a base
	 * command, then optional comp/mac/crypt sub-commands, then any
	 * new MAC key, crypt key and IV.  Returns the number of bytes
	 * written.
	 */
	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	/* If slop bytes exist, the final partial word is counted as a
	 * full u_int32_t (it lands in the per-command slop area). */
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	/* Lengths are split: low 16 bits here, the remaining high bits
	 * are packed into session_num below. */
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	if (using_comp) {
		/* compression sub-command; crd_len split like above */
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		/* MAC sub-command */
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		/* encryption sub-command */
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key immediately follows the sub-commands. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New encryption key: length depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* repeat the key material to fill 256 bytes,
			 * then 4 bytes of zero padding */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV: AES uses a longer IV than the DES-family ciphers. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* A command with no MAC/crypt/comp work still gets 8 bytes of
	 * zero padding appended. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1405
1406 int
1407 hifn_dmamap_aligned(bus_dmamap_t map)
1408 {
1409 int i;
1410
1411 for (i = 0; i < map->dm_nsegs; i++) {
1412 if (map->dm_segs[i].ds_addr & 3)
1413 return (0);
1414 if ((i != (map->dm_nsegs - 1)) &&
1415 (map->dm_segs[i].ds_len & 3))
1416 return (0);
1417 }
1418 return (1);
1419 }
1420
/*
 * Fill destination-ring descriptors from cmd->dst_map.  All segments
 * but the last are queued as-is; when cmd->sloplen is non-zero the
 * trailing partial word is redirected into the shared slop[] area so
 * the chip always writes whole words.  Only the final descriptor
 * carries HIFN_D_LAST.  Returns the new producer index (also stored
 * in dma->dsti).
 */
int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* queue all but the final segment */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* wrap: arm the jump descriptor */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* final segment goes straight to the caller's buffer */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* final word lands in the slop area for this command */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* any whole words remaining in the final segment still
		 * go to the caller's buffer first */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* queue the last descriptor chosen above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1492
/*
 * Fill source-ring descriptors, one per segment of cmd->src_map.
 * The final segment is tagged HIFN_D_LAST.  Returns the new producer
 * index (also stored in dma->srci).
 */
int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* wrap: arm the jump descriptor, restart at 0 */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1524
/*
 * Queue one crypto operation on the chip.  Creates and loads DMA maps
 * for the source (and, if the source is not word-aligned, a freshly
 * allocated destination mbuf chain), reserves ring space at splnet,
 * writes the command descriptor, and kicks the four DMA engines.
 * Returns 0 on success or an errno; on failure all maps and any
 * allocated mbufs are released via the goto-unwind chain at the end.
 */
int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source map from the mbuf chain or uio. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* aligned source: operate in place, dst shares the map */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/* misaligned source: only mbufs can be re-targeted;
		 * build a fresh destination chain of the same size */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* grow the chain until it covers totlen bytes */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* dst_map is still NULL only when a new destination chain was
	 * built above; create and load a separate map for it */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* Flush the data buffers for device access. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring manipulation below must not race the interrupt handler. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* wrap the command ring via its jump descriptor */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* remember which command this result slot belongs to */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* the slop word (if any) is stored per result slot */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* arm the watchdog countdown used by hifn_tick() */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1788
1789 void
1790 hifn_tick(void *vsc)
1791 {
1792 struct hifn_softc *sc = vsc;
1793 int s;
1794
1795 s = splnet();
1796 if (sc->sc_active == 0) {
1797 struct hifn_dma *dma = sc->sc_dma;
1798 u_int32_t r = 0;
1799
1800 if (dma->cmdu == 0 && sc->sc_c_busy) {
1801 sc->sc_c_busy = 0;
1802 r |= HIFN_DMACSR_C_CTRL_DIS;
1803 CLR_LED(sc, HIFN_MIPSRST_LED0);
1804 }
1805 if (dma->srcu == 0 && sc->sc_s_busy) {
1806 sc->sc_s_busy = 0;
1807 r |= HIFN_DMACSR_S_CTRL_DIS;
1808 CLR_LED(sc, HIFN_MIPSRST_LED1);
1809 }
1810 if (dma->dstu == 0 && sc->sc_d_busy) {
1811 sc->sc_d_busy = 0;
1812 r |= HIFN_DMACSR_D_CTRL_DIS;
1813 }
1814 if (dma->resu == 0 && sc->sc_r_busy) {
1815 sc->sc_r_busy = 0;
1816 r |= HIFN_DMACSR_R_CTRL_DIS;
1817 CLR_LED(sc, HIFN_MIPSRST_LED2);
1818 }
1819 if (r)
1820 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1821 }
1822 else
1823 sc->sc_active--;
1824 splx(s);
1825 #ifdef __OpenBSD__
1826 timeout_add(&sc->sc_tickto, hz);
1827 #else
1828 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1829 #endif
1830 }
1831
/*
 * Interrupt handler: acknowledge the DMA status bits, reset the chip
 * on an engine abort, then reclaim completed descriptors from the
 * result, source and command rings (the destination ring is reclaimed
 * implicitly through hifn_callback()).  Returns 1 if the interrupt
 * was ours, 0 otherwise.
 */
int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* ack only the conditions we are watching */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any engine abort forces a full reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* chip still owns this descriptor; stop here */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* slot RSIZE is the jump descriptor, not a result */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest sits 12 bytes into the
				 * result buffer */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* reclaim consumed source descriptors */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* reclaim consumed command descriptors */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
1967
1968 /*
1969 * Allocate a new 'session' and return an encoded session id. 'sidp'
1970 * contains our registration id, and should contain an encoded session
1971 * id on successful allocation.
1972 */
int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* find a free session slot */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* Validate the requested algorithm chain: at most one MAC, one
	 * cipher and one compression algorithm per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* CBC ciphers need a fresh random IV */
#ifdef	__NetBSD__
			rnd_extract_data(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH,
			    RND_EXTRACT_ANY);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
 			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LSZ
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* encode device unit and session slot into the session id */
	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2046
2047 /*
2048 * Deallocate a session.
2049 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2050 * XXX to blow away any keys already stored there.
2051 */
2052 int
2053 hifn_freesession(void *arg, u_int64_t tid)
2054 {
2055 struct hifn_softc *sc = arg;
2056 int session;
2057 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2058
2059 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2060 if (sc == NULL)
2061 return (EINVAL);
2062
2063 session = HIFN_SESSION(sid);
2064 if (session >= sc->sc_maxses)
2065 return (EINVAL);
2066
2067 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2068 return (0);
2069 }
2070
2071 int
2072 hifn_process(void *arg, struct cryptop *crp, int hint)
2073 {
2074 struct hifn_softc *sc = arg;
2075 struct hifn_command *cmd = NULL;
2076 int session, err, ivlen;
2077 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2078
2079 if (crp == NULL || crp->crp_callback == NULL) {
2080 hifnstats.hst_invalid++;
2081 return (EINVAL);
2082 }
2083 session = HIFN_SESSION(crp->crp_sid);
2084
2085 if (sc == NULL || session >= sc->sc_maxses) {
2086 err = EINVAL;
2087 goto errout;
2088 }
2089
2090 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2091 M_DEVBUF, M_NOWAIT|M_ZERO);
2092 if (cmd == NULL) {
2093 hifnstats.hst_nomem++;
2094 err = ENOMEM;
2095 goto errout;
2096 }
2097
2098 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2099 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2100 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2101 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2102 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2103 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2104 } else {
2105 err = EINVAL;
2106 goto errout; /* XXX we don't handle contiguous buffers! */
2107 }
2108
2109 crd1 = crp->crp_desc;
2110 if (crd1 == NULL) {
2111 err = EINVAL;
2112 goto errout;
2113 }
2114 crd2 = crd1->crd_next;
2115
2116 if (crd2 == NULL) {
2117 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2118 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2119 crd1->crd_alg == CRYPTO_SHA1 ||
2120 crd1->crd_alg == CRYPTO_MD5) {
2121 maccrd = crd1;
2122 enccrd = NULL;
2123 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2124 crd1->crd_alg == CRYPTO_3DES_CBC ||
2125 crd1->crd_alg == CRYPTO_AES_CBC ||
2126 crd1->crd_alg == CRYPTO_ARC4) {
2127 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2128 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2129 maccrd = NULL;
2130 enccrd = crd1;
2131 #ifdef HAVE_CRYPTO_LSZ
2132 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2133 return (hifn_compression(sc, crp, cmd));
2134 #endif
2135 } else {
2136 err = EINVAL;
2137 goto errout;
2138 }
2139 } else {
2140 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2141 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2142 crd1->crd_alg == CRYPTO_MD5 ||
2143 crd1->crd_alg == CRYPTO_SHA1) &&
2144 (crd2->crd_alg == CRYPTO_DES_CBC ||
2145 crd2->crd_alg == CRYPTO_3DES_CBC ||
2146 crd2->crd_alg == CRYPTO_AES_CBC ||
2147 crd2->crd_alg == CRYPTO_ARC4) &&
2148 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2149 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2150 maccrd = crd1;
2151 enccrd = crd2;
2152 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2153 crd1->crd_alg == CRYPTO_ARC4 ||
2154 crd1->crd_alg == CRYPTO_3DES_CBC ||
2155 crd1->crd_alg == CRYPTO_AES_CBC) &&
2156 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2157 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2158 crd2->crd_alg == CRYPTO_MD5 ||
2159 crd2->crd_alg == CRYPTO_SHA1) &&
2160 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2161 enccrd = crd1;
2162 maccrd = crd2;
2163 } else {
2164 /*
2165 * We cannot order the 7751 as requested
2166 */
2167 err = EINVAL;
2168 goto errout;
2169 }
2170 }
2171
2172 if (enccrd) {
2173 cmd->enccrd = enccrd;
2174 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2175 switch (enccrd->crd_alg) {
2176 case CRYPTO_ARC4:
2177 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2178 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2179 != sc->sc_sessions[session].hs_prev_op)
2180 sc->sc_sessions[session].hs_state =
2181 HS_STATE_USED;
2182 break;
2183 case CRYPTO_DES_CBC:
2184 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2185 HIFN_CRYPT_CMD_MODE_CBC |
2186 HIFN_CRYPT_CMD_NEW_IV;
2187 break;
2188 case CRYPTO_3DES_CBC:
2189 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2190 HIFN_CRYPT_CMD_MODE_CBC |
2191 HIFN_CRYPT_CMD_NEW_IV;
2192 break;
2193 case CRYPTO_AES_CBC:
2194 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2195 HIFN_CRYPT_CMD_MODE_CBC |
2196 HIFN_CRYPT_CMD_NEW_IV;
2197 break;
2198 default:
2199 err = EINVAL;
2200 goto errout;
2201 }
2202 if (enccrd->crd_alg != CRYPTO_ARC4) {
2203 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2204 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2205 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2206 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2207 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2208 else
2209 bcopy(sc->sc_sessions[session].hs_iv,
2210 cmd->iv, ivlen);
2211
2212 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2213 == 0) {
2214 if (crp->crp_flags & CRYPTO_F_IMBUF)
2215 m_copyback(cmd->srcu.src_m,
2216 enccrd->crd_inject,
2217 ivlen, cmd->iv);
2218 else if (crp->crp_flags & CRYPTO_F_IOV)
2219 cuio_copyback(cmd->srcu.src_io,
2220 enccrd->crd_inject,
2221 ivlen, cmd->iv);
2222 }
2223 } else {
2224 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2225 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2226 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2227 m_copydata(cmd->srcu.src_m,
2228 enccrd->crd_inject, ivlen, cmd->iv);
2229 else if (crp->crp_flags & CRYPTO_F_IOV)
2230 cuio_copydata(cmd->srcu.src_io,
2231 enccrd->crd_inject, ivlen, cmd->iv);
2232 }
2233 }
2234
2235 cmd->ck = enccrd->crd_key;
2236 cmd->cklen = enccrd->crd_klen >> 3;
2237
2238 /*
2239 * Need to specify the size for the AES key in the masks.
2240 */
2241 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2242 HIFN_CRYPT_CMD_ALG_AES) {
2243 switch (cmd->cklen) {
2244 case 16:
2245 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2246 break;
2247 case 24:
2248 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2249 break;
2250 case 32:
2251 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2252 break;
2253 default:
2254 err = EINVAL;
2255 goto errout;
2256 }
2257 }
2258
2259 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2260 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2261 }
2262
2263 if (maccrd) {
2264 cmd->maccrd = maccrd;
2265 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2266
2267 switch (maccrd->crd_alg) {
2268 case CRYPTO_MD5:
2269 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2270 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2271 HIFN_MAC_CMD_POS_IPSEC;
2272 break;
2273 case CRYPTO_MD5_HMAC:
2274 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2275 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2276 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2277 break;
2278 case CRYPTO_SHA1:
2279 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2280 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2281 HIFN_MAC_CMD_POS_IPSEC;
2282 break;
2283 case CRYPTO_SHA1_HMAC:
2284 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2285 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2286 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2287 break;
2288 }
2289
2290 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2291 maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2292 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2293 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2294 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2295 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2296 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2297 }
2298 }
2299
2300 cmd->crp = crp;
2301 cmd->session_num = session;
2302 cmd->softc = sc;
2303
2304 err = hifn_crypto(sc, cmd, crp, hint);
2305 if (err == 0) {
2306 if (enccrd)
2307 sc->sc_sessions[session].hs_prev_op =
2308 enccrd->crd_flags & CRD_F_ENCRYPT;
2309 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2310 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2311 return 0;
2312 } else if (err == ERESTART) {
2313 /*
2314 * There weren't enough resources to dispatch the request
2315 * to the part. Notify the caller so they'll requeue this
2316 * request and resubmit it again soon.
2317 */
2318 #ifdef HIFN_DEBUG
2319 if (hifn_debug)
2320 printf(sc->sc_dv.dv_xname, "requeue request\n");
2321 #endif
2322 free(cmd, M_DEVBUF);
2323 sc->sc_needwakeup |= CRYPTO_SYMQ;
2324 return (err);
2325 }
2326
2327 errout:
2328 if (cmd != NULL)
2329 free(cmd, M_DEVBUF);
2330 if (err == EINVAL)
2331 hifnstats.hst_invalid++;
2332 else
2333 hifnstats.hst_nomem++;
2334 crp->crp_etype = err;
2335 crypto_done(crp);
2336 return (0);
2337 }
2338
/*
 * Abort all commands outstanding on the result ring after the hardware
 * has wedged.  Descriptors the chip already completed are delivered
 * normally through hifn_callback(); unfinished ones are torn down and
 * failed back to opencrypto.  Finally the board is reset and the DMA
 * engine / PCI registers are reinitialized.
 */
void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the in-use span of the result ring: resk..(resk+resu). */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		/* VALID cleared means the chip finished this descriptor. */
		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/*
				 * MAC digest lives 12 bytes into the result
				 * buffer (past the base and mac result
				 * headers -- matches hifn_callback()'s
				 * sizeof-based computation).
				 */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Request never completed: sync and unmap it. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * If a separate destination chain was allocated,
			 * hand it back to the caller and drop the source.
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN requests would be requeued, not completed. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2419
/*
 * Completion handler for symmetric crypto requests.  Syncs and tears
 * down the DMA maps, trims the destination mbuf chain to the processed
 * length, copies back any slop bytes and the session IV, delivers the
 * MAC digest from resbuf, retires destination-ring descriptors, and
 * finally hands the request back to opencrypto via crypto_done().
 */
void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	/* Post-DMA sync; a shared map gets both directions at once. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A separate destination chain was allocated:
			 * give it to the caller, clip its mbufs so the
			 * chain length equals the source mapping size,
			 * and free the source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy back trailing bytes that were DMA'd via the slop buffer. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/*
	 * Retire completed destination descriptors.  Slot
	 * HIFN_D_DST_RSIZE is the ring's jump descriptor and is not
	 * counted in dstu, hence the RSIZE+1 wrap without a decrement.
	 */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip: re-arm sync and stop. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * After an encrypt operation, save the last IV-sized block of
	 * ciphertext as the session's next IV (CBC chaining).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	/* Deliver the MAC digest, located past the result headers. */
	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest length: full hash, or 12 for HMAC-truncated. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2553
2554 #ifdef HAVE_CRYPTO_LSZ
2555
2556 int
2557 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2558 struct hifn_command *cmd)
2559 {
2560 struct cryptodesc *crd = crp->crp_desc;
2561 int s, err = 0;
2562
2563 cmd->compcrd = crd;
2564 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2565
2566 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2567 /*
2568 * XXX can only handle mbufs right now since we can
2569 * XXX dynamically resize them.
2570 */
2571 err = EINVAL;
2572 return (ENOMEM);
2573 }
2574
2575 if ((crd->crd_flags & CRD_F_COMP) == 0)
2576 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2577 if (crd->crd_alg == CRYPTO_LZS_COMP)
2578 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2579 HIFN_COMP_CMD_CLEARHIST;
2580
2581 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2582 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2583 err = ENOMEM;
2584 goto fail;
2585 }
2586
2587 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2588 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2589 err = ENOMEM;
2590 goto fail;
2591 }
2592
2593 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2594 int len;
2595
2596 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2597 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2598 err = ENOMEM;
2599 goto fail;
2600 }
2601
2602 len = cmd->src_map->dm_mapsize / MCLBYTES;
2603 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2604 len++;
2605 len *= MCLBYTES;
2606
2607 if ((crd->crd_flags & CRD_F_COMP) == 0)
2608 len *= 4;
2609
2610 if (len > HIFN_MAX_DMALEN)
2611 len = HIFN_MAX_DMALEN;
2612
2613 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2614 if (cmd->dstu.dst_m == NULL) {
2615 err = ENOMEM;
2616 goto fail;
2617 }
2618
2619 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2620 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2621 err = ENOMEM;
2622 goto fail;
2623 }
2624 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2625 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2626 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2627 err = ENOMEM;
2628 goto fail;
2629 }
2630 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2631 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2632 err = ENOMEM;
2633 goto fail;
2634 }
2635 }
2636
2637 if (cmd->src_map == cmd->dst_map)
2638 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2639 0, cmd->src_map->dm_mapsize,
2640 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2641 else {
2642 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2643 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2644 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2645 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2646 }
2647
2648 cmd->crp = crp;
2649 /*
2650 * Always use session 0. The modes of compression we use are
2651 * stateless and there is always at least one compression
2652 * context, zero.
2653 */
2654 cmd->session_num = 0;
2655 cmd->softc = sc;
2656
2657 s = splnet();
2658 err = hifn_compress_enter(sc, cmd);
2659 splx(s);
2660
2661 if (err != 0)
2662 goto fail;
2663 return (0);
2664
2665 fail:
2666 if (cmd->dst_map != NULL) {
2667 if (cmd->dst_map->dm_nsegs > 0)
2668 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2669 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2670 }
2671 if (cmd->src_map != NULL) {
2672 if (cmd->src_map->dm_nsegs > 0)
2673 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2674 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2675 }
2676 free(cmd, M_DEVBUF);
2677 if (err == EINVAL)
2678 hifnstats.hst_invalid++;
2679 else
2680 hifnstats.hst_nomem++;
2681 crp->crp_etype = err;
2682 crypto_done(crp);
2683 return (0);
2684 }
2685
2686 /*
2687 * must be called at splnet()
2688 */
/*
 * Queue a compression command on the command, source, result and
 * destination rings and kick the respective DMA engines.  Returns
 * ENOMEM if any ring lacks space (caller fails or retries the
 * request), 0 once the command is in flight.
 *
 * must be called at splnet()
 */
int
hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	int cmdi, resi;
	u_int32_t cmdlen;

	/*
	 * NOTE(review): resu is compared against HIFN_D_CMD_RSIZE here;
	 * presumably HIFN_D_RES_RSIZE was intended -- confirm against
	 * the ring definitions before changing.
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
		return (ENOMEM);

	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
		return (ENOMEM);

	/* Wrap via the jump descriptor when at the end of the cmd ring. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	/* Start the command DMA engine on first use. */
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	/* Start the source DMA engine on first use. */
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so the ISR can find it at completion. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	/* Start the result DMA engine on first use. */
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	/* Start the destination DMA engine on first use. */
	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}
	sc->sc_active = 5;
	cmd->cmd_callback = hifn_callback_comp;
	return (0);
}
2785
2786 void
2787 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2788 u_int8_t *resbuf)
2789 {
2790 struct hifn_base_result baseres;
2791 struct cryptop *crp = cmd->crp;
2792 struct hifn_dma *dma = sc->sc_dma;
2793 struct mbuf *m;
2794 int err = 0, i, u;
2795 u_int32_t olen;
2796 bus_size_t dstsize;
2797
2798 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2799 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2800 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2801 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2802
2803 dstsize = cmd->dst_map->dm_mapsize;
2804 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2805
2806 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2807
2808 i = dma->dstk; u = dma->dstu;
2809 while (u != 0) {
2810 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2811 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2812 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2813 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2814 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2815 offsetof(struct hifn_dma, dstr[i]),
2816 sizeof(struct hifn_desc),
2817 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2818 break;
2819 }
2820 if (++i == (HIFN_D_DST_RSIZE + 1))
2821 i = 0;
2822 else
2823 u--;
2824 }
2825 dma->dstk = i; dma->dstu = u;
2826
2827 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2828 bus_size_t xlen;
2829
2830 xlen = dstsize;
2831
2832 m_freem(cmd->dstu.dst_m);
2833
2834 if (xlen == HIFN_MAX_DMALEN) {
2835 /* We've done all we can. */
2836 err = E2BIG;
2837 goto out;
2838 }
2839
2840 xlen += MCLBYTES;
2841
2842 if (xlen > HIFN_MAX_DMALEN)
2843 xlen = HIFN_MAX_DMALEN;
2844
2845 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2846 cmd->srcu.src_m);
2847 if (cmd->dstu.dst_m == NULL) {
2848 err = ENOMEM;
2849 goto out;
2850 }
2851 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2852 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2853 err = ENOMEM;
2854 goto out;
2855 }
2856
2857 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2858 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2859 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2860 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2861
2862 /* already at splnet... */
2863 err = hifn_compress_enter(sc, cmd);
2864 if (err != 0)
2865 goto out;
2866 return;
2867 }
2868
2869 olen = dstsize - (letoh16(baseres.dst_cnt) |
2870 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2871 HIFN_BASE_RES_DSTLEN_S) << 16));
2872
2873 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2874
2875 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2876 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2877 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2878
2879 m = cmd->dstu.dst_m;
2880 if (m->m_flags & M_PKTHDR)
2881 m->m_pkthdr.len = olen;
2882 crp->crp_buf = (caddr_t)m;
2883 for (; m != NULL; m = m->m_next) {
2884 if (olen >= m->m_len)
2885 olen -= m->m_len;
2886 else {
2887 m->m_len = olen;
2888 olen = 0;
2889 }
2890 }
2891
2892 m_freem(cmd->srcu.src_m);
2893 free(cmd, M_DEVBUF);
2894 crp->crp_etype = 0;
2895 crypto_done(crp);
2896 return;
2897
2898 out:
2899 if (cmd->dst_map != NULL) {
2900 if (cmd->src_map->dm_nsegs != 0)
2901 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2902 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2903 }
2904 if (cmd->src_map != NULL) {
2905 if (cmd->src_map->dm_nsegs != 0)
2906 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2907 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2908 }
2909 if (cmd->dstu.dst_m != NULL)
2910 m_freem(cmd->dstu.dst_m);
2911 free(cmd, M_DEVBUF);
2912 crp->crp_etype = err;
2913 crypto_done(crp);
2914 }
2915
2916 struct mbuf *
2917 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2918 {
2919 int len;
2920 struct mbuf *m, *m0, *mlast;
2921
2922 if (mtemplate->m_flags & M_PKTHDR) {
2923 len = MHLEN;
2924 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2925 } else {
2926 len = MLEN;
2927 MGET(m0, M_DONTWAIT, MT_DATA);
2928 }
2929 if (m0 == NULL)
2930 return (NULL);
2931 if (len == MHLEN)
2932 M_DUP_PKTHDR(m0, mtemplate);
2933 MCLGET(m0, M_DONTWAIT);
2934 if (!(m0->m_flags & M_EXT))
2935 m_freem(m0);
2936 len = MCLBYTES;
2937
2938 totlen -= len;
2939 m0->m_pkthdr.len = m0->m_len = len;
2940 mlast = m0;
2941
2942 while (totlen > 0) {
2943 MGET(m, M_DONTWAIT, MT_DATA);
2944 if (m == NULL) {
2945 m_freem(m0);
2946 return (NULL);
2947 }
2948 MCLGET(m, M_DONTWAIT);
2949 if (!(m->m_flags & M_EXT)) {
2950 m_freem(m0);
2951 return (NULL);
2952 }
2953 len = MCLBYTES;
2954 m->m_len = len;
2955 if (m0->m_flags & M_PKTHDR)
2956 m0->m_pkthdr.len += len;
2957 totlen -= len;
2958
2959 mlast->m_next = m;
2960 mlast = m;
2961 }
2962
2963 return (m0);
2964 }
2965 #endif /* HAVE_CRYPTO_LSZ */
2966
2967 void
2968 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
2969 {
2970 /*
2971 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2972 * and Group 1 registers; avoid conditions that could create
2973 * burst writes by doing a read in between the writes.
2974 */
2975 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2976 if (sc->sc_waw_lastgroup == reggrp &&
2977 sc->sc_waw_lastreg == reg - 4) {
2978 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2979 }
2980 sc->sc_waw_lastgroup = reggrp;
2981 sc->sc_waw_lastreg = reg;
2982 }
2983 if (reggrp == 0)
2984 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2985 else
2986 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2987
2988 }
2989
2990 u_int32_t
2991 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
2992 {
2993 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2994 sc->sc_waw_lastgroup = -1;
2995 sc->sc_waw_lastreg = 1;
2996 }
2997 if (reggrp == 0)
2998 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
2999 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3000 }
3001