hifn7751.c revision 1.32 1 /* $NetBSD: hifn7751.c,v 1.32 2006/10/12 01:31:29 christos Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.32 2006/10/12 01:31:29 christos Exp $");
52
53 #include "rnd.h"
54
55 #if NRND == 0
56 #error hifn7751 requires rnd pseudo-devices
57 #endif
58
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/errno.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/mbuf.h>
67 #include <sys/device.h>
68
69 #include <uvm/uvm_extern.h>
70
71
72 #ifdef __OpenBSD__
73 #include <crypto/crypto.h>
74 #include <dev/rndvar.h>
75 #else
76 #include <opencrypto/cryptodev.h>
77 #include <sys/rnd.h>
78 #endif
79
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcidevs.h>
83
84 #include <dev/pci/hifn7751reg.h>
85 #include <dev/pci/hifn7751var.h>
86
87 #undef HIFN_DEBUG
88
89 #ifdef __NetBSD__
90 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
91 #endif
92
93 #ifdef HIFN_DEBUG
94 extern int hifn_debug; /* patchable */
95 int hifn_debug = 1;
96 #endif
97
98 #ifdef __OpenBSD__
99 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
100 #endif
101
102 /*
103 * Prototypes and count for the pci_device structure
104 */
#ifdef __OpenBSD__
/* XXX was "hifn_probe((struct device *, ..." — stray '(' would not compile */
static int hifn_probe(struct device *, void *, void *);
#else
static int hifn_probe(struct device *, struct cfdata *, void *);
#endif
110 static void hifn_attach(struct device *, struct device *, void *);
111
112 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
113 hifn_probe, hifn_attach, NULL, NULL);
114
115 #ifdef __OpenBSD__
116 struct cfdriver hifn_cd = {
117 0, "hifn", DV_DULL
118 };
119 #endif
120
121 static void hifn_reset_board(struct hifn_softc *, int);
122 static void hifn_reset_puc(struct hifn_softc *);
123 static void hifn_puc_wait(struct hifn_softc *);
124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
125 static void hifn_set_retry(struct hifn_softc *);
126 static void hifn_init_dma(struct hifn_softc *);
127 static void hifn_init_pci_registers(struct hifn_softc *);
128 static int hifn_sramsize(struct hifn_softc *);
129 static int hifn_dramsize(struct hifn_softc *);
130 static int hifn_ramtype(struct hifn_softc *);
131 static void hifn_sessions(struct hifn_softc *);
132 static int hifn_intr(void *);
133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
136 static int hifn_freesession(void*, u_int64_t);
137 static int hifn_process(void*, struct cryptop *, int);
138 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
139 u_int8_t *);
140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 struct cryptop*, int);
142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 static int hifn_dmamap_aligned(bus_dmamap_t);
145 static int hifn_dmamap_load_src(struct hifn_softc *,
146 struct hifn_command *);
147 static int hifn_dmamap_load_dst(struct hifn_softc *,
148 struct hifn_command *);
149 static int hifn_init_pubrng(struct hifn_softc *);
150 static void hifn_rng(void *);
151 static void hifn_tick(void *);
152 static void hifn_abort(struct hifn_softc *);
153 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
154 int *);
155 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
156 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
157 #ifdef HAVE_CRYPTO_LZS
158 static int hifn_compression(struct hifn_softc *, struct cryptop *,
159 struct hifn_command *);
160 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
161 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
162 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
163 u_int8_t *);
164 #endif /* HAVE_CRYPTO_LZS */
165
166
167 struct hifn_stats hifnstats;
168
/*
 * Table of supported boards.  hifn_lookup() matches the PCI vendor and
 * product IDs against this table; hifn_flags seeds sc_flags and selects
 * per-chip features (RNG, public key unit, AES, 7811/7956 quirks).
 * The all-zero sentinel entry terminates the table.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;	/* HIFN_* feature/quirk bits */
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	/* sentinel: NULL name terminates the hifn_lookup() scan */
	{ 0,			0,
	  0,
	  NULL
	}
};
215
216 static const struct hifn_product *
217 hifn_lookup(const struct pci_attach_args *pa)
218 {
219 const struct hifn_product *hp;
220
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
224 return (hp);
225 }
226 return (NULL);
227 }
228
229 static int
230 hifn_probe(struct device *parent __unused, struct cfdata *match __unused,
231 void *aux)
232 {
233 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
234
235 if (hifn_lookup(pa) != NULL)
236 return (1);
237
238 return (0);
239 }
240
/*
 * Autoconf attach routine.  In order:
 *  - map both memory BARs (register group 0 and group 1),
 *  - allocate/map/load one contiguous DMA area for the descriptor rings,
 *  - unlock the crypto engine and determine its capability level,
 *  - size the external context RAM (forced to DRAM on 795x),
 *  - establish the interrupt handler,
 *  - register the supported algorithms with the opencrypto framework,
 *  - start the public-key/RNG engines and the watchdog callout.
 * Failure paths unwind through the goto labels at the bottom.
 */
static void
hifn_attach(struct device *parent __unused, struct device *self, void *aux)
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;	/* capability string from hifn_enable_crypto */
	char rbase;		/* 'K' or 'M' for the ram-size banner */
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;		/* crypto enable level from PUSTAT */
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	caddr_t kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* probe already matched, so this cannot happen */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* The chip is a bus-master DMA engine; enable mastering. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0: register group 0 (processing-unit registers). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error("%s: can't map mem space %d\n",
		    sc->sc_dv.dv_xname, 0);
		return;
	}

	/* BAR1: register group 1 (DMA registers). */
	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error("%s: can't find mem space %d\n",
		    sc->sc_dv.dv_xname, 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		/* prime the write-and-wait bookkeeping used by hifn_write_4 */
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map and load a single page-aligned chunk holding
	 * struct hifn_dma (all four descriptor rings plus staging
	 * buffers), then hand its address to the chip below.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't alloc DMA buffer\n",
		    sc->sc_dv.dv_xname);
		goto fail_io1;
        }
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
		    sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error("%s: can't create DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't load DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error("%s: crypto enabling failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef __OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* Pretty-print the RAM size in KB or MB for the banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
	    sc->sc_dv.dv_xname, hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error("%s: couldn't get crypto driver id\n",
		    sc->sc_dv.dv_xname);
		goto fail_intr;
	}

	/* Re-read the enable level (CHIPID must be set before PUSTAT reads). */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Register algorithms with opencrypto according to the enable
	 * level: ENA_2 (strong crypto) adds 3DES/ARC4/AES on top of the
	 * ENA_1 (DES + hashes) set.
	 */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the per-second watchdog tick. */
#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
469
/*
 * Bring up the public-key and RNG engines, as indicated by sc_flags.
 * Returns 1 if the public-key engine fails to come out of reset,
 * 0 otherwise.  Ends by calling hifn_rng() once directly, which both
 * primes the entropy pool and arms the periodic RNG callout.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle ENA off, load config, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
				  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
#endif

		/* Harvest roughly 100 times per second (at least once/tick). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
561
/*
 * RNG harvest routine, run from a callout (and once directly from
 * hifn_init_pubrng).  The 7811 delivers words through a status-gated
 * FIFO; the 795x variant is read straight from RNG_DATA with an
 * 8x oversampling workaround (see the long comment below).  On NetBSD
 * the words are fed to rnd_add_data with an explicit entropy estimate
 * of NBBY/HIFN_RNG_BITSPER bits per byte.  Reschedules itself every
 * sc_rnghz ticks.
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	/* sized so one "first" harvest can help fill the pool */
	u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD];
#else
	u_int32_t num[2];
#endif
	u_int32_t sts;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: give up without rescheduling. */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);

			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
#ifdef __NetBSD__
			rnd_add_data(&sc->sc_rnd_source, num,
			    2 * sizeof(num[0]),
			    (2 * sizeof(num[0]) * NBBY) /
				HIFN_RNG_BITSPER);
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
#ifdef __NetBSD__
		/* First time through, try to help fill the pool. */
		int nwords = sc->sc_rngfirst ?
		    sizeof(num) / sizeof(num[0]) : 4;
#else
		int nwords = 2;
#endif
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */


		if (sc->sc_rngfirst) {
			sc->sc_rngfirst = 0;
		}


		/* 8 reads per word consumed; only the last read is kept. */
		for(i = 0 ; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num,
		    nwords * sizeof(num[0]),
		    (nwords * sizeof(num[0]) * NBBY) /
			HIFN_RNG_BITSPER);
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

	/* Rearm the periodic harvest. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
}
676
677 static void
678 hifn_puc_wait(struct hifn_softc *sc)
679 {
680 int i;
681
682 for (i = 5000; i > 0; i--) {
683 DELAY(1);
684 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
685 break;
686 }
687 if (!i)
688 printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
689 }
690
691 /*
692 * Reset the processing unit.
693 */
694 static void
695 hifn_reset_puc(struct hifn_softc *sc)
696 {
697 /* Reset processing unit */
698 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
699 hifn_puc_wait(sc);
700 }
701
/*
 * Clear the low 16 bits of the chip's TRDY-timeout/retry PCI config
 * register, leaving the upper half untouched.  Called at attach and
 * again after each board reset, since the reset may disturb it.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;	/* zero timeout and retry-count fields */
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
711
712 /*
713 * Resets the board. Values in the regesters are left as is
714 * from the reset (i.e. initial values are assigned elsewhere).
715 */
/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 *
 * "full" selects a deeper DMA reset (master + DMA reset held while in
 * MODE) versus the normal path, which also resets the processing unit.
 * Clears the in-memory descriptor rings, restores the PCI retry
 * settings, and on 7811 waits for context RAM initialization.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The shared descriptor area is stale after a reset; wipe it. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for CRAM initialization to complete. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
765
/*
 * Advance the crypto-unlock signature LFSR by "cnt" steps.
 *
 * Each step takes the parity of the bits selected by the tap mask
 * 0x80080125 and shifts it into bit 0 while shifting the register
 * left by one.  hifn_enable_crypto() uses this to derive the write
 * sequence for the UNLOCK_SECRET2 register from the card id bytes.
 *
 * Fix: the loop index is now u_int to match "cnt"; the original
 * compared a signed int against the unsigned count.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift left, feeding the parity back into bit 0 */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
787
/*
 * Per-card 13-byte "card id" keys, indexed by PCI vendor/product.
 * hifn_enable_crypto() feeds each byte (plus 0x101) through
 * hifn_next_signature() to generate the unlock sequence.  Every card
 * known so far uses the all-zeros key.
 */
static struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
835
836 /*
837 * Checks to see if crypto is already enabled. If crypto isn't enable,
838 * "hifn_enable_crypto" is called to enable it. The check is important,
839 * as enabling crypto twice will lock the board.
840 */
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns a human-readable capability string ("DES", "3DES", ...) on
 * success, or NULL if the card is unknown or in an unrecognized state.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find this card's unlock key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save registers we clobber; restored at "report" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/*
	 * Unlock handshake: put the DMA unit in unlock mode, read the
	 * seed from SECRET1, then clock the signature sequence derived
	 * from the card id into SECRET2, one word per millisecond.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
940
941 /*
942 * Give initial values to the registers listed in the "Register Space"
943 * section of the HIFN Software Development reference manual.
944 */
945 static void
946 hifn_init_pci_registers(struct hifn_softc *sc)
947 {
948 /* write fixed values needed by the Initialization registers */
949 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
950 WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
951 WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
952
953 /* write all 4 ring address registers */
954 WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
955 offsetof(struct hifn_dma, cmdr[0]));
956 WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
957 offsetof(struct hifn_dma, srcr[0]));
958 WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
959 offsetof(struct hifn_dma, dstr[0]));
960 WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
961 offsetof(struct hifn_dma, resr[0]));
962
963 DELAY(2000);
964
965 /* write status register */
966 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
967 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
968 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
969 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
970 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
971 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
972 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
973 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
974 HIFN_DMACSR_S_WAIT |
975 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
976 HIFN_DMACSR_C_WAIT |
977 HIFN_DMACSR_ENGINE |
978 ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
979 HIFN_DMACSR_PUBDONE : 0) |
980 ((sc->sc_flags & HIFN_IS_7811) ?
981 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
982
983 sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
984 sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
985 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
986 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
987 HIFN_DMAIER_ENGINE |
988 ((sc->sc_flags & HIFN_IS_7811) ?
989 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
990 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
991 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
992 CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);
993
994 if (sc->sc_flags & HIFN_IS_7956) {
995 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
996 HIFN_PUCNFG_TCALLPHASES |
997 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
998 WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
999 } else {
1000 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1001 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1002 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1003 (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1004 }
1005
1006 WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1007 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1008 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1009 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1010 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1011 }
1012
1013 /*
1014 * The maximum number of sessions supported by the card
1015 * is dependent on the amount of context ram, which
1016 * encryption algorithms are enabled, and how compression
1017 * is configured. This should be configured before this
1018 * routine is called.
1019 */
1020 static void
1021 hifn_sessions(struct hifn_softc *sc)
1022 {
1023 u_int32_t pucnfg;
1024 int ctxsize;
1025
1026 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1027
1028 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1029 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1030 ctxsize = 128;
1031 else
1032 ctxsize = 512;
1033 /*
1034 * 7955/7956 has internal context memory of 32K
1035 */
1036 if (sc->sc_flags & HIFN_IS_7956)
1037 sc->sc_maxses = 32768 / ctxsize;
1038 else
1039 sc->sc_maxses = 1 +
1040 ((sc->sc_ramsize - 32768) / ctxsize);
1041 }
1042 else
1043 sc->sc_maxses = sc->sc_ramsize / 16384;
1044
1045 if (sc->sc_maxses > 2048)
1046 sc->sc_maxses = 2048;
1047 }
1048
1049 /*
1050 * Determine ram type (sram or dram). Board should be just out of a reset
1051 * state when this is called.
1052 */
1053 static int
1054 hifn_ramtype(struct hifn_softc *sc)
1055 {
1056 u_int8_t data[8], dataexpect[8];
1057 int i;
1058
1059 for (i = 0; i < sizeof(data); i++)
1060 data[i] = dataexpect[i] = 0x55;
1061 if (hifn_writeramaddr(sc, 0, data))
1062 return (-1);
1063 if (hifn_readramaddr(sc, 0, data))
1064 return (-1);
1065 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1066 sc->sc_drammodel = 1;
1067 return (0);
1068 }
1069
1070 for (i = 0; i < sizeof(data); i++)
1071 data[i] = dataexpect[i] = 0xaa;
1072 if (hifn_writeramaddr(sc, 0, data))
1073 return (-1);
1074 if (hifn_readramaddr(sc, 0, data))
1075 return (-1);
1076 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1077 sc->sc_drammodel = 1;
1078 return (0);
1079 }
1080
1081 return (0);
1082 }
1083
1084 #define HIFN_SRAM_MAX (32 << 20)
1085 #define HIFN_SRAM_STEP_SIZE 16384
1086 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1087
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Fill both buffers with a fixed background pattern; the first
	 * sizeof(i) bytes are later overwritten with the step index. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Stamp each 16K step with its own index, highest address
	 * first — presumably so that, when addresses wrap on a smaller
	 * part, the aliased location is overwritten by a later
	 * (lower-index) write and the readback below mismatches. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Read back in ascending order, growing sc_ramsize past every
	 * step that still holds its own stamp.  The first read failure
	 * or mismatch ends the probe; sc_ramsize keeps the last good
	 * value. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1117
1118 /*
1119 * XXX For dram boards, one should really try all of the
1120 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1121 * is already set up correctly.
1122 */
1123 static int
1124 hifn_dramsize(struct hifn_softc *sc)
1125 {
1126 u_int32_t cnfg;
1127
1128 if (sc->sc_flags & HIFN_IS_7956) {
1129 /*
1130 * 7955/7956 have a fixed internal ram of only 32K.
1131 */
1132 sc->sc_ramsize = 32768;
1133 } else {
1134 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1135 HIFN_PUCNFG_DRAMMASK;
1136 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1137 }
1138 return (0);
1139 }
1140
/*
 * Reserve the next slot in each of the four descriptor rings (command,
 * source, destination, result) and return the reserved indices through
 * cmdp/srcp/dstp/resp.  Whenever a ring's producer index has reached
 * the ring size, the extra trailing descriptor is armed as a JUMP back
 * to slot 0 and the index wraps.  The caller must have verified that
 * the rings have room.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* Arm the jump descriptor and wrap to the ring head. */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	/* NOTE(review): the cleanup index is snapped to the producer
	 * index — presumably safe because this path is only used by the
	 * RAM-probe helpers while the rings are otherwise idle; confirm
	 * against the ring draining in hifn_intr. */
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1187
1188 static int
1189 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1190 {
1191 struct hifn_dma *dma = sc->sc_dma;
1192 struct hifn_base_command wc;
1193 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1194 int r, cmdi, resi, srci, dsti;
1195
1196 wc.masks = htole16(3 << 13);
1197 wc.session_num = htole16(addr >> 14);
1198 wc.total_source_count = htole16(8);
1199 wc.total_dest_count = htole16(addr & 0x3fff);
1200
1201 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1202
1203 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1204 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1205 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1206
1207 /* build write command */
1208 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1209 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1210 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1211
1212 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1213 + offsetof(struct hifn_dma, test_src));
1214 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1215 + offsetof(struct hifn_dma, test_dst));
1216
1217 dma->cmdr[cmdi].l = htole32(16 | masks);
1218 dma->srcr[srci].l = htole32(8 | masks);
1219 dma->dstr[dsti].l = htole32(4 | masks);
1220 dma->resr[resi].l = htole32(4 | masks);
1221
1222 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1223 0, sc->sc_dmamap->dm_mapsize,
1224 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1225
1226 for (r = 10000; r >= 0; r--) {
1227 DELAY(10);
1228 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1229 0, sc->sc_dmamap->dm_mapsize,
1230 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1231 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1232 break;
1233 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1234 0, sc->sc_dmamap->dm_mapsize,
1235 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1236 }
1237 if (r == 0) {
1238 printf("%s: writeramaddr -- "
1239 "result[%d](addr %d) still valid\n",
1240 sc->sc_dv.dv_xname, resi, addr);
1241 r = -1;
1242 return (-1);
1243 } else
1244 r = 0;
1245
1246 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1247 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1248 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1249
1250 return (r);
1251 }
1252
1253 static int
1254 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1255 {
1256 struct hifn_dma *dma = sc->sc_dma;
1257 struct hifn_base_command rc;
1258 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1259 int r, cmdi, srci, dsti, resi;
1260
1261 rc.masks = htole16(2 << 13);
1262 rc.session_num = htole16(addr >> 14);
1263 rc.total_source_count = htole16(addr & 0x3fff);
1264 rc.total_dest_count = htole16(8);
1265
1266 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1267
1268 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1269 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1270 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1271
1272 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1273 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1274
1275 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1276 offsetof(struct hifn_dma, test_src));
1277 dma->test_src = 0;
1278 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1279 offsetof(struct hifn_dma, test_dst));
1280 dma->test_dst = 0;
1281 dma->cmdr[cmdi].l = htole32(8 | masks);
1282 dma->srcr[srci].l = htole32(8 | masks);
1283 dma->dstr[dsti].l = htole32(8 | masks);
1284 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1285
1286 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1287 0, sc->sc_dmamap->dm_mapsize,
1288 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1289
1290 for (r = 10000; r >= 0; r--) {
1291 DELAY(10);
1292 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1293 0, sc->sc_dmamap->dm_mapsize,
1294 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1295 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1296 break;
1297 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1298 0, sc->sc_dmamap->dm_mapsize,
1299 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1300 }
1301 if (r == 0) {
1302 printf("%s: readramaddr -- "
1303 "result[%d](addr %d) still valid\n",
1304 sc->sc_dv.dv_xname, resi, addr);
1305 r = -1;
1306 } else {
1307 r = 0;
1308 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1309 }
1310
1311 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1312 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1313 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1314
1315 return (r);
1316 }
1317
1318 /*
1319 * Initialize the descriptor rings.
1320 */
1321 static void
1322 hifn_init_dma(struct hifn_softc *sc)
1323 {
1324 struct hifn_dma *dma = sc->sc_dma;
1325 int i;
1326
1327 hifn_set_retry(sc);
1328
1329 /* initialize static pointer values */
1330 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1331 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1332 offsetof(struct hifn_dma, command_bufs[i][0]));
1333 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1334 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1335 offsetof(struct hifn_dma, result_bufs[i][0]));
1336
1337 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1338 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1339 offsetof(struct hifn_dma, cmdr[0]));
1340 dma->srcr[HIFN_D_SRC_RSIZE].p =
1341 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1342 offsetof(struct hifn_dma, srcr[0]));
1343 dma->dstr[HIFN_D_DST_RSIZE].p =
1344 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1345 offsetof(struct hifn_dma, dstr[0]));
1346 dma->resr[HIFN_D_RES_RSIZE].p =
1347 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1348 offsetof(struct hifn_dma, resr[0]));
1349
1350 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1351 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1352 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1353 }
1354
1355 /*
1356 * Writes out the raw command buffer space. Returns the
1357 * command buffer size.
1358 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/* Base command: total source/destination lengths are split —
	 * low 16 bits into total_*_count, the overflow bits packed
	 * into session_num alongside the session id. */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	/* With slop, the sub-word tail is replaced by a full 32-bit
	 * word in the shared slop area; account for that here. */
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Engine sub-commands follow the base command in a fixed
	 * order: compression, then MAC, then crypto.  Each splits its
	 * source length the same 16-bit way as above. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* Key material follows the sub-commands: MAC key first, then
	 * the cipher key, each only when a NEW_KEY flag is set. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* The RC4 key is repeated to fill 256 bytes,
			 * followed by 4 zero bytes. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* The IV, when a new one is supplied, goes last; AES uses a
	 * larger IV than the DES-family ciphers. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* With no engine selected at all, pad with 8 zero bytes —
	 * presumably a minimum command length the chip requires;
	 * confirm against the 7751 command format documentation. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	/* Return the number of bytes written into 'buf'. */
	return (buf_pos - buf);
}
1488
1489 static int
1490 hifn_dmamap_aligned(bus_dmamap_t map)
1491 {
1492 int i;
1493
1494 for (i = 0; i < map->dm_nsegs; i++) {
1495 if (map->dm_segs[i].ds_addr & 3)
1496 return (0);
1497 if ((i != (map->dm_nsegs - 1)) &&
1498 (map->dm_segs[i].ds_len & 3))
1499 return (0);
1500 }
1501 return (1);
1502 }
1503
/*
 * Fill destination-ring descriptors for cmd->dst_map.  Every
 * descriptor except the final one carries only VALID|MASKDONEIRQ; the
 * final one adds HIFN_D_LAST.  When cmd->sloplen is non-zero, the
 * sub-word tail of the last segment is redirected into the shared
 * per-command slop word so the chip always writes whole 32-bit words.
 * Updates dma->dsti/dma->dstu and returns the new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments but the last are queued verbatim. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* Arm the jump descriptor and wrap. */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Word-multiple tail: the last segment is used as-is. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Redirect the trailing partial word into the slop
		 * slot for this command; presumably copied back to the
		 * caller's buffer on completion — see hifn_callback. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Any whole words left in the last segment still go
		 * straight to the caller's buffer first. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Queue the final (LAST-flagged) descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1575
/*
 * Queue one source-ring descriptor per DMA segment of cmd->src_map;
 * the final segment's descriptor carries HIFN_D_LAST.  Updates
 * dma->srci/dma->srcu and returns the new producer index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* Arm the jump descriptor and wrap to slot 0. */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1607
/*
 * Queue one crypto request on the card: DMA-map the source buffer
 * (mbuf chain or uio), arrange a destination mapping (allocating a
 * fresh, aligned mbuf chain when the source is unsuitable), serialize
 * the command, and load all four descriptor rings.  Returns 0 on
 * success or an errno; on failure, everything created here is
 * released via the goto-unwind labels at the bottom.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint __unused)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	/* Create and load the source DMA map from the caller's buffer. */
	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: operate in place, reusing the source
		 * map as the destination map; remember the sub-word
		 * tail length for the slop machinery. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* Cannot substitute a uio's backing store. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Misaligned mbuf source: build a fresh mbuf
			 * chain large enough to receive the whole
			 * payload as the destination. */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Append mbufs (clusters when large enough)
			 * until the chain covers totlen bytes. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* A separate destination map is only needed when we could not
	 * alias the source map above. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* Hand the buffers to the device before touching the rings. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring manipulation below must not race the interrupt handler. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Take a command slot, wrapping through the jump descriptor
	 * when the producer index has reached the end of the ring. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Queue the source segments and kick the source engine. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so hifn_intr can complete it from the
	 * matching result slot. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* The slop word for this command lives at index resi. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the watchdog countdown polled by hifn_tick. */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

	/* Unwind in reverse order of acquisition. */
err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1879
1880 static void
1881 hifn_tick(void *vsc)
1882 {
1883 struct hifn_softc *sc = vsc;
1884 int s;
1885
1886 s = splnet();
1887 if (sc->sc_active == 0) {
1888 struct hifn_dma *dma = sc->sc_dma;
1889 u_int32_t r = 0;
1890
1891 if (dma->cmdu == 0 && sc->sc_c_busy) {
1892 sc->sc_c_busy = 0;
1893 r |= HIFN_DMACSR_C_CTRL_DIS;
1894 CLR_LED(sc, HIFN_MIPSRST_LED0);
1895 }
1896 if (dma->srcu == 0 && sc->sc_s_busy) {
1897 sc->sc_s_busy = 0;
1898 r |= HIFN_DMACSR_S_CTRL_DIS;
1899 CLR_LED(sc, HIFN_MIPSRST_LED1);
1900 }
1901 if (dma->dstu == 0 && sc->sc_d_busy) {
1902 sc->sc_d_busy = 0;
1903 r |= HIFN_DMACSR_D_CTRL_DIS;
1904 }
1905 if (dma->resu == 0 && sc->sc_r_busy) {
1906 sc->sc_r_busy = 0;
1907 r |= HIFN_DMACSR_R_CTRL_DIS;
1908 CLR_LED(sc, HIFN_MIPSRST_LED2);
1909 }
1910 if (r)
1911 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1912 }
1913 else
1914 sc->sc_active--;
1915 splx(s);
1916 #ifdef __OpenBSD__
1917 timeout_add(&sc->sc_tickto, hz);
1918 #else
1919 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1920 #endif
1921 }
1922
/*
 * DMA interrupt handler: acknowledge the status bits we are watching,
 * handle overrun/abort conditions, then drain completed descriptors
 * from the result, source and command rings.  Returns 1 if the
 * interrupt was ours, 0 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Ack only the bits we were actually watching. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* Clear the processing-unit interrupt by writing it back. */
	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	/* Ack public-key engine completion on parts that have one. */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any engine abort means the rings are in an unknown state;
	 * reset everything rather than try to recover piecemeal. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip: stop draining here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor; it has
		 * no command attached, so skip the completion for it. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* Stepping over the jump slot does not consume a resu
		 * count; only real descriptors decrement it. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Drain consumed source descriptors the same way. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* And consumed command descriptors. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
2052
2053 /*
2054 * Allocate a new 'session' and return an encoded session id. 'sidp'
2055 * contains our registration id, and should contain an encoded session
2056 * id on successful allocation.
2057 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* Walk the requested algorithm chain; at most one MAC, one
	 * cipher and one compressor may be combined per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			arc4randbytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef	HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	/* A session must request at least one operation. */
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* Encode device unit and slot index into the session id handed
	 * back to the opencrypto framework, and claim the slot. */
	*sidp = HIFN_SID(device_unit(&sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2136
2137 /*
2138 * Deallocate a session.
2139 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2140 * XXX to blow away any keys already stored there.
2141 */
2142 static int
2143 hifn_freesession(void *arg, u_int64_t tid)
2144 {
2145 struct hifn_softc *sc = arg;
2146 int session;
2147 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2148
2149 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2150 if (sc == NULL)
2151 return (EINVAL);
2152
2153 session = HIFN_SESSION(sid);
2154 if (session >= sc->sc_maxses)
2155 return (EINVAL);
2156
2157 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2158 return (0);
2159 }
2160
2161 static int
2162 hifn_process(void *arg, struct cryptop *crp, int hint)
2163 {
2164 struct hifn_softc *sc = arg;
2165 struct hifn_command *cmd = NULL;
2166 int session, err, ivlen;
2167 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2168
2169 if (crp == NULL || crp->crp_callback == NULL) {
2170 hifnstats.hst_invalid++;
2171 return (EINVAL);
2172 }
2173 session = HIFN_SESSION(crp->crp_sid);
2174
2175 if (sc == NULL || session >= sc->sc_maxses) {
2176 err = EINVAL;
2177 goto errout;
2178 }
2179
2180 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2181 M_DEVBUF, M_NOWAIT|M_ZERO);
2182 if (cmd == NULL) {
2183 hifnstats.hst_nomem++;
2184 err = ENOMEM;
2185 goto errout;
2186 }
2187
2188 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2189 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2190 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2191 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2192 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2193 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2194 } else {
2195 err = EINVAL;
2196 goto errout; /* XXX we don't handle contiguous buffers! */
2197 }
2198
2199 crd1 = crp->crp_desc;
2200 if (crd1 == NULL) {
2201 err = EINVAL;
2202 goto errout;
2203 }
2204 crd2 = crd1->crd_next;
2205
2206 if (crd2 == NULL) {
2207 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2208 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2209 crd1->crd_alg == CRYPTO_SHA1 ||
2210 crd1->crd_alg == CRYPTO_MD5) {
2211 maccrd = crd1;
2212 enccrd = NULL;
2213 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2214 crd1->crd_alg == CRYPTO_3DES_CBC ||
2215 crd1->crd_alg == CRYPTO_AES_CBC ||
2216 crd1->crd_alg == CRYPTO_ARC4) {
2217 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2218 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2219 maccrd = NULL;
2220 enccrd = crd1;
2221 #ifdef HAVE_CRYPTO_LZS
2222 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2223 return (hifn_compression(sc, crp, cmd));
2224 #endif
2225 } else {
2226 err = EINVAL;
2227 goto errout;
2228 }
2229 } else {
2230 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2231 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2232 crd1->crd_alg == CRYPTO_MD5 ||
2233 crd1->crd_alg == CRYPTO_SHA1) &&
2234 (crd2->crd_alg == CRYPTO_DES_CBC ||
2235 crd2->crd_alg == CRYPTO_3DES_CBC ||
2236 crd2->crd_alg == CRYPTO_AES_CBC ||
2237 crd2->crd_alg == CRYPTO_ARC4) &&
2238 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2239 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2240 maccrd = crd1;
2241 enccrd = crd2;
2242 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2243 crd1->crd_alg == CRYPTO_ARC4 ||
2244 crd1->crd_alg == CRYPTO_3DES_CBC ||
2245 crd1->crd_alg == CRYPTO_AES_CBC) &&
2246 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2247 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2248 crd2->crd_alg == CRYPTO_MD5 ||
2249 crd2->crd_alg == CRYPTO_SHA1) &&
2250 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2251 enccrd = crd1;
2252 maccrd = crd2;
2253 } else {
2254 /*
2255 * We cannot order the 7751 as requested
2256 */
2257 err = EINVAL;
2258 goto errout;
2259 }
2260 }
2261
2262 if (enccrd) {
2263 cmd->enccrd = enccrd;
2264 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2265 switch (enccrd->crd_alg) {
2266 case CRYPTO_ARC4:
2267 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2268 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2269 != sc->sc_sessions[session].hs_prev_op)
2270 sc->sc_sessions[session].hs_state =
2271 HS_STATE_USED;
2272 break;
2273 case CRYPTO_DES_CBC:
2274 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2275 HIFN_CRYPT_CMD_MODE_CBC |
2276 HIFN_CRYPT_CMD_NEW_IV;
2277 break;
2278 case CRYPTO_3DES_CBC:
2279 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2280 HIFN_CRYPT_CMD_MODE_CBC |
2281 HIFN_CRYPT_CMD_NEW_IV;
2282 break;
2283 case CRYPTO_AES_CBC:
2284 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2285 HIFN_CRYPT_CMD_MODE_CBC |
2286 HIFN_CRYPT_CMD_NEW_IV;
2287 break;
2288 default:
2289 err = EINVAL;
2290 goto errout;
2291 }
2292 if (enccrd->crd_alg != CRYPTO_ARC4) {
2293 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2294 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2295 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2296 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2297 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2298 else
2299 bcopy(sc->sc_sessions[session].hs_iv,
2300 cmd->iv, ivlen);
2301
2302 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2303 == 0) {
2304 if (crp->crp_flags & CRYPTO_F_IMBUF)
2305 m_copyback(cmd->srcu.src_m,
2306 enccrd->crd_inject,
2307 ivlen, cmd->iv);
2308 else if (crp->crp_flags & CRYPTO_F_IOV)
2309 cuio_copyback(cmd->srcu.src_io,
2310 enccrd->crd_inject,
2311 ivlen, cmd->iv);
2312 }
2313 } else {
2314 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2315 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2316 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2317 m_copydata(cmd->srcu.src_m,
2318 enccrd->crd_inject, ivlen, cmd->iv);
2319 else if (crp->crp_flags & CRYPTO_F_IOV)
2320 cuio_copydata(cmd->srcu.src_io,
2321 enccrd->crd_inject, ivlen, cmd->iv);
2322 }
2323 }
2324
2325 cmd->ck = enccrd->crd_key;
2326 cmd->cklen = enccrd->crd_klen >> 3;
2327
2328 /*
2329 * Need to specify the size for the AES key in the masks.
2330 */
2331 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2332 HIFN_CRYPT_CMD_ALG_AES) {
2333 switch (cmd->cklen) {
2334 case 16:
2335 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2336 break;
2337 case 24:
2338 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2339 break;
2340 case 32:
2341 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2342 break;
2343 default:
2344 err = EINVAL;
2345 goto errout;
2346 }
2347 }
2348
2349 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2350 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2351 }
2352
2353 if (maccrd) {
2354 cmd->maccrd = maccrd;
2355 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2356
2357 switch (maccrd->crd_alg) {
2358 case CRYPTO_MD5:
2359 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2360 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2361 HIFN_MAC_CMD_POS_IPSEC;
2362 break;
2363 case CRYPTO_MD5_HMAC:
2364 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2365 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2366 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2367 break;
2368 case CRYPTO_SHA1:
2369 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2370 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2371 HIFN_MAC_CMD_POS_IPSEC;
2372 break;
2373 case CRYPTO_SHA1_HMAC:
2374 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2375 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2376 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2377 break;
2378 }
2379
2380 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2381 maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2382 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2383 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2384 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2385 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2386 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2387 }
2388 }
2389
2390 cmd->crp = crp;
2391 cmd->session_num = session;
2392 cmd->softc = sc;
2393
2394 err = hifn_crypto(sc, cmd, crp, hint);
2395 if (err == 0) {
2396 if (enccrd)
2397 sc->sc_sessions[session].hs_prev_op =
2398 enccrd->crd_flags & CRD_F_ENCRYPT;
2399 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2400 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2401 return 0;
2402 } else if (err == ERESTART) {
2403 /*
2404 * There weren't enough resources to dispatch the request
2405 * to the part. Notify the caller so they'll requeue this
2406 * request and resubmit it again soon.
2407 */
2408 #ifdef HIFN_DEBUG
2409 if (hifn_debug)
2410 printf(sc->sc_dv.dv_xname, "requeue request\n");
2411 #endif
2412 free(cmd, M_DEVBUF);
2413 sc->sc_needwakeup |= CRYPTO_SYMQ;
2414 return (err);
2415 }
2416
2417 errout:
2418 if (cmd != NULL)
2419 free(cmd, M_DEVBUF);
2420 if (err == EINVAL)
2421 hifnstats.hst_invalid++;
2422 else
2423 hifnstats.hst_nomem++;
2424 crp->crp_etype = err;
2425 crypto_done(crp);
2426 return (0);
2427 }
2428
/*
 * Abort all in-flight operations after a fatal DMA error: retire every
 * outstanding entry on the result ring (salvaging those the chip had
 * already completed), force key re-upload for active sessions, then
 * reset and reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the u outstanding result-ring slots starting at resk. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Chip still owned the descriptor: fail it. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * NOTE(review): done without checking CRYPTO_F_IMBUF;
			 * presumably src != dst only happens for mbuf
			 * requests — confirm against hifn_crypto().
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2502
/*
 * Completion handler for a finished crypt/MAC command.  Syncs the DMA
 * maps, fixes up the destination mbuf chain when the hardware wrote to
 * a new chain, copies back any staged "slop" tail bytes, retires the
 * consumed destination-ring descriptors, saves the trailing ciphertext
 * block as the next session IV for CBC encrypt, copies out the MAC
 * digest, and finally releases all resources and completes the request.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * The result landed in a new chain: trim it to
			 * the source length, hand it to the caller, and
			 * free the original source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy back the sub-word tail staged in the slop area. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/*
	 * Retire completed destination descriptors; stop at the first
	 * one still owned by the chip (HIFN_D_VALID).  Slot
	 * HIFN_D_DST_RSIZE is the jump descriptor, not counted in u.
	 */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * After a CBC encrypt, save the last ciphertext block as the
	 * IV for the session's next operation.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* The digest follows the base/comp/mac result headers. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2636
2637 #ifdef HAVE_CRYPTO_LZS
2638
2639 static int
2640 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2641 struct hifn_command *cmd)
2642 {
2643 struct cryptodesc *crd = crp->crp_desc;
2644 int s, err = 0;
2645
2646 cmd->compcrd = crd;
2647 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2648
2649 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2650 /*
2651 * XXX can only handle mbufs right now since we can
2652 * XXX dynamically resize them.
2653 */
2654 err = EINVAL;
2655 return (ENOMEM);
2656 }
2657
2658 if ((crd->crd_flags & CRD_F_COMP) == 0)
2659 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2660 if (crd->crd_alg == CRYPTO_LZS_COMP)
2661 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2662 HIFN_COMP_CMD_CLEARHIST;
2663
2664 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2665 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2666 err = ENOMEM;
2667 goto fail;
2668 }
2669
2670 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2671 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2672 err = ENOMEM;
2673 goto fail;
2674 }
2675
2676 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2677 int len;
2678
2679 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2680 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2681 err = ENOMEM;
2682 goto fail;
2683 }
2684
2685 len = cmd->src_map->dm_mapsize / MCLBYTES;
2686 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2687 len++;
2688 len *= MCLBYTES;
2689
2690 if ((crd->crd_flags & CRD_F_COMP) == 0)
2691 len *= 4;
2692
2693 if (len > HIFN_MAX_DMALEN)
2694 len = HIFN_MAX_DMALEN;
2695
2696 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2697 if (cmd->dstu.dst_m == NULL) {
2698 err = ENOMEM;
2699 goto fail;
2700 }
2701
2702 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2703 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2704 err = ENOMEM;
2705 goto fail;
2706 }
2707 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2708 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2709 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2710 err = ENOMEM;
2711 goto fail;
2712 }
2713 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2714 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2715 err = ENOMEM;
2716 goto fail;
2717 }
2718 }
2719
2720 if (cmd->src_map == cmd->dst_map)
2721 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2722 0, cmd->src_map->dm_mapsize,
2723 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2724 else {
2725 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2726 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2727 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2728 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2729 }
2730
2731 cmd->crp = crp;
2732 /*
2733 * Always use session 0. The modes of compression we use are
2734 * stateless and there is always at least one compression
2735 * context, zero.
2736 */
2737 cmd->session_num = 0;
2738 cmd->softc = sc;
2739
2740 s = splnet();
2741 err = hifn_compress_enter(sc, cmd);
2742 splx(s);
2743
2744 if (err != 0)
2745 goto fail;
2746 return (0);
2747
2748 fail:
2749 if (cmd->dst_map != NULL) {
2750 if (cmd->dst_map->dm_nsegs > 0)
2751 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2752 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2753 }
2754 if (cmd->src_map != NULL) {
2755 if (cmd->src_map->dm_nsegs > 0)
2756 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2757 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2758 }
2759 free(cmd, M_DEVBUF);
2760 if (err == EINVAL)
2761 hifnstats.hst_invalid++;
2762 else
2763 hifnstats.hst_nomem++;
2764 crp->crp_etype = err;
2765 crypto_done(crp);
2766 return (0);
2767 }
2768
2769 /*
2770 * must be called at splnet()
2771 */
2772 static int
2773 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2774 {
2775 struct hifn_dma *dma = sc->sc_dma;
2776 int cmdi, resi;
2777 u_int32_t cmdlen;
2778
2779 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2780 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2781 return (ENOMEM);
2782
2783 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2784 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2785 return (ENOMEM);
2786
2787 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2788 dma->cmdi = 0;
2789 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2790 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2791 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2792 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2793 }
2794 cmdi = dma->cmdi++;
2795 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2796 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2797
2798 /* .p for command/result already set */
2799 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2800 HIFN_D_MASKDONEIRQ);
2801 HIFN_CMDR_SYNC(sc, cmdi,
2802 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2803 dma->cmdu++;
2804 if (sc->sc_c_busy == 0) {
2805 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2806 sc->sc_c_busy = 1;
2807 SET_LED(sc, HIFN_MIPSRST_LED0);
2808 }
2809
2810 /*
2811 * We don't worry about missing an interrupt (which a "command wait"
2812 * interrupt salvages us from), unless there is more than one command
2813 * in the queue.
2814 */
2815 if (dma->cmdu > 1) {
2816 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2817 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2818 }
2819
2820 hifnstats.hst_ipackets++;
2821 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2822
2823 hifn_dmamap_load_src(sc, cmd);
2824 if (sc->sc_s_busy == 0) {
2825 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2826 sc->sc_s_busy = 1;
2827 SET_LED(sc, HIFN_MIPSRST_LED1);
2828 }
2829
2830 /*
2831 * Unlike other descriptors, we don't mask done interrupt from
2832 * result descriptor.
2833 */
2834 if (dma->resi == HIFN_D_RES_RSIZE) {
2835 dma->resi = 0;
2836 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2837 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2838 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2839 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2840 }
2841 resi = dma->resi++;
2842 dma->hifn_commands[resi] = cmd;
2843 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2844 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2845 HIFN_D_VALID | HIFN_D_LAST);
2846 HIFN_RESR_SYNC(sc, resi,
2847 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2848 dma->resu++;
2849 if (sc->sc_r_busy == 0) {
2850 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2851 sc->sc_r_busy = 1;
2852 SET_LED(sc, HIFN_MIPSRST_LED2);
2853 }
2854
2855 if (cmd->sloplen)
2856 cmd->slopidx = resi;
2857
2858 hifn_dmamap_load_dst(sc, cmd);
2859
2860 if (sc->sc_d_busy == 0) {
2861 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2862 sc->sc_d_busy = 1;
2863 }
2864 sc->sc_active = 5;
2865 cmd->cmd_callback = hifn_callback_comp;
2866 return (0);
2867 }
2868
2869 static void
2870 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2871 u_int8_t *resbuf)
2872 {
2873 struct hifn_base_result baseres;
2874 struct cryptop *crp = cmd->crp;
2875 struct hifn_dma *dma = sc->sc_dma;
2876 struct mbuf *m;
2877 int err = 0, i, u;
2878 u_int32_t olen;
2879 bus_size_t dstsize;
2880
2881 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2882 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2883 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2884 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2885
2886 dstsize = cmd->dst_map->dm_mapsize;
2887 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2888
2889 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2890
2891 i = dma->dstk; u = dma->dstu;
2892 while (u != 0) {
2893 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2894 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2895 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2896 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2897 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2898 offsetof(struct hifn_dma, dstr[i]),
2899 sizeof(struct hifn_desc),
2900 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2901 break;
2902 }
2903 if (++i == (HIFN_D_DST_RSIZE + 1))
2904 i = 0;
2905 else
2906 u--;
2907 }
2908 dma->dstk = i; dma->dstu = u;
2909
2910 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2911 bus_size_t xlen;
2912
2913 xlen = dstsize;
2914
2915 m_freem(cmd->dstu.dst_m);
2916
2917 if (xlen == HIFN_MAX_DMALEN) {
2918 /* We've done all we can. */
2919 err = E2BIG;
2920 goto out;
2921 }
2922
2923 xlen += MCLBYTES;
2924
2925 if (xlen > HIFN_MAX_DMALEN)
2926 xlen = HIFN_MAX_DMALEN;
2927
2928 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2929 cmd->srcu.src_m);
2930 if (cmd->dstu.dst_m == NULL) {
2931 err = ENOMEM;
2932 goto out;
2933 }
2934 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2935 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2936 err = ENOMEM;
2937 goto out;
2938 }
2939
2940 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2941 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2942 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2943 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2944
2945 /* already at splnet... */
2946 err = hifn_compress_enter(sc, cmd);
2947 if (err != 0)
2948 goto out;
2949 return;
2950 }
2951
2952 olen = dstsize - (letoh16(baseres.dst_cnt) |
2953 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2954 HIFN_BASE_RES_DSTLEN_S) << 16));
2955
2956 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2957
2958 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2959 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2960 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2961
2962 m = cmd->dstu.dst_m;
2963 if (m->m_flags & M_PKTHDR)
2964 m->m_pkthdr.len = olen;
2965 crp->crp_buf = (caddr_t)m;
2966 for (; m != NULL; m = m->m_next) {
2967 if (olen >= m->m_len)
2968 olen -= m->m_len;
2969 else {
2970 m->m_len = olen;
2971 olen = 0;
2972 }
2973 }
2974
2975 m_freem(cmd->srcu.src_m);
2976 free(cmd, M_DEVBUF);
2977 crp->crp_etype = 0;
2978 crypto_done(crp);
2979 return;
2980
2981 out:
2982 if (cmd->dst_map != NULL) {
2983 if (cmd->src_map->dm_nsegs != 0)
2984 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2985 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2986 }
2987 if (cmd->src_map != NULL) {
2988 if (cmd->src_map->dm_nsegs != 0)
2989 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2990 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2991 }
2992 if (cmd->dstu.dst_m != NULL)
2993 m_freem(cmd->dstu.dst_m);
2994 free(cmd, M_DEVBUF);
2995 crp->crp_etype = err;
2996 crypto_done(crp);
2997 }
2998
2999 static struct mbuf *
3000 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3001 {
3002 int len;
3003 struct mbuf *m, *m0, *mlast;
3004
3005 if (mtemplate->m_flags & M_PKTHDR) {
3006 len = MHLEN;
3007 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3008 } else {
3009 len = MLEN;
3010 MGET(m0, M_DONTWAIT, MT_DATA);
3011 }
3012 if (m0 == NULL)
3013 return (NULL);
3014 if (len == MHLEN)
3015 M_DUP_PKTHDR(m0, mtemplate);
3016 MCLGET(m0, M_DONTWAIT);
3017 if (!(m0->m_flags & M_EXT))
3018 m_freem(m0);
3019 len = MCLBYTES;
3020
3021 totlen -= len;
3022 m0->m_pkthdr.len = m0->m_len = len;
3023 mlast = m0;
3024
3025 while (totlen > 0) {
3026 MGET(m, M_DONTWAIT, MT_DATA);
3027 if (m == NULL) {
3028 m_freem(m0);
3029 return (NULL);
3030 }
3031 MCLGET(m, M_DONTWAIT);
3032 if (!(m->m_flags & M_EXT)) {
3033 m_freem(m0);
3034 return (NULL);
3035 }
3036 len = MCLBYTES;
3037 m->m_len = len;
3038 if (m0->m_flags & M_PKTHDR)
3039 m0->m_pkthdr.len += len;
3040 totlen -= len;
3041
3042 mlast->m_next = m;
3043 mlast = m;
3044 }
3045
3046 return (m0);
3047 }
3048 #endif /* HAVE_CRYPTO_LZS */
3049
3050 static void
3051 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3052 {
3053 /*
3054 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3055 * and Group 1 registers; avoid conditions that could create
3056 * burst writes by doing a read in between the writes.
3057 */
3058 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3059 if (sc->sc_waw_lastgroup == reggrp &&
3060 sc->sc_waw_lastreg == reg - 4) {
3061 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3062 }
3063 sc->sc_waw_lastgroup = reggrp;
3064 sc->sc_waw_lastreg = reg;
3065 }
3066 if (reggrp == 0)
3067 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3068 else
3069 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3070
3071 }
3072
3073 static u_int32_t
3074 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3075 {
3076 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3077 sc->sc_waw_lastgroup = -1;
3078 sc->sc_waw_lastreg = 1;
3079 }
3080 if (reggrp == 0)
3081 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3082 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3083 }
3084