hifn7751.c revision 1.27 1 /* $NetBSD: hifn7751.c,v 1.27 2005/10/16 00:14:22 tls Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.27 2005/10/16 00:14:22 tls Exp $");
52
53 #include "rnd.h"
54 #include "opencrypto.h"
55
56 #if NRND == 0 || NOPENCRYPTO == 0
57 #error hifn7751 requires rnd and opencrypto pseudo-devices
58 #endif
59
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/proc.h>
64 #include <sys/errno.h>
65 #include <sys/malloc.h>
66 #include <sys/kernel.h>
67 #include <sys/mbuf.h>
68 #include <sys/device.h>
69
70 #include <uvm/uvm_extern.h>
71
72
73 #ifdef __OpenBSD__
74 #include <crypto/crypto.h>
75 #include <dev/rndvar.h>
76 #else
77 #include <opencrypto/cryptodev.h>
78 #include <sys/rnd.h>
79 #endif
80
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcidevs.h>
84
85 #include <dev/pci/hifn7751reg.h>
86 #include <dev/pci/hifn7751var.h>
87
88 #undef HIFN_DEBUG
89
90 #ifdef __NetBSD__
91 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
92 #endif
93
94 #ifdef HIFN_DEBUG
95 extern int hifn_debug; /* patchable */
96 int hifn_debug = 1;
97 #endif
98
99 #ifdef __OpenBSD__
100 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
101 #endif
102
103 /*
104 * Prototypes and count for the pci_device structure
105 */
106 #ifdef __OpenBSD__
107 static int hifn_probe((struct device *, void *, void *);
108 #else
109 static int hifn_probe(struct device *, struct cfdata *, void *);
110 #endif
111 static void hifn_attach(struct device *, struct device *, void *);
112
113 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
114 hifn_probe, hifn_attach, NULL, NULL);
115
116 #ifdef __OpenBSD__
117 struct cfdriver hifn_cd = {
118 0, "hifn", DV_DULL
119 };
120 #endif
121
122 static void hifn_reset_board(struct hifn_softc *, int);
123 static void hifn_reset_puc(struct hifn_softc *);
124 static void hifn_puc_wait(struct hifn_softc *);
125 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
126 static void hifn_set_retry(struct hifn_softc *);
127 static void hifn_init_dma(struct hifn_softc *);
128 static void hifn_init_pci_registers(struct hifn_softc *);
129 static int hifn_sramsize(struct hifn_softc *);
130 static int hifn_dramsize(struct hifn_softc *);
131 static int hifn_ramtype(struct hifn_softc *);
132 static void hifn_sessions(struct hifn_softc *);
133 static int hifn_intr(void *);
134 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
135 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
136 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
137 static int hifn_freesession(void*, u_int64_t);
138 static int hifn_process(void*, struct cryptop *, int);
139 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
140 u_int8_t *);
141 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
142 struct cryptop*, int);
143 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
144 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
145 static int hifn_dmamap_aligned(bus_dmamap_t);
146 static int hifn_dmamap_load_src(struct hifn_softc *,
147 struct hifn_command *);
148 static int hifn_dmamap_load_dst(struct hifn_softc *,
149 struct hifn_command *);
150 static int hifn_init_pubrng(struct hifn_softc *);
151 static void hifn_rng(void *);
152 static void hifn_tick(void *);
153 static void hifn_abort(struct hifn_softc *);
154 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
155 int *);
156 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
157 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
158 #ifdef HAVE_CRYPTO_LZS
159 static int hifn_compression(struct hifn_softc *, struct cryptop *,
160 struct hifn_command *);
161 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
162 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
163 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
164 u_int8_t *);
165 #endif /* HAVE_CRYPTO_LZS */
166
167
168 struct hifn_stats hifnstats;
169
/*
 * Table of PCI IDs this driver attaches to.  hifn_flags seeds
 * sc_flags at attach time; the table is terminated by the entry
 * whose hifn_name is NULL.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;	/* PCI vendor ID */
	pci_product_id_t	hifn_product;	/* PCI product ID */
	int			hifn_flags;	/* HIFN_* capability flags */
	const char		*hifn_name;	/* model name for attach banner */
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
216
217 static const struct hifn_product *
218 hifn_lookup(const struct pci_attach_args *pa)
219 {
220 const struct hifn_product *hp;
221
222 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
223 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
224 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
225 return (hp);
226 }
227 return (NULL);
228 }
229
230 static int
231 hifn_probe(struct device *parent, struct cfdata *match, void *aux)
232 {
233 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
234
235 if (hifn_lookup(pa) != NULL)
236 return (1);
237
238 return (0);
239 }
240
241 static void
242 hifn_attach(struct device *parent, struct device *self, void *aux)
243 {
244 struct hifn_softc *sc = (struct hifn_softc *)self;
245 struct pci_attach_args *pa = aux;
246 const struct hifn_product *hp;
247 pci_chipset_tag_t pc = pa->pa_pc;
248 pci_intr_handle_t ih;
249 const char *intrstr = NULL;
250 const char *hifncap;
251 char rbase;
252 bus_size_t iosize0, iosize1;
253 u_int32_t cmd;
254 u_int16_t ena;
255 bus_dma_segment_t seg;
256 bus_dmamap_t dmamap;
257 int rseg;
258 caddr_t kva;
259
260 hp = hifn_lookup(pa);
261 if (hp == NULL) {
262 printf("\n");
263 panic("hifn_attach: impossible");
264 }
265
266 aprint_naive(": Crypto processor\n");
267 aprint_normal(": %s, rev. %d\n", hp->hifn_name,
268 PCI_REVISION(pa->pa_class));
269
270 sc->sc_pci_pc = pa->pa_pc;
271 sc->sc_pci_tag = pa->pa_tag;
272
273 sc->sc_flags = hp->hifn_flags;
274
275 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
276 cmd |= PCI_COMMAND_MASTER_ENABLE;
277 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
278
279 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
280 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
281 aprint_error("%s: can't map mem space %d\n",
282 sc->sc_dv.dv_xname, 0);
283 return;
284 }
285
286 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
287 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
288 aprint_error("%s: can't find mem space %d\n",
289 sc->sc_dv.dv_xname, 1);
290 goto fail_io0;
291 }
292
293 hifn_set_retry(sc);
294
295 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
296 sc->sc_waw_lastgroup = -1;
297 sc->sc_waw_lastreg = 1;
298 }
299
300 sc->sc_dmat = pa->pa_dmat;
301 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
302 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
303 aprint_error("%s: can't alloc DMA buffer\n",
304 sc->sc_dv.dv_xname);
305 goto fail_io1;
306 }
307 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
308 BUS_DMA_NOWAIT)) {
309 aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
310 sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
311 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
312 goto fail_io1;
313 }
314 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
315 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
316 aprint_error("%s: can't create DMA map\n",
317 sc->sc_dv.dv_xname);
318 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
319 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
320 goto fail_io1;
321 }
322 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
323 NULL, BUS_DMA_NOWAIT)) {
324 aprint_error("%s: can't load DMA map\n",
325 sc->sc_dv.dv_xname);
326 bus_dmamap_destroy(sc->sc_dmat, dmamap);
327 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
328 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
329 goto fail_io1;
330 }
331 sc->sc_dmamap = dmamap;
332 sc->sc_dma = (struct hifn_dma *)kva;
333 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
334
335 hifn_reset_board(sc, 0);
336
337 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
338 aprint_error("%s: crypto enabling failed\n",
339 sc->sc_dv.dv_xname);
340 goto fail_mem;
341 }
342 hifn_reset_puc(sc);
343
344 hifn_init_dma(sc);
345 hifn_init_pci_registers(sc);
346
347 /* XXX can't dynamically determine ram type for 795x; force dram */
348 if (sc->sc_flags & HIFN_IS_7956)
349 sc->sc_drammodel = 1;
350 else if (hifn_ramtype(sc))
351 goto fail_mem;
352
353 if (sc->sc_drammodel == 0)
354 hifn_sramsize(sc);
355 else
356 hifn_dramsize(sc);
357
358 /*
359 * Workaround for NetSec 7751 rev A: half ram size because two
360 * of the address lines were left floating
361 */
362 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
363 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
364 PCI_REVISION(pa->pa_class) == 0x61)
365 sc->sc_ramsize >>= 1;
366
367 if (pci_intr_map(pa, &ih)) {
368 aprint_error("%s: couldn't map interrupt\n",
369 sc->sc_dv.dv_xname);
370 goto fail_mem;
371 }
372 intrstr = pci_intr_string(pc, ih);
373 #ifdef __OpenBSD__
374 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
375 self->dv_xname);
376 #else
377 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
378 #endif
379 if (sc->sc_ih == NULL) {
380 aprint_error("%s: couldn't establish interrupt\n",
381 sc->sc_dv.dv_xname);
382 if (intrstr != NULL)
383 aprint_normal(" at %s", intrstr);
384 aprint_normal("\n");
385 goto fail_mem;
386 }
387
388 hifn_sessions(sc);
389
390 rseg = sc->sc_ramsize / 1024;
391 rbase = 'K';
392 if (sc->sc_ramsize >= (1024 * 1024)) {
393 rbase = 'M';
394 rseg /= 1024;
395 }
396 aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
397 sc->sc_dv.dv_xname, hifncap, rseg, rbase,
398 sc->sc_drammodel ? 'd' : 's', intrstr);
399
400 sc->sc_cid = crypto_get_driverid(0);
401 if (sc->sc_cid < 0) {
402 aprint_error("%s: couldn't get crypto driver id\n",
403 sc->sc_dv.dv_xname);
404 goto fail_intr;
405 }
406
407 WRITE_REG_0(sc, HIFN_0_PUCNFG,
408 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
409 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
410
411 switch (ena) {
412 case HIFN_PUSTAT_ENA_2:
413 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
414 hifn_newsession, hifn_freesession, hifn_process, sc);
415 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
416 hifn_newsession, hifn_freesession, hifn_process, sc);
417 if (sc->sc_flags & HIFN_HAS_AES)
418 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
419 hifn_newsession, hifn_freesession,
420 hifn_process, sc);
421 /*FALLTHROUGH*/
422 case HIFN_PUSTAT_ENA_1:
423 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
424 hifn_newsession, hifn_freesession, hifn_process, sc);
425 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
426 hifn_newsession, hifn_freesession, hifn_process, sc);
427 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
428 hifn_newsession, hifn_freesession, hifn_process, sc);
429 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
430 hifn_newsession, hifn_freesession, hifn_process, sc);
431 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
432 hifn_newsession, hifn_freesession, hifn_process, sc);
433 break;
434 }
435
436 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
437 sc->sc_dmamap->dm_mapsize,
438 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
439
440 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
441 hifn_init_pubrng(sc);
442
443 #ifdef __OpenBSD__
444 timeout_set(&sc->sc_tickto, hifn_tick, sc);
445 timeout_add(&sc->sc_tickto, hz);
446 #else
447 callout_init(&sc->sc_tickto);
448 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
449 #endif
450 return;
451
452 fail_intr:
453 pci_intr_disestablish(pc, sc->sc_ih);
454 fail_mem:
455 bus_dmamap_unload(sc->sc_dmat, dmamap);
456 bus_dmamap_destroy(sc->sc_dmat, dmamap);
457 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
458 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
459
460 /* Turn off DMA polling */
461 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
462 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
463
464 fail_io1:
465 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
466 fail_io0:
467 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
468 }
469
/*
 * Reset and enable the public-key and RNG units, where present
 * (HIFN_HAS_PUBLIC / HIFN_HAS_RNG).  Called once from hifn_attach().
 * Returns 0 on success, 1 if the public key unit fails to reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100 ms for the self-clearing reset bit. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/*
			 * 7811: drop the enable bit (if set) before writing
			 * the config, then re-enable.
			 */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4 ms, not the 0.4 s the
		 * paragraph above calls for (that would be DELAY(400000))
		 * -- confirm the intended warm-up time.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
				  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
#endif

		/* Harvest roughly 100 times per second (min. once/tick). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);			/* Sets callout/timeout at end */

	return (0);
}
561
/*
 * Periodic (and initial) RNG harvest.  Reads random words from the
 * chip and feeds them to the kernel entropy pool, then re-arms its
 * own callout/timeout.  vsc is the softc (callout argument).
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	/* Large buffer: one read per HIFN_RNG_BITSPER output bits needed. */
	u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD];
#else
	u_int32_t num[2];
#endif
	u_int32_t sts;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: drain up to 5 word-pairs from the RNG FIFO. */
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: give up for good (no re-arm
				 * happens either; we return early). */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);

			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
#ifdef __NetBSD__
			/* Credit entropy at the conservative
			 * HIFN_RNG_BITSPER output-bits-per-entropy-bit
			 * rate. */
			rnd_add_data(&sc->sc_rnd_source, num,
				     2 * sizeof(num[0]),
				     (2 * sizeof(num[0]) * NBBY) /
					 HIFN_RNG_BITSPER);
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
#ifdef __NetBSD__
		/* First time through, try to help fill the pool. */
		int nwords = sc->sc_rngfirst ?
		    sizeof(num) / sizeof(num[0]) : 4;
#else
		int nwords = 2;
#endif
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */


		if (sc->sc_rngfirst) {
			sc->sc_rngfirst = 0;
		}


		/* Read 8 samples per word kept; see comment above. */
		for(i = 0 ; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;	/* last of each 8 survives */
		}
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num,
			     nwords * sizeof(num[0]),
			     (nwords * sizeof(num[0]) * NBBY) /
				 HIFN_RNG_BITSPER);
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

	/* Re-arm for the next harvest in sc_rnghz ticks. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
}
676
677 static void
678 hifn_puc_wait(struct hifn_softc *sc)
679 {
680 int i;
681
682 for (i = 5000; i > 0; i--) {
683 DELAY(1);
684 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
685 break;
686 }
687 if (!i)
688 printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
689 }
690
/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	/* Wait (bounded) for the PU reset bit to clear. */
	hifn_puc_wait(sc);
}
701
/*
 * Clear the low 16 bits of the chip's PCI retry/TRDY timeout
 * configuration register (leaving the upper half untouched).
 * NOTE(review): presumably this disables the retry/TRDY timeouts
 * so long DMA bursts are not aborted -- confirm against the 7751
 * databook.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
711
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * "full" selects a full DMA-unit reset instead of the master
 * reset + PU reset sequence.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are stale after a reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	/* Reset clobbered the retry/TRDY timeout setting; redo it. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1 s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
765
/*
 * Advance the unlock-signature LFSR "cnt" steps from state "a" and
 * return the new state.  Each step shifts the register left one bit
 * and feeds back the parity of the bits selected by the polynomial
 * mask 0x80080125.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t tapped, parity;
	u_int i, b;

	for (i = 0; i < cnt; i++) {
		/* XOR together all tapped bits to get the feedback bit. */
		tapped = a & 0x80080125;
		parity = 0;
		for (b = 0; b < 32; b++)
			parity ^= (tapped >> b) & 1;

		a = parity ^ (a << 1);
	}

	return a;
}
787
/*
 * Per-card unlock keys, indexed by PCI vendor/product.  The 13
 * card_id bytes are fed through hifn_next_signature() by
 * hifn_enable_crypto() to unlock the engine.  In this source all
 * keys are zeros.
 */
struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} static const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
835
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock sequence is run to enable it.  The check is important,
 * as enabling crypto twice will lock the board.  Returns a string
 * naming the enabled capability level, or NULL on failure.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find the 13-byte unlock key for this exact PCI ID. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save registers we clobber; restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/*
	 * Unlock sequence: enter unlock mode, read the secret seed,
	 * then clock the key-derived signatures into SECRET2.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* 13 rounds: one per card_id byte (i runs 0..12 inclusive). */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the (hopefully raised) enable level. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
		    return ("3DES/AES");
		else
		    return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
940
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable rings, ack every pending status. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Build the interrupt-enable mask we will run with. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* PUCNFG/PLL: 795x lacks the external-RAM config bits. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1012
1013 /*
1014 * The maximum number of sessions supported by the card
1015 * is dependent on the amount of context ram, which
1016 * encryption algorithms are enabled, and how compression
1017 * is configured. This should be configured before this
1018 * routine is called.
1019 */
1020 static void
1021 hifn_sessions(struct hifn_softc *sc)
1022 {
1023 u_int32_t pucnfg;
1024 int ctxsize;
1025
1026 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1027
1028 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1029 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1030 ctxsize = 128;
1031 else
1032 ctxsize = 512;
1033 /*
1034 * 7955/7956 has internal context memory of 32K
1035 */
1036 if (sc->sc_flags & HIFN_IS_7956)
1037 sc->sc_maxses = 32768 / ctxsize;
1038 else
1039 sc->sc_maxses = 1 +
1040 ((sc->sc_ramsize - 32768) / ctxsize);
1041 }
1042 else
1043 sc->sc_maxses = sc->sc_ramsize / 16384;
1044
1045 if (sc->sc_maxses > 2048)
1046 sc->sc_maxses = 2048;
1047 }
1048
1049 /*
1050 * Determine ram type (sram or dram). Board should be just out of a reset
1051 * state when this is called.
1052 */
1053 static int
1054 hifn_ramtype(struct hifn_softc *sc)
1055 {
1056 u_int8_t data[8], dataexpect[8];
1057 int i;
1058
1059 for (i = 0; i < sizeof(data); i++)
1060 data[i] = dataexpect[i] = 0x55;
1061 if (hifn_writeramaddr(sc, 0, data))
1062 return (-1);
1063 if (hifn_readramaddr(sc, 0, data))
1064 return (-1);
1065 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1066 sc->sc_drammodel = 1;
1067 return (0);
1068 }
1069
1070 for (i = 0; i < sizeof(data); i++)
1071 data[i] = dataexpect[i] = 0xaa;
1072 if (hifn_writeramaddr(sc, 0, data))
1073 return (-1);
1074 if (hifn_readramaddr(sc, 0, data))
1075 return (-1);
1076 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1077 sc->sc_drammodel = 1;
1078 return (0);
1079 }
1080
1081 return (0);
1082 }
1083
1084 #define HIFN_SRAM_MAX (32 << 20)
1085 #define HIFN_SRAM_STEP_SIZE 16384
1086 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1087
1088 static int
1089 hifn_sramsize(struct hifn_softc *sc)
1090 {
1091 u_int32_t a;
1092 u_int8_t data[8];
1093 u_int8_t dataexpect[sizeof(data)];
1094 int32_t i;
1095
1096 for (i = 0; i < sizeof(data); i++)
1097 data[i] = dataexpect[i] = i ^ 0x5a;
1098
1099 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1100 a = i * HIFN_SRAM_STEP_SIZE;
1101 bcopy(&i, data, sizeof(i));
1102 hifn_writeramaddr(sc, a, data);
1103 }
1104
1105 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1106 a = i * HIFN_SRAM_STEP_SIZE;
1107 bcopy(&i, dataexpect, sizeof(i));
1108 if (hifn_readramaddr(sc, a, data) < 0)
1109 return (0);
1110 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1111 return (0);
1112 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1113 }
1114
1115 return (0);
1116 }
1117
1118 /*
1119 * XXX For dram boards, one should really try all of the
1120 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1121 * is already set up correctly.
1122 */
1123 static int
1124 hifn_dramsize(struct hifn_softc *sc)
1125 {
1126 u_int32_t cnfg;
1127
1128 if (sc->sc_flags & HIFN_IS_7956) {
1129 /*
1130 * 7955/7956 have a fixed internal ram of only 32K.
1131 */
1132 sc->sc_ramsize = 32768;
1133 } else {
1134 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1135 HIFN_PUCNFG_DRAMMASK;
1136 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1137 }
1138 return (0);
1139 }
1140
/*
 * Reserve the next free slot in each of the four descriptor rings
 * (command, source, destination, result), returning the claimed
 * indices through *cmdp/*srcp/*dstp/*resp.  If a ring's index has
 * reached its size, the trailing jump descriptor is (re)armed so the
 * device wraps back to slot 0, and allocation restarts at the front.
 * The caller must ensure there is room in every ring.  The *k indices
 * (next slot for the completion path to examine -- see hifn_intr) are
 * advanced alongside.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* Command ring: wrap via the jump descriptor when full. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* Source ring. */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* Destination ring. */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* Result ring. */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1187
1188 static int
1189 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1190 {
1191 struct hifn_dma *dma = sc->sc_dma;
1192 struct hifn_base_command wc;
1193 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1194 int r, cmdi, resi, srci, dsti;
1195
1196 wc.masks = htole16(3 << 13);
1197 wc.session_num = htole16(addr >> 14);
1198 wc.total_source_count = htole16(8);
1199 wc.total_dest_count = htole16(addr & 0x3fff);
1200
1201 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1202
1203 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1204 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1205 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1206
1207 /* build write command */
1208 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1209 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1210 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1211
1212 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1213 + offsetof(struct hifn_dma, test_src));
1214 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1215 + offsetof(struct hifn_dma, test_dst));
1216
1217 dma->cmdr[cmdi].l = htole32(16 | masks);
1218 dma->srcr[srci].l = htole32(8 | masks);
1219 dma->dstr[dsti].l = htole32(4 | masks);
1220 dma->resr[resi].l = htole32(4 | masks);
1221
1222 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1223 0, sc->sc_dmamap->dm_mapsize,
1224 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1225
1226 for (r = 10000; r >= 0; r--) {
1227 DELAY(10);
1228 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1229 0, sc->sc_dmamap->dm_mapsize,
1230 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1231 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1232 break;
1233 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1234 0, sc->sc_dmamap->dm_mapsize,
1235 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1236 }
1237 if (r == 0) {
1238 printf("%s: writeramaddr -- "
1239 "result[%d](addr %d) still valid\n",
1240 sc->sc_dv.dv_xname, resi, addr);
1241 r = -1;
1242 return (-1);
1243 } else
1244 r = 0;
1245
1246 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1247 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1248 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1249
1250 return (r);
1251 }
1252
1253 static int
1254 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1255 {
1256 struct hifn_dma *dma = sc->sc_dma;
1257 struct hifn_base_command rc;
1258 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1259 int r, cmdi, srci, dsti, resi;
1260
1261 rc.masks = htole16(2 << 13);
1262 rc.session_num = htole16(addr >> 14);
1263 rc.total_source_count = htole16(addr & 0x3fff);
1264 rc.total_dest_count = htole16(8);
1265
1266 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1267
1268 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1269 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1270 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1271
1272 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1273 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1274
1275 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1276 offsetof(struct hifn_dma, test_src));
1277 dma->test_src = 0;
1278 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1279 offsetof(struct hifn_dma, test_dst));
1280 dma->test_dst = 0;
1281 dma->cmdr[cmdi].l = htole32(8 | masks);
1282 dma->srcr[srci].l = htole32(8 | masks);
1283 dma->dstr[dsti].l = htole32(8 | masks);
1284 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1285
1286 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1287 0, sc->sc_dmamap->dm_mapsize,
1288 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1289
1290 for (r = 10000; r >= 0; r--) {
1291 DELAY(10);
1292 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1293 0, sc->sc_dmamap->dm_mapsize,
1294 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1295 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1296 break;
1297 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1298 0, sc->sc_dmamap->dm_mapsize,
1299 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1300 }
1301 if (r == 0) {
1302 printf("%s: readramaddr -- "
1303 "result[%d](addr %d) still valid\n",
1304 sc->sc_dv.dv_xname, resi, addr);
1305 r = -1;
1306 } else {
1307 r = 0;
1308 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1309 }
1310
1311 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1312 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1313 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1314
1315 return (r);
1316 }
1317
1318 /*
1319 * Initialize the descriptor rings.
1320 */
1321 static void
1322 hifn_init_dma(struct hifn_softc *sc)
1323 {
1324 struct hifn_dma *dma = sc->sc_dma;
1325 int i;
1326
1327 hifn_set_retry(sc);
1328
1329 /* initialize static pointer values */
1330 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1331 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1332 offsetof(struct hifn_dma, command_bufs[i][0]));
1333 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1334 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1335 offsetof(struct hifn_dma, result_bufs[i][0]));
1336
1337 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1338 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1339 offsetof(struct hifn_dma, cmdr[0]));
1340 dma->srcr[HIFN_D_SRC_RSIZE].p =
1341 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1342 offsetof(struct hifn_dma, srcr[0]));
1343 dma->dstr[HIFN_D_DST_RSIZE].p =
1344 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1345 offsetof(struct hifn_dma, dstr[0]));
1346 dma->resr[HIFN_D_RES_RSIZE].p =
1347 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1348 offsetof(struct hifn_dma, resr[0]));
1349
1350 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1351 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1352 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1353 }
1354
1355 /*
1356 * Writes out the raw command buffer space. Returns the
1357 * command buffer size.
1358 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: total source/destination lengths are split
	 * into a low 16-bit field and high bits packed into the
	 * session_num word.  When slop is in play the destination
	 * length is rounded up to the next whole 32-bit word.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional compression sub-command. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	/* Optional encryption sub-command. */
	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key material, if the command installs one. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/*
	 * New cipher key material; the length written depends on the
	 * algorithm selected in cry_masks.
	 */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 wants 256 bytes of key; repeat the
			 * caller's key until the space is filled,
			 * followed by 4 zero bytes.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV; AES uses a longer IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * A command that selects no engine at all gets 8 bytes of
	 * zero padding appended.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	/* Return the number of bytes written into buf. */
	return (buf_pos - buf);
}
1488
1489 static int
1490 hifn_dmamap_aligned(bus_dmamap_t map)
1491 {
1492 int i;
1493
1494 for (i = 0; i < map->dm_nsegs; i++) {
1495 if (map->dm_segs[i].ds_addr & 3)
1496 return (0);
1497 if ((i != (map->dm_nsegs - 1)) &&
1498 (map->dm_segs[i].ds_len & 3))
1499 return (0);
1500 }
1501 return (1);
1502 }
1503
/*
 * Fill the destination ring with descriptors covering every segment
 * of cmd->dst_map.  If the transfer length is not a multiple of 4
 * (cmd->sloplen != 0), the trailing partial word is redirected into
 * the shared slop[] staging area so the device only writes whole
 * 32-bit words; the real bytes are presumably recovered later by the
 * completion path (hifn_callback) -- confirm there.  Updates
 * dma->dsti/dstu and returns the new ring index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last map straight through. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* End of ring: arm the jump descriptor, wrap. */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Word-aligned length: last segment maps directly. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Final (LAST) descriptor targets the slop word. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			/*
			 * The whole words of the last segment still
			 * map directly; only the tail goes to slop.
			 */
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Emit the final descriptor (carries HIFN_D_LAST). */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1575
/*
 * Fill the source ring with one descriptor per segment of
 * cmd->src_map, marking the final segment with HIFN_D_LAST.  When
 * the ring end is reached the jump descriptor is armed and the index
 * wraps to 0.  Updates dma->srci/srcu and returns the new ring index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		/* Only the final segment carries HIFN_D_LAST. */
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* End of ring: arm the jump descriptor, wrap. */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1607
/*
 * Queue one crypto request on the card: dma-map the source buffer
 * (building a fresh mbuf chain for the destination when the source is
 * not 32-bit aligned), serialize the command via hifn_write_command,
 * and push descriptors onto all four DMA rings, enabling each ring's
 * channel as needed.  Returns 0 on success or an errno; on failure
 * every map created here is unloaded/destroyed before returning.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source: only mbuf chains and uio are supported. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: device output can go in place. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* A misaligned uio cannot be re-buffered here. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/*
			 * Build a fresh (cluster-backed, hence aligned)
			 * mbuf chain of the same total length to receive
			 * the output.
			 */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination: create and load its own map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring bookkeeping is protected by splnet from here on. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Claim a command slot, wrapping the ring if necessary. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Queue the source segments and enable the source channel. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so hifn_intr can complete it later. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop word shares the result slot index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Keep the card "active" for the next 5 ticks of hifn_tick. */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1879
1880 static void
1881 hifn_tick(void *vsc)
1882 {
1883 struct hifn_softc *sc = vsc;
1884 int s;
1885
1886 s = splnet();
1887 if (sc->sc_active == 0) {
1888 struct hifn_dma *dma = sc->sc_dma;
1889 u_int32_t r = 0;
1890
1891 if (dma->cmdu == 0 && sc->sc_c_busy) {
1892 sc->sc_c_busy = 0;
1893 r |= HIFN_DMACSR_C_CTRL_DIS;
1894 CLR_LED(sc, HIFN_MIPSRST_LED0);
1895 }
1896 if (dma->srcu == 0 && sc->sc_s_busy) {
1897 sc->sc_s_busy = 0;
1898 r |= HIFN_DMACSR_S_CTRL_DIS;
1899 CLR_LED(sc, HIFN_MIPSRST_LED1);
1900 }
1901 if (dma->dstu == 0 && sc->sc_d_busy) {
1902 sc->sc_d_busy = 0;
1903 r |= HIFN_DMACSR_D_CTRL_DIS;
1904 }
1905 if (dma->resu == 0 && sc->sc_r_busy) {
1906 sc->sc_r_busy = 0;
1907 r |= HIFN_DMACSR_R_CTRL_DIS;
1908 CLR_LED(sc, HIFN_MIPSRST_LED2);
1909 }
1910 if (r)
1911 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1912 }
1913 else
1914 sc->sc_active--;
1915 splx(s);
1916 #ifdef __OpenBSD__
1917 timeout_add(&sc->sc_tickto, hz);
1918 #else
1919 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1920 #endif
1921 }
1922
/*
 * Interrupt handler.  Acknowledges the DMA status bits this driver
 * has enabled, reports overruns and illegal accesses, resets the
 * device on channel aborts, then drains the result, source and
 * command rings -- dispatching each completed request through
 * hifn_callback().  Returns 1 if the interrupt was ours, else 0.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Acknowledge only the bits we have interrupts enabled for. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any channel abort requires a full device reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Device still owns this descriptor: stop. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor, not a
		 * real result -- only complete genuine slots. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest starts 12 bytes into the
				 * result buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Retire consumed source descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Retire consumed command descriptors. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
2058
2059 /*
2060 * Allocate a new 'session' and return an encoded session id. 'sidp'
2061 * contains our registration id, and should contain an encoded session
2062 * id on successful allocation.
2063 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/*
	 * Walk the requested algorithm chain: at most one MAC, one
	 * cipher and one compression algorithm may be selected.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			arc4randbytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	/* At least one engine must be selected. */
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* Encode unit + session index into the returned sid. */
	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2142
2143 /*
2144 * Deallocate a session.
2145 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2146 * XXX to blow away any keys already stored there.
2147 */
2148 static int
2149 hifn_freesession(void *arg, u_int64_t tid)
2150 {
2151 struct hifn_softc *sc = arg;
2152 int session;
2153 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2154
2155 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2156 if (sc == NULL)
2157 return (EINVAL);
2158
2159 session = HIFN_SESSION(sid);
2160 if (session >= sc->sc_maxses)
2161 return (EINVAL);
2162
2163 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2164 return (0);
2165 }
2166
2167 static int
2168 hifn_process(void *arg, struct cryptop *crp, int hint)
2169 {
2170 struct hifn_softc *sc = arg;
2171 struct hifn_command *cmd = NULL;
2172 int session, err, ivlen;
2173 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2174
2175 if (crp == NULL || crp->crp_callback == NULL) {
2176 hifnstats.hst_invalid++;
2177 return (EINVAL);
2178 }
2179 session = HIFN_SESSION(crp->crp_sid);
2180
2181 if (sc == NULL || session >= sc->sc_maxses) {
2182 err = EINVAL;
2183 goto errout;
2184 }
2185
2186 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2187 M_DEVBUF, M_NOWAIT|M_ZERO);
2188 if (cmd == NULL) {
2189 hifnstats.hst_nomem++;
2190 err = ENOMEM;
2191 goto errout;
2192 }
2193
2194 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2195 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2196 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2197 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2198 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2199 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2200 } else {
2201 err = EINVAL;
2202 goto errout; /* XXX we don't handle contiguous buffers! */
2203 }
2204
2205 crd1 = crp->crp_desc;
2206 if (crd1 == NULL) {
2207 err = EINVAL;
2208 goto errout;
2209 }
2210 crd2 = crd1->crd_next;
2211
2212 if (crd2 == NULL) {
2213 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2214 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2215 crd1->crd_alg == CRYPTO_SHA1 ||
2216 crd1->crd_alg == CRYPTO_MD5) {
2217 maccrd = crd1;
2218 enccrd = NULL;
2219 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2220 crd1->crd_alg == CRYPTO_3DES_CBC ||
2221 crd1->crd_alg == CRYPTO_AES_CBC ||
2222 crd1->crd_alg == CRYPTO_ARC4) {
2223 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2224 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2225 maccrd = NULL;
2226 enccrd = crd1;
2227 #ifdef HAVE_CRYPTO_LZS
2228 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2229 return (hifn_compression(sc, crp, cmd));
2230 #endif
2231 } else {
2232 err = EINVAL;
2233 goto errout;
2234 }
2235 } else {
2236 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2237 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2238 crd1->crd_alg == CRYPTO_MD5 ||
2239 crd1->crd_alg == CRYPTO_SHA1) &&
2240 (crd2->crd_alg == CRYPTO_DES_CBC ||
2241 crd2->crd_alg == CRYPTO_3DES_CBC ||
2242 crd2->crd_alg == CRYPTO_AES_CBC ||
2243 crd2->crd_alg == CRYPTO_ARC4) &&
2244 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2245 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2246 maccrd = crd1;
2247 enccrd = crd2;
2248 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2249 crd1->crd_alg == CRYPTO_ARC4 ||
2250 crd1->crd_alg == CRYPTO_3DES_CBC ||
2251 crd1->crd_alg == CRYPTO_AES_CBC) &&
2252 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2253 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2254 crd2->crd_alg == CRYPTO_MD5 ||
2255 crd2->crd_alg == CRYPTO_SHA1) &&
2256 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2257 enccrd = crd1;
2258 maccrd = crd2;
2259 } else {
2260 /*
2261 * We cannot order the 7751 as requested
2262 */
2263 err = EINVAL;
2264 goto errout;
2265 }
2266 }
2267
2268 if (enccrd) {
2269 cmd->enccrd = enccrd;
2270 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2271 switch (enccrd->crd_alg) {
2272 case CRYPTO_ARC4:
2273 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2274 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2275 != sc->sc_sessions[session].hs_prev_op)
2276 sc->sc_sessions[session].hs_state =
2277 HS_STATE_USED;
2278 break;
2279 case CRYPTO_DES_CBC:
2280 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2281 HIFN_CRYPT_CMD_MODE_CBC |
2282 HIFN_CRYPT_CMD_NEW_IV;
2283 break;
2284 case CRYPTO_3DES_CBC:
2285 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2286 HIFN_CRYPT_CMD_MODE_CBC |
2287 HIFN_CRYPT_CMD_NEW_IV;
2288 break;
2289 case CRYPTO_AES_CBC:
2290 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2291 HIFN_CRYPT_CMD_MODE_CBC |
2292 HIFN_CRYPT_CMD_NEW_IV;
2293 break;
2294 default:
2295 err = EINVAL;
2296 goto errout;
2297 }
2298 if (enccrd->crd_alg != CRYPTO_ARC4) {
2299 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2300 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2301 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2302 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2303 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2304 else
2305 bcopy(sc->sc_sessions[session].hs_iv,
2306 cmd->iv, ivlen);
2307
2308 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2309 == 0) {
2310 if (crp->crp_flags & CRYPTO_F_IMBUF)
2311 m_copyback(cmd->srcu.src_m,
2312 enccrd->crd_inject,
2313 ivlen, cmd->iv);
2314 else if (crp->crp_flags & CRYPTO_F_IOV)
2315 cuio_copyback(cmd->srcu.src_io,
2316 enccrd->crd_inject,
2317 ivlen, cmd->iv);
2318 }
2319 } else {
2320 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2321 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2322 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2323 m_copydata(cmd->srcu.src_m,
2324 enccrd->crd_inject, ivlen, cmd->iv);
2325 else if (crp->crp_flags & CRYPTO_F_IOV)
2326 cuio_copydata(cmd->srcu.src_io,
2327 enccrd->crd_inject, ivlen, cmd->iv);
2328 }
2329 }
2330
2331 cmd->ck = enccrd->crd_key;
2332 cmd->cklen = enccrd->crd_klen >> 3;
2333
2334 /*
2335 * Need to specify the size for the AES key in the masks.
2336 */
2337 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2338 HIFN_CRYPT_CMD_ALG_AES) {
2339 switch (cmd->cklen) {
2340 case 16:
2341 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2342 break;
2343 case 24:
2344 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2345 break;
2346 case 32:
2347 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2348 break;
2349 default:
2350 err = EINVAL;
2351 goto errout;
2352 }
2353 }
2354
2355 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2356 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2357 }
2358
2359 if (maccrd) {
2360 cmd->maccrd = maccrd;
2361 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2362
2363 switch (maccrd->crd_alg) {
2364 case CRYPTO_MD5:
2365 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2366 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2367 HIFN_MAC_CMD_POS_IPSEC;
2368 break;
2369 case CRYPTO_MD5_HMAC:
2370 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2371 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2372 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2373 break;
2374 case CRYPTO_SHA1:
2375 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2376 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2377 HIFN_MAC_CMD_POS_IPSEC;
2378 break;
2379 case CRYPTO_SHA1_HMAC:
2380 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2381 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2382 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2383 break;
2384 }
2385
2386 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2387 maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2388 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2389 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2390 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2391 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2392 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2393 }
2394 }
2395
2396 cmd->crp = crp;
2397 cmd->session_num = session;
2398 cmd->softc = sc;
2399
2400 err = hifn_crypto(sc, cmd, crp, hint);
2401 if (err == 0) {
2402 if (enccrd)
2403 sc->sc_sessions[session].hs_prev_op =
2404 enccrd->crd_flags & CRD_F_ENCRYPT;
2405 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2406 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2407 return 0;
2408 } else if (err == ERESTART) {
2409 /*
2410 * There weren't enough resources to dispatch the request
2411 * to the part. Notify the caller so they'll requeue this
2412 * request and resubmit it again soon.
2413 */
2414 #ifdef HIFN_DEBUG
2415 if (hifn_debug)
2416 printf(sc->sc_dv.dv_xname, "requeue request\n");
2417 #endif
2418 free(cmd, M_DEVBUF);
2419 sc->sc_needwakeup |= CRYPTO_SYMQ;
2420 return (err);
2421 }
2422
2423 errout:
2424 if (cmd != NULL)
2425 free(cmd, M_DEVBUF);
2426 if (err == EINVAL)
2427 hifnstats.hst_invalid++;
2428 else
2429 hifnstats.hst_nomem++;
2430 crp->crp_etype = err;
2431 crypto_done(crp);
2432 return (0);
2433 }
2434
/*
 * Abort processing after a fault: walk the in-use slice of the result
 * ring, delivering commands the device already completed (VALID bit
 * cleared) through hifn_callback() and failing the rest with ENOMEM,
 * then reset and reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* i: oldest outstanding result slot; u: number outstanding. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest follows the 12-byte result header. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Never completed by the device: unwind the DMA maps. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* A distinct dst chain replaces the caller's buffer. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2515
/*
 * Completion handler for a symmetric crypto command: sync and unload
 * the DMA maps, trim the destination mbuf chain to the result length,
 * copy back any "slop" bytes staged in the shared DMA area, advance
 * the destination ring, save the trailing ciphertext block as the
 * session's next IV after an encrypt, and deliver the MAC result.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A separate destination chain was allocated; hand
			 * it to the caller, trim it to the source length,
			 * and free the original source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/*
	 * Non-multiple-of-4 tails were DMAed into the shared slop area;
	 * copy them back into place.
	 */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/*
	 * Reclaim completed destination descriptors; a slot still marked
	 * VALID stops the scan.  Jump descriptors wrap i without
	 * consuming u.
	 */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * After a CBC encrypt (CRYPT set, DECODE clear), save the last
	 * ciphertext block as the IV for the session's next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* MAC digest follows the base (and optional comp) results. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2649
2650 #ifdef HAVE_CRYPTO_LZS
2651
2652 static int
2653 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2654 struct hifn_command *cmd)
2655 {
2656 struct cryptodesc *crd = crp->crp_desc;
2657 int s, err = 0;
2658
2659 cmd->compcrd = crd;
2660 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2661
2662 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2663 /*
2664 * XXX can only handle mbufs right now since we can
2665 * XXX dynamically resize them.
2666 */
2667 err = EINVAL;
2668 return (ENOMEM);
2669 }
2670
2671 if ((crd->crd_flags & CRD_F_COMP) == 0)
2672 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2673 if (crd->crd_alg == CRYPTO_LZS_COMP)
2674 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2675 HIFN_COMP_CMD_CLEARHIST;
2676
2677 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2678 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2679 err = ENOMEM;
2680 goto fail;
2681 }
2682
2683 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2684 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2685 err = ENOMEM;
2686 goto fail;
2687 }
2688
2689 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2690 int len;
2691
2692 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2693 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2694 err = ENOMEM;
2695 goto fail;
2696 }
2697
2698 len = cmd->src_map->dm_mapsize / MCLBYTES;
2699 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2700 len++;
2701 len *= MCLBYTES;
2702
2703 if ((crd->crd_flags & CRD_F_COMP) == 0)
2704 len *= 4;
2705
2706 if (len > HIFN_MAX_DMALEN)
2707 len = HIFN_MAX_DMALEN;
2708
2709 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2710 if (cmd->dstu.dst_m == NULL) {
2711 err = ENOMEM;
2712 goto fail;
2713 }
2714
2715 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2716 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2717 err = ENOMEM;
2718 goto fail;
2719 }
2720 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2721 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2722 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2723 err = ENOMEM;
2724 goto fail;
2725 }
2726 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2727 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2728 err = ENOMEM;
2729 goto fail;
2730 }
2731 }
2732
2733 if (cmd->src_map == cmd->dst_map)
2734 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2735 0, cmd->src_map->dm_mapsize,
2736 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2737 else {
2738 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2739 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2740 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2741 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2742 }
2743
2744 cmd->crp = crp;
2745 /*
2746 * Always use session 0. The modes of compression we use are
2747 * stateless and there is always at least one compression
2748 * context, zero.
2749 */
2750 cmd->session_num = 0;
2751 cmd->softc = sc;
2752
2753 s = splnet();
2754 err = hifn_compress_enter(sc, cmd);
2755 splx(s);
2756
2757 if (err != 0)
2758 goto fail;
2759 return (0);
2760
2761 fail:
2762 if (cmd->dst_map != NULL) {
2763 if (cmd->dst_map->dm_nsegs > 0)
2764 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2765 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2766 }
2767 if (cmd->src_map != NULL) {
2768 if (cmd->src_map->dm_nsegs > 0)
2769 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2770 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2771 }
2772 free(cmd, M_DEVBUF);
2773 if (err == EINVAL)
2774 hifnstats.hst_invalid++;
2775 else
2776 hifnstats.hst_nomem++;
2777 crp->crp_etype = err;
2778 crypto_done(crp);
2779 return (0);
2780 }
2781
2782 /*
2783 * must be called at splnet()
2784 */
2785 static int
2786 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2787 {
2788 struct hifn_dma *dma = sc->sc_dma;
2789 int cmdi, resi;
2790 u_int32_t cmdlen;
2791
2792 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2793 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2794 return (ENOMEM);
2795
2796 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2797 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2798 return (ENOMEM);
2799
2800 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2801 dma->cmdi = 0;
2802 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2803 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2804 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2805 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2806 }
2807 cmdi = dma->cmdi++;
2808 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2809 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2810
2811 /* .p for command/result already set */
2812 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2813 HIFN_D_MASKDONEIRQ);
2814 HIFN_CMDR_SYNC(sc, cmdi,
2815 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2816 dma->cmdu++;
2817 if (sc->sc_c_busy == 0) {
2818 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2819 sc->sc_c_busy = 1;
2820 SET_LED(sc, HIFN_MIPSRST_LED0);
2821 }
2822
2823 /*
2824 * We don't worry about missing an interrupt (which a "command wait"
2825 * interrupt salvages us from), unless there is more than one command
2826 * in the queue.
2827 */
2828 if (dma->cmdu > 1) {
2829 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2830 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2831 }
2832
2833 hifnstats.hst_ipackets++;
2834 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2835
2836 hifn_dmamap_load_src(sc, cmd);
2837 if (sc->sc_s_busy == 0) {
2838 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2839 sc->sc_s_busy = 1;
2840 SET_LED(sc, HIFN_MIPSRST_LED1);
2841 }
2842
2843 /*
2844 * Unlike other descriptors, we don't mask done interrupt from
2845 * result descriptor.
2846 */
2847 if (dma->resi == HIFN_D_RES_RSIZE) {
2848 dma->resi = 0;
2849 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2850 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2851 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2852 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2853 }
2854 resi = dma->resi++;
2855 dma->hifn_commands[resi] = cmd;
2856 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2857 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2858 HIFN_D_VALID | HIFN_D_LAST);
2859 HIFN_RESR_SYNC(sc, resi,
2860 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2861 dma->resu++;
2862 if (sc->sc_r_busy == 0) {
2863 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2864 sc->sc_r_busy = 1;
2865 SET_LED(sc, HIFN_MIPSRST_LED2);
2866 }
2867
2868 if (cmd->sloplen)
2869 cmd->slopidx = resi;
2870
2871 hifn_dmamap_load_dst(sc, cmd);
2872
2873 if (sc->sc_d_busy == 0) {
2874 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2875 sc->sc_d_busy = 1;
2876 }
2877 sc->sc_active = 5;
2878 cmd->cmd_callback = hifn_callback_comp;
2879 return (0);
2880 }
2881
2882 static void
2883 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2884 u_int8_t *resbuf)
2885 {
2886 struct hifn_base_result baseres;
2887 struct cryptop *crp = cmd->crp;
2888 struct hifn_dma *dma = sc->sc_dma;
2889 struct mbuf *m;
2890 int err = 0, i, u;
2891 u_int32_t olen;
2892 bus_size_t dstsize;
2893
2894 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2895 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2896 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2897 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2898
2899 dstsize = cmd->dst_map->dm_mapsize;
2900 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2901
2902 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2903
2904 i = dma->dstk; u = dma->dstu;
2905 while (u != 0) {
2906 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2907 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2908 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2909 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2910 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2911 offsetof(struct hifn_dma, dstr[i]),
2912 sizeof(struct hifn_desc),
2913 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2914 break;
2915 }
2916 if (++i == (HIFN_D_DST_RSIZE + 1))
2917 i = 0;
2918 else
2919 u--;
2920 }
2921 dma->dstk = i; dma->dstu = u;
2922
2923 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2924 bus_size_t xlen;
2925
2926 xlen = dstsize;
2927
2928 m_freem(cmd->dstu.dst_m);
2929
2930 if (xlen == HIFN_MAX_DMALEN) {
2931 /* We've done all we can. */
2932 err = E2BIG;
2933 goto out;
2934 }
2935
2936 xlen += MCLBYTES;
2937
2938 if (xlen > HIFN_MAX_DMALEN)
2939 xlen = HIFN_MAX_DMALEN;
2940
2941 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2942 cmd->srcu.src_m);
2943 if (cmd->dstu.dst_m == NULL) {
2944 err = ENOMEM;
2945 goto out;
2946 }
2947 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2948 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2949 err = ENOMEM;
2950 goto out;
2951 }
2952
2953 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2954 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2955 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2956 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2957
2958 /* already at splnet... */
2959 err = hifn_compress_enter(sc, cmd);
2960 if (err != 0)
2961 goto out;
2962 return;
2963 }
2964
2965 olen = dstsize - (letoh16(baseres.dst_cnt) |
2966 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2967 HIFN_BASE_RES_DSTLEN_S) << 16));
2968
2969 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2970
2971 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2972 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2973 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2974
2975 m = cmd->dstu.dst_m;
2976 if (m->m_flags & M_PKTHDR)
2977 m->m_pkthdr.len = olen;
2978 crp->crp_buf = (caddr_t)m;
2979 for (; m != NULL; m = m->m_next) {
2980 if (olen >= m->m_len)
2981 olen -= m->m_len;
2982 else {
2983 m->m_len = olen;
2984 olen = 0;
2985 }
2986 }
2987
2988 m_freem(cmd->srcu.src_m);
2989 free(cmd, M_DEVBUF);
2990 crp->crp_etype = 0;
2991 crypto_done(crp);
2992 return;
2993
2994 out:
2995 if (cmd->dst_map != NULL) {
2996 if (cmd->src_map->dm_nsegs != 0)
2997 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2998 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2999 }
3000 if (cmd->src_map != NULL) {
3001 if (cmd->src_map->dm_nsegs != 0)
3002 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3003 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3004 }
3005 if (cmd->dstu.dst_m != NULL)
3006 m_freem(cmd->dstu.dst_m);
3007 free(cmd, M_DEVBUF);
3008 crp->crp_etype = err;
3009 crypto_done(crp);
3010 }
3011
3012 static struct mbuf *
3013 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3014 {
3015 int len;
3016 struct mbuf *m, *m0, *mlast;
3017
3018 if (mtemplate->m_flags & M_PKTHDR) {
3019 len = MHLEN;
3020 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3021 } else {
3022 len = MLEN;
3023 MGET(m0, M_DONTWAIT, MT_DATA);
3024 }
3025 if (m0 == NULL)
3026 return (NULL);
3027 if (len == MHLEN)
3028 M_DUP_PKTHDR(m0, mtemplate);
3029 MCLGET(m0, M_DONTWAIT);
3030 if (!(m0->m_flags & M_EXT))
3031 m_freem(m0);
3032 len = MCLBYTES;
3033
3034 totlen -= len;
3035 m0->m_pkthdr.len = m0->m_len = len;
3036 mlast = m0;
3037
3038 while (totlen > 0) {
3039 MGET(m, M_DONTWAIT, MT_DATA);
3040 if (m == NULL) {
3041 m_freem(m0);
3042 return (NULL);
3043 }
3044 MCLGET(m, M_DONTWAIT);
3045 if (!(m->m_flags & M_EXT)) {
3046 m_freem(m0);
3047 return (NULL);
3048 }
3049 len = MCLBYTES;
3050 m->m_len = len;
3051 if (m0->m_flags & M_PKTHDR)
3052 m0->m_pkthdr.len += len;
3053 totlen -= len;
3054
3055 mlast->m_next = m;
3056 mlast = m;
3057 }
3058
3059 return (m0);
3060 }
3061 #endif /* HAVE_CRYPTO_LZS */
3062
3063 static void
3064 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3065 {
3066 /*
3067 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3068 * and Group 1 registers; avoid conditions that could create
3069 * burst writes by doing a read in between the writes.
3070 */
3071 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3072 if (sc->sc_waw_lastgroup == reggrp &&
3073 sc->sc_waw_lastreg == reg - 4) {
3074 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3075 }
3076 sc->sc_waw_lastgroup = reggrp;
3077 sc->sc_waw_lastreg = reg;
3078 }
3079 if (reggrp == 0)
3080 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3081 else
3082 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3083
3084 }
3085
3086 static u_int32_t
3087 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3088 {
3089 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3090 sc->sc_waw_lastgroup = -1;
3091 sc->sc_waw_lastreg = 1;
3092 }
3093 if (reggrp == 0)
3094 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3095 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3096 }
3097