hifn7751.c revision 1.47 1 /* $NetBSD: hifn7751.c,v 1.47 2011/11/19 22:51:23 tls Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.47 2011/11/19 22:51:23 tls Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/errno.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/mbuf.h>
60 #include <sys/device.h>
61
62 #ifdef __OpenBSD__
63 #include <crypto/crypto.h>
64 #include <dev/rndvar.h>
65 #else
66 #include <opencrypto/cryptodev.h>
67 #include <sys/cprng.h>
68 #endif
69
70 #include <dev/pci/pcireg.h>
71 #include <dev/pci/pcivar.h>
72 #include <dev/pci/pcidevs.h>
73
74 #include <dev/pci/hifn7751reg.h>
75 #include <dev/pci/hifn7751var.h>
76
77 #undef HIFN_DEBUG
78
79 #ifdef __NetBSD__
80 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
81 #endif
82
83 #ifdef HIFN_DEBUG
84 extern int hifn_debug; /* patchable */
85 int hifn_debug = 1;
86 #endif
87
88 #ifdef __OpenBSD__
89 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
90 #endif
91
92 /*
93 * Prototypes and count for the pci_device structure
94 */
95 #ifdef __OpenBSD__
96 static int hifn_probe((struct device *, void *, void *);
97 #else
98 static int hifn_probe(device_t, cfdata_t, void *);
99 #endif
100 static void hifn_attach(device_t, device_t, void *);
101
102 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
103 hifn_probe, hifn_attach, NULL, NULL);
104
#ifdef __OpenBSD__
/* OpenBSD autoconf driver class descriptor. */
struct cfdriver hifn_cd = {
	0, "hifn", DV_DULL
};
#endif

/*
 * Forward declarations for the driver's internal helpers.
 * Grouped roughly by purpose: board/unit reset, sizing/probing,
 * session and crypto request handling, DMA plumbing, and the
 * public-key / RNG subunit.
 */
static void	hifn_reset_board(struct hifn_softc *, int);
static void	hifn_reset_puc(struct hifn_softc *);
static void	hifn_puc_wait(struct hifn_softc *);
static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
static void	hifn_set_retry(struct hifn_softc *);
static void	hifn_init_dma(struct hifn_softc *);
static void	hifn_init_pci_registers(struct hifn_softc *);
static int	hifn_sramsize(struct hifn_softc *);
static int	hifn_dramsize(struct hifn_softc *);
static int	hifn_ramtype(struct hifn_softc *);
static void	hifn_sessions(struct hifn_softc *);
static int	hifn_intr(void *);
static u_int	hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
/* opencrypto(9) driver entry points */
static int	hifn_newsession(void*, u_int32_t *, struct cryptoini *);
static int	hifn_freesession(void*, u_int64_t);
static int	hifn_process(void*, struct cryptop *, int);
static void	hifn_callback(struct hifn_softc *, struct hifn_command *,
			      u_int8_t *);
static int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
			    struct cryptop*, int);
static int	hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int	hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int	hifn_dmamap_aligned(bus_dmamap_t);
static int	hifn_dmamap_load_src(struct hifn_softc *,
				     struct hifn_command *);
static int	hifn_dmamap_load_dst(struct hifn_softc *,
				     struct hifn_command *);
static int	hifn_init_pubrng(struct hifn_softc *);
static void	hifn_rng(void *);
static void	hifn_tick(void *);
static void	hifn_abort(struct hifn_softc *);
static void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
				int *);
static void	hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
#ifdef HAVE_CRYPTO_LZS
/* LZS compression support (OpenBSD OCF only). */
static int	hifn_compression(struct hifn_softc *, struct cryptop *,
				 struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
				   u_int8_t *);
#endif	/* HAVE_CRYPTO_LZS */


/* Global driver statistics, shared by all hifn(4) instances. */
struct hifn_stats hifnstats;
158
/*
 * Table of supported PCI vendor/product IDs, the per-chip feature
 * flags to set in sc_flags, and a human-readable name for attach
 * messages.  Terminated by an all-NULL sentinel entry; hifn_lookup()
 * scans it until hifn_name is NULL.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  /* 7811: on-chip RNG and LEDs, but burst writes are broken. */
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  /* 795x parts add AES and use the 7956-style PLL/context RAM. */
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	/* sentinel */
	{ 0,			0,
	  0,
	  NULL
	}
};
205
206 static const struct hifn_product *
207 hifn_lookup(const struct pci_attach_args *pa)
208 {
209 const struct hifn_product *hp;
210
211 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
212 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
213 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
214 return (hp);
215 }
216 return (NULL);
217 }
218
219 static int
220 hifn_probe(device_t parent, cfdata_t match, void *aux)
221 {
222 struct pci_attach_args *pa = aux;
223
224 if (hifn_lookup(pa) != NULL)
225 return 1;
226
227 return 0;
228 }
229
/*
 * Attach: map the two register BARs, allocate and wire the single
 * contiguous DMA control structure, unlock the crypto engine, size
 * the external RAM, hook the interrupt, and register our algorithms
 * with opencrypto(9).  Failures unwind through the fail_* labels in
 * reverse order of acquisition.
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* match() said yes, so this cannot happen. */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* The chip bus-masters all data; make sure that is enabled. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0: processing-unit registers; BAR1: DMA engine registers. */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(&sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(&sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		/* Write-and-wait bookkeeping for chips that can't burst. */
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate one physically contiguous page-aligned chunk holding
	 * all four descriptor rings plus buffers (struct hifn_dma), map
	 * it into KVA, and load it into a single-segment DMA map.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(&sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(&sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* Scale the RAM size for display (KB or MB); rseg is reused here. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(&sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(&sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/*
	 * Read back the chip's enable level; the set of algorithms we
	 * may register depends on how far the unlock got (ENA_2 means
	 * strong crypto, which falls through to also register the
	 * ENA_1 set).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	/* Hand the zeroed control structure to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the periodic housekeeping tick. */
#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, 0);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
449
/*
 * Initialize the public-key unit and/or the RNG, as advertised in
 * sc_flags.  Returns 0 on success, 1 if the public-key engine failed
 * to come out of reset.  Also primes the entropy pool once and arms
 * the periodic RNG callout (via the direct hifn_rng() call at the end).
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Wait up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(&sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle the enable bit so the new config takes. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(&sc->sc_dv),
				  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
#endif

		/* Harvest at ~100Hz (or every tick if hz < 100). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, 0);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
541
/*
 * Periodic RNG harvest (callout handler; also called once directly
 * from hifn_init_pubrng() to prime the pool).  Reads raw samples from
 * the chip and feeds them to the kernel entropy pool with a heavily
 * discounted entropy estimate (HIFN_RNG_BITSPER raw bits per credited
 * bit).  Re-arms itself at sc_rnghz unless the 7811 FIFO underflows.
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	/* Big enough to push the pool over its threshold on first call. */
	u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD];
#else
	u_int32_t num[2];
#endif
	u_int32_t sts;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* FIFO underflow: give up permanently. */
				printf("%s: RNG underflow: disabling\n",
				    device_xname(&sc->sc_dv));
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);

			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
#ifdef __NetBSD__
			rnd_add_data(&sc->sc_rnd_source, num,
			    2 * sizeof(num[0]),
			    (2 * sizeof(num[0]) * NBBY) /
				HIFN_RNG_BITSPER);
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
#ifdef __NetBSD__
		/* First time through, try to help fill the pool. */
		int nwords = sc->sc_rngfirst ?
		    sizeof(num) / sizeof(num[0]) : 4;
#else
		int nwords = 2;
#endif
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */


		if (sc->sc_rngfirst) {
			sc->sc_rngfirst = 0;
		}


		/*
		 * Read 8 samples per word kept; num[i / 8] ends up
		 * holding the 8th (most recent) sample of each group,
		 * per the workaround described above.
		 */
		for(i = 0 ; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num,
		    nwords * sizeof(num[0]),
		    (nwords * sizeof(num[0]) * NBBY) /
			HIFN_RNG_BITSPER);
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

	/* Re-arm ourselves for the next harvest interval. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
}
656
657 static void
658 hifn_puc_wait(struct hifn_softc *sc)
659 {
660 int i;
661
662 for (i = 5000; i > 0; i--) {
663 DELAY(1);
664 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
665 break;
666 }
667 if (!i)
668 printf("%s: proc unit did not reset\n", device_xname(&sc->sc_dv));
669 }
670
/*
 * Reset the processing unit.
 *
 * Writing PUCTRL with only DMAENA set drops the RESET bit, which
 * triggers the unit's reset sequence; we then spin until the chip
 * reports the reset is complete.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
681
/*
 * Clear the chip's PCI TRDY-timeout / retry-timeout configuration
 * register (low 16 bits of the HIFN_TRDY_TIMEOUT config word), so
 * the device retries indefinitely rather than timing out.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;	/* preserve the unrelated upper half */
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
691
/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 *
 * If "full" is set, the DMA unit is fully reset; otherwise only a
 * master reset plus a processing-unit reset is performed.  The 1ms
 * delays between steps are required by the hardware.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are stale after a reset; clear them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	/* The config-space retry setting does not survive reset. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
745
/*
 * Advance the crypto-unlock signature LFSR by "cnt" steps and return
 * the new state.  Each step computes the parity of the bits selected
 * by the tap mask 0x80080125 and shifts it into the low bit:
 *
 *	a' = parity(a & 0x80080125) ^ (a << 1)
 *
 * Used by hifn_enable_crypto() to generate the per-card unlock
 * sequence.  Fix: the loop index was a signed "int" compared against
 * the unsigned "cnt" parameter; use u_int to avoid the signed/
 * unsigned comparison (and the theoretical misbehavior for huge cnt).
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}
767
/*
 * Per-card unlock keys, indexed by PCI vendor/product.  The 13-byte
 * card_id is fed through hifn_next_signature() to derive the unlock
 * sequence written to the chip's SECRET registers.  All publicly
 * known cards use the all-zeros key.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
815
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns a human-readable capability string for the attach message,
 * or NULL if the card is unknown or reports a bogus enable level.
 * The already-unlocked check above is critical: performing the unlock
 * sequence a second time permanently locks the chip until reboot.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find this card's unlock key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save registers we clobber, to restore at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/*
	 * Unlock sequence: put the DMA unit in unlock mode, read the
	 * seed from SECRET1, then feed 13 LFSR-derived words (one per
	 * key byte) into SECRET2.  The 1ms delays are required.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see what the unlock achieved. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers we saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
918
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * acknowledge (write-1-to-clear) any pending status bits.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Build the interrupt-enable mask we will use from now on. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* RAM configuration: 795x uses its PLL, older chips select SRAM/DRAM. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
990
991 /*
992 * The maximum number of sessions supported by the card
993 * is dependent on the amount of context ram, which
994 * encryption algorithms are enabled, and how compression
995 * is configured. This should be configured before this
996 * routine is called.
997 */
998 static void
999 hifn_sessions(struct hifn_softc *sc)
1000 {
1001 u_int32_t pucnfg;
1002 int ctxsize;
1003
1004 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1005
1006 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1007 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1008 ctxsize = 128;
1009 else
1010 ctxsize = 512;
1011 /*
1012 * 7955/7956 has internal context memory of 32K
1013 */
1014 if (sc->sc_flags & HIFN_IS_7956)
1015 sc->sc_maxses = 32768 / ctxsize;
1016 else
1017 sc->sc_maxses = 1 +
1018 ((sc->sc_ramsize - 32768) / ctxsize);
1019 }
1020 else
1021 sc->sc_maxses = sc->sc_ramsize / 16384;
1022
1023 if (sc->sc_maxses > 2048)
1024 sc->sc_maxses = 2048;
1025 }
1026
1027 /*
1028 * Determine ram type (sram or dram). Board should be just out of a reset
1029 * state when this is called.
1030 */
1031 static int
1032 hifn_ramtype(struct hifn_softc *sc)
1033 {
1034 u_int8_t data[8], dataexpect[8];
1035 int i;
1036
1037 for (i = 0; i < sizeof(data); i++)
1038 data[i] = dataexpect[i] = 0x55;
1039 if (hifn_writeramaddr(sc, 0, data))
1040 return (-1);
1041 if (hifn_readramaddr(sc, 0, data))
1042 return (-1);
1043 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1044 sc->sc_drammodel = 1;
1045 return (0);
1046 }
1047
1048 for (i = 0; i < sizeof(data); i++)
1049 data[i] = dataexpect[i] = 0xaa;
1050 if (hifn_writeramaddr(sc, 0, data))
1051 return (-1);
1052 if (hifn_readramaddr(sc, 0, data))
1053 return (-1);
1054 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1055 sc->sc_drammodel = 1;
1056 return (0);
1057 }
1058
1059 return (0);
1060 }
1061
1062 #define HIFN_SRAM_MAX (32 << 20)
1063 #define HIFN_SRAM_STEP_SIZE 16384
1064 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1065
1066 static int
1067 hifn_sramsize(struct hifn_softc *sc)
1068 {
1069 u_int32_t a;
1070 u_int8_t data[8];
1071 u_int8_t dataexpect[sizeof(data)];
1072 int32_t i;
1073
1074 for (i = 0; i < sizeof(data); i++)
1075 data[i] = dataexpect[i] = i ^ 0x5a;
1076
1077 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1078 a = i * HIFN_SRAM_STEP_SIZE;
1079 memcpy(data, &i, sizeof(i));
1080 hifn_writeramaddr(sc, a, data);
1081 }
1082
1083 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1084 a = i * HIFN_SRAM_STEP_SIZE;
1085 memcpy(dataexpect, &i, sizeof(i));
1086 if (hifn_readramaddr(sc, a, data) < 0)
1087 return (0);
1088 if (memcmp(data, dataexpect, sizeof(data)) != 0)
1089 return (0);
1090 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1091 }
1092
1093 return (0);
1094 }
1095
1096 /*
1097 * XXX For dram boards, one should really try all of the
1098 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1099 * is already set up correctly.
1100 */
1101 static int
1102 hifn_dramsize(struct hifn_softc *sc)
1103 {
1104 u_int32_t cnfg;
1105
1106 if (sc->sc_flags & HIFN_IS_7956) {
1107 /*
1108 * 7955/7956 have a fixed internal ram of only 32K.
1109 */
1110 sc->sc_ramsize = 32768;
1111 } else {
1112 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1113 HIFN_PUCNFG_DRAMMASK;
1114 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1115 }
1116 return (0);
1117 }
1118
/*
 * Reserve the next free slot in each of the four descriptor rings
 * (command, source, destination, result) and return the slot indices
 * through *cmdp/*srcp/*dstp/*resp.  When a ring's producer index has
 * reached the end of the ring, the trailing JUMP descriptor is
 * re-armed and the index wraps back to slot 0 first.  The caller must
 * ensure the rings have room before calling.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		/* Extra descriptor past the ring end jumps to slot 0. */
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	/* Keep the consumer index in step; these slots bypass the
	 * normal completion path. */
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1165
1166 static int
1167 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1168 {
1169 struct hifn_dma *dma = sc->sc_dma;
1170 struct hifn_base_command wc;
1171 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1172 int r, cmdi, resi, srci, dsti;
1173
1174 wc.masks = htole16(3 << 13);
1175 wc.session_num = htole16(addr >> 14);
1176 wc.total_source_count = htole16(8);
1177 wc.total_dest_count = htole16(addr & 0x3fff);
1178
1179 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1180
1181 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1182 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1183 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1184
1185 /* build write command */
1186 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1187 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1188 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1189
1190 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1191 + offsetof(struct hifn_dma, test_src));
1192 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1193 + offsetof(struct hifn_dma, test_dst));
1194
1195 dma->cmdr[cmdi].l = htole32(16 | masks);
1196 dma->srcr[srci].l = htole32(8 | masks);
1197 dma->dstr[dsti].l = htole32(4 | masks);
1198 dma->resr[resi].l = htole32(4 | masks);
1199
1200 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1201 0, sc->sc_dmamap->dm_mapsize,
1202 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1203
1204 for (r = 10000; r >= 0; r--) {
1205 DELAY(10);
1206 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1207 0, sc->sc_dmamap->dm_mapsize,
1208 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1209 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1210 break;
1211 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1212 0, sc->sc_dmamap->dm_mapsize,
1213 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1214 }
1215 if (r == 0) {
1216 printf("%s: writeramaddr -- "
1217 "result[%d](addr %d) still valid\n",
1218 device_xname(&sc->sc_dv), resi, addr);
1219 r = -1;
1220 return (-1);
1221 } else
1222 r = 0;
1223
1224 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1225 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1226 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1227
1228 return (r);
1229 }
1230
1231 static int
1232 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1233 {
1234 struct hifn_dma *dma = sc->sc_dma;
1235 struct hifn_base_command rc;
1236 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1237 int r, cmdi, srci, dsti, resi;
1238
1239 rc.masks = htole16(2 << 13);
1240 rc.session_num = htole16(addr >> 14);
1241 rc.total_source_count = htole16(addr & 0x3fff);
1242 rc.total_dest_count = htole16(8);
1243
1244 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1245
1246 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1247 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1248 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1249
1250 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1251 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1252
1253 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1254 offsetof(struct hifn_dma, test_src));
1255 dma->test_src = 0;
1256 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1257 offsetof(struct hifn_dma, test_dst));
1258 dma->test_dst = 0;
1259 dma->cmdr[cmdi].l = htole32(8 | masks);
1260 dma->srcr[srci].l = htole32(8 | masks);
1261 dma->dstr[dsti].l = htole32(8 | masks);
1262 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1263
1264 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1265 0, sc->sc_dmamap->dm_mapsize,
1266 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1267
1268 for (r = 10000; r >= 0; r--) {
1269 DELAY(10);
1270 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1271 0, sc->sc_dmamap->dm_mapsize,
1272 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1273 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1274 break;
1275 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1276 0, sc->sc_dmamap->dm_mapsize,
1277 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1278 }
1279 if (r == 0) {
1280 printf("%s: readramaddr -- "
1281 "result[%d](addr %d) still valid\n",
1282 device_xname(&sc->sc_dv), resi, addr);
1283 r = -1;
1284 } else {
1285 r = 0;
1286 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1287 }
1288
1289 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1290 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1291 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1292
1293 return (r);
1294 }
1295
1296 /*
1297 * Initialize the descriptor rings.
1298 */
1299 static void
1300 hifn_init_dma(struct hifn_softc *sc)
1301 {
1302 struct hifn_dma *dma = sc->sc_dma;
1303 int i;
1304
1305 hifn_set_retry(sc);
1306
1307 /* initialize static pointer values */
1308 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1309 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1310 offsetof(struct hifn_dma, command_bufs[i][0]));
1311 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1312 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1313 offsetof(struct hifn_dma, result_bufs[i][0]));
1314
1315 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1316 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1317 offsetof(struct hifn_dma, cmdr[0]));
1318 dma->srcr[HIFN_D_SRC_RSIZE].p =
1319 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1320 offsetof(struct hifn_dma, srcr[0]));
1321 dma->dstr[HIFN_D_DST_RSIZE].p =
1322 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1323 offsetof(struct hifn_dma, dstr[0]));
1324 dma->resr[HIFN_D_RES_RSIZE].p =
1325 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1326 offsetof(struct hifn_dma, resr[0]));
1327
1328 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1329 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1330 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1331 }
1332
1333 /*
1334 * Writes out the raw command buffer space. Returns the
1335 * command buffer size.
1336 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: source/destination byte counts are split into a
	 * low 16-bit part and high bits packed into the session_num
	 * field.  With slop, the destination length is rounded up to
	 * the containing 32-bit word.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional compression sub-command. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	/* Optional encryption sub-command. */
	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key follows the sub-commands when requested. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key; its on-the-wire size depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4 wants 256 key bytes: repeat the caller's
			 * key to fill, then 4 bytes of zero pad. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV; AES uses a longer IV than the DES family. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/* With no MAC/crypt/comp engine selected, pad the command with
	 * 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	/* Return the total number of command bytes written. */
	return (buf_pos - buf);
}
1466
1467 static int
1468 hifn_dmamap_aligned(bus_dmamap_t map)
1469 {
1470 int i;
1471
1472 for (i = 0; i < map->dm_nsegs; i++) {
1473 if (map->dm_segs[i].ds_addr & 3)
1474 return (0);
1475 if ((i != (map->dm_nsegs - 1)) &&
1476 (map->dm_segs[i].ds_len & 3))
1477 return (0);
1478 }
1479 return (1);
1480 }
1481
/*
 * Fill the destination descriptor ring from cmd->dst_map.  The final
 * few bytes of an unaligned-length transfer ("slop") are redirected
 * into the shared dma block's slop[] word so the device always writes
 * whole 32-bit words; the caller copies them back out later.  Returns
 * the new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last are emitted as-is. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* Re-arm the trailing JUMP descriptor and wrap. */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: the last segment becomes the LAST descriptor. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Unaligned: the LAST descriptor is one 32-bit word
		 * directed at slop[cmd->slopidx]. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Emit what remains of the last segment (if anything)
		 * before the slop word. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Write the final (LAST) descriptor computed above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1553
1554 static int
1555 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1556 {
1557 struct hifn_dma *dma = sc->sc_dma;
1558 bus_dmamap_t map = cmd->src_map;
1559 int idx, i;
1560 u_int32_t last = 0;
1561
1562 idx = dma->srci;
1563 for (i = 0; i < map->dm_nsegs; i++) {
1564 if (i == map->dm_nsegs - 1)
1565 last = HIFN_D_LAST;
1566
1567 dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
1568 dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
1569 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1570 HIFN_SRCR_SYNC(sc, idx,
1571 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1572
1573 if (++idx == HIFN_D_SRC_RSIZE) {
1574 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1575 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1576 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1577 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1578 idx = 0;
1579 }
1580 }
1581 dma->srci = idx;
1582 dma->srcu += map->dm_nsegs;
1583 return (idx);
1584 }
1585
/*
 * Map the request's source (and, if needed, a freshly allocated
 * destination mbuf chain) for DMA, build the command, and queue it on
 * the four descriptor rings.  Returns 0 on success or an errno; on
 * error all maps and any allocated destination chain are torn down via
 * the goto-cleanup chain at the bottom.  "cmd" itself is owned by the
 * caller.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source buffer; only mbuf chains and uios are handled. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* In-place operation: destination shares the source map.
		 * Any trailing partial word is handled via slop. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* Cannot realign a uio in place. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Unaligned mbuf source: build a fresh chain of
			 * the same total length for the destination. */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination buffer needs its own DMA map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(&sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring manipulation below must not race the interrupt handler. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Take a command slot, wrapping via the JUMP descriptor first
	 * if the producer index is at the ring end. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember which command owns this result slot for the
	 * interrupt handler's callback. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(&sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the watchdog countdown used by hifn_tick(). */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1857
1858 static void
1859 hifn_tick(void *vsc)
1860 {
1861 struct hifn_softc *sc = vsc;
1862 int s;
1863
1864 s = splnet();
1865 if (sc->sc_active == 0) {
1866 struct hifn_dma *dma = sc->sc_dma;
1867 u_int32_t r = 0;
1868
1869 if (dma->cmdu == 0 && sc->sc_c_busy) {
1870 sc->sc_c_busy = 0;
1871 r |= HIFN_DMACSR_C_CTRL_DIS;
1872 CLR_LED(sc, HIFN_MIPSRST_LED0);
1873 }
1874 if (dma->srcu == 0 && sc->sc_s_busy) {
1875 sc->sc_s_busy = 0;
1876 r |= HIFN_DMACSR_S_CTRL_DIS;
1877 CLR_LED(sc, HIFN_MIPSRST_LED1);
1878 }
1879 if (dma->dstu == 0 && sc->sc_d_busy) {
1880 sc->sc_d_busy = 0;
1881 r |= HIFN_DMACSR_D_CTRL_DIS;
1882 }
1883 if (dma->resu == 0 && sc->sc_r_busy) {
1884 sc->sc_r_busy = 0;
1885 r |= HIFN_DMACSR_R_CTRL_DIS;
1886 CLR_LED(sc, HIFN_MIPSRST_LED2);
1887 }
1888 if (r)
1889 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1890 }
1891 else
1892 sc->sc_active--;
1893 splx(s);
1894 #ifdef __OpenBSD__
1895 timeout_add(&sc->sc_tickto, hz);
1896 #else
1897 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1898 #endif
1899 }
1900
/*
 * Interrupt handler: acknowledge the DMA status bits we care about,
 * recover from overruns/aborts, then drain completed entries from the
 * result, source and command rings.  Returns 0 when the interrupt was
 * not ours, 1 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       device_xname(&sc->sc_dv),
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Ack only the bits we have interrupts enabled for. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(&sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(&sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(&sc->sc_dv));
	}

	/* Any engine abort forces a full reset of the device. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(&sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/*
	 * Drain completed result descriptors starting at the consumer
	 * index.  Slot HIFN_D_RES_RSIZE is the JUMP descriptor: it is
	 * skipped and does not count against resu.
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the device; stop here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Retire consumed source descriptors the same way. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Retire consumed command descriptors. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
2030
2031 /*
2032 * Allocate a new 'session' and return an encoded session id. 'sidp'
2033 * contains our registration id, and should contain an encoded session
2034 * id on successful allocation.
2035 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* Walk the requested operations; at most one MAC, one cipher
	 * and one compression op per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			cprng_fast(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
 			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	/* At least one operation must have been requested. */
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* Encode device unit + session index into the session id. */
	*sidp = HIFN_SID(device_unit(&sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2114
2115 /*
2116 * Deallocate a session.
2117 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2118 * XXX to blow away any keys already stored there.
2119 */
2120 static int
2121 hifn_freesession(void *arg, u_int64_t tid)
2122 {
2123 struct hifn_softc *sc = arg;
2124 int session;
2125 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2126
2127 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2128 if (sc == NULL)
2129 return (EINVAL);
2130
2131 session = HIFN_SESSION(sid);
2132 if (session >= sc->sc_maxses)
2133 return (EINVAL);
2134
2135 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2136 return (0);
2137 }
2138
/*
 * crypto(9) "process" entry point: dispatch one symmetric request.
 *
 * Validates the request and session, allocates a hifn_command, sorts
 * the (at most two) crypto descriptors into one MAC and one cipher
 * descriptor, fills in IV/key state, and submits via hifn_crypto().
 * Compression-only requests are handed off to hifn_compression().
 * Returns ERESTART when the device is out of resources (caller
 * requeues); all other failures complete the request through
 * crypto_done() and return 0.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* Operation is in place: src and dst reference the same buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the descriptor chain into maccrd/enccrd.  A single
	 * descriptor may be hash-only, cipher-only, or compression;
	 * a pair must be either hash+decrypt or encrypt+hash (the
	 * only orderings accepted below).
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
#ifdef HAVE_CRYPTO_LZS
		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
			/* hifn_compression() takes ownership of cmd. */
			return (hifn_compression(sc, crp, cmd));
#endif
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/*
			 * Force a key reload when the direction differs
			 * from the previous op on this session.
			 */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		/* CBC modes need an IV; RC4 does not. */
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Encrypt: take the caller's explicit IV
				 * or the session's cached IV ... */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* ... and write it into the buffer unless
				 * the caller says it is already there. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->srcu.src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->srcu.src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: IV comes from the caller or is
				 * read out of the buffer itself. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->srcu.src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->srcu.src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* First use since (re)init: upload the cipher key. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* For HMAC, upload the zero-padded key on first use. */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		/* Submitted: remember direction, mark keys as loaded. */
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part. Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			printf("%s: requeue request\n", device_xname(&sc->sc_dv));
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (0);
}
2406
/*
 * Recover from a device fault: walk the outstanding result ring,
 * salvage commands the chip already completed, fail the rest with
 * ENOMEM (XXX should be EAGAIN), then reset and reinitialize the
 * board.  Sessions with uploaded keys are demoted so the keys are
 * re-uploaded on next use.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* i: oldest outstanding result slot; u: count outstanding. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Descriptor still owned by the device: sync the
			 * maps and tear the command down manually. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* If a separate output chain was allocated, hand
			 * it back to the caller in place of the source. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2480
/*
 * Completion handler for a symmetric crypto command: sync and unload
 * the DMA maps, fix up the output mbuf chain, copy back slop bytes,
 * retire finished destination descriptors, save the next CBC IV, copy
 * out the MAC result, and complete the request via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/* A separate output chain was built: trim it to
			 * the input length and give it to the caller. */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy back trailing bytes routed through the slop buffer. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Retire destination descriptors the chip has finished with,
	 * stopping at the first still-valid one. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* NOTE(review): slot HIFN_D_DST_RSIZE looks like the
		 * ring's jump descriptor and is skipped without
		 * decrementing u -- confirm against hifn7751var.h. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/* For CBC encryption, stash the last cipher block as the
	 * session IV for the next operation. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* NOTE(review): the MAC bytes appear to follow the
		 * fixed-size result headers in the result buffer --
		 * confirm layout against struct hifn_mac_result. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Full digest for plain hashes, 96-bit
			 * truncation for the HMAC variants. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2614
2615 #ifdef HAVE_CRYPTO_LZS
2616
2617 static int
2618 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2619 struct hifn_command *cmd)
2620 {
2621 struct cryptodesc *crd = crp->crp_desc;
2622 int s, err = 0;
2623
2624 cmd->compcrd = crd;
2625 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2626
2627 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2628 /*
2629 * XXX can only handle mbufs right now since we can
2630 * XXX dynamically resize them.
2631 */
2632 err = EINVAL;
2633 return (ENOMEM);
2634 }
2635
2636 if ((crd->crd_flags & CRD_F_COMP) == 0)
2637 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2638 if (crd->crd_alg == CRYPTO_LZS_COMP)
2639 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2640 HIFN_COMP_CMD_CLEARHIST;
2641
2642 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2643 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2644 err = ENOMEM;
2645 goto fail;
2646 }
2647
2648 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2649 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2650 err = ENOMEM;
2651 goto fail;
2652 }
2653
2654 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2655 int len;
2656
2657 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2658 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2659 err = ENOMEM;
2660 goto fail;
2661 }
2662
2663 len = cmd->src_map->dm_mapsize / MCLBYTES;
2664 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2665 len++;
2666 len *= MCLBYTES;
2667
2668 if ((crd->crd_flags & CRD_F_COMP) == 0)
2669 len *= 4;
2670
2671 if (len > HIFN_MAX_DMALEN)
2672 len = HIFN_MAX_DMALEN;
2673
2674 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2675 if (cmd->dstu.dst_m == NULL) {
2676 err = ENOMEM;
2677 goto fail;
2678 }
2679
2680 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2681 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2682 err = ENOMEM;
2683 goto fail;
2684 }
2685 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2686 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2687 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2688 err = ENOMEM;
2689 goto fail;
2690 }
2691 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2692 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2693 err = ENOMEM;
2694 goto fail;
2695 }
2696 }
2697
2698 if (cmd->src_map == cmd->dst_map)
2699 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2700 0, cmd->src_map->dm_mapsize,
2701 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2702 else {
2703 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2704 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2705 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2706 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2707 }
2708
2709 cmd->crp = crp;
2710 /*
2711 * Always use session 0. The modes of compression we use are
2712 * stateless and there is always at least one compression
2713 * context, zero.
2714 */
2715 cmd->session_num = 0;
2716 cmd->softc = sc;
2717
2718 s = splnet();
2719 err = hifn_compress_enter(sc, cmd);
2720 splx(s);
2721
2722 if (err != 0)
2723 goto fail;
2724 return (0);
2725
2726 fail:
2727 if (cmd->dst_map != NULL) {
2728 if (cmd->dst_map->dm_nsegs > 0)
2729 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2730 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2731 }
2732 if (cmd->src_map != NULL) {
2733 if (cmd->src_map->dm_nsegs > 0)
2734 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2735 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2736 }
2737 free(cmd, M_DEVBUF);
2738 if (err == EINVAL)
2739 hifnstats.hst_invalid++;
2740 else
2741 hifnstats.hst_nomem++;
2742 crp->crp_etype = err;
2743 crypto_done(crp);
2744 return (0);
2745 }
2746
2747 /*
2748 * must be called at splnet()
2749 */
2750 static int
2751 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2752 {
2753 struct hifn_dma *dma = sc->sc_dma;
2754 int cmdi, resi;
2755 u_int32_t cmdlen;
2756
2757 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2758 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2759 return (ENOMEM);
2760
2761 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2762 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2763 return (ENOMEM);
2764
2765 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2766 dma->cmdi = 0;
2767 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2768 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2769 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2770 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2771 }
2772 cmdi = dma->cmdi++;
2773 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2774 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2775
2776 /* .p for command/result already set */
2777 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2778 HIFN_D_MASKDONEIRQ);
2779 HIFN_CMDR_SYNC(sc, cmdi,
2780 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2781 dma->cmdu++;
2782 if (sc->sc_c_busy == 0) {
2783 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2784 sc->sc_c_busy = 1;
2785 SET_LED(sc, HIFN_MIPSRST_LED0);
2786 }
2787
2788 /*
2789 * We don't worry about missing an interrupt (which a "command wait"
2790 * interrupt salvages us from), unless there is more than one command
2791 * in the queue.
2792 */
2793 if (dma->cmdu > 1) {
2794 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2795 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2796 }
2797
2798 hifnstats.hst_ipackets++;
2799 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2800
2801 hifn_dmamap_load_src(sc, cmd);
2802 if (sc->sc_s_busy == 0) {
2803 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2804 sc->sc_s_busy = 1;
2805 SET_LED(sc, HIFN_MIPSRST_LED1);
2806 }
2807
2808 /*
2809 * Unlike other descriptors, we don't mask done interrupt from
2810 * result descriptor.
2811 */
2812 if (dma->resi == HIFN_D_RES_RSIZE) {
2813 dma->resi = 0;
2814 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2815 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2816 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2817 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2818 }
2819 resi = dma->resi++;
2820 dma->hifn_commands[resi] = cmd;
2821 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2822 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2823 HIFN_D_VALID | HIFN_D_LAST);
2824 HIFN_RESR_SYNC(sc, resi,
2825 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2826 dma->resu++;
2827 if (sc->sc_r_busy == 0) {
2828 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2829 sc->sc_r_busy = 1;
2830 SET_LED(sc, HIFN_MIPSRST_LED2);
2831 }
2832
2833 if (cmd->sloplen)
2834 cmd->slopidx = resi;
2835
2836 hifn_dmamap_load_dst(sc, cmd);
2837
2838 if (sc->sc_d_busy == 0) {
2839 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2840 sc->sc_d_busy = 1;
2841 }
2842 sc->sc_active = 5;
2843 cmd->cmd_callback = hifn_callback_comp;
2844 return (0);
2845 }
2846
2847 static void
2848 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2849 u_int8_t *resbuf)
2850 {
2851 struct hifn_base_result baseres;
2852 struct cryptop *crp = cmd->crp;
2853 struct hifn_dma *dma = sc->sc_dma;
2854 struct mbuf *m;
2855 int err = 0, i, u;
2856 u_int32_t olen;
2857 bus_size_t dstsize;
2858
2859 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2860 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2861 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2862 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2863
2864 dstsize = cmd->dst_map->dm_mapsize;
2865 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2866
2867 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2868
2869 i = dma->dstk; u = dma->dstu;
2870 while (u != 0) {
2871 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2872 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2873 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2874 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2875 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2876 offsetof(struct hifn_dma, dstr[i]),
2877 sizeof(struct hifn_desc),
2878 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2879 break;
2880 }
2881 if (++i == (HIFN_D_DST_RSIZE + 1))
2882 i = 0;
2883 else
2884 u--;
2885 }
2886 dma->dstk = i; dma->dstu = u;
2887
2888 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2889 bus_size_t xlen;
2890
2891 xlen = dstsize;
2892
2893 m_freem(cmd->dstu.dst_m);
2894
2895 if (xlen == HIFN_MAX_DMALEN) {
2896 /* We've done all we can. */
2897 err = E2BIG;
2898 goto out;
2899 }
2900
2901 xlen += MCLBYTES;
2902
2903 if (xlen > HIFN_MAX_DMALEN)
2904 xlen = HIFN_MAX_DMALEN;
2905
2906 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2907 cmd->srcu.src_m);
2908 if (cmd->dstu.dst_m == NULL) {
2909 err = ENOMEM;
2910 goto out;
2911 }
2912 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2913 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2914 err = ENOMEM;
2915 goto out;
2916 }
2917
2918 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2919 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2920 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2921 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2922
2923 /* already at splnet... */
2924 err = hifn_compress_enter(sc, cmd);
2925 if (err != 0)
2926 goto out;
2927 return;
2928 }
2929
2930 olen = dstsize - (letoh16(baseres.dst_cnt) |
2931 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2932 HIFN_BASE_RES_DSTLEN_S) << 16));
2933
2934 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2935
2936 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2937 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2938 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2939
2940 m = cmd->dstu.dst_m;
2941 if (m->m_flags & M_PKTHDR)
2942 m->m_pkthdr.len = olen;
2943 crp->crp_buf = (void *)m;
2944 for (; m != NULL; m = m->m_next) {
2945 if (olen >= m->m_len)
2946 olen -= m->m_len;
2947 else {
2948 m->m_len = olen;
2949 olen = 0;
2950 }
2951 }
2952
2953 m_freem(cmd->srcu.src_m);
2954 free(cmd, M_DEVBUF);
2955 crp->crp_etype = 0;
2956 crypto_done(crp);
2957 return;
2958
2959 out:
2960 if (cmd->dst_map != NULL) {
2961 if (cmd->src_map->dm_nsegs != 0)
2962 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2963 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2964 }
2965 if (cmd->src_map != NULL) {
2966 if (cmd->src_map->dm_nsegs != 0)
2967 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2968 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2969 }
2970 if (cmd->dstu.dst_m != NULL)
2971 m_freem(cmd->dstu.dst_m);
2972 free(cmd, M_DEVBUF);
2973 crp->crp_etype = err;
2974 crypto_done(crp);
2975 }
2976
2977 static struct mbuf *
2978 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2979 {
2980 int len;
2981 struct mbuf *m, *m0, *mlast;
2982
2983 if (mtemplate->m_flags & M_PKTHDR) {
2984 len = MHLEN;
2985 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2986 } else {
2987 len = MLEN;
2988 MGET(m0, M_DONTWAIT, MT_DATA);
2989 }
2990 if (m0 == NULL)
2991 return (NULL);
2992 if (len == MHLEN)
2993 M_DUP_PKTHDR(m0, mtemplate);
2994 MCLGET(m0, M_DONTWAIT);
2995 if (!(m0->m_flags & M_EXT))
2996 m_freem(m0);
2997 len = MCLBYTES;
2998
2999 totlen -= len;
3000 m0->m_pkthdr.len = m0->m_len = len;
3001 mlast = m0;
3002
3003 while (totlen > 0) {
3004 MGET(m, M_DONTWAIT, MT_DATA);
3005 if (m == NULL) {
3006 m_freem(m0);
3007 return (NULL);
3008 }
3009 MCLGET(m, M_DONTWAIT);
3010 if (!(m->m_flags & M_EXT)) {
3011 m_freem(m0);
3012 return (NULL);
3013 }
3014 len = MCLBYTES;
3015 m->m_len = len;
3016 if (m0->m_flags & M_PKTHDR)
3017 m0->m_pkthdr.len += len;
3018 totlen -= len;
3019
3020 mlast->m_next = m;
3021 mlast = m;
3022 }
3023
3024 return (m0);
3025 }
3026 #endif /* HAVE_CRYPTO_LZS */
3027
3028 static void
3029 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3030 {
3031 /*
3032 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3033 * and Group 1 registers; avoid conditions that could create
3034 * burst writes by doing a read in between the writes.
3035 */
3036 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3037 if (sc->sc_waw_lastgroup == reggrp &&
3038 sc->sc_waw_lastreg == reg - 4) {
3039 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3040 }
3041 sc->sc_waw_lastgroup = reggrp;
3042 sc->sc_waw_lastreg = reg;
3043 }
3044 if (reggrp == 0)
3045 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3046 else
3047 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3048
3049 }
3050
3051 static u_int32_t
3052 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3053 {
3054 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3055 sc->sc_waw_lastgroup = -1;
3056 sc->sc_waw_lastreg = 1;
3057 }
3058 if (reggrp == 0)
3059 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3060 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3061 }
3062