hifn7751.c revision 1.46 1 /* $NetBSD: hifn7751.c,v 1.46 2010/11/13 13:52:05 uebayasi Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.46 2010/11/13 13:52:05 uebayasi Exp $");
52
53 #include "rnd.h"
54
55 #if NRND == 0
56 #error hifn7751 requires rnd pseudo-devices
57 #endif
58
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/errno.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/mbuf.h>
67 #include <sys/device.h>
68
69 #ifdef __OpenBSD__
70 #include <crypto/crypto.h>
71 #include <dev/rndvar.h>
72 #else
73 #include <opencrypto/cryptodev.h>
74 #include <sys/rnd.h>
75 #endif
76
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcidevs.h>
80
81 #include <dev/pci/hifn7751reg.h>
82 #include <dev/pci/hifn7751var.h>
83
84 #undef HIFN_DEBUG
85
86 #ifdef __NetBSD__
87 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
88 #endif
89
90 #ifdef HIFN_DEBUG
91 extern int hifn_debug; /* patchable */
92 int hifn_debug = 1;
93 #endif
94
95 #ifdef __OpenBSD__
96 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
97 #endif
98
99 /*
100 * Prototypes and count for the pci_device structure
101 */
102 #ifdef __OpenBSD__
103 static int hifn_probe((struct device *, void *, void *);
104 #else
105 static int hifn_probe(device_t, cfdata_t, void *);
106 #endif
107 static void hifn_attach(device_t, device_t, void *);
108
109 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
110 hifn_probe, hifn_attach, NULL, NULL);
111
112 #ifdef __OpenBSD__
113 struct cfdriver hifn_cd = {
114 0, "hifn", DV_DULL
115 };
116 #endif
117
118 static void hifn_reset_board(struct hifn_softc *, int);
119 static void hifn_reset_puc(struct hifn_softc *);
120 static void hifn_puc_wait(struct hifn_softc *);
121 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
122 static void hifn_set_retry(struct hifn_softc *);
123 static void hifn_init_dma(struct hifn_softc *);
124 static void hifn_init_pci_registers(struct hifn_softc *);
125 static int hifn_sramsize(struct hifn_softc *);
126 static int hifn_dramsize(struct hifn_softc *);
127 static int hifn_ramtype(struct hifn_softc *);
128 static void hifn_sessions(struct hifn_softc *);
129 static int hifn_intr(void *);
130 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
131 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
132 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
133 static int hifn_freesession(void*, u_int64_t);
134 static int hifn_process(void*, struct cryptop *, int);
135 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
136 u_int8_t *);
137 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
138 struct cryptop*, int);
139 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
140 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
141 static int hifn_dmamap_aligned(bus_dmamap_t);
142 static int hifn_dmamap_load_src(struct hifn_softc *,
143 struct hifn_command *);
144 static int hifn_dmamap_load_dst(struct hifn_softc *,
145 struct hifn_command *);
146 static int hifn_init_pubrng(struct hifn_softc *);
147 static void hifn_rng(void *);
148 static void hifn_tick(void *);
149 static void hifn_abort(struct hifn_softc *);
150 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
151 int *);
152 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
153 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
154 #ifdef HAVE_CRYPTO_LZS
155 static int hifn_compression(struct hifn_softc *, struct cryptop *,
156 struct hifn_command *);
157 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
158 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
159 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
160 u_int8_t *);
161 #endif /* HAVE_CRYPTO_LZS */
162
163
164 struct hifn_stats hifnstats;
165
/*
 * Table of supported boards: PCI vendor/product pair, per-chip feature
 * flags (HIFN_IS_*, HIFN_HAS_*), and a human-readable name.  Terminated
 * by an all-NULL sentinel entry; hifn_lookup() scans it linearly.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
212
213 static const struct hifn_product *
214 hifn_lookup(const struct pci_attach_args *pa)
215 {
216 const struct hifn_product *hp;
217
218 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
219 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
220 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
221 return (hp);
222 }
223 return (NULL);
224 }
225
226 static int
227 hifn_probe(device_t parent, cfdata_t match, void *aux)
228 {
229 struct pci_attach_args *pa = aux;
230
231 if (hifn_lookup(pa) != NULL)
232 return 1;
233
234 return 0;
235 }
236
/*
 * Attach: enable bus mastering, map both register BARs, allocate and
 * load the shared DMA descriptor area, unlock the crypto engine, size
 * the on-board RAM, establish the interrupt, register our algorithms
 * with opencrypto, and start the housekeeping callout.  Error paths
 * unwind in reverse order of acquisition.
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* probe already matched, so a miss here is a driver bug */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering before any DMA setup. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0 and BAR1 are the chip's two memory-mapped register spaces. */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(&sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(&sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* 7811 cannot burst-write registers; track last group/reg written. */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate a single physically contiguous page-aligned chunk for
	 * the command/source/destination/result descriptor rings
	 * (struct hifn_dma), map it into KVA, and load it into a DMA map.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(&sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Partial (non-"full") reset; rings were just zeroed above. */
	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(&sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	/* Compute sc_maxses from context RAM; needs RAM size from above. */
	hifn_sessions(sc);

	/* Pretty-print RAM size in KB or MB (rseg is reused as scratch). */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(&sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(&sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/* Re-read the enable level to decide which algorithms to offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		/* Strong crypto: 3DES/ARC4 (and AES on 7955/7956) ... */
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		/* ... plus the baseline hash/HMAC/DES set. */
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the 1 Hz housekeeping tick. */
#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, 0);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
456
/*
 * Bring up the public-key engine and/or the hardware RNG (depending on
 * sc_flags), attach the NetBSD rnd(4) source, and prime the entropy
 * pool with one direct hifn_rng() call, which also schedules the
 * periodic RNG callout.  Returns 0 on success, 1 if the public-key
 * unit failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(&sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle the RNG enable bit, then configure. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4 ms, not the 0.4 s the
		 * comment above calls for (that would be DELAY(400000))
		 * — confirm the intended warm-up time.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(&sc->sc_dv),
				  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
#endif

		/* Poll roughly 100 times a second (at least once per tick). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, 0);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
548
/*
 * RNG callout (also called once directly from hifn_init_pubrng).
 * Harvests random words from the hardware and feeds them to the
 * kernel entropy pool, then reschedules itself at sc_rnghz.
 *
 * Two hardware variants: the 7811 has a status register plus a data
 * FIFO; the 795x exposes a single latched LFSR data register with the
 * oversampling caveat described in the comment below.
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	/* Large buffer so the first pass can contribute a useful amount. */
	u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD];
#else
	u_int32_t num[2];
#endif
	u_int32_t sts;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* FIFO underflow: give up permanently
				 * (no reschedule). */
				printf("%s: RNG underflow: disabling\n",
				    device_xname(&sc->sc_dv));
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);

			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
#ifdef __NetBSD__
			/* Entropy estimate scaled by HIFN_RNG_BITSPER. */
			rnd_add_data(&sc->sc_rnd_source, num,
			    2 * sizeof(num[0]),
			    (2 * sizeof(num[0]) * NBBY) /
				HIFN_RNG_BITSPER);
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
#ifdef __NetBSD__
		/* First time through, try to help fill the pool. */
		int nwords = sc->sc_rngfirst ?
		    sizeof(num) / sizeof(num[0]) : 4;
#else
		int nwords = 2;
#endif
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */


		if (sc->sc_rngfirst) {
			sc->sc_rngfirst = 0;
		}


		/* Keep only every eighth sample (see workaround above). */
		for(i = 0 ; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num,
		    nwords * sizeof(num[0]),
		    (nwords * sizeof(num[0]) * NBBY) /
			HIFN_RNG_BITSPER);
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

	/* Reschedule ourselves. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
}
663
664 static void
665 hifn_puc_wait(struct hifn_softc *sc)
666 {
667 int i;
668
669 for (i = 5000; i > 0; i--) {
670 DELAY(1);
671 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
672 break;
673 }
674 if (!i)
675 printf("%s: proc unit did not reset\n", device_xname(&sc->sc_dv));
676 }
677
/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit (write drops RESET, keeps DMA enabled),
	 * then wait for the RESET bit to clear. */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
688
689 static void
690 hifn_set_retry(struct hifn_softc *sc)
691 {
692 u_int32_t r;
693
694 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
695 r &= 0xffff0000;
696 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
697 }
698
/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		/* Full reset: drop master-reset as well. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: DMA unit plus the processing unit. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are now stale; clear them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	/* The reset clobbered the retry timeout; restore it. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1 s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
752
/*
 * Advance the crypto-unlock signature LFSR by "cnt" steps and return
 * the new state.  Each step computes the parity of the tapped bits
 * (mask 0x80080125) of "a" with a shift-and-xor fold, then shifts it
 * into the low bit: a' = parity ^ (a << 1).  Used by
 * hifn_enable_crypto() to derive the unlock write sequence from the
 * per-card key bytes.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t v;
	u_int i;	/* unsigned: avoid signed/unsigned compare with cnt */

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}
774
/*
 * Per-card 13-byte unlock keys, indexed by PCI vendor/product pair.
 * hifn_enable_crypto() feeds card_id through hifn_next_signature() to
 * generate the unlock write sequence.  All keys here are zero in the
 * public sources.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
822
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns a human-readable capability string ("DES", "3DES", ...)
 * on success, NULL on failure or unknown card.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Locate this card's unlock key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save registers we clobber below; restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(&sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Enter unlock mode and fetch the seed for the signature LFSR. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Walk the 13 key bytes through the signature generator. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
		    return ("3DES/AES");
		else
		    return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
925
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: ack/clear everything, disable ring ctrl */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Select the interrupt sources we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		/* 795x: no external RAM selection; also program the PLL. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* Clear pending PU status, then configure DMA mode and polling. */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
997
998 /*
999 * The maximum number of sessions supported by the card
1000 * is dependent on the amount of context ram, which
1001 * encryption algorithms are enabled, and how compression
1002 * is configured. This should be configured before this
1003 * routine is called.
1004 */
1005 static void
1006 hifn_sessions(struct hifn_softc *sc)
1007 {
1008 u_int32_t pucnfg;
1009 int ctxsize;
1010
1011 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1012
1013 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1014 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1015 ctxsize = 128;
1016 else
1017 ctxsize = 512;
1018 /*
1019 * 7955/7956 has internal context memory of 32K
1020 */
1021 if (sc->sc_flags & HIFN_IS_7956)
1022 sc->sc_maxses = 32768 / ctxsize;
1023 else
1024 sc->sc_maxses = 1 +
1025 ((sc->sc_ramsize - 32768) / ctxsize);
1026 }
1027 else
1028 sc->sc_maxses = sc->sc_ramsize / 16384;
1029
1030 if (sc->sc_maxses > 2048)
1031 sc->sc_maxses = 2048;
1032 }
1033
1034 /*
1035 * Determine ram type (sram or dram). Board should be just out of a reset
1036 * state when this is called.
1037 */
1038 static int
1039 hifn_ramtype(struct hifn_softc *sc)
1040 {
1041 u_int8_t data[8], dataexpect[8];
1042 int i;
1043
1044 for (i = 0; i < sizeof(data); i++)
1045 data[i] = dataexpect[i] = 0x55;
1046 if (hifn_writeramaddr(sc, 0, data))
1047 return (-1);
1048 if (hifn_readramaddr(sc, 0, data))
1049 return (-1);
1050 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1051 sc->sc_drammodel = 1;
1052 return (0);
1053 }
1054
1055 for (i = 0; i < sizeof(data); i++)
1056 data[i] = dataexpect[i] = 0xaa;
1057 if (hifn_writeramaddr(sc, 0, data))
1058 return (-1);
1059 if (hifn_readramaddr(sc, 0, data))
1060 return (-1);
1061 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1062 sc->sc_drammodel = 1;
1063 return (0);
1064 }
1065
1066 return (0);
1067 }
1068
1069 #define HIFN_SRAM_MAX (32 << 20)
1070 #define HIFN_SRAM_STEP_SIZE 16384
1071 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1072
1073 static int
1074 hifn_sramsize(struct hifn_softc *sc)
1075 {
1076 u_int32_t a;
1077 u_int8_t data[8];
1078 u_int8_t dataexpect[sizeof(data)];
1079 int32_t i;
1080
1081 for (i = 0; i < sizeof(data); i++)
1082 data[i] = dataexpect[i] = i ^ 0x5a;
1083
1084 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1085 a = i * HIFN_SRAM_STEP_SIZE;
1086 memcpy(data, &i, sizeof(i));
1087 hifn_writeramaddr(sc, a, data);
1088 }
1089
1090 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1091 a = i * HIFN_SRAM_STEP_SIZE;
1092 memcpy(dataexpect, &i, sizeof(i));
1093 if (hifn_readramaddr(sc, a, data) < 0)
1094 return (0);
1095 if (memcmp(data, dataexpect, sizeof(data)) != 0)
1096 return (0);
1097 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1098 }
1099
1100 return (0);
1101 }
1102
1103 /*
1104 * XXX For dram boards, one should really try all of the
1105 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1106 * is already set up correctly.
1107 */
1108 static int
1109 hifn_dramsize(struct hifn_softc *sc)
1110 {
1111 u_int32_t cnfg;
1112
1113 if (sc->sc_flags & HIFN_IS_7956) {
1114 /*
1115 * 7955/7956 have a fixed internal ram of only 32K.
1116 */
1117 sc->sc_ramsize = 32768;
1118 } else {
1119 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1120 HIFN_PUCNFG_DRAMMASK;
1121 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1122 }
1123 return (0);
1124 }
1125
/*
 * Reserve the next slot in each of the four descriptor rings
 * (command, source, destination, result), returning the indices
 * through cmdp, srcp, dstp and resp.  When a ring's producer index
 * has reached the end, the trailing JUMP descriptor is marked valid
 * (so the chip wraps back to slot 0) and the index restarts at 0.
 * The per-ring "k" consumer-scan index is advanced to match; the
 * in-use counters (cmdu etc.) are not touched here.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* Command ring: wrap via the JUMP descriptor if at the end. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* Source ring. */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* Destination ring. */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* Result ring. */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1172
1173 static int
1174 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1175 {
1176 struct hifn_dma *dma = sc->sc_dma;
1177 struct hifn_base_command wc;
1178 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1179 int r, cmdi, resi, srci, dsti;
1180
1181 wc.masks = htole16(3 << 13);
1182 wc.session_num = htole16(addr >> 14);
1183 wc.total_source_count = htole16(8);
1184 wc.total_dest_count = htole16(addr & 0x3fff);
1185
1186 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1187
1188 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1189 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1190 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1191
1192 /* build write command */
1193 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1194 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1195 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1196
1197 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1198 + offsetof(struct hifn_dma, test_src));
1199 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1200 + offsetof(struct hifn_dma, test_dst));
1201
1202 dma->cmdr[cmdi].l = htole32(16 | masks);
1203 dma->srcr[srci].l = htole32(8 | masks);
1204 dma->dstr[dsti].l = htole32(4 | masks);
1205 dma->resr[resi].l = htole32(4 | masks);
1206
1207 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1208 0, sc->sc_dmamap->dm_mapsize,
1209 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1210
1211 for (r = 10000; r >= 0; r--) {
1212 DELAY(10);
1213 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1214 0, sc->sc_dmamap->dm_mapsize,
1215 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1216 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1217 break;
1218 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1219 0, sc->sc_dmamap->dm_mapsize,
1220 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1221 }
1222 if (r == 0) {
1223 printf("%s: writeramaddr -- "
1224 "result[%d](addr %d) still valid\n",
1225 device_xname(&sc->sc_dv), resi, addr);
1226 r = -1;
1227 return (-1);
1228 } else
1229 r = 0;
1230
1231 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1232 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1233 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1234
1235 return (r);
1236 }
1237
1238 static int
1239 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1240 {
1241 struct hifn_dma *dma = sc->sc_dma;
1242 struct hifn_base_command rc;
1243 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1244 int r, cmdi, srci, dsti, resi;
1245
1246 rc.masks = htole16(2 << 13);
1247 rc.session_num = htole16(addr >> 14);
1248 rc.total_source_count = htole16(addr & 0x3fff);
1249 rc.total_dest_count = htole16(8);
1250
1251 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1252
1253 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1254 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1255 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1256
1257 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1258 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1259
1260 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1261 offsetof(struct hifn_dma, test_src));
1262 dma->test_src = 0;
1263 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1264 offsetof(struct hifn_dma, test_dst));
1265 dma->test_dst = 0;
1266 dma->cmdr[cmdi].l = htole32(8 | masks);
1267 dma->srcr[srci].l = htole32(8 | masks);
1268 dma->dstr[dsti].l = htole32(8 | masks);
1269 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1270
1271 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1272 0, sc->sc_dmamap->dm_mapsize,
1273 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1274
1275 for (r = 10000; r >= 0; r--) {
1276 DELAY(10);
1277 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1278 0, sc->sc_dmamap->dm_mapsize,
1279 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1280 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1281 break;
1282 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1283 0, sc->sc_dmamap->dm_mapsize,
1284 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1285 }
1286 if (r == 0) {
1287 printf("%s: readramaddr -- "
1288 "result[%d](addr %d) still valid\n",
1289 device_xname(&sc->sc_dv), resi, addr);
1290 r = -1;
1291 } else {
1292 r = 0;
1293 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1294 }
1295
1296 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1297 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1298 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1299
1300 return (r);
1301 }
1302
1303 /*
1304 * Initialize the descriptor rings.
1305 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	/*
	 * Point every command/result descriptor at its fixed buffer
	 * inside the shared hifn_dma area.  (Source/destination
	 * descriptor pointers are filled in per-request.)
	 */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * The extra descriptor at the end of each ring is a JUMP back
	 * to slot 0, so the chip treats the ring as circular.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	/* Reset in-use counters and producer/consumer indices. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1339
1340 /*
1341 * Writes out the raw command buffer space. Returns the
1342 * command buffer size.
1343 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: total source/destination lengths are split
	 * into a low 16-bit field and high bits packed into the
	 * session_num word.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		/* Slop bytes are staged through a u_int32_t bounce slot. */
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/*
	 * Optional sub-commands follow the base command in a fixed
	 * order: compression, MAC, then crypt.
	 */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* Key material, if new keys are being loaded this request. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * Repeat the RC4 key until 256 bytes are
			 * filled, then append 4 zero bytes.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* IV, if a new one is supplied; AES uses a longer IV. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * A command that engages none of the engines still gets 8
	 * bytes of zero padding appended.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	/* Return the number of bytes written into the command buffer. */
	return (buf_pos - buf);
}
1473
1474 static int
1475 hifn_dmamap_aligned(bus_dmamap_t map)
1476 {
1477 int i;
1478
1479 for (i = 0; i < map->dm_nsegs; i++) {
1480 if (map->dm_segs[i].ds_addr & 3)
1481 return (0);
1482 if ((i != (map->dm_nsegs - 1)) &&
1483 (map->dm_segs[i].ds_len & 3))
1484 return (0);
1485 }
1486 return (1);
1487 }
1488
/*
 * Fill the destination descriptor ring from cmd->dst_map.  All but
 * the final descriptor mask the done interrupt; the final one also
 * carries HIFN_D_LAST.  When cmd->sloplen is non-zero, the trailing
 * (non-multiple-of-4) bytes are redirected into the shared "slop"
 * bounce slot for this command.  Updates dma->dsti/dstu and returns
 * the new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	/* All segments except the last get a plain descriptor. */
	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* Wrap the ring via the trailing JUMP descriptor. */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned tail: last descriptor covers the last segment. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/*
		 * Unaligned tail: the final descriptor points at the
		 * per-command slop word; the slop bytes are copied
		 * back out after completion.
		 */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Any aligned prefix of the last segment still needs
		 * its own descriptor. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Post the final (HIFN_D_LAST) descriptor. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1560
1561 static int
1562 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1563 {
1564 struct hifn_dma *dma = sc->sc_dma;
1565 bus_dmamap_t map = cmd->src_map;
1566 int idx, i;
1567 u_int32_t last = 0;
1568
1569 idx = dma->srci;
1570 for (i = 0; i < map->dm_nsegs; i++) {
1571 if (i == map->dm_nsegs - 1)
1572 last = HIFN_D_LAST;
1573
1574 dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
1575 dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
1576 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1577 HIFN_SRCR_SYNC(sc, idx,
1578 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1579
1580 if (++idx == HIFN_D_SRC_RSIZE) {
1581 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1582 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1583 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1584 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1585 idx = 0;
1586 }
1587 }
1588 dma->srci = idx;
1589 dma->srcu += map->dm_nsegs;
1590 return (idx);
1591 }
1592
/*
 * Submit one crypto request to the chip: map the source (mbuf or
 * uio), arrange a destination (reusing the source when it is
 * 4-byte-aligned, otherwise building a fresh aligned mbuf chain),
 * write the command descriptor, and load all four DMA rings.
 *
 * Returns 0 on success; ENOMEM when maps/mbufs/ring slots are
 * unavailable, EINVAL for unsupported buffer layouts.  On error all
 * resources acquired here are released via the goto-cleanup ladder.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source buffer into src_map (mbuf chain or uio). */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;	/* XXX we don't handle contiguous buffers! */
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/*
		 * Aligned source: operate in place.  Any trailing
		 * sub-word bytes are handled via the slop mechanism.
		 */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/* Unaligned source: uio is unsupported; for mbufs,
		 * build a fresh (aligned) destination chain of the
		 * same total length. */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Append mbufs (clusters when large) until the
			 * chain covers totlen bytes. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination buffer: create and load dst_map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(&sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* Flush the data buffers before handing them to the chip. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Take a command slot (wrapping via the JUMP descriptor) and
	 * serialize the command into its buffer. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Post source descriptors and kick the source engine. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember which command this result slot belongs to. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* The slop bounce word is indexed by the result slot. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(&sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the watchdog countdown used by hifn_tick(). */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	/* Only free a destination chain we allocated ourselves. */
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1864
1865 static void
1866 hifn_tick(void *vsc)
1867 {
1868 struct hifn_softc *sc = vsc;
1869 int s;
1870
1871 s = splnet();
1872 if (sc->sc_active == 0) {
1873 struct hifn_dma *dma = sc->sc_dma;
1874 u_int32_t r = 0;
1875
1876 if (dma->cmdu == 0 && sc->sc_c_busy) {
1877 sc->sc_c_busy = 0;
1878 r |= HIFN_DMACSR_C_CTRL_DIS;
1879 CLR_LED(sc, HIFN_MIPSRST_LED0);
1880 }
1881 if (dma->srcu == 0 && sc->sc_s_busy) {
1882 sc->sc_s_busy = 0;
1883 r |= HIFN_DMACSR_S_CTRL_DIS;
1884 CLR_LED(sc, HIFN_MIPSRST_LED1);
1885 }
1886 if (dma->dstu == 0 && sc->sc_d_busy) {
1887 sc->sc_d_busy = 0;
1888 r |= HIFN_DMACSR_D_CTRL_DIS;
1889 }
1890 if (dma->resu == 0 && sc->sc_r_busy) {
1891 sc->sc_r_busy = 0;
1892 r |= HIFN_DMACSR_R_CTRL_DIS;
1893 CLR_LED(sc, HIFN_MIPSRST_LED2);
1894 }
1895 if (r)
1896 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1897 }
1898 else
1899 sc->sc_active--;
1900 splx(s);
1901 #ifdef __OpenBSD__
1902 timeout_add(&sc->sc_tickto, hz);
1903 #else
1904 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1905 #endif
1906 }
1907
/*
 * Interrupt handler: acknowledge the DMA status, reset the chip on
 * engine aborts, then drain the result, source and command rings up
 * to the first descriptor the chip still owns (HIFN_D_VALID set),
 * invoking hifn_callback() for each completed request.
 *
 * Returns 0 when the interrupt was not ours, 1 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    device_xname(&sc->sc_dv),
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Acknowledge only the conditions we are interested in. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(&sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(&sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(&sc->sc_dv));
	}

	/* Any engine abort requires a full chip reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(&sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/*
	 * Result ring: each ring has RSIZE + 1 slots, the extra one
	 * being the JUMP descriptor; the "i != RSIZE" tests below
	 * skip that slot, which carries no request.
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Chip still owns this slot: stop draining. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			/* Complete the request back to opencrypto. */
			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Source ring: just retire consumed descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring: sync the retired command buffers. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
2037
2038 /*
2039 * Allocate a new 'session' and return an encoded session id. 'sidp'
2040 * contains our registration id, and should contain an encoded session
2041 * id on successful allocation.
2042 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/*
	 * Walk the requested algorithm chain; at most one MAC, one
	 * cipher and one compression algorithm may be requested.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			arc4randbytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	/* At least one engine must be requested. */
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	/* Encode device unit + session slot into the session id. */
	*sidp = HIFN_SID(device_unit(&sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}
2121
2122 /*
2123 * Deallocate a session.
2124 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2125 * XXX to blow away any keys already stored there.
2126 */
2127 static int
2128 hifn_freesession(void *arg, u_int64_t tid)
2129 {
2130 struct hifn_softc *sc = arg;
2131 int session;
2132 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2133
2134 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2135 if (sc == NULL)
2136 return (EINVAL);
2137
2138 session = HIFN_SESSION(sid);
2139 if (session >= sc->sc_maxses)
2140 return (EINVAL);
2141
2142 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2143 return (0);
2144 }
2145
/*
 * Submit one opencrypto request to the chip.  Classifies the (at most
 * two) crypto descriptors into an encryption part and a MAC part,
 * builds a hifn_command describing IV, keys and modes, and hands it
 * to hifn_crypto().  On any local failure the request is completed
 * immediately via crypto_done() with crp_etype set, and 0 is returned;
 * ERESTART from hifn_crypto() is propagated so the caller requeues.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	/* Zeroed so the error paths can tell what has been set up. */
	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* In-place operation: source and destination are the same buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* Single descriptor: either hash-only or cipher-only. */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
#ifdef	HAVE_CRYPTO_LZS
		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
			/* Compression takes a separate path entirely. */
			return (hifn_compression(sc, crp, cmd));
#endif
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/*
		 * Two descriptors: the chip only supports MAC-then-decrypt
		 * and encrypt-then-MAC orderings; anything else is refused.
		 */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/*
			 * RC4 is stateful: if the direction changed since
			 * the last op, force the key to be reloaded by
			 * dropping the session back to HS_STATE_USED.
			 */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			/* CBC modes need an IV (16 bytes for AES, 8 else). */
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Encrypt: use caller's IV or the session IV. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* Write the IV into the buffer if absent. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->srcu.src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->srcu.src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: take the IV from caller or buffer. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->srcu.src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->srcu.src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* Key not yet in context RAM: tell the chip to load it. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/*
		 * For HMAC, ship the (zero-padded) key along with the
		 * command the first time this session touches the chip.
		 */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		/* Remember direction for the RC4 state check above. */
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		/* Keys are now in context RAM. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef	HIFN_DEBUG
		if (hifn_debug)
			printf("%s: requeue request\n", device_xname(&sc->sc_dv));
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (0);
}
2413
/*
 * Abort everything in flight after a DMA error: walk the result ring,
 * salvage commands the chip already finished, fail the rest back to
 * opencrypto, then reset and reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Descriptor still owned by the chip: tear it down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * If the destination chain differs from the source,
			 * the source was a private copy; free it and hand
			 * the destination back to the caller.
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2487
/*
 * Completion handler for a finished crypt/MAC command: sync the DMA
 * maps, fix up the destination mbuf chain, copy back slop bytes and
 * the MAC result, save the next IV for CBC sessions, release the DMA
 * resources and complete the request via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A private destination chain was allocated: trim
			 * it to the source length, inherit the packet
			 * header length, and free the source chain.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy back the trailing bytes that went through the slop buffer. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/*
	 * Reclaim completed destination descriptors.  The scan runs to
	 * HIFN_D_DST_RSIZE + 1 because the ring's extra slot is the jump
	 * descriptor, which does not consume a "u" count.
	 */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For encryption, save the last cipher block as the IV for the
	 * next request in this session (CBC chaining across requests).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	/* Deliver the MAC result from the result buffer, if one was asked. */
	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2621
2622 #ifdef HAVE_CRYPTO_LZS
2623
2624 static int
2625 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2626 struct hifn_command *cmd)
2627 {
2628 struct cryptodesc *crd = crp->crp_desc;
2629 int s, err = 0;
2630
2631 cmd->compcrd = crd;
2632 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2633
2634 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2635 /*
2636 * XXX can only handle mbufs right now since we can
2637 * XXX dynamically resize them.
2638 */
2639 err = EINVAL;
2640 return (ENOMEM);
2641 }
2642
2643 if ((crd->crd_flags & CRD_F_COMP) == 0)
2644 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2645 if (crd->crd_alg == CRYPTO_LZS_COMP)
2646 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2647 HIFN_COMP_CMD_CLEARHIST;
2648
2649 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2650 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2651 err = ENOMEM;
2652 goto fail;
2653 }
2654
2655 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2656 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2657 err = ENOMEM;
2658 goto fail;
2659 }
2660
2661 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2662 int len;
2663
2664 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2665 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2666 err = ENOMEM;
2667 goto fail;
2668 }
2669
2670 len = cmd->src_map->dm_mapsize / MCLBYTES;
2671 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2672 len++;
2673 len *= MCLBYTES;
2674
2675 if ((crd->crd_flags & CRD_F_COMP) == 0)
2676 len *= 4;
2677
2678 if (len > HIFN_MAX_DMALEN)
2679 len = HIFN_MAX_DMALEN;
2680
2681 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2682 if (cmd->dstu.dst_m == NULL) {
2683 err = ENOMEM;
2684 goto fail;
2685 }
2686
2687 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2688 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2689 err = ENOMEM;
2690 goto fail;
2691 }
2692 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2693 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2694 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2695 err = ENOMEM;
2696 goto fail;
2697 }
2698 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2699 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2700 err = ENOMEM;
2701 goto fail;
2702 }
2703 }
2704
2705 if (cmd->src_map == cmd->dst_map)
2706 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2707 0, cmd->src_map->dm_mapsize,
2708 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2709 else {
2710 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2711 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2712 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2713 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2714 }
2715
2716 cmd->crp = crp;
2717 /*
2718 * Always use session 0. The modes of compression we use are
2719 * stateless and there is always at least one compression
2720 * context, zero.
2721 */
2722 cmd->session_num = 0;
2723 cmd->softc = sc;
2724
2725 s = splnet();
2726 err = hifn_compress_enter(sc, cmd);
2727 splx(s);
2728
2729 if (err != 0)
2730 goto fail;
2731 return (0);
2732
2733 fail:
2734 if (cmd->dst_map != NULL) {
2735 if (cmd->dst_map->dm_nsegs > 0)
2736 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2737 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2738 }
2739 if (cmd->src_map != NULL) {
2740 if (cmd->src_map->dm_nsegs > 0)
2741 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2742 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2743 }
2744 free(cmd, M_DEVBUF);
2745 if (err == EINVAL)
2746 hifnstats.hst_invalid++;
2747 else
2748 hifnstats.hst_nomem++;
2749 crp->crp_etype = err;
2750 crypto_done(crp);
2751 return (0);
2752 }
2753
2754 /*
2755 * must be called at splnet()
2756 */
/*
 * Enqueue a compression command on the DMA rings.  Reserves one slot
 * on the command and result rings plus one slot per DMA segment on
 * the source and destination rings, writes the command, and kicks
 * the four ring engines as needed.  Returns ENOMEM (caller retries)
 * if any ring lacks room.  Must be called at splnet().
 */
static int
hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	int cmdi, resi;
	u_int32_t cmdlen;

	/*
	 * NOTE(review): the second bound compares resu against
	 * HIFN_D_CMD_RSIZE; HIFN_D_RES_RSIZE was presumably intended —
	 * harmless if both rings are the same size, but confirm.
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
		return (ENOMEM);

	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
		return (ENOMEM);

	/* Wrap the command ring via its jump descriptor if at the end. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Remember where the slop bytes will land for the callback. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}
	sc->sc_active = 5;
	cmd->cmd_callback = hifn_callback_comp;
	return (0);
}
2853
2854 static void
2855 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2856 u_int8_t *resbuf)
2857 {
2858 struct hifn_base_result baseres;
2859 struct cryptop *crp = cmd->crp;
2860 struct hifn_dma *dma = sc->sc_dma;
2861 struct mbuf *m;
2862 int err = 0, i, u;
2863 u_int32_t olen;
2864 bus_size_t dstsize;
2865
2866 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2867 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2868 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2869 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2870
2871 dstsize = cmd->dst_map->dm_mapsize;
2872 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2873
2874 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2875
2876 i = dma->dstk; u = dma->dstu;
2877 while (u != 0) {
2878 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2879 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2880 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2881 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2882 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2883 offsetof(struct hifn_dma, dstr[i]),
2884 sizeof(struct hifn_desc),
2885 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2886 break;
2887 }
2888 if (++i == (HIFN_D_DST_RSIZE + 1))
2889 i = 0;
2890 else
2891 u--;
2892 }
2893 dma->dstk = i; dma->dstu = u;
2894
2895 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2896 bus_size_t xlen;
2897
2898 xlen = dstsize;
2899
2900 m_freem(cmd->dstu.dst_m);
2901
2902 if (xlen == HIFN_MAX_DMALEN) {
2903 /* We've done all we can. */
2904 err = E2BIG;
2905 goto out;
2906 }
2907
2908 xlen += MCLBYTES;
2909
2910 if (xlen > HIFN_MAX_DMALEN)
2911 xlen = HIFN_MAX_DMALEN;
2912
2913 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2914 cmd->srcu.src_m);
2915 if (cmd->dstu.dst_m == NULL) {
2916 err = ENOMEM;
2917 goto out;
2918 }
2919 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2920 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2921 err = ENOMEM;
2922 goto out;
2923 }
2924
2925 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2926 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2927 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2928 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2929
2930 /* already at splnet... */
2931 err = hifn_compress_enter(sc, cmd);
2932 if (err != 0)
2933 goto out;
2934 return;
2935 }
2936
2937 olen = dstsize - (letoh16(baseres.dst_cnt) |
2938 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2939 HIFN_BASE_RES_DSTLEN_S) << 16));
2940
2941 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2942
2943 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2944 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2945 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2946
2947 m = cmd->dstu.dst_m;
2948 if (m->m_flags & M_PKTHDR)
2949 m->m_pkthdr.len = olen;
2950 crp->crp_buf = (void *)m;
2951 for (; m != NULL; m = m->m_next) {
2952 if (olen >= m->m_len)
2953 olen -= m->m_len;
2954 else {
2955 m->m_len = olen;
2956 olen = 0;
2957 }
2958 }
2959
2960 m_freem(cmd->srcu.src_m);
2961 free(cmd, M_DEVBUF);
2962 crp->crp_etype = 0;
2963 crypto_done(crp);
2964 return;
2965
2966 out:
2967 if (cmd->dst_map != NULL) {
2968 if (cmd->src_map->dm_nsegs != 0)
2969 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2970 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2971 }
2972 if (cmd->src_map != NULL) {
2973 if (cmd->src_map->dm_nsegs != 0)
2974 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2975 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2976 }
2977 if (cmd->dstu.dst_m != NULL)
2978 m_freem(cmd->dstu.dst_m);
2979 free(cmd, M_DEVBUF);
2980 crp->crp_etype = err;
2981 crypto_done(crp);
2982 }
2983
2984 static struct mbuf *
2985 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2986 {
2987 int len;
2988 struct mbuf *m, *m0, *mlast;
2989
2990 if (mtemplate->m_flags & M_PKTHDR) {
2991 len = MHLEN;
2992 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2993 } else {
2994 len = MLEN;
2995 MGET(m0, M_DONTWAIT, MT_DATA);
2996 }
2997 if (m0 == NULL)
2998 return (NULL);
2999 if (len == MHLEN)
3000 M_DUP_PKTHDR(m0, mtemplate);
3001 MCLGET(m0, M_DONTWAIT);
3002 if (!(m0->m_flags & M_EXT))
3003 m_freem(m0);
3004 len = MCLBYTES;
3005
3006 totlen -= len;
3007 m0->m_pkthdr.len = m0->m_len = len;
3008 mlast = m0;
3009
3010 while (totlen > 0) {
3011 MGET(m, M_DONTWAIT, MT_DATA);
3012 if (m == NULL) {
3013 m_freem(m0);
3014 return (NULL);
3015 }
3016 MCLGET(m, M_DONTWAIT);
3017 if (!(m->m_flags & M_EXT)) {
3018 m_freem(m0);
3019 return (NULL);
3020 }
3021 len = MCLBYTES;
3022 m->m_len = len;
3023 if (m0->m_flags & M_PKTHDR)
3024 m0->m_pkthdr.len += len;
3025 totlen -= len;
3026
3027 mlast->m_next = m;
3028 mlast = m;
3029 }
3030
3031 return (m0);
3032 }
3033 #endif /* HAVE_CRYPTO_LZS */
3034
3035 static void
3036 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3037 {
3038 /*
3039 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3040 * and Group 1 registers; avoid conditions that could create
3041 * burst writes by doing a read in between the writes.
3042 */
3043 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3044 if (sc->sc_waw_lastgroup == reggrp &&
3045 sc->sc_waw_lastreg == reg - 4) {
3046 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3047 }
3048 sc->sc_waw_lastgroup = reggrp;
3049 sc->sc_waw_lastreg = reg;
3050 }
3051 if (reggrp == 0)
3052 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3053 else
3054 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3055
3056 }
3057
3058 static u_int32_t
3059 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3060 {
3061 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3062 sc->sc_waw_lastgroup = -1;
3063 sc->sc_waw_lastreg = 1;
3064 }
3065 if (reggrp == 0)
3066 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3067 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3068 }
3069