/*	$NetBSD: hifn7751.c,v 1.70 2020/05/17 00:52:31 riastradh Exp $	*/
/*	$OpenBSD: hifn7751.c,v 1.179 2020/01/11 21:34:03 cheloha Exp $	*/

/*
 * Invertex AEON / Hifn 7751 driver
 * Copyright (c) 1999 Invertex Inc. All rights reserved.
 * Copyright (c) 1999 Theo de Raadt
 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 *			http://www.netsec.net
 * Copyright (c) 2003 Hifn Inc.
 *
 * This driver is based on a previous driver by Invertex, for which they
 * requested:  Please send any comments, feedback, bug-fixes, or feature
 * requests to software (at) invertex.com.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

/*
 * Driver for various Hifn encryption processors.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.70 2020/05/17 00:52:31 riastradh Exp $");

#include <sys/param.h>
#include <sys/cprng.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rndsource.h>
#include <sys/sha1.h>
#include <sys/systm.h>

#include <opencrypto/cryptodev.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/hifn7751reg.h>
#include <dev/pci/hifn7751var.h>

#undef HIFN_DEBUG

#ifdef HIFN_DEBUG
extern int hifn_debug;		/* patchable */
int hifn_debug = 1;
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static int hifn_match(device_t, cfdata_t, void *);
static void hifn_attach(device_t, device_t, void *);
static int hifn_detach(device_t, int);

CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
    hifn_match, hifn_attach, hifn_detach, NULL);

static void hifn_reset_board(struct hifn_softc *, int);
static void hifn_reset_puc(struct hifn_softc *);
static void hifn_puc_wait(struct hifn_softc *);
static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
static void hifn_set_retry(struct hifn_softc *);
static void hifn_init_dma(struct hifn_softc *);
static void hifn_init_pci_registers(struct hifn_softc *);
static int hifn_sramsize(struct hifn_softc *);
static int hifn_dramsize(struct hifn_softc *);
static int hifn_ramtype(struct hifn_softc *);
static void hifn_sessions(struct hifn_softc *);
static int hifn_intr(void *);
static u_int hifn_write_command(struct hifn_command *, uint8_t *);
static uint32_t hifn_next_signature(uint32_t a, u_int cnt);
static int hifn_newsession(void*, uint32_t *, struct cryptoini *);
static int hifn_freesession(void*, uint64_t);
static int hifn_process(void*, struct cryptop *, int);
static void hifn_callback(struct hifn_softc *, struct hifn_command *,
	uint8_t *);
static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
	struct cryptop*, int);
static int hifn_readramaddr(struct hifn_softc *, int, uint8_t *);
static int hifn_writeramaddr(struct hifn_softc *, int, uint8_t *);
static int hifn_dmamap_aligned(bus_dmamap_t);
static int hifn_dmamap_load_src(struct hifn_softc *,
	struct hifn_command *);
static int hifn_dmamap_load_dst(struct hifn_softc *,
	struct hifn_command *);
static int hifn_init_pubrng(struct hifn_softc *);
static void hifn_rng(void *);
static void hifn_rng_locked(void *);
static void hifn_tick(void *);
static void hifn_abort(struct hifn_softc *);
static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
	int *);
static void hifn_write_4(struct hifn_softc *, int, bus_size_t, uint32_t);
static uint32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
#ifdef CRYPTO_LZS_COMP
static int hifn_compression(struct hifn_softc *, struct cryptop *,
	struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
	uint8_t *);
#endif	/* CRYPTO_LZS_COMP */

struct hifn_stats hifnstats;

static const struct hifn_product {
	pci_vendor_id_t hifn_vendor;
	pci_product_id_t hifn_product;
	int hifn_flags;
	const char *hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	{ 0, 0,
	  0,
	  NULL
	}
};

static const struct hifn_product *
hifn_lookup(const struct pci_attach_args *pa)
{
	const struct hifn_product *hp;

	for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
		if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
		    PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
			return (hp);
	}
	return (NULL);
}

static int
hifn_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (hifn_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	uint32_t cmd;
	uint16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;
	char intrbuf[PCI_INTRSTR_LEN];

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &sc->sc_iosz0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &sc->sc_iosz1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: halve the RAM size because
	 * two of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
	}

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);
}

static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	hifn_abort(sc);

	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	rnd_detach_source(&sc->sc_rnd_source);

	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
hifn_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	default:
		return ENOTTY;
	}
}

static void
hifn_rng_get(size_t bytes, void *priv)
{
	struct hifn_softc *sc = priv;

	mutex_enter(&sc->sc_mtx);
	sc->sc_rng_need = bytes;
	callout_reset(&sc->sc_rngto, 0, hifn_rng, sc);
	mutex_exit(&sc->sc_mtx);
}

static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	uint32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 */
		DELAY(4000);

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

static void
hifn_rng_locked(void *vsc)
{
	struct hifn_softc *sc = vsc;
	uint32_t num[64];
	uint32_t sts;
	int i;
	size_t got, gotent;

	if (sc->sc_rng_need < 1) {
		callout_stop(&sc->sc_rngto);
		return;
	}

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				printf("%s: RNG underflow: disabling\n",
				    device_xname(sc->sc_dv));
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			got = 2 * sizeof(num[0]);
			gotent = (got * NBBY) / HIFN_RNG_BITSPER;
			rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
			sc->sc_rng_need -= gotent;
		}
	} else {
		int nwords = 0;

		if (sc->sc_rng_need) {
			nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
			nwords = MIN((int)__arraycount(num), nwords);
		}

		if (nwords < 2) {
			nwords = 2;
		}

		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		for (i = 0; i < nwords * 8; i++) {
			volatile uint32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}

		got = nwords * sizeof(num[0]);
		gotent = (got * NBBY) / HIFN_RNG_BITSPER;
		rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
		sc->sc_rng_need -= gotent;
	}

	if (sc->sc_rng_need > 0) {
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
}

static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	hifn_rng_locked(vsc);
	mutex_spin_exit(&sc->sc_mtx);
}

static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

static void
hifn_set_retry(struct hifn_softc *sc)
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	uint32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

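/*
 * Step the unlock-signature LFSR "cnt" times.  Each step takes the
 * parity of the state bits selected by the mask 0x80080125 and feeds
 * it back into bit 0 as the state shifts left.  The parity is computed
 * by the usual XOR-fold, e.g.:
 *
 *	v ^= v >> 16;		fold 32 bits down to 16
 *	v ^= v >> 8;		fold 16 bits down to 8
 *	...
 *	parity = v & 1;
 *
 * which works because each fold preserves the XOR of all the bits.
 */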
static uint32_t
hifn_next_signature(uint32_t a, u_int cnt)
{
	u_int i;
	uint32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

static struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * powercrypt, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	uint32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	for (i = 0; i < __arraycount(pci2id); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kill the chip until
	 * the next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

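	/*
	 * Unlock handshake, as implemented below: put the DMA engine in
	 * UNLOCK mode, read the starting value from SECRET1, prime
	 * SECRET2 with zero, then clock the 13 card-id bytes (each
	 * offset by 0x101) through hifn_next_signature(), writing every
	 * intermediate signature back to SECRET2.
	 */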
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.\n");
	else
		aprint_debug("Encryption engine enabled successfully!\n");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
	     HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
	     HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
	     HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
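/*
 * E.g., with COMPSING set and 512-byte contexts, a non-7956 part with
 * 512KB of external RAM works out below to
 * 1 + (524288 - 32768) / 512 = 961 sessions, well under the 2048 cap.
 */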
static void
hifn_sessions(struct hifn_softc *sc)
{
	uint32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 */
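/*
 * The probe below writes the 0x55 and 0xaa test patterns to address 0
 * and reads them back; SRAM retains them, while DRAM (presumably
 * because refresh is not yet configured this early) fails at least one
 * read-back, in which case we flag the DRAM model.
 */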
static int
hifn_ramtype(struct hifn_softc *sc)
{
	uint8_t data[8], dataexpect[8];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

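/*
 * Size the SRAM by tagging the bottom of each 16KB step with its step
 * number, working down from the top of the 32MB window, then verifying
 * the tags from address 0 upward.  On a part with fewer address lines
 * the high writes presumably alias onto low addresses and clobber the
 * low tags, so the first mismatch marks the end of real SRAM and
 * sc_ramsize keeps the last address that verified.
 */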
static int
hifn_sramsize(struct hifn_softc *sc)
{
	uint32_t a, b;
	uint8_t data[8];
	uint8_t dataexpect[sizeof(data)];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	a = HIFN_SRAM_GRANULARITY * HIFN_SRAM_STEP_SIZE;
	b = HIFN_SRAM_GRANULARITY;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; ++i) {
		a -= HIFN_SRAM_STEP_SIZE;
		b -= 1;
		le32enc(data, b);
		hifn_writeramaddr(sc, a, data);
	}

	a = 0;
	b = 0;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		le32enc(dataexpect, b);
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);

		a += HIFN_SRAM_STEP_SIZE;
		b += 1;
		sc->sc_ramsize = a;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	uint32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

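/*
 * Grab the next free slot in each of the four descriptor rings.  When
 * an index has reached the end of its ring, re-arm that ring's jump
 * descriptor (the extra slot at index RSIZE) so the engine wraps back
 * to slot 0, and restart the index there.
 */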
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

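/*
 * Direct RAM access for the probes above encodes the byte address in
 * the base command: bits 14 and up go in session_num, and the low 14
 * bits ride in the otherwise unused length field (total_dest_count for
 * a write, total_source_count for a read); the masks values 3 << 13
 * and 2 << 13 select write and read respectively.
 */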
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command wc;
	const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
	memcpy(&dma->test_src, data, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: writeramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    device_xname(sc->sc_dv), resi, addr);
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

static int
hifn_readramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command rc;
	const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_src));
	dma->test_src = 0;
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_dst));
	dma->test_dst = 0;
	dma->cmdr[cmdi].l = htole32(8 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(8 | masks);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: readramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    device_xname(sc->sc_dv), resi, addr);
		r = -1;
	} else {
		r = 0;
		memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

/*
 * Initialize the descriptor rings.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}

/*
 * Writes out the raw command buffer space.  Returns the
 * command buffer size.
 */
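/*
 * Layout of the buffer this routine emits, in order:
 *
 *	base command		(always)
 *	comp command		(if HIFN_BASE_CMD_COMP)
 *	mac command		(if HIFN_BASE_CMD_MAC)
 *	crypt command		(if HIFN_BASE_CMD_CRYPT)
 *	mac key			(if HIFN_MAC_CMD_NEW_KEY)
 *	crypt key		(if HIFN_CRYPT_CMD_NEW_KEY)
 *	IV			(if HIFN_CRYPT_CMD_NEW_IV)
 *	8 bytes of zeros	(if no mac, crypt or comp at all)
 */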
static u_int
hifn_write_command(struct hifn_command *cmd, uint8_t *buf)
{
	uint8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	uint32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(uint32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

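/*
 * A map is "aligned" for the engine if every segment starts on a
 * 32-bit boundary and every segment except the last is a multiple of
 * 4 bytes long; otherwise hifn_crypto() bounces mbuf requests into a
 * freshly allocated chain and rejects unaligned iovecs.
 */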
static int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	uint32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(uint32_t);

		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	uint32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t cmdlen;
	int cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				m_copy_pkthdr(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * Always enable the command wait interrupt.  We are obviously
	 * missing an interrupt or two somewhere.  Enabling the command wait
	 * interrupt will guarantee we get called periodically until all
	 * of the queues are drained and thus work around this.
	 */
	sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}

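/*
 * Once per second: if nothing has been queued for the last five ticks
 * (sc_active is set to 5 by hifn_crypto() and counts down here), turn
 * off any DMA engines whose rings have drained and clear their LEDs.
 */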
1855 static void
1856 hifn_tick(void *vsc)
1857 {
1858 struct hifn_softc *sc = vsc;
1859
1860 mutex_spin_enter(&sc->sc_mtx);
1861 if (sc->sc_active == 0) {
1862 struct hifn_dma *dma = sc->sc_dma;
1863 uint32_t r = 0;
1864
1865 if (dma->cmdu == 0 && sc->sc_c_busy) {
1866 sc->sc_c_busy = 0;
1867 r |= HIFN_DMACSR_C_CTRL_DIS;
1868 CLR_LED(sc, HIFN_MIPSRST_LED0);
1869 }
1870 if (dma->srcu == 0 && sc->sc_s_busy) {
1871 sc->sc_s_busy = 0;
1872 r |= HIFN_DMACSR_S_CTRL_DIS;
1873 CLR_LED(sc, HIFN_MIPSRST_LED1);
1874 }
1875 if (dma->dstu == 0 && sc->sc_d_busy) {
1876 sc->sc_d_busy = 0;
1877 r |= HIFN_DMACSR_D_CTRL_DIS;
1878 }
1879 if (dma->resu == 0 && sc->sc_r_busy) {
1880 sc->sc_r_busy = 0;
1881 r |= HIFN_DMACSR_R_CTRL_DIS;
1882 CLR_LED(sc, HIFN_MIPSRST_LED2);
1883 }
1884 if (r)
1885 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1886 } else
1887 sc->sc_active--;
1888 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1889 mutex_spin_exit(&sc->sc_mtx);
1890 }
1891
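/*
 * hifn_intr: interrupt service routine.  Acknowledge the DMA status
 * bits, report overruns and illegal accesses, reset the board on an
 * abort, then walk the result, source, and command rings, retiring
 * every descriptor the chip has completed (HIFN_D_VALID cleared) and
 * dispatching each finished command to its completion callback.
 */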
1892 static int
1893 hifn_intr(void *arg)
1894 {
1895 struct hifn_softc *sc = arg;
1896 struct hifn_dma *dma = sc->sc_dma;
1897 uint32_t dmacsr, restart;
1898 int i, u;
1899
1900 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
1901
1902 #ifdef HIFN_DEBUG
1903 if (hifn_debug)
1904 printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
1905 device_xname(sc->sc_dv),
1906 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
1907 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1908 #endif
1909
1910 mutex_spin_enter(&sc->sc_mtx);
1911
1912 /* Nothing in the DMA unit interrupted */
1913 if ((dmacsr & sc->sc_dmaier) == 0) {
1914 mutex_spin_exit(&sc->sc_mtx);
1915 return (0);
1916 }
1917
1918 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
1919
1920 if (dmacsr & HIFN_DMACSR_ENGINE)
1921 WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));
1922
1923 if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
1924 (dmacsr & HIFN_DMACSR_PUBDONE))
1925 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
1926 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
1927
1928 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
1929 if (restart)
1930 printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);
1931
1932 if (sc->sc_flags & HIFN_IS_7811) {
1933 if (dmacsr & HIFN_DMACSR_ILLR)
1934 printf("%s: illegal read\n", device_xname(sc->sc_dv));
1935 if (dmacsr & HIFN_DMACSR_ILLW)
1936 printf("%s: illegal write\n", device_xname(sc->sc_dv));
1937 }
1938
1939 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
1940 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
1941 if (restart) {
1942 printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
1943 hifnstats.hst_abort++;
1944 hifn_abort(sc);
1945 goto out;
1946 }
1947
1948 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
1949 /*
1950 * If there are no result slots left to process and we receive a
1951 * "waiting on command" interrupt, disable the "waiting on
1952 * command" interrupt by clearing its bit.
1953 */
1954 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1955 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1956 }
1957
1958 /* clear the rings */
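/*
 * Each ring has RSIZE + 1 descriptors: slot RSIZE is a permanent
 * jump descriptor back to slot 0.  That is why the wrap tests below
 * compare the index against RSIZE + 1, and why the jump slot is
 * skipped without decrementing the in-use count.
 */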
1959 i = dma->resk;
1960 while (dma->resu != 0) {
1961 HIFN_RESR_SYNC(sc, i,
1962 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1963 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
1964 HIFN_RESR_SYNC(sc, i,
1965 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1966 break;
1967 }
1968
1969 if (i != HIFN_D_RES_RSIZE) {
1970 struct hifn_command *cmd;
1971
1972 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
1973 cmd = dma->hifn_commands[i];
1974 KASSERTMSG(cmd != NULL, "hifn_intr: null command slot %d", i);
1976 dma->hifn_commands[i] = NULL;
1977
1978 if (cmd->cmd_callback != NULL)
cmd->cmd_callback(sc, cmd, dma->result_bufs[i]);
else
hifn_callback(sc, cmd, dma->result_bufs[i]);
1979 hifnstats.hst_opackets++;
1980 }
1981
1982 if (++i == (HIFN_D_RES_RSIZE + 1))
1983 i = 0;
1984 else
1985 dma->resu--;
1986 }
1987 dma->resk = i;
1988
1989 i = dma->srck; u = dma->srcu;
1990 while (u != 0) {
1991 HIFN_SRCR_SYNC(sc, i,
1992 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1993 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
1994 HIFN_SRCR_SYNC(sc, i,
1995 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1996 break;
1997 }
1998 if (++i == (HIFN_D_SRC_RSIZE + 1))
1999 i = 0;
2000 else
2001 u--;
2002 }
2003 dma->srck = i; dma->srcu = u;
2004
2005 i = dma->cmdk; u = dma->cmdu;
2006 while (u != 0) {
2007 HIFN_CMDR_SYNC(sc, i,
2008 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2009 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2010 HIFN_CMDR_SYNC(sc, i,
2011 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2012 break;
2013 }
2014 if (i != HIFN_D_CMD_RSIZE) {
2015 u--;
2016 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2017 }
2018 if (++i == (HIFN_D_CMD_RSIZE + 1))
2019 i = 0;
2020 }
2021 dma->cmdk = i; dma->cmdu = u;
2022
2023 out:
2024 mutex_spin_exit(&sc->sc_mtx);
2025 return (1);
2026 }
2027
2028 /*
2029 * Allocate a new 'session' and return an encoded session id. On entry
2030 * *sidp holds our driver registration id; on successful return it is
2031 * overwritten with the encoded session id.
2032 */
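/*
 * The id handed back is assembled with HIFN_SID() from the device
 * unit number and the index of a free bit in the sc_sessions bitmap;
 * HIFN_SESSION() later recovers that index from the id.
 */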
2033 static int
2034 hifn_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
2035 {
2036 struct cryptoini *c;
2037 struct hifn_softc *sc = arg;
2038 int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2039
2040 KASSERTMSG(sc != NULL, "hifn_newsession: null softc");
2041 if (sidp == NULL || cri == NULL || sc == NULL)
2042 return retval;
2043
2044 mutex_spin_enter(&sc->sc_mtx);
2045 for (i = 0; i < sc->sc_maxses; i++)
2046 if (isclr(sc->sc_sessions, i))
2047 break;
2048 if (i == sc->sc_maxses) {
2049 retval = ENOMEM;
2050 goto out;
2051 }
2052
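/*
 * Walk the requested chain: at most one MAC, one cipher, and one
 * compression algorithm may be named, each only once.
 */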
2053 for (c = cri; c != NULL; c = c->cri_next) {
2054 switch (c->cri_alg) {
2055 case CRYPTO_MD5:
2056 case CRYPTO_SHA1:
2057 case CRYPTO_MD5_HMAC_96:
2058 case CRYPTO_SHA1_HMAC_96:
2059 if (mac) {
2060 goto out;
2061 }
2062 mac = 1;
2063 break;
2064 case CRYPTO_DES_CBC:
2065 case CRYPTO_3DES_CBC:
2066 case CRYPTO_AES_CBC:
2067 case CRYPTO_ARC4:
2068 if (cry) {
2069 goto out;
2070 }
2071 cry = 1;
2072 break;
2073 #ifdef CRYPTO_LZS_COMP
2074 case CRYPTO_LZS_COMP:
2075 if (comp) {
2076 goto out;
2077 }
2078 comp = 1;
2079 break;
2080 #endif
2081 default:
2082 goto out;
2083 }
2084 }
2085 if (mac == 0 && cry == 0 && comp == 0) {
2086 goto out;
2087 }
2088
2089 /*
2090 * XXX only want to support compression without chaining to
2091 * MAC/crypt engine right now
2092 */
2093 if ((comp && mac) || (comp && cry)) {
2094 goto out;
2095 }
2096
2097 *sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2098 setbit(sc->sc_sessions, i);
2099
2100 retval = 0;
2101 out:
2102 mutex_spin_exit(&sc->sc_mtx);
2103 return retval;
2104 }
2105
2106 /*
2107 * Deallocate a session.
2108 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2109 * XXX to blow away any keys already stored there.
2110 */
2111 static int
2112 hifn_freesession(void *arg, uint64_t tid)
2113 {
2114 struct hifn_softc *sc = arg;
2115 int session;
2116 uint32_t sid = (uint32_t)tid;
2117
2118 KASSERTMSG(sc != NULL, "hifn_freesession: null softc");
2119 if (sc == NULL)
2120 return (EINVAL);
2121
2122 mutex_spin_enter(&sc->sc_mtx);
2123 session = HIFN_SESSION(sid);
2124 if (session >= sc->sc_maxses) {
2125 mutex_spin_exit(&sc->sc_mtx);
2126 return (EINVAL);
2127 }
2128 clrbit(sc->sc_sessions, session);
2129 mutex_spin_exit(&sc->sc_mtx);
2130 return (0);
2131 }
2132
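/*
 * hifn_process: dispatch one cryptop to the device.  Sort the (at
 * most two) descriptors into a MAC operation and/or a cipher
 * operation, set up the IV and keys, and hand the command to
 * hifn_crypto().  Compression-only requests are diverted to
 * hifn_compression().
 */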
2133 static int
2134 hifn_process(void *arg, struct cryptop *crp, int hint)
2135 {
2136 struct hifn_softc *sc = arg;
2137 struct hifn_command *cmd = NULL;
2138 int session, err = 0, ivlen;
2139 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2140
2141 if (crp == NULL || crp->crp_callback == NULL) {
2142 hifnstats.hst_invalid++;
2143 return (EINVAL);
2144 }
2145
2146 mutex_spin_enter(&sc->sc_mtx);
2147 session = HIFN_SESSION(crp->crp_sid);
2148 if (session >= sc->sc_maxses) {
2149 err = EINVAL;
2150 goto errout;
2151 }
2152
2153 cmd = malloc(sizeof(*cmd), M_DEVBUF, M_NOWAIT | M_ZERO);
2154 if (cmd == NULL) {
2155 hifnstats.hst_nomem++;
2156 err = ENOMEM;
2157 goto errout;
2158 }
2159
2160 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2161 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2162 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2163 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2164 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2165 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2166 } else {
2167 err = EINVAL;
2168 goto errout; /* XXX we don't handle contiguous buffers! */
2169 }
2170
2171 crd1 = crp->crp_desc;
2172 if (crd1 == NULL) {
2173 err = EINVAL;
2174 goto errout;
2175 }
2176 crd2 = crd1->crd_next;
2177
2178 if (crd2 == NULL) {
2179 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2180 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2181 crd1->crd_alg == CRYPTO_SHA1 ||
2182 crd1->crd_alg == CRYPTO_MD5) {
2183 maccrd = crd1;
2184 enccrd = NULL;
2185 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2186 crd1->crd_alg == CRYPTO_3DES_CBC ||
2187 crd1->crd_alg == CRYPTO_AES_CBC ||
2188 crd1->crd_alg == CRYPTO_ARC4) {
2189 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2190 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2191 maccrd = NULL;
2192 enccrd = crd1;
2193 #ifdef CRYPTO_LZS_COMP
2194 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2195 return (hifn_compression(sc, crp, cmd));
2196 #endif
2197 } else {
2198 err = EINVAL;
2199 goto errout;
2200 }
2201 } else {
2202 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2203 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2204 crd1->crd_alg == CRYPTO_MD5 ||
2205 crd1->crd_alg == CRYPTO_SHA1) &&
2206 (crd2->crd_alg == CRYPTO_DES_CBC ||
2207 crd2->crd_alg == CRYPTO_3DES_CBC ||
2208 crd2->crd_alg == CRYPTO_AES_CBC ||
2209 crd2->crd_alg == CRYPTO_ARC4) &&
2210 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2211 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2212 maccrd = crd1;
2213 enccrd = crd2;
2214 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2215 crd1->crd_alg == CRYPTO_ARC4 ||
2216 crd1->crd_alg == CRYPTO_3DES_CBC ||
2217 crd1->crd_alg == CRYPTO_AES_CBC) &&
2218 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
2219 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2220 crd2->crd_alg == CRYPTO_MD5 ||
2221 crd2->crd_alg == CRYPTO_SHA1) &&
2222 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2223 enccrd = crd1;
2224 maccrd = crd2;
2225 } else {
2226 /*
2227 * The 7751 cannot process the operations in the requested order.
2228 */
2229 err = EINVAL;
2230 goto errout;
2231 }
2232 }
2233
2234 if (enccrd) {
2235 cmd->enccrd = enccrd;
2236 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2237 switch (enccrd->crd_alg) {
2238 case CRYPTO_ARC4:
2239 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2240 break;
2241 case CRYPTO_DES_CBC:
2242 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2243 HIFN_CRYPT_CMD_MODE_CBC |
2244 HIFN_CRYPT_CMD_NEW_IV;
2245 break;
2246 case CRYPTO_3DES_CBC:
2247 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2248 HIFN_CRYPT_CMD_MODE_CBC |
2249 HIFN_CRYPT_CMD_NEW_IV;
2250 break;
2251 case CRYPTO_AES_CBC:
2252 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2253 HIFN_CRYPT_CMD_MODE_CBC |
2254 HIFN_CRYPT_CMD_NEW_IV;
2255 break;
2256 default:
2257 err = EINVAL;
2258 goto errout;
2259 }
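/*
 * For the CBC modes, set up the IV: on encrypt, use the caller's
 * explicit IV or generate a fresh random one, and write it into
 * the buffer unless the caller says it is already present; on
 * decrypt, take it from the descriptor or read it back out of
 * the buffer.
 */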
2260 if (enccrd->crd_alg != CRYPTO_ARC4) {
2261 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2262 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2263 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2264 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2265 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2266 else
2267 cprng_fast(cmd->iv, ivlen);
2268
2269 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2270 == 0) {
2271 if (crp->crp_flags & CRYPTO_F_IMBUF)
2272 m_copyback(cmd->srcu.src_m,
2273 enccrd->crd_inject,
2274 ivlen, cmd->iv);
2275 else if (crp->crp_flags & CRYPTO_F_IOV)
2276 cuio_copyback(cmd->srcu.src_io,
2277 enccrd->crd_inject,
2278 ivlen, cmd->iv);
2279 }
2280 } else {
2281 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2282 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2283 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2284 m_copydata(cmd->srcu.src_m,
2285 enccrd->crd_inject, ivlen, cmd->iv);
2286 else if (crp->crp_flags & CRYPTO_F_IOV)
2287 cuio_copydata(cmd->srcu.src_io,
2288 enccrd->crd_inject,
2289 ivlen, cmd->iv);
2290 }
2291 }
2292
2293 cmd->ck = enccrd->crd_key;
2294 cmd->cklen = enccrd->crd_klen >> 3;
2295 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2296
2297 /*
2298 * Need to specify the size for the AES key in the masks.
2299 */
2300 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2301 HIFN_CRYPT_CMD_ALG_AES) {
2302 switch (cmd->cklen) {
2303 case 16:
2304 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2305 break;
2306 case 24:
2307 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2308 break;
2309 case 32:
2310 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2311 break;
2312 default:
2313 err = EINVAL;
2314 goto errout;
2315 }
2316 }
2317 }
2318
2319 if (maccrd) {
2320 cmd->maccrd = maccrd;
2321 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2322
2323 switch (maccrd->crd_alg) {
2324 case CRYPTO_MD5:
2325 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2326 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2327 HIFN_MAC_CMD_POS_IPSEC;
2328 break;
2329 case CRYPTO_MD5_HMAC_96:
2330 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2331 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2332 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2333 break;
2334 case CRYPTO_SHA1:
2335 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2336 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2337 HIFN_MAC_CMD_POS_IPSEC;
2338 break;
2339 case CRYPTO_SHA1_HMAC_96:
2340 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2341 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2342 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2343 break;
2344 }
2345
2346 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2347 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) {
2348 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2349 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
2350 memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
2351 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2352 }
2353 }
2354
2355 cmd->crp = crp;
2356 cmd->session_num = session;
2357 cmd->softc = sc;
2358
2359 err = hifn_crypto(sc, cmd, crp, hint);
2360 if (err == 0) {
2361 mutex_spin_exit(&sc->sc_mtx);
return 0;
2362 } else if (err == ERESTART) {
2363 /*
2364 * There weren't enough resources to dispatch the request
2365 * to the device. Notify the caller so it will requeue the
2366 * request and resubmit it soon.
2367 */
2368 #ifdef HIFN_DEBUG
2369 if (hifn_debug)
2370 printf("%s: requeue request\n", device_xname(sc->sc_dv));
2371 #endif
2372 free(cmd, M_DEVBUF);
2373 sc->sc_needwakeup |= CRYPTO_SYMQ;
2374 mutex_spin_exit(&sc->sc_mtx);
2375 return (err);
2376 }
2377
2378 errout:
2379 if (cmd != NULL) {
2380 explicit_memset(cmd, 0, sizeof(*cmd));
2381 free(cmd, M_DEVBUF);
2382 }
2383 if (err == EINVAL)
2384 hifnstats.hst_invalid++;
2385 else
2386 hifnstats.hst_nomem++;
2387 crp->crp_etype = err;
2388 mutex_spin_exit(&sc->sc_mtx);
2389 crypto_done(crp);
2390 return (0);
2391 }
2392
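/*
 * hifn_abort: recover from a DMA abort.  Salvage any result-ring
 * entries the chip already completed, fail the remainder back to
 * their callers, then reset and reinitialize the board.
 */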
2393 static void
2394 hifn_abort(struct hifn_softc *sc)
2395 {
2396 struct hifn_dma *dma = sc->sc_dma;
2397 struct hifn_command *cmd;
2398 struct cryptop *crp;
2399 int i, u;
2400
2401 i = dma->resk; u = dma->resu;
2402 while (u != 0) {
2403 cmd = dma->hifn_commands[i];
2404 KASSERTMSG(cmd != NULL, "hifn_abort: null cmd slot %d", i);
2405 dma->hifn_commands[i] = NULL;
2406 crp = cmd->crp;
2407
2408 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2409 /* Salvage what we can. */
2410 hifnstats.hst_opackets++;
2411 if (cmd->cmd_callback != NULL)
cmd->cmd_callback(sc, cmd, dma->result_bufs[i]);
else
hifn_callback(sc, cmd, dma->result_bufs[i]);
2412 } else {
2413 if (cmd->src_map == cmd->dst_map) {
2414 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2415 0, cmd->src_map->dm_mapsize,
2416 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2417 } else {
2418 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2419 0, cmd->src_map->dm_mapsize,
2420 BUS_DMASYNC_POSTWRITE);
2421 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2422 0, cmd->dst_map->dm_mapsize,
2423 BUS_DMASYNC_POSTREAD);
2424 }
2425
2426 if (cmd->srcu.src_m != cmd->dstu.dst_m) {
2427 m_freem(cmd->srcu.src_m);
2428 crp->crp_buf = (void *)cmd->dstu.dst_m;
2429 }
2430
2431 /* non-shared buffers cannot be restarted */
2432 if (cmd->src_map != cmd->dst_map) {
2433 /*
2434 * XXX should be EAGAIN, delayed until
2435 * after the reset.
2436 */
2437 crp->crp_etype = ENOMEM;
2438 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2439 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2440 } else
2441 crp->crp_etype = ENOMEM;
2442
2443 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2444 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2445
2446 explicit_memset(cmd, 0, sizeof(*cmd));
2447 free(cmd, M_DEVBUF);
2448 if (crp->crp_etype != EAGAIN)
2449 crypto_done(crp);
2450 }
2451
2452 if (++i == HIFN_D_RES_RSIZE)
2453 i = 0;
2454 u--;
2455 }
2456 dma->resk = i; dma->resu = u;
2457
2458 hifn_reset_board(sc, 1);
2459 hifn_init_dma(sc);
2460 hifn_init_pci_registers(sc);
2461 }
2462
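/*
 * hifn_callback: complete a finished command.  Sync the DMA maps,
 * trim the destination mbuf chain to the output size, copy back any
 * slop bytes and the MAC result, advance the destination ring, and
 * finish the request with crypto_done().
 */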
2463 static void
2464 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, uint8_t *resbuf)
2465 {
2466 struct hifn_dma *dma = sc->sc_dma;
2467 struct cryptop *crp = cmd->crp;
2468 struct cryptodesc *crd;
2469 struct mbuf *m;
2470 int totlen, i, u;
2471
2472 if (cmd->src_map == cmd->dst_map)
2473 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2474 0, cmd->src_map->dm_mapsize,
2475 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2476 else {
2477 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2478 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2479 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2480 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2481 }
2482
2483 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2484 if (cmd->srcu.src_m != cmd->dstu.dst_m) {
2485 crp->crp_buf = (void *)cmd->dstu.dst_m;
2486 totlen = cmd->src_map->dm_mapsize;
2487 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
2488 if (totlen < m->m_len) {
2489 m->m_len = totlen;
2490 totlen = 0;
2491 } else
2492 totlen -= m->m_len;
2493 }
2494 cmd->dstu.dst_m->m_pkthdr.len =
2495 cmd->srcu.src_m->m_pkthdr.len;
2496 m_freem(cmd->srcu.src_m);
2497 }
2498 }
2499
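/*
 * Any odd tail bytes of the output were staged in the per-result
 * slop buffer when the destination map was loaded; copy them back
 * to the end of the caller's buffer.
 */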
2500 if (cmd->sloplen != 0) {
2501 if (crp->crp_flags & CRYPTO_F_IMBUF)
2502 m_copyback((struct mbuf *)crp->crp_buf,
2503 cmd->src_map->dm_mapsize - cmd->sloplen,
2504 cmd->sloplen, &dma->slop[cmd->slopidx]);
2505 else if (crp->crp_flags & CRYPTO_F_IOV)
2506 cuio_copyback((struct uio *)crp->crp_buf,
2507 cmd->src_map->dm_mapsize - cmd->sloplen,
2508 cmd->sloplen, &dma->slop[cmd->slopidx]);
2509 }
2510
2511 i = dma->dstk; u = dma->dstu;
2512 while (u != 0) {
2513 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2514 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2515 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2516 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2517 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2518 offsetof(struct hifn_dma, dstr[i]),
2519 sizeof(struct hifn_desc),
2520 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2521 break;
2522 }
2523 if (++i == (HIFN_D_DST_RSIZE + 1))
2524 i = 0;
2525 else
2526 u--;
2527 }
2528 dma->dstk = i; dma->dstu = u;
2529
2530 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;
2531
2532 if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2533 uint8_t *macbuf;
2534
2535 macbuf = resbuf + sizeof(struct hifn_base_result);
2536 if (cmd->base_masks & HIFN_BASE_CMD_COMP)
2537 macbuf += sizeof(struct hifn_comp_result);
2538 macbuf += sizeof(struct hifn_mac_result);
2539
2540 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2541 int len;
2542
2543 if (crd->crd_alg == CRYPTO_MD5)
2544 len = 16;
2545 else if (crd->crd_alg == CRYPTO_SHA1)
2546 len = 20;
2547 else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
2548 crd->crd_alg == CRYPTO_SHA1_HMAC_96)
2549 len = 12;
2550 else
2551 continue;
2552
2553 if (crp->crp_flags & CRYPTO_F_IMBUF)
2554 m_copyback((struct mbuf *)crp->crp_buf,
2555 crd->crd_inject, len, macbuf);
2556 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
2557 memcpy(crp->crp_mac, (void *)macbuf, len);
2558 break;
2559 }
2560 }
2561
2562 if (cmd->src_map != cmd->dst_map) {
2563 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2564 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2565 }
2566 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2567 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2568 explicit_memset(cmd, 0, sizeof(*cmd));
2569 free(cmd, M_DEVBUF);
2570 crypto_done(crp);
2571 }
2572
2573 #ifdef CRYPTO_LZS_COMP
2574
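/*
 * hifn_compression: set up a compression-only request.  Only mbuf
 * sources are handled, because the output chain may need to be
 * resized.  The destination chain is sized optimistically (rounded
 * up to whole clusters, and quadrupled for decompression) and is
 * grown on overrun by hifn_callback_comp().  Called, like
 * hifn_crypto(), with sc_mtx held; it releases the mutex before
 * returning.
 */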
2575 static int
2576 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2577 struct hifn_command *cmd)
2578 {
2579 struct cryptodesc *crd = crp->crp_desc;
2580 int err = 0;
2581
2582 cmd->compcrd = crd;
2583 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2584
2585 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2586 /*
2587 * XXX can only handle mbufs right now since we can
2588 * XXX dynamically resize them.
2589 */
2590 err = EINVAL;
2591 goto fail;
2592 }
2593
2594 if ((crd->crd_flags & CRD_F_COMP) == 0)
2595 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2596 if (crd->crd_alg == CRYPTO_LZS_COMP)
2597 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2598 HIFN_COMP_CMD_CLEARHIST;
2599
2600 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2601 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2602 err = ENOMEM;
2603 goto fail;
2604 }
2605
2606 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2607 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2608 err = ENOMEM;
2609 goto fail;
2610 }
2611
2612 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2613 int len;
2614
2615 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2616 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2617 err = ENOMEM;
2618 goto fail;
2619 }
2620
2621 len = cmd->src_map->dm_mapsize / MCLBYTES;
2622 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2623 len++;
2624 len *= MCLBYTES;
2625
2626 if ((crd->crd_flags & CRD_F_COMP) == 0)
2627 len *= 4;
2628
2629 if (len > HIFN_MAX_DMALEN)
2630 len = HIFN_MAX_DMALEN;
2631
2632 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2633 if (cmd->dstu.dst_m == NULL) {
2634 err = ENOMEM;
2635 goto fail;
2636 }
2637
2638 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2639 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2640 err = ENOMEM;
2641 goto fail;
2642 }
2643 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2644 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2645 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2646 err = ENOMEM;
2647 goto fail;
2648 }
2649 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2650 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2651 err = ENOMEM;
2652 goto fail;
2653 }
2654 }
2655
2656 if (cmd->src_map == cmd->dst_map)
2657 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2658 0, cmd->src_map->dm_mapsize,
2659 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2660 else {
2661 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2662 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2663 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2664 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2665 }
2666
2667 cmd->crp = crp;
2668 /*
2669 * Always use session 0. The modes of compression we use are
2670 * stateless and there is always at least one compression
2671 * context, zero.
2672 */
2673 cmd->session_num = 0;
2674 cmd->softc = sc;
2675
2676 err = hifn_compress_enter(sc, cmd);
2677
2678 if (err != 0)
2679 goto fail;
2680 mutex_spin_exit(&sc->sc_mtx);
return (0);
2681
2682 fail:
2683 if (cmd->dst_map != NULL) {
2684 if (cmd->dst_map->dm_nsegs > 0)
2685 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2686 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2687 }
2688 if (cmd->src_map != NULL) {
2689 if (cmd->src_map->dm_nsegs > 0)
2690 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2691 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2692 }
2693 explicit_memset(cmd, 0, sizeof(*cmd));
2694 free(cmd, M_DEVBUF);
2695 if (err == EINVAL)
2696 hifnstats.hst_invalid++;
2697 else
2698 hifnstats.hst_nomem++;
2699 crp->crp_etype = err;
mutex_spin_exit(&sc->sc_mtx);
2700 crypto_done(crp);
2701 return (0);
2702 }
2703
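/*
 * hifn_compress_enter: queue a compression command on the rings.
 * This mirrors the tail of hifn_crypto(), except that completion is
 * routed through hifn_callback_comp().
 */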
2704 static int
2705 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2706 {
2707 struct hifn_dma *dma = sc->sc_dma;
2708 int cmdi, resi;
2709 uint32_t cmdlen;
2710
2711 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2712 (dma->resu + 1) > HIFN_D_RES_RSIZE)
2713 return (ENOMEM);
2714
2715 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2716 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2717 return (ENOMEM);
2718
2719 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2720 dma->cmdi = 0;
2721 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2722 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2723 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2724 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2725 }
2726 cmdi = dma->cmdi++;
2727 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2728 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2729
2730 /* .p for command/result already set */
2731 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2732 HIFN_D_MASKDONEIRQ);
2733 HIFN_CMDR_SYNC(sc, cmdi,
2734 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2735 dma->cmdu++;
2736 if (sc->sc_c_busy == 0) {
2737 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2738 sc->sc_c_busy = 1;
2739 SET_LED(sc, HIFN_MIPSRST_LED0);
2740 }
2741
2742 /*
2743 * Always enable the command wait interrupt. We are obviously
2744 * missing an interrupt or two somewhere. Enabling the command wait
2745 * interrupt will guarantee we get called periodically until all
2746 * of the queues are drained and thus work around this.
2747 */
2748 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2749 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2750
2751 hifnstats.hst_ipackets++;
2752 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2753
2754 hifn_dmamap_load_src(sc, cmd);
2755 if (sc->sc_s_busy == 0) {
2756 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2757 sc->sc_s_busy = 1;
2758 SET_LED(sc, HIFN_MIPSRST_LED1);
2759 }
2760
2761 /*
2762 * Unlike the other descriptors, we don't mask the done
2763 * interrupt on the result descriptor.
2764 */
2765 if (dma->resi == HIFN_D_RES_RSIZE) {
2766 dma->resi = 0;
2767 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2768 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2769 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2770 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2771 }
2772 resi = dma->resi++;
2773 dma->hifn_commands[resi] = cmd;
2774 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2775 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2776 HIFN_D_VALID | HIFN_D_LAST);
2777 HIFN_RESR_SYNC(sc, resi,
2778 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2779 dma->resu++;
2780 if (sc->sc_r_busy == 0) {
2781 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2782 sc->sc_r_busy = 1;
2783 SET_LED(sc, HIFN_MIPSRST_LED2);
2784 }
2785
2786 if (cmd->sloplen)
2787 cmd->slopidx = resi;
2788
2789 hifn_dmamap_load_dst(sc, cmd);
2790
2791 if (sc->sc_d_busy == 0) {
2792 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2793 sc->sc_d_busy = 1;
2794 }
2795 sc->sc_active = 5;
2796 cmd->cmd_callback = hifn_callback_comp;
2797 return (0);
2798 }
2799
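/*
 * hifn_callback_comp: completion handler for compression commands.
 * On destination overrun, grow the output chain by one cluster and
 * resubmit the command; otherwise compute the output length from the
 * base result, trim the mbuf chain to fit, and finish the request.
 */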
2800 static void
2801 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2802 uint8_t *resbuf)
2803 {
2804 struct hifn_base_result baseres;
2805 struct cryptop *crp = cmd->crp;
2806 struct hifn_dma *dma = sc->sc_dma;
2807 struct mbuf *m;
2808 int err = 0, i, u;
2809 uint32_t olen;
2810 bus_size_t dstsize;
2811
2812 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2813 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2814 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2815 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2816
2817 dstsize = cmd->dst_map->dm_mapsize;
2818 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2819
2820 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2821
2822 i = dma->dstk; u = dma->dstu;
2823 while (u != 0) {
2824 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2825 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2826 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2827 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2828 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2829 offsetof(struct hifn_dma, dstr[i]),
2830 sizeof(struct hifn_desc),
2831 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2832 break;
2833 }
2834 if (++i == (HIFN_D_DST_RSIZE + 1))
2835 i = 0;
2836 else
2837 u--;
2838 }
2839 dma->dstk = i; dma->dstu = u;
2840
2841 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2842 bus_size_t xlen;
2843
2844 xlen = dstsize;
2845
2846 m_freem(cmd->dstu.dst_m);
2847
2848 if (xlen == HIFN_MAX_DMALEN) {
2849 /* We've done all we can. */
2850 err = E2BIG;
2851 goto out;
2852 }
2853
2854 xlen += MCLBYTES;
2855
2856 if (xlen > HIFN_MAX_DMALEN)
2857 xlen = HIFN_MAX_DMALEN;
2858
2859 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2860 cmd->srcu.src_m);
2861 if (cmd->dstu.dst_m == NULL) {
2862 err = ENOMEM;
2863 goto out;
2864 }
2865 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2866 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2867 err = ENOMEM;
2868 goto out;
2869 }
2870
2871 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2872 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2873 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2874 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2875
2876 err = hifn_compress_enter(sc, cmd);
2877 if (err != 0)
2878 goto out;
2879 return;
2880 }
2881
2882 olen = dstsize - (le16toh(baseres.dst_cnt) |
2883 (((le16toh(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2884 HIFN_BASE_RES_DSTLEN_S) << 16));
2885
2886 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2887
2888 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2889 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2890 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2891
2892 m = cmd->dstu.dst_m;
2893 if (m->m_flags & M_PKTHDR)
2894 m->m_pkthdr.len = olen;
2895 crp->crp_buf = (void *)m;
2896 for (; m != NULL; m = m->m_next) {
2897 if (olen >= m->m_len)
2898 olen -= m->m_len;
2899 else {
2900 m->m_len = olen;
2901 olen = 0;
2902 }
2903 }
2904
2905 m_freem(cmd->srcu.src_m);
2906 explicit_memset(cmd, 0, sizeof(*cmd));
2907 free(cmd, M_DEVBUF);
2908 crp->crp_etype = 0;
2909 crypto_done(crp);
2910 return;
2911
2912 out:
2913 if (cmd->dst_map != NULL) {
2914 if (cmd->dst_map->dm_nsegs != 0)
2915 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2916 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2917 }
2918 if (cmd->src_map != NULL) {
2919 if (cmd->src_map->dm_nsegs != 0)
2920 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2921 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2922 }
2923 m_freem(cmd->dstu.dst_m);
2924 explicit_memset(cmd, 0, sizeof(*cmd));
2925 free(cmd, M_DEVBUF);
2926 crp->crp_etype = err;
2927 crypto_done(crp);
2928 }
2929
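/*
 * hifn_mkmbuf_chain: allocate a chain of cluster mbufs covering at
 * least totlen bytes, copying the packet header from mtemplate when
 * it has one.  Returns NULL (freeing any partial chain) on failure.
 */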
2930 static struct mbuf *
2931 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2932 {
2933 int len;
2934 struct mbuf *m, *m0, *mlast;
2935
2936 if (mtemplate->m_flags & M_PKTHDR) {
2937 len = MHLEN;
2938 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2939 } else {
2940 len = MLEN;
2941 MGET(m0, M_DONTWAIT, MT_DATA);
2942 }
2943 if (m0 == NULL)
2944 return (NULL);
2945 if (len == MHLEN)
2946 m_copy_pkthdr(m0, mtemplate);
2947 MCLGET(m0, M_DONTWAIT);
2948 if (!(m0->m_flags & M_EXT)) {
2949 m_freem(m0);
2950 return (NULL);
2951 }
2952 len = MCLBYTES;
2953
2954 totlen -= len;
2955 m0->m_pkthdr.len = m0->m_len = len;
2956 mlast = m0;
2957
2958 while (totlen > 0) {
2959 MGET(m, M_DONTWAIT, MT_DATA);
2960 if (m == NULL) {
2961 m_freem(m0);
2962 return (NULL);
2963 }
2964 MCLGET(m, M_DONTWAIT);
2965 if (!(m->m_flags & M_EXT)) {
2966 m_free(m);
2967 m_freem(m0);
2968 return (NULL);
2969 }
2970 len = MCLBYTES;
2971 m->m_len = len;
2972 if (m0->m_flags & M_PKTHDR)
2973 m0->m_pkthdr.len += len;
2974 totlen -= len;
2975
2976 mlast->m_next = m;
2977 mlast = m;
2978 }
2979
2980 return (m0);
2981 }
2982 #endif /* CRYPTO_LZS_COMP */
2983
2984 static void
2985 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, uint32_t val)
2986 {
2987 /*
2988 * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
2989 * and Group 1 registers; avoid conditions that could create
2990 * burst writes by doing a read in between the writes.
2991 */
2992 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2993 if (sc->sc_waw_lastgroup == reggrp &&
2994 sc->sc_waw_lastreg == reg - 4) {
2995 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2996 }
2997 sc->sc_waw_lastgroup = reggrp;
2998 sc->sc_waw_lastreg = reg;
2999 }
3000 if (reggrp == 0)
3001 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3002 else
3003 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3004
3005 }
3006
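/*
 * hifn_read_4: any intervening read breaks up a potential write
 * burst, so reset the write-after-write tracking state to values
 * that cannot match a subsequent register write (register offsets
 * are word-aligned, so an offset of 1 never matches).
 */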
3007 static uint32_t
3008 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3009 {
3010 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3011 sc->sc_waw_lastgroup = -1;
3012 sc->sc_waw_lastreg = 1;
3013 }
3014 if (reggrp == 0)
3015 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3016 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3017 }
3018