/*	$NetBSD: hifn7751.c,v 1.53 2014/01/03 16:09:22 pgoyette Exp $	*/
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.53 2014/01/03 16:09:22 pgoyette Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #ifdef __OpenBSD__
65 #include <crypto/crypto.h>
66 #include <dev/rndvar.h>
67 #else
68 #include <opencrypto/cryptodev.h>
69 #include <sys/cprng.h>
70 #include <sys/rnd.h>
71 #include <sys/sha1.h>
72 #endif
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77
78 #include <dev/pci/hifn7751reg.h>
79 #include <dev/pci/hifn7751var.h>
80
81 #undef HIFN_DEBUG
82
83 #ifdef __NetBSD__
84 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
85 #endif
86
87 #ifdef HIFN_DEBUG
88 extern int hifn_debug; /* patchable */
89 int hifn_debug = 1;
90 #endif
91
92 #ifdef __OpenBSD__
93 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
94 #endif
95
96 /*
97 * Prototypes and count for the pci_device structure
98 */
99 #ifdef __OpenBSD__
100 static int hifn_probe((struct device *, void *, void *);
101 #else
102 static int hifn_probe(device_t, cfdata_t, void *);
103 #endif
static void hifn_attach(device_t, device_t, void *);
#ifdef __NetBSD__
static int hifn_detach(device_t, int);

/*
 * Autoconfiguration glue.  NetBSD supports detach (for modular
 * loading below); OpenBSD does not, so its cfattach passes NULL
 * and supplies a cfdriver instead.
 */
CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
    hifn_probe, hifn_attach, hifn_detach, NULL);
#else
CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
    hifn_probe, hifn_attach, NULL, NULL);
#endif

#ifdef __OpenBSD__
struct cfdriver hifn_cd = {
	0, "hifn", DV_DULL
};
#endif
120
121 static void hifn_reset_board(struct hifn_softc *, int);
122 static void hifn_reset_puc(struct hifn_softc *);
123 static void hifn_puc_wait(struct hifn_softc *);
124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
125 static void hifn_set_retry(struct hifn_softc *);
126 static void hifn_init_dma(struct hifn_softc *);
127 static void hifn_init_pci_registers(struct hifn_softc *);
128 static int hifn_sramsize(struct hifn_softc *);
129 static int hifn_dramsize(struct hifn_softc *);
130 static int hifn_ramtype(struct hifn_softc *);
131 static void hifn_sessions(struct hifn_softc *);
132 static int hifn_intr(void *);
133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
136 static int hifn_freesession(void*, u_int64_t);
137 static int hifn_process(void*, struct cryptop *, int);
138 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
139 u_int8_t *);
140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 struct cryptop*, int);
142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 static int hifn_dmamap_aligned(bus_dmamap_t);
145 static int hifn_dmamap_load_src(struct hifn_softc *,
146 struct hifn_command *);
147 static int hifn_dmamap_load_dst(struct hifn_softc *,
148 struct hifn_command *);
149 static int hifn_init_pubrng(struct hifn_softc *);
150 static void hifn_rng(void *);
151 static void hifn_rng_locked(void *);
152 static void hifn_tick(void *);
153 static void hifn_abort(struct hifn_softc *);
154 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
155 int *);
156 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
157 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
158 #ifdef HAVE_CRYPTO_LZS
159 static int hifn_compression(struct hifn_softc *, struct cryptop *,
160 struct hifn_command *);
161 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
162 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
163 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
164 u_int8_t *);
165 #endif /* HAVE_CRYPTO_LZS */
166
167 struct hifn_stats hifnstats;
168
/*
 * Table of supported boards: PCI vendor/product IDs, capability flags
 * (HIFN_IS_*, HIFN_HAS_*), and the name printed at attach time.
 * Terminated by a NULL-name sentinel; searched by hifn_lookup().
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
215
216 static const struct hifn_product *
217 hifn_lookup(const struct pci_attach_args *pa)
218 {
219 const struct hifn_product *hp;
220
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
224 return (hp);
225 }
226 return (NULL);
227 }
228
229 static int
230 hifn_probe(device_t parent, cfdata_t match, void *aux)
231 {
232 struct pci_attach_args *pa = aux;
233
234 if (hifn_lookup(pa) != NULL)
235 return 1;
236
237 return 0;
238 }
239
240 static void
241 hifn_attach(device_t parent, device_t self, void *aux)
242 {
243 struct hifn_softc *sc = device_private(self);
244 struct pci_attach_args *pa = aux;
245 const struct hifn_product *hp;
246 pci_chipset_tag_t pc = pa->pa_pc;
247 pci_intr_handle_t ih;
248 const char *intrstr = NULL;
249 const char *hifncap;
250 char rbase;
251 #ifdef __NetBSD__
252 #define iosize0 sc->sc_iosz0
253 #define iosize1 sc->sc_iosz1
254 #else
255 bus_size_t iosize0, iosize1;
256 #endif
257 u_int32_t cmd;
258 u_int16_t ena;
259 bus_dma_segment_t seg;
260 bus_dmamap_t dmamap;
261 int rseg;
262 void *kva;
263
264 hp = hifn_lookup(pa);
265 if (hp == NULL) {
266 printf("\n");
267 panic("hifn_attach: impossible");
268 }
269
270 pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);
271
272 sc->sc_dv = self;
273 sc->sc_pci_pc = pa->pa_pc;
274 sc->sc_pci_tag = pa->pa_tag;
275
276 sc->sc_flags = hp->hifn_flags;
277
278 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
279 cmd |= PCI_COMMAND_MASTER_ENABLE;
280 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
281
282 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
283 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
284 aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
285 return;
286 }
287
288 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
289 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
290 aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
291 goto fail_io0;
292 }
293
294 hifn_set_retry(sc);
295
296 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
297 sc->sc_waw_lastgroup = -1;
298 sc->sc_waw_lastreg = 1;
299 }
300
301 sc->sc_dmat = pa->pa_dmat;
302 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
303 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
304 aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
305 goto fail_io1;
306 }
307 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
308 BUS_DMA_NOWAIT)) {
309 aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
310 (u_long)sizeof(*sc->sc_dma));
311 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
312 goto fail_io1;
313 }
314 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
315 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
316 aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
317 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
318 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
319 goto fail_io1;
320 }
321 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
322 NULL, BUS_DMA_NOWAIT)) {
323 aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
324 bus_dmamap_destroy(sc->sc_dmat, dmamap);
325 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
326 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
327 goto fail_io1;
328 }
329 sc->sc_dmamap = dmamap;
330 sc->sc_dma = (struct hifn_dma *)kva;
331 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));
332
333 hifn_reset_board(sc, 0);
334
335 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
336 aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
337 goto fail_mem;
338 }
339 hifn_reset_puc(sc);
340
341 hifn_init_dma(sc);
342 hifn_init_pci_registers(sc);
343
344 /* XXX can't dynamically determine ram type for 795x; force dram */
345 if (sc->sc_flags & HIFN_IS_7956)
346 sc->sc_drammodel = 1;
347 else if (hifn_ramtype(sc))
348 goto fail_mem;
349
350 if (sc->sc_drammodel == 0)
351 hifn_sramsize(sc);
352 else
353 hifn_dramsize(sc);
354
355 /*
356 * Workaround for NetSec 7751 rev A: half ram size because two
357 * of the address lines were left floating
358 */
359 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
360 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
361 PCI_REVISION(pa->pa_class) == 0x61)
362 sc->sc_ramsize >>= 1;
363
364 if (pci_intr_map(pa, &ih)) {
365 aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
366 goto fail_mem;
367 }
368 intrstr = pci_intr_string(pc, ih);
369 #ifdef __OpenBSD__
370 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
371 device_xname(self));
372 #else
373 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
374 #endif
375 if (sc->sc_ih == NULL) {
376 aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
377 if (intrstr != NULL)
378 aprint_error(" at %s", intrstr);
379 aprint_error("\n");
380 goto fail_mem;
381 }
382
383 hifn_sessions(sc);
384
385 rseg = sc->sc_ramsize / 1024;
386 rbase = 'K';
387 if (sc->sc_ramsize >= (1024 * 1024)) {
388 rbase = 'M';
389 rseg /= 1024;
390 }
391 aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
392 hifncap, rseg, rbase,
393 sc->sc_drammodel ? 'D' : 'S', intrstr);
394
395 sc->sc_cid = crypto_get_driverid(0);
396 if (sc->sc_cid < 0) {
397 aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
398 goto fail_intr;
399 }
400
401 WRITE_REG_0(sc, HIFN_0_PUCNFG,
402 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
403 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
404
405 switch (ena) {
406 case HIFN_PUSTAT_ENA_2:
407 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
408 hifn_newsession, hifn_freesession, hifn_process, sc);
409 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
410 hifn_newsession, hifn_freesession, hifn_process, sc);
411 if (sc->sc_flags & HIFN_HAS_AES)
412 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
413 hifn_newsession, hifn_freesession,
414 hifn_process, sc);
415 /*FALLTHROUGH*/
416 case HIFN_PUSTAT_ENA_1:
417 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
418 hifn_newsession, hifn_freesession, hifn_process, sc);
419 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
420 hifn_newsession, hifn_freesession, hifn_process, sc);
421 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
422 hifn_newsession, hifn_freesession, hifn_process, sc);
423 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
424 hifn_newsession, hifn_freesession, hifn_process, sc);
425 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
426 hifn_newsession, hifn_freesession, hifn_process, sc);
427 break;
428 }
429
430 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
431 sc->sc_dmamap->dm_mapsize,
432 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
433
434 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
435 hifn_init_pubrng(sc);
436 sc->sc_rng_need = RND_POOLBITS / NBBY;
437 }
438
439 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
440
441 #ifdef __OpenBSD__
442 timeout_set(&sc->sc_tickto, hifn_tick, sc);
443 timeout_add(&sc->sc_tickto, hz);
444 #else
445 callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
446 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
447 #endif
448 return;
449
450 fail_intr:
451 pci_intr_disestablish(pc, sc->sc_ih);
452 fail_mem:
453 bus_dmamap_unload(sc->sc_dmat, dmamap);
454 bus_dmamap_destroy(sc->sc_dmat, dmamap);
455 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
456 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
457
458 /* Turn off DMA polling */
459 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
460 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
461
462 fail_io1:
463 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
464 fail_io0:
465 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
466 }
467
468 #ifdef __NetBSD__
/*
 * Detach: tear down in roughly the reverse order of attach — abort
 * in-flight work, reset the chip, drop the interrupt handler,
 * deregister from opencrypto(9) and rnd(4), stop the callouts, and
 * unmap the register BARs.  Always succeeds.
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	/* Flush/abort whatever is still on the descriptor rings. */
	hifn_abort(sc);

	/* Full reset: quiesce the DMA engine. */
	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	rnd_detach_source(&sc->sc_rnd_source);

	/* Stop the tick callout and, if RNG/PK hardware exists, the
	 * RNG callout as well. */
	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}
500
501 MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");
502
503 #ifdef _MODULE
504 #include "ioconf.c"
505 #endif
506
507 static int
508 hifn_modcmd(modcmd_t cmd, void *data)
509 {
510 int error = 0;
511
512 switch(cmd) {
513 case MODULE_CMD_INIT:
514 #ifdef _MODULE
515 error = config_init_component(cfdriver_ioconf_hifn,
516 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
517 #endif
518 return error;
519 case MODULE_CMD_FINI:
520 #ifdef _MODULE
521 error = config_fini_component(cfdriver_ioconf_hifn,
522 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
523 #endif
524 return error;
525 default:
526 return ENOTTY;
527 }
528 }
529
530 #endif /* ifdef __NetBSD__ */
531
532 static void
533 hifn_rng_get(size_t bytes, void *priv)
534 {
535 struct hifn_softc *sc = priv;
536
537 mutex_enter(&sc->sc_mtx);
538 sc->sc_rng_need = bytes;
539
540 hifn_rng_locked(sc);
541 mutex_exit(&sc->sc_mtx);
542 }
543
/*
 * Bring up the public-key engine and/or the RNG, as advertised in
 * sc_flags, and register the RNG as a kernel entropy source.
 * Returns 0 on success, 1 if the public-key unit failed to come out
 * of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100 ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4 ms, not the 0.4 s the
		 * comment above calls for — confirm the intended value.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG,
		    RND_FLAG_NO_ESTIMATE|RND_FLAG_HASCB);
#endif

		/* Harvest roughly 100 times a second (minimum 1 tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
636
637 static void
638 hifn_rng_locked(void *vsc)
639 {
640 struct hifn_softc *sc = vsc;
641 #ifdef __NetBSD__
642 uint32_t num[64];
643 #else
644 uint32_t num[2];
645 #endif
646 uint32_t sts;
647 int i;
648 size_t got, gotent;
649
650 if (sc->sc_rng_need < 1) {
651 callout_stop(&sc->sc_rngto);
652 return;
653 }
654
655 if (sc->sc_flags & HIFN_IS_7811) {
656 for (i = 0; i < 5; i++) { /* XXX why 5? */
657 sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
658 if (sts & HIFN_7811_RNGSTS_UFL) {
659 printf("%s: RNG underflow: disabling\n",
660 device_xname(sc->sc_dv));
661 return;
662 }
663 if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
664 break;
665
666 /*
667 * There are at least two words in the RNG FIFO
668 * at this point.
669 */
670 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
671 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
672 got = 2 * sizeof(num[0]);
673 gotent = (got * NBBY) / HIFN_RNG_BITSPER;
674
675 #ifdef __NetBSD__
676 rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
677 sc->sc_rng_need -= gotent;
678 #else
679 /*
680 * XXX This is a really bad idea.
681 * XXX Hifn estimate as little as 0.06
682 * XXX actual bits of entropy per output
683 * XXX register bit. How can we tell the
684 * XXX kernel RNG subsystem we're handing
685 * XXX it 64 "true" random bits, for any
686 * XXX sane value of "true"?
687 * XXX
688 * XXX The right thing to do here, if we
689 * XXX cannot supply an estimate ourselves,
690 * XXX would be to hash the bits locally.
691 */
692 add_true_randomness(num[0]);
693 add_true_randomness(num[1]);
694 #endif
695
696 }
697 } else {
698 int nwords = 0;
699
700 if (sc->sc_rng_need) {
701 nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
702 }
703
704 if (nwords < 2) {
705 nwords = 2;
706 }
707
708 /*
709 * We must be *extremely* careful here. The Hifn
710 * 795x differ from the published 6500 RNG design
711 * in more ways than the obvious lack of the output
712 * FIFO and LFSR control registers. In fact, there
713 * is only one LFSR, instead of the 6500's two, and
714 * it's 32 bits, not 31.
715 *
716 * Further, a block diagram obtained from Hifn shows
717 * a very curious latching of this register: the LFSR
718 * rotates at a frequency of RNG_Clk / 8, but the
719 * RNG_Data register is latched at a frequency of
720 * RNG_Clk, which means that it is possible for
721 * consecutive reads of the RNG_Data register to read
722 * identical state from the LFSR. The simplest
723 * workaround seems to be to read eight samples from
724 * the register for each one that we use. Since each
725 * read must require at least one PCI cycle, and
726 * RNG_Clk is at least PCI_Clk, this is safe.
727 */
728 for(i = 0 ; i < nwords * 8; i++)
729 {
730 volatile u_int32_t regtmp;
731 regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
732 num[i / 8] = regtmp;
733 }
734
735 got = nwords * sizeof(num[0]);
736 gotent = (got * NBBY) / HIFN_RNG_BITSPER;
737 #ifdef __NetBSD__
738 rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
739 sc->sc_rng_need -= gotent;
740 #else
741 /* XXX a bad idea; see 7811 block above */
742 add_true_randomness(num[0]);
743 #endif
744 }
745
746 #ifdef __OpenBSD__
747 timeout_add(&sc->sc_rngto, sc->sc_rnghz);
748 #else
749 if (sc->sc_rng_need > 0) {
750 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
751 }
752 #endif
753 }
754
755 static void
756 hifn_rng(void *vsc)
757 {
758 struct hifn_softc *sc = vsc;
759
760 mutex_spin_enter(&sc->sc_mtx);
761 hifn_rng_locked(vsc);
762 mutex_spin_exit(&sc->sc_mtx);
763 }
764
765 static void
766 hifn_puc_wait(struct hifn_softc *sc)
767 {
768 int i;
769
770 for (i = 5000; i > 0; i--) {
771 DELAY(1);
772 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
773 break;
774 }
775 if (!i)
776 printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
777 }
778
/*
 * Reset the processing unit and wait for it to come back.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit (only DMAENA left asserted in PUCTRL). */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	/* Poll until the PUCTRL RESET bit deasserts (up to ~5 ms). */
	hifn_puc_wait(sc);
}
789
790 static void
791 hifn_set_retry(struct hifn_softc *sc)
792 {
793 u_int32_t r;
794
795 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
796 r &= 0xffff0000;
797 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
798 }
799
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * When "full" is set only the DMA unit is cycled (used at detach
 * time); otherwise master reset is asserted and the processing
 * unit is reset as well.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are stale after a reset; clear them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	/* Re-clear the PCI retry/TRDY timeout fields after the reset. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait (up to ~1 s) for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
853
/*
 * Advance the crypto-unlock signature LFSR by "cnt" steps.
 *
 * Each step computes the even parity of the bits of "a" selected by
 * the tap mask 0x80080125 and feeds it into bit 0 of a <<= 1 — i.e.
 * a 32-bit Fibonacci-style LFSR step.  Used by hifn_enable_crypto()
 * to derive the values written to the unlock register.
 *
 * (Also fixes the signed/unsigned comparison between the old "int i"
 * loop counter and the unsigned "cnt", and uses <stdint.h> types,
 * which are identical to the BSD u_int32_t/u_int typedefs.)
 */
static uint32_t
hifn_next_signature(uint32_t a, unsigned int cnt)
{
	uint32_t v;
	unsigned int i;

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift left, feeding the parity back into bit 0 */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
875
/*
 * Per-board 13-byte "card id" keys.  hifn_enable_crypto() feeds each
 * byte (plus 0x101) through hifn_next_signature() to generate the
 * unlock sequence.  All known boards ship with the all-zeroes key.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
923
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock sequence below is performed to enable it.  The check is
 * important, as enabling crypto twice will lock the board.
 * Returns a human-readable capability string for the attach message,
 * or NULL if the board is unknown or in an unrecognized state.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find this board's card-id key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save registers we will scribble on; restored at "report". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Put the DMA unit into unlock mode and read the seed value. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/*
	 * Walk the 13-byte card id, stepping the signature LFSR by
	 * (byte + 0x101) each round and writing the result back.
	 */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
1026
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * write all 4 ring address registers: the bus addresses of the
	 * command, source, destination, and result rings inside the
	 * single hifn_dma allocation.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * acknowledge every pending completion/abort/overrun bit,
	 * plus the public-key and illegal-access bits where present.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Select the interrupt sources we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* RAM configuration; the 795x also needs its PLL set up. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1098
1099 /*
1100 * The maximum number of sessions supported by the card
1101 * is dependent on the amount of context ram, which
1102 * encryption algorithms are enabled, and how compression
1103 * is configured. This should be configured before this
1104 * routine is called.
1105 */
1106 static void
1107 hifn_sessions(struct hifn_softc *sc)
1108 {
1109 u_int32_t pucnfg;
1110 int ctxsize;
1111
1112 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1113
1114 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1115 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1116 ctxsize = 128;
1117 else
1118 ctxsize = 512;
1119 /*
1120 * 7955/7956 has internal context memory of 32K
1121 */
1122 if (sc->sc_flags & HIFN_IS_7956)
1123 sc->sc_maxses = 32768 / ctxsize;
1124 else
1125 sc->sc_maxses = 1 +
1126 ((sc->sc_ramsize - 32768) / ctxsize);
1127 }
1128 else
1129 sc->sc_maxses = sc->sc_ramsize / 16384;
1130
1131 if (sc->sc_maxses > 2048)
1132 sc->sc_maxses = 2048;
1133 }
1134
1135 /*
1136 * Determine ram type (sram or dram). Board should be just out of a reset
1137 * state when this is called.
1138 */
1139 static int
1140 hifn_ramtype(struct hifn_softc *sc)
1141 {
1142 u_int8_t data[8], dataexpect[8];
1143 int i;
1144
1145 for (i = 0; i < sizeof(data); i++)
1146 data[i] = dataexpect[i] = 0x55;
1147 if (hifn_writeramaddr(sc, 0, data))
1148 return (-1);
1149 if (hifn_readramaddr(sc, 0, data))
1150 return (-1);
1151 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1152 sc->sc_drammodel = 1;
1153 return (0);
1154 }
1155
1156 for (i = 0; i < sizeof(data); i++)
1157 data[i] = dataexpect[i] = 0xaa;
1158 if (hifn_writeramaddr(sc, 0, data))
1159 return (-1);
1160 if (hifn_readramaddr(sc, 0, data))
1161 return (-1);
1162 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1163 sc->sc_drammodel = 1;
1164 return (0);
1165 }
1166
1167 return (0);
1168 }
1169
1170 #define HIFN_SRAM_MAX (32 << 20)
1171 #define HIFN_SRAM_STEP_SIZE 16384
1172 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1173
1174 static int
1175 hifn_sramsize(struct hifn_softc *sc)
1176 {
1177 u_int32_t a;
1178 u_int8_t data[8];
1179 u_int8_t dataexpect[sizeof(data)];
1180 int32_t i;
1181
1182 for (i = 0; i < sizeof(data); i++)
1183 data[i] = dataexpect[i] = i ^ 0x5a;
1184
1185 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1186 a = i * HIFN_SRAM_STEP_SIZE;
1187 memcpy(data, &i, sizeof(i));
1188 hifn_writeramaddr(sc, a, data);
1189 }
1190
1191 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1192 a = i * HIFN_SRAM_STEP_SIZE;
1193 memcpy(dataexpect, &i, sizeof(i));
1194 if (hifn_readramaddr(sc, a, data) < 0)
1195 return (0);
1196 if (memcmp(data, dataexpect, sizeof(data)) != 0)
1197 return (0);
1198 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1199 }
1200
1201 return (0);
1202 }
1203
1204 /*
1205 * XXX For dram boards, one should really try all of the
1206 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1207 * is already set up correctly.
1208 */
1209 static int
1210 hifn_dramsize(struct hifn_softc *sc)
1211 {
1212 u_int32_t cnfg;
1213
1214 if (sc->sc_flags & HIFN_IS_7956) {
1215 /*
1216 * 7955/7956 have a fixed internal ram of only 32K.
1217 */
1218 sc->sc_ramsize = 32768;
1219 } else {
1220 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1221 HIFN_PUCNFG_DRAMMASK;
1222 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1223 }
1224 return (0);
1225 }
1226
/*
 * Claim the next slot in each of the four descriptor rings (command,
 * source, destination, result) and return the slot indices through
 * the out-parameters.  When a ring's producer index has reached the
 * end, publish that ring's extra "jump" descriptor so the chip wraps
 * back to slot 0.  The caller must have verified there is room; only
 * the indices are advanced here — the caller fills the descriptors.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* wrap: arm the jump descriptor back to cmdr[0] */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	/* keep the cleanup (k) index in step with the producer */
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1273
1274 static int
1275 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1276 {
1277 struct hifn_dma *dma = sc->sc_dma;
1278 struct hifn_base_command wc;
1279 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1280 int r, cmdi, resi, srci, dsti;
1281
1282 wc.masks = htole16(3 << 13);
1283 wc.session_num = htole16(addr >> 14);
1284 wc.total_source_count = htole16(8);
1285 wc.total_dest_count = htole16(addr & 0x3fff);
1286
1287 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1288
1289 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1290 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1291 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1292
1293 /* build write command */
1294 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1295 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1296 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1297
1298 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1299 + offsetof(struct hifn_dma, test_src));
1300 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1301 + offsetof(struct hifn_dma, test_dst));
1302
1303 dma->cmdr[cmdi].l = htole32(16 | masks);
1304 dma->srcr[srci].l = htole32(8 | masks);
1305 dma->dstr[dsti].l = htole32(4 | masks);
1306 dma->resr[resi].l = htole32(4 | masks);
1307
1308 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1309 0, sc->sc_dmamap->dm_mapsize,
1310 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1311
1312 for (r = 10000; r >= 0; r--) {
1313 DELAY(10);
1314 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1315 0, sc->sc_dmamap->dm_mapsize,
1316 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1317 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1318 break;
1319 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1320 0, sc->sc_dmamap->dm_mapsize,
1321 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1322 }
1323 if (r == 0) {
1324 printf("%s: writeramaddr -- "
1325 "result[%d](addr %d) still valid\n",
1326 device_xname(sc->sc_dv), resi, addr);
1327 r = -1;
1328 return (-1);
1329 } else
1330 r = 0;
1331
1332 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1333 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1334 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1335
1336 return (r);
1337 }
1338
1339 static int
1340 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1341 {
1342 struct hifn_dma *dma = sc->sc_dma;
1343 struct hifn_base_command rc;
1344 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1345 int r, cmdi, srci, dsti, resi;
1346
1347 rc.masks = htole16(2 << 13);
1348 rc.session_num = htole16(addr >> 14);
1349 rc.total_source_count = htole16(addr & 0x3fff);
1350 rc.total_dest_count = htole16(8);
1351
1352 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1353
1354 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1355 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1356 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1357
1358 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1359 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1360
1361 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1362 offsetof(struct hifn_dma, test_src));
1363 dma->test_src = 0;
1364 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1365 offsetof(struct hifn_dma, test_dst));
1366 dma->test_dst = 0;
1367 dma->cmdr[cmdi].l = htole32(8 | masks);
1368 dma->srcr[srci].l = htole32(8 | masks);
1369 dma->dstr[dsti].l = htole32(8 | masks);
1370 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1371
1372 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1373 0, sc->sc_dmamap->dm_mapsize,
1374 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1375
1376 for (r = 10000; r >= 0; r--) {
1377 DELAY(10);
1378 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1379 0, sc->sc_dmamap->dm_mapsize,
1380 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1381 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1382 break;
1383 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1384 0, sc->sc_dmamap->dm_mapsize,
1385 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1386 }
1387 if (r == 0) {
1388 printf("%s: readramaddr -- "
1389 "result[%d](addr %d) still valid\n",
1390 device_xname(sc->sc_dv), resi, addr);
1391 r = -1;
1392 } else {
1393 r = 0;
1394 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1395 }
1396
1397 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1398 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1399 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1400
1401 return (r);
1402 }
1403
1404 /*
1405 * Initialize the descriptor rings.
1406 */
1407 static void
1408 hifn_init_dma(struct hifn_softc *sc)
1409 {
1410 struct hifn_dma *dma = sc->sc_dma;
1411 int i;
1412
1413 hifn_set_retry(sc);
1414
1415 /* initialize static pointer values */
1416 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1417 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1418 offsetof(struct hifn_dma, command_bufs[i][0]));
1419 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1420 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1421 offsetof(struct hifn_dma, result_bufs[i][0]));
1422
1423 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1424 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1425 offsetof(struct hifn_dma, cmdr[0]));
1426 dma->srcr[HIFN_D_SRC_RSIZE].p =
1427 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1428 offsetof(struct hifn_dma, srcr[0]));
1429 dma->dstr[HIFN_D_DST_RSIZE].p =
1430 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1431 offsetof(struct hifn_dma, dstr[0]));
1432 dma->resr[HIFN_D_RES_RSIZE].p =
1433 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1434 offsetof(struct hifn_dma, resr[0]));
1435
1436 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1437 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1438 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1439 }
1440
1441 /*
1442 * Writes out the raw command buffer space. Returns the
1443 * command buffer size.
1444 */
1445 static u_int
1446 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1447 {
1448 u_int8_t *buf_pos;
1449 struct hifn_base_command *base_cmd;
1450 struct hifn_mac_command *mac_cmd;
1451 struct hifn_crypt_command *cry_cmd;
1452 struct hifn_comp_command *comp_cmd;
1453 int using_mac, using_crypt, using_comp, len, ivlen;
1454 u_int32_t dlen, slen;
1455
1456 buf_pos = buf;
1457 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1458 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1459 using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;
1460
1461 base_cmd = (struct hifn_base_command *)buf_pos;
1462 base_cmd->masks = htole16(cmd->base_masks);
1463 slen = cmd->src_map->dm_mapsize;
1464 if (cmd->sloplen)
1465 dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
1466 sizeof(u_int32_t);
1467 else
1468 dlen = cmd->dst_map->dm_mapsize;
1469 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1470 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1471 dlen >>= 16;
1472 slen >>= 16;
1473 base_cmd->session_num = htole16(cmd->session_num |
1474 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1475 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1476 buf_pos += sizeof(struct hifn_base_command);
1477
1478 if (using_comp) {
1479 comp_cmd = (struct hifn_comp_command *)buf_pos;
1480 dlen = cmd->compcrd->crd_len;
1481 comp_cmd->source_count = htole16(dlen & 0xffff);
1482 dlen >>= 16;
1483 comp_cmd->masks = htole16(cmd->comp_masks |
1484 ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
1485 comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
1486 comp_cmd->reserved = 0;
1487 buf_pos += sizeof(struct hifn_comp_command);
1488 }
1489
1490 if (using_mac) {
1491 mac_cmd = (struct hifn_mac_command *)buf_pos;
1492 dlen = cmd->maccrd->crd_len;
1493 mac_cmd->source_count = htole16(dlen & 0xffff);
1494 dlen >>= 16;
1495 mac_cmd->masks = htole16(cmd->mac_masks |
1496 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1497 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1498 mac_cmd->reserved = 0;
1499 buf_pos += sizeof(struct hifn_mac_command);
1500 }
1501
1502 if (using_crypt) {
1503 cry_cmd = (struct hifn_crypt_command *)buf_pos;
1504 dlen = cmd->enccrd->crd_len;
1505 cry_cmd->source_count = htole16(dlen & 0xffff);
1506 dlen >>= 16;
1507 cry_cmd->masks = htole16(cmd->cry_masks |
1508 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1509 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1510 cry_cmd->reserved = 0;
1511 buf_pos += sizeof(struct hifn_crypt_command);
1512 }
1513
1514 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1515 memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
1516 buf_pos += HIFN_MAC_KEY_LENGTH;
1517 }
1518
1519 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1520 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1521 case HIFN_CRYPT_CMD_ALG_3DES:
1522 memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
1523 buf_pos += HIFN_3DES_KEY_LENGTH;
1524 break;
1525 case HIFN_CRYPT_CMD_ALG_DES:
1526 memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
1527 buf_pos += HIFN_DES_KEY_LENGTH;
1528 break;
1529 case HIFN_CRYPT_CMD_ALG_RC4:
1530 len = 256;
1531 do {
1532 int clen;
1533
1534 clen = MIN(cmd->cklen, len);
1535 memcpy(buf_pos, cmd->ck, clen);
1536 len -= clen;
1537 buf_pos += clen;
1538 } while (len > 0);
1539 memset(buf_pos, 0, 4);
1540 buf_pos += 4;
1541 break;
1542 case HIFN_CRYPT_CMD_ALG_AES:
1543 /*
1544 * AES keys are variable 128, 192 and
1545 * 256 bits (16, 24 and 32 bytes).
1546 */
1547 memcpy(buf_pos, cmd->ck, cmd->cklen);
1548 buf_pos += cmd->cklen;
1549 break;
1550 }
1551 }
1552
1553 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1554 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1555 case HIFN_CRYPT_CMD_ALG_AES:
1556 ivlen = HIFN_AES_IV_LENGTH;
1557 break;
1558 default:
1559 ivlen = HIFN_IV_LENGTH;
1560 break;
1561 }
1562 memcpy(buf_pos, cmd->iv, ivlen);
1563 buf_pos += ivlen;
1564 }
1565
1566 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
1567 HIFN_BASE_CMD_COMP)) == 0) {
1568 memset(buf_pos, 0, 8);
1569 buf_pos += 8;
1570 }
1571
1572 return (buf_pos - buf);
1573 }
1574
1575 static int
1576 hifn_dmamap_aligned(bus_dmamap_t map)
1577 {
1578 int i;
1579
1580 for (i = 0; i < map->dm_nsegs; i++) {
1581 if (map->dm_segs[i].ds_addr & 3)
1582 return (0);
1583 if ((i != (map->dm_nsegs - 1)) &&
1584 (map->dm_segs[i].ds_len & 3))
1585 return (0);
1586 }
1587 return (1);
1588 }
1589
/*
 * Fill destination-ring descriptors for the command's dst DMA map.
 * All segments except the final one are queued whole; if the mapping
 * is not a multiple of 4 bytes (cmd->sloplen != 0), the unaligned
 * tail is redirected to the per-command 32-bit `slop' word inside the
 * shared hifn_dma area, and the last real segment is shortened by
 * sloplen.  HIFN_D_LAST goes on the final descriptor only; done
 * interrupts are masked on all of them.  Returns the new producer
 * index and updates dma->dsti/dstu.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* queue every segment but the last */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* ring end: publish the jump descriptor and wrap */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment is the last descriptor */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* unaligned tail lands in the shared slop word instead */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* queue the aligned part of the final segment, if any */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* final (LAST) descriptor, computed above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1661
/*
 * Fill source-ring descriptors for every segment of the command's src
 * DMA map, marking the final segment HIFN_D_LAST.  Done interrupts
 * are masked on all descriptors; completion is observed through the
 * result ring.  Returns the new producer index and updates
 * dma->srci/srcu.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* ring end: publish the jump descriptor and wrap */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1693
/*
 * Queue one crypto operation on the chip: map the source data, pick
 * (or build) a destination, write the packed command into the command
 * ring, then post source, result and destination descriptors and
 * enable the corresponding DMA engines.  Returns 0 on success or an
 * errno; on failure all maps/mbufs acquired here are released via the
 * goto-cleanup chain at the bottom.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* load the source map from whatever the request hands us */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* aligned source: operate in place, dst aliases src */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* can't substitute buffers under an iovec */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/*
			 * Unaligned mbuf source: build a fresh, aligned
			 * mbuf chain of the same total length to receive
			 * the output.
			 */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* extend the chain until totlen is covered */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* separate destination: create and load its own map */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* sync maps for the device; aliased maps need both directions */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* command ring wrap: publish the jump descriptor */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		/* result ring wrap: publish the jump descriptor */
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* remember which command owns this result slot for the callback */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* the slop word is indexed by the result slot */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* arm the watchdog (hifn_tick) for ~5 quiet seconds */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1960
1961 static void
1962 hifn_tick(void *vsc)
1963 {
1964 struct hifn_softc *sc = vsc;
1965
1966 mutex_spin_enter(&sc->sc_mtx);
1967 if (sc->sc_active == 0) {
1968 struct hifn_dma *dma = sc->sc_dma;
1969 u_int32_t r = 0;
1970
1971 if (dma->cmdu == 0 && sc->sc_c_busy) {
1972 sc->sc_c_busy = 0;
1973 r |= HIFN_DMACSR_C_CTRL_DIS;
1974 CLR_LED(sc, HIFN_MIPSRST_LED0);
1975 }
1976 if (dma->srcu == 0 && sc->sc_s_busy) {
1977 sc->sc_s_busy = 0;
1978 r |= HIFN_DMACSR_S_CTRL_DIS;
1979 CLR_LED(sc, HIFN_MIPSRST_LED1);
1980 }
1981 if (dma->dstu == 0 && sc->sc_d_busy) {
1982 sc->sc_d_busy = 0;
1983 r |= HIFN_DMACSR_D_CTRL_DIS;
1984 }
1985 if (dma->resu == 0 && sc->sc_r_busy) {
1986 sc->sc_r_busy = 0;
1987 r |= HIFN_DMACSR_R_CTRL_DIS;
1988 CLR_LED(sc, HIFN_MIPSRST_LED2);
1989 }
1990 if (r)
1991 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1992 }
1993 else
1994 sc->sc_active--;
1995 #ifdef __OpenBSD__
1996 timeout_add(&sc->sc_tickto, hz);
1997 #else
1998 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1999 #endif
2000 mutex_spin_exit(&sc->sc_mtx);
2001 }
2002
/*
 * Interrupt handler: acknowledge the DMA status bits we own, handle
 * overrun/illegal-access/abort conditions, then reap completed
 * descriptors from the result, source and command rings (a descriptor
 * is finished once the chip has cleared its HIFN_D_VALID bit).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       device_xname(sc->sc_dv),
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* ack only the status bits we have enabled */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		/* an engine aborted: reset the whole unit and bail */
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/*
	 * clear the rings: walk each ring from its cleanup (k) index,
	 * stopping at the first descriptor the chip still owns (VALID
	 * set).  Slot RSIZE in every ring is the jump descriptor — it
	 * is skipped and does not count against the in-use (u) total.
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by the chip; re-sync and stop */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			/* completed result: hand it to the callback */
			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2138
2139 /*
2140 * Allocate a new 'session' and return an encoded session id. 'sidp'
2141 * contains our registration id, and should contain an encoded session
2142 * id on successful allocation.
2143 */
2144 static int
2145 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2146 {
2147 struct cryptoini *c;
2148 struct hifn_softc *sc = arg;
2149 int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2150
2151 KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
2152 if (sidp == NULL || cri == NULL || sc == NULL)
2153 return retval;
2154
2155 mutex_spin_enter(&sc->sc_mtx);
2156
2157 for (i = 0; i < sc->sc_maxses; i++)
2158 if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2159 break;
2160 if (i == sc->sc_maxses) {
2161 retval = ENOMEM;
2162 goto out;
2163 }
2164
2165 for (c = cri; c != NULL; c = c->cri_next) {
2166 switch (c->cri_alg) {
2167 case CRYPTO_MD5:
2168 case CRYPTO_SHA1:
2169 case CRYPTO_MD5_HMAC_96:
2170 case CRYPTO_SHA1_HMAC_96:
2171 if (mac) {
2172 goto out;
2173 }
2174 mac = 1;
2175 break;
2176 case CRYPTO_DES_CBC:
2177 case CRYPTO_3DES_CBC:
2178 case CRYPTO_AES_CBC:
2179 /* Note that this is an initialization
2180 vector, not a cipher key; any function
2181 giving sufficient Hamming distance
2182 between outputs is fine. Use of RC4
2183 to generate IVs has been FIPS140-2
2184 certified by several labs. */
2185 #ifdef __NetBSD__
2186 cprng_fast(sc->sc_sessions[i].hs_iv,
2187 c->cri_alg == CRYPTO_AES_CBC ?
2188 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2189 #else /* FreeBSD and OpenBSD have get_random_bytes */
2190 /* XXX this may read fewer, does it matter? */
2191 get_random_bytes(sc->sc_sessions[i].hs_iv,
2192 c->cri_alg == CRYPTO_AES_CBC ?
2193 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2194 #endif
2195 /*FALLTHROUGH*/
2196 case CRYPTO_ARC4:
2197 if (cry) {
2198 goto out;
2199 }
2200 cry = 1;
2201 break;
2202 #ifdef HAVE_CRYPTO_LZS
2203 case CRYPTO_LZS_COMP:
2204 if (comp) {
2205 goto out;
2206 }
2207 comp = 1;
2208 break;
2209 #endif
2210 default:
2211 goto out;
2212 }
2213 }
2214 if (mac == 0 && cry == 0 && comp == 0) {
2215 goto out;
2216 }
2217
2218 /*
2219 * XXX only want to support compression without chaining to
2220 * MAC/crypt engine right now
2221 */
2222 if ((comp && mac) || (comp && cry)) {
2223 goto out;
2224 }
2225
2226 *sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2227 sc->sc_sessions[i].hs_state = HS_STATE_USED;
2228
2229 retval = 0;
2230 out:
2231 mutex_spin_exit(&sc->sc_mtx);
2232 return retval;
2233 }
2234
2235 /*
2236 * Deallocate a session.
2237 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2238 * XXX to blow away any keys already stored there.
2239 */
2240 static int
2241 hifn_freesession(void *arg, u_int64_t tid)
2242 {
2243 struct hifn_softc *sc = arg;
2244 int session;
2245 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2246
2247 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2248 if (sc == NULL)
2249 return (EINVAL);
2250
2251 mutex_spin_enter(&sc->sc_mtx);
2252 session = HIFN_SESSION(sid);
2253 if (session >= sc->sc_maxses) {
2254 mutex_spin_exit(&sc->sc_mtx);
2255 return (EINVAL);
2256 }
2257
2258 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2259 mutex_spin_exit(&sc->sc_mtx);
2260 return (0);
2261 }
2262
/*
 * opencrypto "process" entry point: build a hifn_command describing the
 * request's cipher/MAC descriptors and hand it to hifn_crypto().
 * Returns 0 with the request queued (or already failed via crypto_done()
 * with crp_etype set), or ERESTART when the device is out of resources
 * and the framework should requeue the request.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}

	mutex_spin_enter(&sc->sc_mtx);
	session = HIFN_SESSION(crp->crp_sid);

	/* NOTE(review): the sc == NULL test is dead code here -- sc was
	 * already dereferenced by mutex_spin_enter() above. */
	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* Source and destination start out as the same buffer; a separate
	 * output chain may be allocated later by the DMA load code. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the (at most two) descriptors into a MAC op and a cipher
	 * op.  With two descriptors the 7751 can only do MAC-then-decrypt
	 * or encrypt-then-MAC; reject any other ordering.
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
#ifdef	HAVE_CRYPTO_LZS
		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
			/* NOTE(review): this returns with sc_mtx still
			 * held; hifn_compression() never releases it.
			 * Verify before enabling HAVE_CRYPTO_LZS. */
			return (hifn_compression(sc, crp, cmd));
#endif
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     crd1->crd_alg == CRYPTO_MD5 ||
		     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			/* MAC first, then decrypt. */
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		     crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     crd2->crd_alg == CRYPTO_MD5 ||
		     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			/* Encrypt first, then MAC. */
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	/* Configure the cipher half of the command. */
	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/* RC4 is stateful: a change of direction drops the
			 * session back to "key needs (re)loading". */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Encrypting: use the caller's explicit IV
				 * if given, else the per-session IV ... */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* ... and write it into the buffer unless
				 * the caller placed it there already. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->srcu.src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->srcu.src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypting: take the IV from the caller
				 * or read it out of the buffer. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->srcu.src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->srcu.src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;	/* bits -> bytes */

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* First use of this session's key: ask the chip to load it. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	/* Configure the MAC half of the command. */
	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* First use of an HMAC session: upload the key,
		 * zero-padded out to the full hardware key length. */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		/* Queued: remember the direction (for RC4) and mark the
		 * session's key as loaded into context RAM. */
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		mutex_spin_exit(&sc->sc_mtx);
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part. Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef	HIFN_DEBUG
		if (hifn_debug)
			printf("%s: requeue request\n", device_xname(sc->sc_dv));
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mutex_spin_exit(&sc->sc_mtx);
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	mutex_spin_exit(&sc->sc_mtx);
	crypto_done(crp);
	return (0);
}
2535
/*
 * The hardware has wedged.  Walk the outstanding result descriptors,
 * completing any operations the device already finished and failing
 * the rest, then reset the board and reinitialize its DMA and PCI
 * register state.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the in-flight result slots starting at the reap index. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Still owned by the (dead) device: tear it down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* If a separate output chain was allocated, free
			 * the source and return the output to the caller. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN requests would be restarted; all others
			 * are completed with the error set above. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2609
/*
 * Completion handler for a crypt/MAC command: sync and tear down the
 * DMA maps, fix up the destination mbuf chain, save the next IV for
 * CBC encryption, copy the MAC result back to the caller, and finish
 * the request with crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	/* Make the device's writes visible to the CPU. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/* A separate output chain was allocated: trim it
			 * to the source length, give it to the caller and
			 * free the original source chain. */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy back any tail bytes that were staged in the slop area. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Reap completed destination descriptors; slot DST_RSIZE is the
	 * jump descriptor and is skipped without consuming a count. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, save the last cipher block from the output
	 * as the per-session IV for the next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	/* Copy the digest (which follows the base/comp/mac result
	 * headers in the result buffer) back to the caller. */
	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest length per algorithm (12 = 96-bit HMAC). */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	/* Tear down the DMA maps and complete the request. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2743
2744 #ifdef HAVE_CRYPTO_LZS
2745
2746 static int
2747 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2748 struct hifn_command *cmd)
2749 {
2750 struct cryptodesc *crd = crp->crp_desc;
2751 int s, err = 0;
2752
2753 cmd->compcrd = crd;
2754 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2755
2756 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2757 /*
2758 * XXX can only handle mbufs right now since we can
2759 * XXX dynamically resize them.
2760 */
2761 err = EINVAL;
2762 return (ENOMEM);
2763 }
2764
2765 if ((crd->crd_flags & CRD_F_COMP) == 0)
2766 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2767 if (crd->crd_alg == CRYPTO_LZS_COMP)
2768 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2769 HIFN_COMP_CMD_CLEARHIST;
2770
2771 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2772 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2773 err = ENOMEM;
2774 goto fail;
2775 }
2776
2777 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2778 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2779 err = ENOMEM;
2780 goto fail;
2781 }
2782
2783 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2784 int len;
2785
2786 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2787 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2788 err = ENOMEM;
2789 goto fail;
2790 }
2791
2792 len = cmd->src_map->dm_mapsize / MCLBYTES;
2793 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2794 len++;
2795 len *= MCLBYTES;
2796
2797 if ((crd->crd_flags & CRD_F_COMP) == 0)
2798 len *= 4;
2799
2800 if (len > HIFN_MAX_DMALEN)
2801 len = HIFN_MAX_DMALEN;
2802
2803 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2804 if (cmd->dstu.dst_m == NULL) {
2805 err = ENOMEM;
2806 goto fail;
2807 }
2808
2809 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2810 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2811 err = ENOMEM;
2812 goto fail;
2813 }
2814 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2815 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2816 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2817 err = ENOMEM;
2818 goto fail;
2819 }
2820 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2821 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2822 err = ENOMEM;
2823 goto fail;
2824 }
2825 }
2826
2827 if (cmd->src_map == cmd->dst_map)
2828 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2829 0, cmd->src_map->dm_mapsize,
2830 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2831 else {
2832 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2833 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2834 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2835 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2836 }
2837
2838 cmd->crp = crp;
2839 /*
2840 * Always use session 0. The modes of compression we use are
2841 * stateless and there is always at least one compression
2842 * context, zero.
2843 */
2844 cmd->session_num = 0;
2845 cmd->softc = sc;
2846
2847 err = hifn_compress_enter(sc, cmd);
2848
2849 if (err != 0)
2850 goto fail;
2851 return (0);
2852
2853 fail:
2854 if (cmd->dst_map != NULL) {
2855 if (cmd->dst_map->dm_nsegs > 0)
2856 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2857 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2858 }
2859 if (cmd->src_map != NULL) {
2860 if (cmd->src_map->dm_nsegs > 0)
2861 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2862 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2863 }
2864 free(cmd, M_DEVBUF);
2865 if (err == EINVAL)
2866 hifnstats.hst_invalid++;
2867 else
2868 hifnstats.hst_nomem++;
2869 crp->crp_etype = err;
2870 crypto_done(crp);
2871 return (0);
2872 }
2873
2874 static int
2875 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2876 {
2877 struct hifn_dma *dma = sc->sc_dma;
2878 int cmdi, resi;
2879 u_int32_t cmdlen;
2880
2881 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2882 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2883 return (ENOMEM);
2884
2885 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2886 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2887 return (ENOMEM);
2888
2889 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2890 dma->cmdi = 0;
2891 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2892 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2893 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2894 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2895 }
2896 cmdi = dma->cmdi++;
2897 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2898 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2899
2900 /* .p for command/result already set */
2901 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2902 HIFN_D_MASKDONEIRQ);
2903 HIFN_CMDR_SYNC(sc, cmdi,
2904 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2905 dma->cmdu++;
2906 if (sc->sc_c_busy == 0) {
2907 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2908 sc->sc_c_busy = 1;
2909 SET_LED(sc, HIFN_MIPSRST_LED0);
2910 }
2911
2912 /*
2913 * We don't worry about missing an interrupt (which a "command wait"
2914 * interrupt salvages us from), unless there is more than one command
2915 * in the queue.
2916 */
2917 if (dma->cmdu > 1) {
2918 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2919 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2920 }
2921
2922 hifnstats.hst_ipackets++;
2923 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2924
2925 hifn_dmamap_load_src(sc, cmd);
2926 if (sc->sc_s_busy == 0) {
2927 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2928 sc->sc_s_busy = 1;
2929 SET_LED(sc, HIFN_MIPSRST_LED1);
2930 }
2931
2932 /*
2933 * Unlike other descriptors, we don't mask done interrupt from
2934 * result descriptor.
2935 */
2936 if (dma->resi == HIFN_D_RES_RSIZE) {
2937 dma->resi = 0;
2938 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2939 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2940 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2941 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2942 }
2943 resi = dma->resi++;
2944 dma->hifn_commands[resi] = cmd;
2945 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2946 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2947 HIFN_D_VALID | HIFN_D_LAST);
2948 HIFN_RESR_SYNC(sc, resi,
2949 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2950 dma->resu++;
2951 if (sc->sc_r_busy == 0) {
2952 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2953 sc->sc_r_busy = 1;
2954 SET_LED(sc, HIFN_MIPSRST_LED2);
2955 }
2956
2957 if (cmd->sloplen)
2958 cmd->slopidx = resi;
2959
2960 hifn_dmamap_load_dst(sc, cmd);
2961
2962 if (sc->sc_d_busy == 0) {
2963 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2964 sc->sc_d_busy = 1;
2965 }
2966 sc->sc_active = 5;
2967 cmd->cmd_callback = hifn_callback_comp;
2968 return (0);
2969 }
2970
2971 static void
2972 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2973 u_int8_t *resbuf)
2974 {
2975 struct hifn_base_result baseres;
2976 struct cryptop *crp = cmd->crp;
2977 struct hifn_dma *dma = sc->sc_dma;
2978 struct mbuf *m;
2979 int err = 0, i, u;
2980 u_int32_t olen;
2981 bus_size_t dstsize;
2982
2983 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2984 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2985 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2986 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2987
2988 dstsize = cmd->dst_map->dm_mapsize;
2989 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2990
2991 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2992
2993 i = dma->dstk; u = dma->dstu;
2994 while (u != 0) {
2995 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2996 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2997 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2998 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2999 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
3000 offsetof(struct hifn_dma, dstr[i]),
3001 sizeof(struct hifn_desc),
3002 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3003 break;
3004 }
3005 if (++i == (HIFN_D_DST_RSIZE + 1))
3006 i = 0;
3007 else
3008 u--;
3009 }
3010 dma->dstk = i; dma->dstu = u;
3011
3012 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
3013 bus_size_t xlen;
3014
3015 xlen = dstsize;
3016
3017 m_freem(cmd->dstu.dst_m);
3018
3019 if (xlen == HIFN_MAX_DMALEN) {
3020 /* We've done all we can. */
3021 err = E2BIG;
3022 goto out;
3023 }
3024
3025 xlen += MCLBYTES;
3026
3027 if (xlen > HIFN_MAX_DMALEN)
3028 xlen = HIFN_MAX_DMALEN;
3029
3030 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
3031 cmd->srcu.src_m);
3032 if (cmd->dstu.dst_m == NULL) {
3033 err = ENOMEM;
3034 goto out;
3035 }
3036 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
3037 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
3038 err = ENOMEM;
3039 goto out;
3040 }
3041
3042 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3043 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3044 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3045 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
3046
3047 err = hifn_compress_enter(sc, cmd);
3048 if (err != 0)
3049 goto out;
3050 return;
3051 }
3052
3053 olen = dstsize - (letoh16(baseres.dst_cnt) |
3054 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
3055 HIFN_BASE_RES_DSTLEN_S) << 16));
3056
3057 crp->crp_olen = olen - cmd->compcrd->crd_skip;
3058
3059 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3060 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3061 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3062
3063 m = cmd->dstu.dst_m;
3064 if (m->m_flags & M_PKTHDR)
3065 m->m_pkthdr.len = olen;
3066 crp->crp_buf = (void *)m;
3067 for (; m != NULL; m = m->m_next) {
3068 if (olen >= m->m_len)
3069 olen -= m->m_len;
3070 else {
3071 m->m_len = olen;
3072 olen = 0;
3073 }
3074 }
3075
3076 m_freem(cmd->srcu.src_m);
3077 free(cmd, M_DEVBUF);
3078 crp->crp_etype = 0;
3079 crypto_done(crp);
3080 return;
3081
3082 out:
3083 if (cmd->dst_map != NULL) {
3084 if (cmd->src_map->dm_nsegs != 0)
3085 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3086 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3087 }
3088 if (cmd->src_map != NULL) {
3089 if (cmd->src_map->dm_nsegs != 0)
3090 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3091 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3092 }
3093 if (cmd->dstu.dst_m != NULL)
3094 m_freem(cmd->dstu.dst_m);
3095 free(cmd, M_DEVBUF);
3096 crp->crp_etype = err;
3097 crypto_done(crp);
3098 }
3099
3100 static struct mbuf *
3101 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3102 {
3103 int len;
3104 struct mbuf *m, *m0, *mlast;
3105
3106 if (mtemplate->m_flags & M_PKTHDR) {
3107 len = MHLEN;
3108 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3109 } else {
3110 len = MLEN;
3111 MGET(m0, M_DONTWAIT, MT_DATA);
3112 }
3113 if (m0 == NULL)
3114 return (NULL);
3115 if (len == MHLEN)
3116 M_DUP_PKTHDR(m0, mtemplate);
3117 MCLGET(m0, M_DONTWAIT);
3118 if (!(m0->m_flags & M_EXT))
3119 m_freem(m0);
3120 len = MCLBYTES;
3121
3122 totlen -= len;
3123 m0->m_pkthdr.len = m0->m_len = len;
3124 mlast = m0;
3125
3126 while (totlen > 0) {
3127 MGET(m, M_DONTWAIT, MT_DATA);
3128 if (m == NULL) {
3129 m_freem(m0);
3130 return (NULL);
3131 }
3132 MCLGET(m, M_DONTWAIT);
3133 if (!(m->m_flags & M_EXT)) {
3134 m_freem(m0);
3135 return (NULL);
3136 }
3137 len = MCLBYTES;
3138 m->m_len = len;
3139 if (m0->m_flags & M_PKTHDR)
3140 m0->m_pkthdr.len += len;
3141 totlen -= len;
3142
3143 mlast->m_next = m;
3144 mlast = m;
3145 }
3146
3147 return (m0);
3148 }
3149 #endif /* HAVE_CRYPTO_LZS */
3150
3151 static void
3152 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3153 {
3154 /*
3155 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3156 * and Group 1 registers; avoid conditions that could create
3157 * burst writes by doing a read in between the writes.
3158 */
3159 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3160 if (sc->sc_waw_lastgroup == reggrp &&
3161 sc->sc_waw_lastreg == reg - 4) {
3162 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3163 }
3164 sc->sc_waw_lastgroup = reggrp;
3165 sc->sc_waw_lastreg = reg;
3166 }
3167 if (reggrp == 0)
3168 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3169 else
3170 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3171
3172 }
3173
3174 static u_int32_t
3175 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3176 {
3177 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3178 sc->sc_waw_lastgroup = -1;
3179 sc->sc_waw_lastreg = 1;
3180 }
3181 if (reggrp == 0)
3182 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3183 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3184 }
3185