/*	$NetBSD: hifn7751.c,v 1.65 2020/02/29 16:36:25 mlelstv Exp $	*/
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.65 2020/02/29 16:36:25 mlelstv Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63 #include <sys/endian.h>
64
65 #ifdef __OpenBSD__
66 #include <crypto/crypto.h>
67 #include <dev/rndvar.h>
68 #else
69 #include <opencrypto/cryptodev.h>
70 #include <sys/cprng.h>
71 #include <sys/rndpool.h>
72 #include <sys/rndsource.h>
73 #include <sys/sha1.h>
74 #endif
75
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79
80 #include <dev/pci/hifn7751reg.h>
81 #include <dev/pci/hifn7751var.h>
82
83 #undef HIFN_DEBUG
84
85 #ifdef __NetBSD__
86 #define M_DUP_PKTHDR m_copy_pkthdr /* XXX */
87 #endif
88
89 #ifdef HIFN_DEBUG
90 extern int hifn_debug; /* patchable */
91 int hifn_debug = 1;
92 #endif
93
94 #ifdef __OpenBSD__
95 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
96 #endif
97
98 /*
99 * Prototypes and count for the pci_device structure
100 */
101 #ifdef __OpenBSD__
102 static int hifn_probe((struct device *, void *, void *);
103 #else
104 static int hifn_probe(device_t, cfdata_t, void *);
105 #endif
106 static void hifn_attach(device_t, device_t, void *);
107 #ifdef __NetBSD__
108 static int hifn_detach(device_t, int);
109
110 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
111 hifn_probe, hifn_attach, hifn_detach, NULL);
112 #else
113 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
114 hifn_probe, hifn_attach, NULL, NULL);
115 #endif
116
117 #ifdef __OpenBSD__
118 struct cfdriver hifn_cd = {
119 0, "hifn", DV_DULL
120 };
121 #endif
122
/*
 * Forward declarations for the driver's internal helpers; see the
 * definitions below for per-function details.
 */
/* Board/processing-unit reset and register setup */
static void	hifn_reset_board(struct hifn_softc *, int);
static void	hifn_reset_puc(struct hifn_softc *);
static void	hifn_puc_wait(struct hifn_softc *);
static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
static void	hifn_set_retry(struct hifn_softc *);
static void	hifn_init_dma(struct hifn_softc *);
static void	hifn_init_pci_registers(struct hifn_softc *);
/* On-board RAM probing/sizing */
static int	hifn_sramsize(struct hifn_softc *);
static int	hifn_dramsize(struct hifn_softc *);
static int	hifn_ramtype(struct hifn_softc *);
static void	hifn_sessions(struct hifn_softc *);
/* Interrupt and opencrypto entry points */
static int	hifn_intr(void *);
static u_int	hifn_write_command(struct hifn_command *, uint8_t *);
static uint32_t	hifn_next_signature(uint32_t a, u_int cnt);
static int	hifn_newsession(void*, uint32_t *, struct cryptoini *);
static int	hifn_freesession(void*, uint64_t);
static int	hifn_process(void*, struct cryptop *, int);
static void	hifn_callback(struct hifn_softc *, struct hifn_command *,
			      uint8_t *);
static int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
			    struct cryptop*, int);
/* DMA helpers */
static int	hifn_readramaddr(struct hifn_softc *, int, uint8_t *);
static int	hifn_writeramaddr(struct hifn_softc *, int, uint8_t *);
static int	hifn_dmamap_aligned(bus_dmamap_t);
static int	hifn_dmamap_load_src(struct hifn_softc *,
				     struct hifn_command *);
static int	hifn_dmamap_load_dst(struct hifn_softc *,
				     struct hifn_command *);
/* RNG / public-key unit and housekeeping */
static int	hifn_init_pubrng(struct hifn_softc *);
static void	hifn_rng(void *);
static void	hifn_rng_locked(void *);
static void	hifn_tick(void *);
static void	hifn_abort(struct hifn_softc *);
static void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
				int *);
static void	hifn_write_4(struct hifn_softc *, int, bus_size_t, uint32_t);
static uint32_t	hifn_read_4(struct hifn_softc *, int, bus_size_t);
#ifdef HAVE_CRYPTO_LZS
/* LZS compression support (OpenBSD OCF only) */
static int	hifn_compression(struct hifn_softc *, struct cryptop *,
				 struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
				   uint8_t *);
#endif	/* HAVE_CRYPTO_LZS */
168
/* Global operation/error counters shared by all hifn instances. */
struct hifn_stats hifnstats;

/*
 * Table of supported boards: PCI vendor/product IDs, per-chip feature
 * flags and a human-readable name.  Terminated by a NULL name entry.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	/* Sentinel */
	{ 0,			0,
	  0,
	  NULL
	}
};
217
218 static const struct hifn_product *
219 hifn_lookup(const struct pci_attach_args *pa)
220 {
221 const struct hifn_product *hp;
222
223 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
224 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
225 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
226 return (hp);
227 }
228 return (NULL);
229 }
230
231 static int
232 hifn_probe(device_t parent, cfdata_t match, void *aux)
233 {
234 struct pci_attach_args *pa = aux;
235
236 if (hifn_lookup(pa) != NULL)
237 return 1;
238
239 return 0;
240 }
241
/*
 * Attach a matched device: map both PCI register windows, allocate the
 * shared DMA descriptor area, unlock the crypto engine, size the
 * on-board RAM, establish the interrupt, register algorithms with
 * opencrypto, and start the periodic tick (and RNG, if present).
 * All failure paths unwind in reverse order of acquisition.
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
#ifdef __NetBSD__
	/* On NetBSD the BAR sizes live in the softc so detach can unmap. */
#define iosize0 sc->sc_iosz0
#define iosize1 sc->sc_iosz1
#else
	bus_size_t iosize0, iosize1;
#endif
	uint32_t cmd;
	uint16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;
	char intrbuf[PCI_INTRSTR_LEN];

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* Probe already matched, so the table must match too. */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering; the chip moves all data by DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0: group-0 registers (READ/WRITE_REG_0 below). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	/* BAR1: group-1 (DMA) registers (READ/WRITE_REG_1 below). */
	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map, create and load one physically contiguous,
	 * page-aligned chunk holding the whole struct hifn_dma
	 * (command/source/dest/result descriptor rings).
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef __OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
#else
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/*
	 * NOTE(review): rseg is reused here to hold "RAM size in KB/MB"
	 * for the attach message, clobbering the DMA segment count.  The
	 * fail_intr path below then hands the clobbered value to
	 * bus_dmamem_free() -- confirm and use a separate variable.
	 */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/*
	 * Read the chip-enable level and register only the algorithms it
	 * permits: ENA_2 registers 3DES/ARC4 (and AES if present) and then
	 * falls through to the ENA_1 set (hashes, HMACs, DES).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	/* Hand the (zeroed) descriptor area to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
		/* Ask for a full pool's worth of entropy up front. */
		sc->sc_rng_need = RND_POOLBITS / NBBY;
	}

#ifdef __OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
471
472 #ifdef __NetBSD__
/*
 * Detach: abort outstanding work, reset the board, tear down the
 * interrupt, unregister from opencrypto and the entropy framework,
 * stop the callouts and unmap the register windows.
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	hifn_abort(sc);

	/* Full reset quiesces the DMA engine before we pull resources. */
	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	/*
	 * NOTE(review): the RND source is only attached when
	 * HIFN_HAS_RNG is set (see hifn_init_pubrng()), but it is
	 * detached unconditionally here -- confirm rnd_detach_source()
	 * tolerates a never-attached source.
	 */
	rnd_detach_source(&sc->sc_rnd_source);

	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	/* Sizes were stashed in the softc by hifn_attach(). */
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}
504
505 MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");
506
507 #ifdef _MODULE
508 #include "ioconf.c"
509 #endif
510
511 static int
512 hifn_modcmd(modcmd_t cmd, void *data)
513 {
514 int error = 0;
515
516 switch (cmd) {
517 case MODULE_CMD_INIT:
518 #ifdef _MODULE
519 error = config_init_component(cfdriver_ioconf_hifn,
520 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
521 #endif
522 return error;
523 case MODULE_CMD_FINI:
524 #ifdef _MODULE
525 error = config_fini_component(cfdriver_ioconf_hifn,
526 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
527 #endif
528 return error;
529 default:
530 return ENOTTY;
531 }
532 }
533
534 #endif /* ifdef __NetBSD__ */
535
/*
 * rndsource(9) request callback: record how many bytes the entropy
 * pool wants and schedule the RNG harvest callout to run immediately.
 */
static void
hifn_rng_get(size_t bytes, void *priv)
{
	struct hifn_softc *sc = priv;

	mutex_enter(&sc->sc_mtx);
	sc->sc_rng_need = bytes;
	/* Delay 0: fire hifn_rng() as soon as possible. */
	callout_reset(&sc->sc_rngto, 0, hifn_rng, sc);
	mutex_exit(&sc->sc_mtx);
}
546
/*
 * Reset and enable the public-key unit and (if the part has one) the
 * hardware RNG, attach the entropy source, and prime the pool with an
 * initial harvest.  Returns 0 on success, 1 if the public-key engine
 * fails to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	uint32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, load default config, re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) waits only 4ms; a 0.4s
		 * warm-up would be DELAY(400000) -- confirm the
		 * intended delay.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG,
		    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
#endif

		/* Harvest roughly every 10ms (every tick when hz < 100). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef __OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
628
/*
 * Harvest words from the hardware RNG into the kernel entropy pool.
 * Must be called with sc_mtx held (see hifn_rng()); re-arms the
 * harvest callout while sc_rng_need is still positive.
 */
static void
hifn_rng_locked(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	uint32_t num[64];
#else
	uint32_t num[2];
#endif
	uint32_t sts;
	int i;
	size_t got, gotent;

	/* Pool satisfied: stop polling until hifn_rng_get() re-arms us. */
	if (sc->sc_rng_need < 1) {
		callout_stop(&sc->sc_rngto);
		return;
	}

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Give up permanently; no re-arm below
				 * ever happens after this return. */
				printf("%s: RNG underflow: disabling\n",
				    device_xname(sc->sc_dv));
				return;
			}
			/* No data ready yet; try again next callout. */
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			got = 2 * sizeof(num[0]);
			gotent = (got * NBBY) / HIFN_RNG_BITSPER;

#ifdef __NetBSD__
			rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
			sc->sc_rng_need -= gotent;
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif
		}
	} else {
		int nwords = 0;

		/* Convert the byte deficit into words, capped at num[]. */
		if (sc->sc_rng_need) {
			nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
			nwords = MIN((int)__arraycount(num), nwords);
		}

		if (nwords < 2) {
			nwords = 2;
		}

		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		for (i = 0 ; i < nwords * 8; i++) {
			/* volatile forces every one of the 8 reads. */
			volatile uint32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}

		got = nwords * sizeof(num[0]);
		gotent = (got * NBBY) / HIFN_RNG_BITSPER;
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
		sc->sc_rng_need -= gotent;
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

#ifdef __OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	/* Still short of the request: poll again in sc_rnghz ticks. */
	if (sc->sc_rng_need > 0) {
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif
}
745
/*
 * Callout entry point for RNG harvesting: take the softc lock and
 * run the real work in hifn_rng_locked().
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	hifn_rng_locked(vsc);
	mutex_spin_exit(&sc->sc_mtx);
}
755
756 static void
757 hifn_puc_wait(struct hifn_softc *sc)
758 {
759 int i;
760
761 for (i = 5000; i > 0; i--) {
762 DELAY(1);
763 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
764 break;
765 }
766 if (!i)
767 printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
768 }
769
770 /*
771 * Reset the processing unit.
772 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/*
	 * Reset processing unit: write PUCTRL with only DMAENA set
	 * (RESET bit deasserted), then wait for the RESET bit to clear.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
780
781 static void
782 hifn_set_retry(struct hifn_softc *sc)
783 {
784 uint32_t r;
785
786 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
787 r &= 0xffff0000;
788 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
789 }
790
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	uint32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		/* Full reset: also drop MSTRESET. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: keep MSTRESET, reset the processing unit. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are now stale; clear the shared DMA area. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for the 7811's context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
844
845 static uint32_t
846 hifn_next_signature(uint32_t a, u_int cnt)
847 {
848 u_int i;
849 uint32_t v;
850
851 for (i = 0; i < cnt; i++) {
852
853 /* get the parity */
854 v = a & 0x80080125;
855 v ^= v >> 16;
856 v ^= v >> 8;
857 v ^= v >> 4;
858 v ^= v >> 2;
859 v ^= v >> 1;
860
861 a = (v & 1) ^ (a << 1);
862 }
863
864 return a;
865 }
866
/*
 * Per-product 13-byte unlock keys used by hifn_enable_crypto() to
 * clock the signature LFSR.  All supported boards ship with an
 * all-zero key.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
914
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	uint32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find the unlock key for this vendor/product. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save register state so it can be restored at "report". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Put the DMA unit into unlock mode and read the LFSR seed. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Clock in the 13 key bytes, advancing the LFSR key+0x101 steps. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the saved register contents. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	/* Return a capability string for the attach message. */
	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
1017
1018 /*
1019 * Give initial values to the registers listed in the "Register Space"
1020 * section of the HIFN Software Development reference manual.
1021 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * Write all 4 ring address registers: each ring's bus address is
	 * the DMA map base plus its offset within struct hifn_dma.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four ring engines and
	 * write back every per-ring status bit (presumably
	 * write-1-to-clear -- TODO confirm against the register spec).
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Select the interrupt sources we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		/* 795x: no external RAM config bits; program the PLL. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		/* Older parts: select DRAM or SRAM per the probed model. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1089
1090 /*
1091 * The maximum number of sessions supported by the card
1092 * is dependent on the amount of context ram, which
1093 * encryption algorithms are enabled, and how compression
1094 * is configured. This should be configured before this
1095 * routine is called.
1096 */
1097 static void
1098 hifn_sessions(struct hifn_softc *sc)
1099 {
1100 uint32_t pucnfg;
1101 int ctxsize;
1102
1103 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1104
1105 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1106 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1107 ctxsize = 128;
1108 else
1109 ctxsize = 512;
1110 /*
1111 * 7955/7956 has internal context memory of 32K
1112 */
1113 if (sc->sc_flags & HIFN_IS_7956)
1114 sc->sc_maxses = 32768 / ctxsize;
1115 else
1116 sc->sc_maxses = 1 +
1117 ((sc->sc_ramsize - 32768) / ctxsize);
1118 } else
1119 sc->sc_maxses = sc->sc_ramsize / 16384;
1120
1121 if (sc->sc_maxses > 2048)
1122 sc->sc_maxses = 2048;
1123 }
1124
1125 /*
1126 * Determine ram type (sram or dram). Board should be just out of a reset
1127 * state when this is called.
1128 */
1129 static int
1130 hifn_ramtype(struct hifn_softc *sc)
1131 {
1132 uint8_t data[8], dataexpect[8];
1133 size_t i;
1134
1135 for (i = 0; i < sizeof(data); i++)
1136 data[i] = dataexpect[i] = 0x55;
1137 if (hifn_writeramaddr(sc, 0, data))
1138 return (-1);
1139 if (hifn_readramaddr(sc, 0, data))
1140 return (-1);
1141 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1142 sc->sc_drammodel = 1;
1143 return (0);
1144 }
1145
1146 for (i = 0; i < sizeof(data); i++)
1147 data[i] = dataexpect[i] = 0xaa;
1148 if (hifn_writeramaddr(sc, 0, data))
1149 return (-1);
1150 if (hifn_readramaddr(sc, 0, data))
1151 return (-1);
1152 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1153 sc->sc_drammodel = 1;
1154 return (0);
1155 }
1156
1157 return (0);
1158 }
1159
1160 #define HIFN_SRAM_MAX (32 << 20)
1161 #define HIFN_SRAM_STEP_SIZE 16384
1162 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1163
1164 static int
1165 hifn_sramsize(struct hifn_softc *sc)
1166 {
1167 uint32_t a, b;
1168 uint8_t data[8];
1169 uint8_t dataexpect[sizeof(data)];
1170 size_t i;
1171
1172 for (i = 0; i < sizeof(data); i++)
1173 data[i] = dataexpect[i] = i ^ 0x5a;
1174
1175 a = HIFN_SRAM_GRANULARITY * HIFN_SRAM_STEP_SIZE;
1176 b = HIFN_SRAM_GRANULARITY;
1177 for (i = 0; i < HIFN_SRAM_GRANULARITY; ++i) {
1178 a -= HIFN_SRAM_STEP_SIZE;
1179 b -= 1;
1180 le32enc(data, b);
1181 hifn_writeramaddr(sc, a, data);
1182 }
1183
1184 a = 0;
1185 b = 0;
1186 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1187 le32enc(dataexpect, b);
1188 if (hifn_readramaddr(sc, a, data) < 0)
1189 return (0);
1190 if (memcmp(data, dataexpect, sizeof(data)) != 0)
1191 return (0);
1192
1193 a += HIFN_SRAM_STEP_SIZE;
1194 b += 1;
1195 sc->sc_ramsize = a;
1196 }
1197
1198 return (0);
1199 }
1200
1201 /*
1202 * XXX For dram boards, one should really try all of the
1203 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1204 * is already set up correctly.
1205 */
1206 static int
1207 hifn_dramsize(struct hifn_softc *sc)
1208 {
1209 uint32_t cnfg;
1210
1211 if (sc->sc_flags & HIFN_IS_7956) {
1212 /*
1213 * 7955/7956 have a fixed internal ram of only 32K.
1214 */
1215 sc->sc_ramsize = 32768;
1216 } else {
1217 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1218 HIFN_PUCNFG_DRAMMASK;
1219 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1220 }
1221 return (0);
1222 }
1223
/*
 * Reserve one descriptor slot in each of the four rings (command,
 * source, destination, result), returning the indices through the
 * out parameters.  When an insert index has reached the end of its
 * ring, the extra trailing descriptor is armed as a JUMP back to
 * entry 0 and the index wraps.  The cleanup index ("k") is advanced
 * in step with the insert index ("i") here -- presumably because the
 * callers (hifn_writeramaddr/hifn_readramaddr) poll for completion
 * themselves rather than relying on the interrupt-time ring sweep;
 * verify against hifn_intr() before changing.
 *
 * The caller fills in the descriptors and performs the required
 * bus_dmamap_sync() for them.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* Wrap: jump descriptor sends the engine back to cmdr[0]. */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1270
1271 static int
1272 hifn_writeramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
1273 {
1274 struct hifn_dma *dma = sc->sc_dma;
1275 struct hifn_base_command wc;
1276 const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1277 int r, cmdi, resi, srci, dsti;
1278
1279 wc.masks = htole16(3 << 13);
1280 wc.session_num = htole16(addr >> 14);
1281 wc.total_source_count = htole16(8);
1282 wc.total_dest_count = htole16(addr & 0x3fff);
1283
1284 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1285
1286 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1287 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1288 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1289
1290 /* build write command */
1291 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1292 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1293 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1294
1295 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1296 + offsetof(struct hifn_dma, test_src));
1297 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1298 + offsetof(struct hifn_dma, test_dst));
1299
1300 dma->cmdr[cmdi].l = htole32(16 | masks);
1301 dma->srcr[srci].l = htole32(8 | masks);
1302 dma->dstr[dsti].l = htole32(4 | masks);
1303 dma->resr[resi].l = htole32(4 | masks);
1304
1305 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1306 0, sc->sc_dmamap->dm_mapsize,
1307 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1308
1309 for (r = 10000; r >= 0; r--) {
1310 DELAY(10);
1311 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1312 0, sc->sc_dmamap->dm_mapsize,
1313 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1314 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1315 break;
1316 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1317 0, sc->sc_dmamap->dm_mapsize,
1318 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1319 }
1320 if (r == 0) {
1321 printf("%s: writeramaddr -- "
1322 "result[%d](addr %d) still valid\n",
1323 device_xname(sc->sc_dv), resi, addr);
1324 r = -1;
1325 return (-1);
1326 } else
1327 r = 0;
1328
1329 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1330 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1331 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1332
1333 return (r);
1334 }
1335
1336 static int
1337 hifn_readramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
1338 {
1339 struct hifn_dma *dma = sc->sc_dma;
1340 struct hifn_base_command rc;
1341 const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1342 int r, cmdi, srci, dsti, resi;
1343
1344 rc.masks = htole16(2 << 13);
1345 rc.session_num = htole16(addr >> 14);
1346 rc.total_source_count = htole16(addr & 0x3fff);
1347 rc.total_dest_count = htole16(8);
1348
1349 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1350
1351 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1352 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1353 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1354
1355 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1356 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1357
1358 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1359 offsetof(struct hifn_dma, test_src));
1360 dma->test_src = 0;
1361 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1362 offsetof(struct hifn_dma, test_dst));
1363 dma->test_dst = 0;
1364 dma->cmdr[cmdi].l = htole32(8 | masks);
1365 dma->srcr[srci].l = htole32(8 | masks);
1366 dma->dstr[dsti].l = htole32(8 | masks);
1367 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1368
1369 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1370 0, sc->sc_dmamap->dm_mapsize,
1371 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1372
1373 for (r = 10000; r >= 0; r--) {
1374 DELAY(10);
1375 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1376 0, sc->sc_dmamap->dm_mapsize,
1377 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1378 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1379 break;
1380 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1381 0, sc->sc_dmamap->dm_mapsize,
1382 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1383 }
1384 if (r == 0) {
1385 printf("%s: readramaddr -- "
1386 "result[%d](addr %d) still valid\n",
1387 device_xname(sc->sc_dv), resi, addr);
1388 r = -1;
1389 } else {
1390 r = 0;
1391 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1392 }
1393
1394 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1395 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1396 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1397
1398 return (r);
1399 }
1400
1401 /*
1402 * Initialize the descriptor rings.
1403 */
1404 static void
1405 hifn_init_dma(struct hifn_softc *sc)
1406 {
1407 struct hifn_dma *dma = sc->sc_dma;
1408 int i;
1409
1410 hifn_set_retry(sc);
1411
1412 /* initialize static pointer values */
1413 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1414 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1415 offsetof(struct hifn_dma, command_bufs[i][0]));
1416 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1417 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1418 offsetof(struct hifn_dma, result_bufs[i][0]));
1419
1420 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1421 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1422 offsetof(struct hifn_dma, cmdr[0]));
1423 dma->srcr[HIFN_D_SRC_RSIZE].p =
1424 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1425 offsetof(struct hifn_dma, srcr[0]));
1426 dma->dstr[HIFN_D_DST_RSIZE].p =
1427 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1428 offsetof(struct hifn_dma, dstr[0]));
1429 dma->resr[HIFN_D_RES_RSIZE].p =
1430 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1431 offsetof(struct hifn_dma, resr[0]));
1432
1433 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1434 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1435 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1436 }
1437
1438 /*
1439 * Writes out the raw command buffer space. Returns the
1440 * command buffer size.
1441 */
/*
 * Serialize "cmd" into the raw command buffer at "buf" in the order
 * the chip expects: base command, then the optional compression, MAC
 * and crypt sub-commands, followed by any new MAC key, cipher key and
 * IV material.  Returns the number of bytes written.
 *
 * "buf" is one of the fixed-size dma->command_bufs[] slots; the caller
 * is responsible for ensuring the serialized command fits
 * (HIFN_MAX_COMMAND).
 */
static u_int
hifn_write_command(struct hifn_command *cmd, uint8_t *buf)
{
	uint8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	uint32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: total source/destination byte counts, with the
	 * low 16 bits in their own fields and the high bits packed into
	 * session_num alongside the session index.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		/* Destination is rounded out to a whole slop word. */
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(uint32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional compression sub-command. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	/* Optional encryption sub-command. */
	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key, if one is being loaded for this session. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key; layout depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * Replicate the key to fill 256 bytes, then a
			 * 4-byte zero terminator.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV; AES uses a longer IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * With no engine selected at all, pad the command with 8 zero
	 * bytes.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1571
1572 static int
1573 hifn_dmamap_aligned(bus_dmamap_t map)
1574 {
1575 int i;
1576
1577 for (i = 0; i < map->dm_nsegs; i++) {
1578 if (map->dm_segs[i].ds_addr & 3)
1579 return (0);
1580 if ((i != (map->dm_nsegs - 1)) &&
1581 (map->dm_segs[i].ds_len & 3))
1582 return (0);
1583 }
1584 return (1);
1585 }
1586
/*
 * Fill the destination ring with the segments of cmd->dst_map,
 * starting at dma->dsti.  When the total length is not a multiple of
 * 4 (cmd->sloplen != 0), the final partial word is redirected into
 * the per-command slop[] word inside the shared dma area instead of
 * the caller's buffer; hifn_callback() later copies it back.  Each
 * filled descriptor is marked VALID and synced; the ring wraps via
 * the trailing JUMP descriptor.  Updates dma->dsti/dstu and returns
 * the new insert index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	uint32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last go straight into the ring. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* Wrap: re-arm the jump descriptor. */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: the last segment is written directly. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Final partial word lands in the shared slop[] entry. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(uint32_t);

		/* Whole words remaining in the last segment, if any. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Write the final (LAST) descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1658
/*
 * Fill the source ring with the segments of cmd->src_map, starting
 * at dma->srci, marking the final segment HIFN_D_LAST.  The ring
 * wraps via the trailing JUMP descriptor.  Updates dma->srci/srcu
 * and returns the new insert index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	uint32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		/* Only the final segment carries the LAST flag. */
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* Wrap: re-arm the jump descriptor. */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1690
/*
 * Queue one crypto operation on the card: map the source (mbuf chain
 * or uio), decide whether the destination can share the source map or
 * needs a freshly allocated mbuf chain (misaligned mbuf input), then
 * write one command descriptor, N source descriptors, one result
 * descriptor and N destination descriptors, enabling each DMA engine
 * as needed.  Completion is delivered through hifn_intr() ->
 * hifn_callback().
 *
 * Returns 0 on success or an errno (ENOMEM/EINVAL); on error all
 * maps/mbufs allocated here are released.  The "hint" argument is
 * currently unused.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t cmdlen;
	int cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source data into cmd->src_map. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: operate in place, dst shares src map. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* Can't substitute a new buffer for a uio. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/*
			 * Misaligned mbuf chain: build a fresh chain of
			 * the same total length to receive the output.
			 */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Keep appending mbufs until totlen is covered. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination buffer: create and load its own map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* Sync the payload buffers before handing them to the card. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Command ring: wrap if needed, then write the command. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Source ring. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so hifn_intr() can complete it. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop word (if any) lives in the slot matching the result. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	/* Destination ring. */
	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the hifn_tick() idle countdown. */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1958
1959 static void
1960 hifn_tick(void *vsc)
1961 {
1962 struct hifn_softc *sc = vsc;
1963
1964 mutex_spin_enter(&sc->sc_mtx);
1965 if (sc->sc_active == 0) {
1966 struct hifn_dma *dma = sc->sc_dma;
1967 uint32_t r = 0;
1968
1969 if (dma->cmdu == 0 && sc->sc_c_busy) {
1970 sc->sc_c_busy = 0;
1971 r |= HIFN_DMACSR_C_CTRL_DIS;
1972 CLR_LED(sc, HIFN_MIPSRST_LED0);
1973 }
1974 if (dma->srcu == 0 && sc->sc_s_busy) {
1975 sc->sc_s_busy = 0;
1976 r |= HIFN_DMACSR_S_CTRL_DIS;
1977 CLR_LED(sc, HIFN_MIPSRST_LED1);
1978 }
1979 if (dma->dstu == 0 && sc->sc_d_busy) {
1980 sc->sc_d_busy = 0;
1981 r |= HIFN_DMACSR_D_CTRL_DIS;
1982 }
1983 if (dma->resu == 0 && sc->sc_r_busy) {
1984 sc->sc_r_busy = 0;
1985 r |= HIFN_DMACSR_R_CTRL_DIS;
1986 CLR_LED(sc, HIFN_MIPSRST_LED2);
1987 }
1988 if (r)
1989 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1990 } else
1991 sc->sc_active--;
1992 #ifdef __OpenBSD__
1993 timeout_add(&sc->sc_tickto, hz);
1994 #else
1995 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1996 #endif
1997 mutex_spin_exit(&sc->sc_mtx);
1998 }
1999
/*
 * Interrupt handler.  Acknowledges the DMA status bits we care
 * about, resets the card on engine aborts, and reaps completed
 * entries from the result, source and command rings (index
 * HIFN_D_*_RSIZE in each ring is the jump descriptor and is skipped,
 * not counted against the in-use totals).  Returns 1 if the
 * interrupt was ours, 0 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    device_xname(sc->sc_dv),
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* Ack only the bits we were listening for. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	/* Overruns are logged but not treated as fatal here. */
	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	/* Any engine abort requires a full reset of the card. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */

	/* Result ring: complete each retired entry via its callback. */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the card -- stop here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* Jump slot wraps the index without consuming a count. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Source ring: just retire consumed descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring: retire consumed command buffers. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2135
2136 /*
2137 * Allocate a new 'session' and return an encoded session id. 'sidp'
2138 * contains our registration id, and should contain an encoded session
2139 * id on successful allocation.
2140 */
static int
hifn_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return retval;

	mutex_spin_enter(&sc->sc_mtx);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses) {
		retval = ENOMEM;
		goto out;
	}

	/*
	 * Walk the requested algorithm chain.  At most one MAC, one
	 * cipher and one compression algorithm may be requested per
	 * session; anything else is rejected (retval stays EINVAL).
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac) {
				goto out;
			}
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			/* Seed the per-session IV (AES uses 16 bytes,
			   DES/3DES use 8). */
			cprng_fast(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
				c->cri_alg == CRYPTO_AES_CBC ?
					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry) {
				goto out;
			}
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp) {
				goto out;
			}
			comp = 1;
			break;
#endif
		default:
			goto out;
		}
	}
	/* An empty (or all-unknown) request is invalid. */
	if (mac == 0 && cry == 0 && comp == 0) {
		goto out;
	}

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry)) {
		goto out;
	}

	/* Encode device unit + slot index into the session id. */
	*sidp = HIFN_SID(device_unit(sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	retval = 0;
out:
	mutex_spin_exit(&sc->sc_mtx);
	return retval;
}
2231
2232 /*
2233 * Deallocate a session.
2234 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2235 * XXX to blow away any keys already stored there.
2236 */
2237 static int
2238 hifn_freesession(void *arg, uint64_t tid)
2239 {
2240 struct hifn_softc *sc = arg;
2241 int session;
2242 uint32_t sid = ((uint32_t) tid) & 0xffffffff;
2243
2244 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2245 if (sc == NULL)
2246 return (EINVAL);
2247
2248 mutex_spin_enter(&sc->sc_mtx);
2249 session = HIFN_SESSION(sid);
2250 if (session >= sc->sc_maxses) {
2251 mutex_spin_exit(&sc->sc_mtx);
2252 return (EINVAL);
2253 }
2254
2255 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2256 mutex_spin_exit(&sc->sc_mtx);
2257 return (0);
2258 }
2259
/*
 * Dispatch a symmetric crypto request (cipher and/or MAC).  Validates
 * the descriptor chain, fills in a hifn_command, and hands it to
 * hifn_crypto() under sc_mtx.  On ERESTART the caller is asked to
 * requeue; on any other failure the request is completed immediately
 * via crypto_done() with crp_etype set.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}

	mutex_spin_enter(&sc->sc_mtx);
	session = HIFN_SESSION(crp->crp_sid);

	/*
	 * NOTE(review): the sc == NULL test below is dead code -- sc
	 * was already dereferenced by mutex_spin_enter() above.
	 */
	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* In-place operation: src and dst alias the request buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the (at most two) descriptors into MAC and cipher
	 * roles.  The chip only supports MAC-then-decrypt order for
	 * decryption and encrypt-then-MAC order for encryption.
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
#ifdef HAVE_CRYPTO_LZS
		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
			/*
			 * NOTE(review): this returns with sc_mtx still
			 * held -- confirm hifn_compression()'s locking
			 * expectations before enabling HAVE_CRYPTO_LZS.
			 */
			return (hifn_compression(sc, crp, cmd));
#endif
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/*
			 * RC4 is stateful: a direction change forces
			 * the key to be re-uploaded (state USED below
			 * triggers HIFN_CRYPT_CMD_NEW_KEY).
			 */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Encrypting: explicit IV wins, else use
				   the session IV. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* Write the IV into the packet unless the
				   caller says it is already there. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->srcu.src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->srcu.src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypting: take the IV from the caller
				   or extract it from the packet. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->srcu.src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->srcu.src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* First use of this session since (re)keying: upload key. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* Upload the (zero-padded) HMAC key on first use. */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		mutex_spin_exit(&sc->sc_mtx);
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			printf("%s: requeue request\n", device_xname(sc->sc_dv));
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mutex_spin_exit(&sc->sc_mtx);
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	mutex_spin_exit(&sc->sc_mtx);
	crypto_done(crp);
	return (0);
}
2532
/*
 * Abort all in-flight requests after a DMA fault: salvage requests
 * whose result descriptor already completed, fail the rest with
 * ENOMEM, force key re-upload for active sessions, then reset and
 * reinitialize the board.
 * NOTE(review): presumably runs with sc_mtx held -- callers are not
 * visible in this chunk; confirm before changing locking.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Still owned by the chip: tear the request down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* A separate output chain replaces the input one. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN means "restarted"; don't complete those. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2606
/*
 * Completion handler for a symmetric crypto request: sync/unload the
 * DMA maps, trim a reallocated destination mbuf chain, copy back any
 * slop bytes, reclaim destination-ring descriptors, save the next
 * CBC IV out of the result, copy out the MAC, and finish the request
 * with crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, uint8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A new output chain was allocated: hand it to
			 * the crp, trim it to the source length, and
			 * free the original input chain.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy back trailing bytes that were staged in the slop area. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Reclaim completed destination-ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor, not a
		   real entry, so it doesn't count against u. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, save the last cipherblock as the next
	 * session IV.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	/* Copy the MAC result out of the result buffer. */
	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		uint8_t *macbuf;

		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2740
2741 #ifdef HAVE_CRYPTO_LZS
2742
2743 static int
2744 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2745 struct hifn_command *cmd)
2746 {
2747 struct cryptodesc *crd = crp->crp_desc;
2748 int s, err = 0;
2749
2750 cmd->compcrd = crd;
2751 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2752
2753 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2754 /*
2755 * XXX can only handle mbufs right now since we can
2756 * XXX dynamically resize them.
2757 */
2758 err = EINVAL;
2759 return (ENOMEM);
2760 }
2761
2762 if ((crd->crd_flags & CRD_F_COMP) == 0)
2763 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2764 if (crd->crd_alg == CRYPTO_LZS_COMP)
2765 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2766 HIFN_COMP_CMD_CLEARHIST;
2767
2768 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2769 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2770 err = ENOMEM;
2771 goto fail;
2772 }
2773
2774 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2775 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2776 err = ENOMEM;
2777 goto fail;
2778 }
2779
2780 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2781 int len;
2782
2783 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2784 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2785 err = ENOMEM;
2786 goto fail;
2787 }
2788
2789 len = cmd->src_map->dm_mapsize / MCLBYTES;
2790 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2791 len++;
2792 len *= MCLBYTES;
2793
2794 if ((crd->crd_flags & CRD_F_COMP) == 0)
2795 len *= 4;
2796
2797 if (len > HIFN_MAX_DMALEN)
2798 len = HIFN_MAX_DMALEN;
2799
2800 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2801 if (cmd->dstu.dst_m == NULL) {
2802 err = ENOMEM;
2803 goto fail;
2804 }
2805
2806 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2807 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2808 err = ENOMEM;
2809 goto fail;
2810 }
2811 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2812 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2813 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2814 err = ENOMEM;
2815 goto fail;
2816 }
2817 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2818 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2819 err = ENOMEM;
2820 goto fail;
2821 }
2822 }
2823
2824 if (cmd->src_map == cmd->dst_map)
2825 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2826 0, cmd->src_map->dm_mapsize,
2827 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2828 else {
2829 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2830 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2831 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2832 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2833 }
2834
2835 cmd->crp = crp;
2836 /*
2837 * Always use session 0. The modes of compression we use are
2838 * stateless and there is always at least one compression
2839 * context, zero.
2840 */
2841 cmd->session_num = 0;
2842 cmd->softc = sc;
2843
2844 err = hifn_compress_enter(sc, cmd);
2845
2846 if (err != 0)
2847 goto fail;
2848 return (0);
2849
2850 fail:
2851 if (cmd->dst_map != NULL) {
2852 if (cmd->dst_map->dm_nsegs > 0)
2853 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2854 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2855 }
2856 if (cmd->src_map != NULL) {
2857 if (cmd->src_map->dm_nsegs > 0)
2858 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2859 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2860 }
2861 free(cmd, M_DEVBUF);
2862 if (err == EINVAL)
2863 hifnstats.hst_invalid++;
2864 else
2865 hifnstats.hst_nomem++;
2866 crp->crp_etype = err;
2867 crypto_done(crp);
2868 return (0);
2869 }
2870
2871 static int
2872 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2873 {
2874 struct hifn_dma *dma = sc->sc_dma;
2875 int cmdi, resi;
2876 uint32_t cmdlen;
2877
2878 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2879 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2880 return (ENOMEM);
2881
2882 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2883 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2884 return (ENOMEM);
2885
2886 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2887 dma->cmdi = 0;
2888 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2889 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2890 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2891 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2892 }
2893 cmdi = dma->cmdi++;
2894 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2895 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2896
2897 /* .p for command/result already set */
2898 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2899 HIFN_D_MASKDONEIRQ);
2900 HIFN_CMDR_SYNC(sc, cmdi,
2901 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2902 dma->cmdu++;
2903 if (sc->sc_c_busy == 0) {
2904 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2905 sc->sc_c_busy = 1;
2906 SET_LED(sc, HIFN_MIPSRST_LED0);
2907 }
2908
2909 /*
2910 * We don't worry about missing an interrupt (which a "command wait"
2911 * interrupt salvages us from), unless there is more than one command
2912 * in the queue.
2913 */
2914 if (dma->cmdu > 1) {
2915 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2916 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2917 }
2918
2919 hifnstats.hst_ipackets++;
2920 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2921
2922 hifn_dmamap_load_src(sc, cmd);
2923 if (sc->sc_s_busy == 0) {
2924 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2925 sc->sc_s_busy = 1;
2926 SET_LED(sc, HIFN_MIPSRST_LED1);
2927 }
2928
2929 /*
2930 * Unlike other descriptors, we don't mask done interrupt from
2931 * result descriptor.
2932 */
2933 if (dma->resi == HIFN_D_RES_RSIZE) {
2934 dma->resi = 0;
2935 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2936 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2937 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2938 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2939 }
2940 resi = dma->resi++;
2941 dma->hifn_commands[resi] = cmd;
2942 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2943 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2944 HIFN_D_VALID | HIFN_D_LAST);
2945 HIFN_RESR_SYNC(sc, resi,
2946 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2947 dma->resu++;
2948 if (sc->sc_r_busy == 0) {
2949 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2950 sc->sc_r_busy = 1;
2951 SET_LED(sc, HIFN_MIPSRST_LED2);
2952 }
2953
2954 if (cmd->sloplen)
2955 cmd->slopidx = resi;
2956
2957 hifn_dmamap_load_dst(sc, cmd);
2958
2959 if (sc->sc_d_busy == 0) {
2960 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2961 sc->sc_d_busy = 1;
2962 }
2963 sc->sc_active = 5;
2964 cmd->cmd_callback = hifn_callback_comp;
2965 return (0);
2966 }
2967
2968 static void
2969 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2970 uint8_t *resbuf)
2971 {
2972 struct hifn_base_result baseres;
2973 struct cryptop *crp = cmd->crp;
2974 struct hifn_dma *dma = sc->sc_dma;
2975 struct mbuf *m;
2976 int err = 0, i, u;
2977 uint32_t olen;
2978 bus_size_t dstsize;
2979
2980 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2981 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2982 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2983 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2984
2985 dstsize = cmd->dst_map->dm_mapsize;
2986 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2987
2988 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2989
2990 i = dma->dstk; u = dma->dstu;
2991 while (u != 0) {
2992 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2993 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2994 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2995 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2996 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2997 offsetof(struct hifn_dma, dstr[i]),
2998 sizeof(struct hifn_desc),
2999 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3000 break;
3001 }
3002 if (++i == (HIFN_D_DST_RSIZE + 1))
3003 i = 0;
3004 else
3005 u--;
3006 }
3007 dma->dstk = i; dma->dstu = u;
3008
3009 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
3010 bus_size_t xlen;
3011
3012 xlen = dstsize;
3013
3014 m_freem(cmd->dstu.dst_m);
3015
3016 if (xlen == HIFN_MAX_DMALEN) {
3017 /* We've done all we can. */
3018 err = E2BIG;
3019 goto out;
3020 }
3021
3022 xlen += MCLBYTES;
3023
3024 if (xlen > HIFN_MAX_DMALEN)
3025 xlen = HIFN_MAX_DMALEN;
3026
3027 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
3028 cmd->srcu.src_m);
3029 if (cmd->dstu.dst_m == NULL) {
3030 err = ENOMEM;
3031 goto out;
3032 }
3033 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
3034 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
3035 err = ENOMEM;
3036 goto out;
3037 }
3038
3039 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3040 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3041 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3042 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
3043
3044 err = hifn_compress_enter(sc, cmd);
3045 if (err != 0)
3046 goto out;
3047 return;
3048 }
3049
3050 olen = dstsize - (letoh16(baseres.dst_cnt) |
3051 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
3052 HIFN_BASE_RES_DSTLEN_S) << 16));
3053
3054 crp->crp_olen = olen - cmd->compcrd->crd_skip;
3055
3056 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3057 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3058 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3059
3060 m = cmd->dstu.dst_m;
3061 if (m->m_flags & M_PKTHDR)
3062 m->m_pkthdr.len = olen;
3063 crp->crp_buf = (void *)m;
3064 for (; m != NULL; m = m->m_next) {
3065 if (olen >= m->m_len)
3066 olen -= m->m_len;
3067 else {
3068 m->m_len = olen;
3069 olen = 0;
3070 }
3071 }
3072
3073 m_freem(cmd->srcu.src_m);
3074 free(cmd, M_DEVBUF);
3075 crp->crp_etype = 0;
3076 crypto_done(crp);
3077 return;
3078
3079 out:
3080 if (cmd->dst_map != NULL) {
3081 if (cmd->src_map->dm_nsegs != 0)
3082 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3083 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3084 }
3085 if (cmd->src_map != NULL) {
3086 if (cmd->src_map->dm_nsegs != 0)
3087 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3088 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3089 }
3090 if (cmd->dstu.dst_m != NULL)
3091 m_freem(cmd->dstu.dst_m);
3092 free(cmd, M_DEVBUF);
3093 crp->crp_etype = err;
3094 crypto_done(crp);
3095 }
3096
3097 static struct mbuf *
3098 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3099 {
3100 int len;
3101 struct mbuf *m, *m0, *mlast;
3102
3103 if (mtemplate->m_flags & M_PKTHDR) {
3104 len = MHLEN;
3105 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3106 } else {
3107 len = MLEN;
3108 MGET(m0, M_DONTWAIT, MT_DATA);
3109 }
3110 if (m0 == NULL)
3111 return (NULL);
3112 if (len == MHLEN)
3113 M_DUP_PKTHDR(m0, mtemplate);
3114 MCLGET(m0, M_DONTWAIT);
3115 if (!(m0->m_flags & M_EXT)) {
3116 m_freem(m0);
3117 return (NULL);
3118 }
3119 len = MCLBYTES;
3120
3121 totlen -= len;
3122 m0->m_pkthdr.len = m0->m_len = len;
3123 mlast = m0;
3124
3125 while (totlen > 0) {
3126 MGET(m, M_DONTWAIT, MT_DATA);
3127 if (m == NULL) {
3128 m_freem(m0);
3129 return (NULL);
3130 }
3131 MCLGET(m, M_DONTWAIT);
3132 if (!(m->m_flags & M_EXT)) {
3133 m_freem(m);
3134 m_freem(m0);
3135 return (NULL);
3136 }
3137 len = MCLBYTES;
3138 m->m_len = len;
3139 if (m0->m_flags & M_PKTHDR)
3140 m0->m_pkthdr.len += len;
3141 totlen -= len;
3142
3143 mlast->m_next = m;
3144 mlast = m;
3145 }
3146
3147 return (m0);
3148 }
3149 #endif /* HAVE_CRYPTO_LZS */
3150
3151 static void
3152 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, uint32_t val)
3153 {
3154 /*
3155 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3156 * and Group 1 registers; avoid conditions that could create
3157 * burst writes by doing a read in between the writes.
3158 */
3159 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3160 if (sc->sc_waw_lastgroup == reggrp &&
3161 sc->sc_waw_lastreg == reg - 4) {
3162 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3163 }
3164 sc->sc_waw_lastgroup = reggrp;
3165 sc->sc_waw_lastreg = reg;
3166 }
3167 if (reggrp == 0)
3168 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3169 else
3170 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3171
3172 }
3173
3174 static uint32_t
3175 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3176 {
3177 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3178 sc->sc_waw_lastgroup = -1;
3179 sc->sc_waw_lastreg = 1;
3180 }
3181 if (reggrp == 0)
3182 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3183 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3184 }
3185