hifn7751.c revision 1.56.2.2 1 /* $NetBSD: hifn7751.c,v 1.56.2.2 2015/05/11 06:15:46 msaitoh Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.56.2.2 2015/05/11 06:15:46 msaitoh Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #ifdef __OpenBSD__
65 #include <crypto/crypto.h>
66 #include <dev/rndvar.h>
67 #else
68 #include <opencrypto/cryptodev.h>
69 #include <sys/cprng.h>
70 #include <sys/rnd.h>
71 #include <sys/sha1.h>
72 #endif
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77
78 #include <dev/pci/hifn7751reg.h>
79 #include <dev/pci/hifn7751var.h>
80
81 #undef HIFN_DEBUG
82
83 #ifdef __NetBSD__
84 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
85 #endif
86
87 #ifdef HIFN_DEBUG
88 extern int hifn_debug; /* patchable */
89 int hifn_debug = 1;
90 #endif
91
92 #ifdef __OpenBSD__
93 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
94 #endif
95
96 /*
97 * Prototypes and count for the pci_device structure
98 */
99 #ifdef __OpenBSD__
100 static int hifn_probe((struct device *, void *, void *);
101 #else
102 static int hifn_probe(device_t, cfdata_t, void *);
103 #endif
104 static void hifn_attach(device_t, device_t, void *);
105 #ifdef __NetBSD__
106 static int hifn_detach(device_t, int);
107
108 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
109 hifn_probe, hifn_attach, hifn_detach, NULL);
110 #else
111 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
112 hifn_probe, hifn_attach, NULL, NULL);
113 #endif
114
115 #ifdef __OpenBSD__
116 struct cfdriver hifn_cd = {
117 0, "hifn", DV_DULL
118 };
119 #endif
120
/* Forward declarations for the driver-internal helpers below. */
static void	hifn_reset_board(struct hifn_softc *, int);
static void	hifn_reset_puc(struct hifn_softc *);
static void	hifn_puc_wait(struct hifn_softc *);
static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
static void	hifn_set_retry(struct hifn_softc *);
static void	hifn_init_dma(struct hifn_softc *);
static void	hifn_init_pci_registers(struct hifn_softc *);
static int	hifn_sramsize(struct hifn_softc *);
static int	hifn_dramsize(struct hifn_softc *);
static int	hifn_ramtype(struct hifn_softc *);
static void	hifn_sessions(struct hifn_softc *);
static int	hifn_intr(void *);
static u_int	hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
/* opencrypto(9) entry points registered in hifn_attach(). */
static int	hifn_newsession(void*, u_int32_t *, struct cryptoini *);
static int	hifn_freesession(void*, u_int64_t);
static int	hifn_process(void*, struct cryptop *, int);
static void	hifn_callback(struct hifn_softc *, struct hifn_command *,
			      u_int8_t *);
static int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
			    struct cryptop*, int);
static int	hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int	hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int	hifn_dmamap_aligned(bus_dmamap_t);
static int	hifn_dmamap_load_src(struct hifn_softc *,
				     struct hifn_command *);
static int	hifn_dmamap_load_dst(struct hifn_softc *,
				     struct hifn_command *);
static int	hifn_init_pubrng(struct hifn_softc *);
static void	hifn_rng(void *);
static void	hifn_rng_locked(void *);
static void	hifn_tick(void *);
static void	hifn_abort(struct hifn_softc *);
static void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
				int *);
static void	hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
#ifdef HAVE_CRYPTO_LZS
static int	hifn_compression(struct hifn_softc *, struct cryptop *,
				 struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
				   u_int8_t *);
#endif	/* HAVE_CRYPTO_LZS */

/* Driver-wide statistics; a single global shared by all hifn instances. */
struct hifn_stats hifnstats;
168
/*
 * Table of supported boards: PCI vendor/product pair, per-chip feature
 * flags (HIFN_IS_*, HIFN_HAS_*), and a human-readable name.  Terminated
 * by an all-NULL sentinel entry; hifn_lookup() scans until hifn_name
 * is NULL.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
215
216 static const struct hifn_product *
217 hifn_lookup(const struct pci_attach_args *pa)
218 {
219 const struct hifn_product *hp;
220
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
224 return (hp);
225 }
226 return (NULL);
227 }
228
229 static int
230 hifn_probe(device_t parent, cfdata_t match, void *aux)
231 {
232 struct pci_attach_args *pa = aux;
233
234 if (hifn_lookup(pa) != NULL)
235 return 1;
236
237 return 0;
238 }
239
/*
 * Attach a hifn board: enable bus mastering, map both register BARs,
 * allocate/map/load the single shared DMA descriptor page, unlock the
 * crypto engine, size the on-board RAM, establish the interrupt, and
 * register the supported algorithms with opencrypto(9).
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
#ifdef __NetBSD__
	/* On NetBSD the mapped sizes live in the softc so detach can unmap. */
#define iosize0 sc->sc_iosz0
#define iosize1 sc->sc_iosz1
#else
	bus_size_t iosize0, iosize1;
#endif
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;
	char intrbuf[PCI_INTRSTR_LEN];

	/* Match must succeed here; probe already accepted this device. */
	hp = hifn_lookup(pa);
	if (hp == NULL) {
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable PCI bus mastering so the chip can DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* Write-combining workaround state for chips that can't burst-write. */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate one page-aligned chunk for all four descriptor rings
	 * (struct hifn_dma), map it into KVA and load it into a DMA map.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef __OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/*
	 * Scale the RAM size for display.  NOTE(review): rseg is reused
	 * here as the scaled display value; the fail_intr path below
	 * still passes rseg to bus_dmamem_free(), where it no longer
	 * holds the segment count from bus_dmamem_alloc() — verify.
	 */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/* CHIPID must be set before each read of the enable level. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* Register algorithms according to the unlocked capability level. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
		sc->sc_rng_need = RND_POOLBITS / NBBY;
	}

	/* Start the periodic housekeeping tick. */
#ifdef __OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
468
469 #ifdef __NetBSD__
/*
 * Detach: abort outstanding work, reset the chip, tear down the
 * interrupt, unregister from opencrypto and rnd, stop the callouts
 * and unmap both register windows.  The teardown order mirrors the
 * reverse of hifn_attach().
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	/* Fail all queued commands before touching the hardware. */
	hifn_abort(sc);

	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	/*
	 * NOTE(review): rnd_detach_source() is called unconditionally,
	 * but attach only calls rnd_attach_source() (via
	 * hifn_init_pubrng) when HIFN_HAS_RNG is set — verify this is
	 * safe for RNG-less boards.
	 */
	rnd_detach_source(&sc->sc_rnd_source);

	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}
501
/* Loadable-module glue: depends on the pci and opencrypto modules. */
MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif
507
508 static int
509 hifn_modcmd(modcmd_t cmd, void *data)
510 {
511 int error = 0;
512
513 switch(cmd) {
514 case MODULE_CMD_INIT:
515 #ifdef _MODULE
516 error = config_init_component(cfdriver_ioconf_hifn,
517 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
518 #endif
519 return error;
520 case MODULE_CMD_FINI:
521 #ifdef _MODULE
522 error = config_fini_component(cfdriver_ioconf_hifn,
523 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
524 #endif
525 return error;
526 default:
527 return ENOTTY;
528 }
529 }
530
531 #endif /* ifdef __NetBSD__ */
532
533 static void
534 hifn_rng_get(size_t bytes, void *priv)
535 {
536 struct hifn_softc *sc = priv;
537
538 mutex_enter(&sc->sc_mtx);
539 sc->sc_rng_need = bytes;
540 callout_reset(&sc->sc_rngto, 0, hifn_rng, sc);
541 mutex_exit(&sc->sc_mtx);
542 }
543
/*
 * Initialize the public-key and RNG units: reset the 795x pub/rng
 * engine (not present on the 7811), enable the RNG and hook it into
 * the kernel entropy pool, and unmask the public-key-done interrupt.
 * Returns 1 if the public key engine fails to come out of reset,
 * 0 otherwise.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100 ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* Disable, reconfigure, then re-enable the 7811 RNG. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) waits 4 ms, not the 0.4 s
		 * (400000 us) the comment above calls for — verify.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG,
		    RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
#endif

		/* Harvest at ~100 Hz (or every tick if hz < 100). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef __OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else /* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif /* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
625
/*
 * Harvest entropy from the hardware RNG into the kernel pool.
 * Must be called with sc_mtx held (see hifn_rng()).  Reschedules
 * itself via sc_rngto while sc_rng_need is outstanding.
 */
static void
hifn_rng_locked(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	uint32_t num[64];
#else
	uint32_t num[2];
#endif
	uint32_t sts;
	int i;
	size_t got, gotent;

	/* Nothing requested: stop polling until hifn_rng_get() fires. */
	if (sc->sc_rng_need < 1) {
		callout_stop(&sc->sc_rngto);
		return;
	}

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				printf("%s: RNG underflow: disabling\n",
				    device_xname(sc->sc_dv));
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			got = 2 * sizeof(num[0]);
			gotent = (got * NBBY) / HIFN_RNG_BITSPER;

#ifdef __NetBSD__
			/*
			 * NOTE(review): sc_rng_need is size_t; if gotent
			 * exceeds the remaining need this subtraction
			 * wraps to a huge value — verify.
			 */
			rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
			sc->sc_rng_need -= gotent;
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
		int nwords = 0;

		/* Convert the byte request into RNG words, capped by num[]. */
		if (sc->sc_rng_need) {
			nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
			nwords = MIN(__arraycount(num), nwords);
		}

		if (nwords < 2) {
			nwords = 2;
		}

		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		for (i = 0; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}

		got = nwords * sizeof(num[0]);
		gotent = (got * NBBY) / HIFN_RNG_BITSPER;
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
		sc->sc_rng_need -= gotent;
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

#ifdef __OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	/* More bytes still wanted: poll again after sc_rnghz ticks. */
	if (sc->sc_rng_need > 0) {
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif
}
744
745 static void
746 hifn_rng(void *vsc)
747 {
748 struct hifn_softc *sc = vsc;
749
750 mutex_spin_enter(&sc->sc_mtx);
751 hifn_rng_locked(vsc);
752 mutex_spin_exit(&sc->sc_mtx);
753 }
754
755 static void
756 hifn_puc_wait(struct hifn_softc *sc)
757 {
758 int i;
759
760 for (i = 5000; i > 0; i--) {
761 DELAY(1);
762 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
763 break;
764 }
765 if (!i)
766 printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
767 }
768
/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/*
	 * Reset processing unit: write PUCTRL with only DMAENA set,
	 * then wait for the RESET bit to clear (hifn_puc_wait).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
779
780 static void
781 hifn_set_retry(struct hifn_softc *sc)
782 {
783 u_int32_t r;
784
785 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
786 r &= 0xffff0000;
787 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
788 }
789
/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * With full != 0 the master-reset bit is left deasserted during the
 * DMA reset; otherwise the PUC is also reset.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* Clear the shared descriptor rings; the chip will re-read them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1 s for the 7811's context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
843
/*
 * Advance the unlock-signature LFSR by 'cnt' steps.  Each step folds
 * the parity of (a & 0x80080125) into bit 0 while shifting 'a' left
 * one bit.  Used by hifn_enable_crypto() to derive the values written
 * to the UNLOCK_SECRET2 register.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t parity;

	while (cnt-- > 0) {
		/* Fold the tapped bits down to a single parity bit. */
		parity = a & 0x80080125;
		parity ^= parity >> 16;
		parity ^= parity >> 8;
		parity ^= parity >> 4;
		parity ^= parity >> 2;
		parity ^= parity >> 1;

		a = (parity & 1) ^ (a << 1);
	}

	return a;
}
865
/*
 * Table mapping PCI vendor/product pairs to the 13-byte card id
 * ("unlock key") that hifn_enable_crypto() feeds through
 * hifn_next_signature().  All entries here use the all-zeros key.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
913
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns a human-readable capability string ("DES", "3DES", ...) on
 * success, or NULL if the card is unknown or in an unexpected state.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Look up the per-card unlock key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save register state so it can be restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/*
	 * Unlock sequence: put the DMA engine in unlock mode, read the
	 * chip's secret seed, then clock the 13-byte card id through the
	 * signature LFSR one byte at a time.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the saved RAM/DMA configuration. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
1016
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable all four rings, ack everything */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Build and install the interrupt-enable mask. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		/* 795x needs its PLL programmed explicitly. */
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1088
1089 /*
1090 * The maximum number of sessions supported by the card
1091 * is dependent on the amount of context ram, which
1092 * encryption algorithms are enabled, and how compression
1093 * is configured. This should be configured before this
1094 * routine is called.
1095 */
1096 static void
1097 hifn_sessions(struct hifn_softc *sc)
1098 {
1099 u_int32_t pucnfg;
1100 int ctxsize;
1101
1102 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1103
1104 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1105 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1106 ctxsize = 128;
1107 else
1108 ctxsize = 512;
1109 /*
1110 * 7955/7956 has internal context memory of 32K
1111 */
1112 if (sc->sc_flags & HIFN_IS_7956)
1113 sc->sc_maxses = 32768 / ctxsize;
1114 else
1115 sc->sc_maxses = 1 +
1116 ((sc->sc_ramsize - 32768) / ctxsize);
1117 }
1118 else
1119 sc->sc_maxses = sc->sc_ramsize / 16384;
1120
1121 if (sc->sc_maxses > 2048)
1122 sc->sc_maxses = 2048;
1123 }
1124
1125 /*
1126 * Determine ram type (sram or dram). Board should be just out of a reset
1127 * state when this is called.
1128 */
1129 static int
1130 hifn_ramtype(struct hifn_softc *sc)
1131 {
1132 u_int8_t data[8], dataexpect[8];
1133 int i;
1134
1135 for (i = 0; i < sizeof(data); i++)
1136 data[i] = dataexpect[i] = 0x55;
1137 if (hifn_writeramaddr(sc, 0, data))
1138 return (-1);
1139 if (hifn_readramaddr(sc, 0, data))
1140 return (-1);
1141 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1142 sc->sc_drammodel = 1;
1143 return (0);
1144 }
1145
1146 for (i = 0; i < sizeof(data); i++)
1147 data[i] = dataexpect[i] = 0xaa;
1148 if (hifn_writeramaddr(sc, 0, data))
1149 return (-1);
1150 if (hifn_readramaddr(sc, 0, data))
1151 return (-1);
1152 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1153 sc->sc_drammodel = 1;
1154 return (0);
1155 }
1156
1157 return (0);
1158 }
1159
/*
 * SRAM probe parameters: scan up to 32MB of ram in 16KB steps,
 * i.e. 2048 test points.
 */
#define HIFN_SRAM_MAX		(32 << 20)
#define HIFN_SRAM_STEP_SIZE	16384
#define HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1163
1164 static int
1165 hifn_sramsize(struct hifn_softc *sc)
1166 {
1167 u_int32_t a;
1168 u_int8_t data[8];
1169 u_int8_t dataexpect[sizeof(data)];
1170 int32_t i;
1171
1172 for (i = 0; i < sizeof(data); i++)
1173 data[i] = dataexpect[i] = i ^ 0x5a;
1174
1175 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1176 a = i * HIFN_SRAM_STEP_SIZE;
1177 memcpy(data, &i, sizeof(i));
1178 hifn_writeramaddr(sc, a, data);
1179 }
1180
1181 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1182 a = i * HIFN_SRAM_STEP_SIZE;
1183 memcpy(dataexpect, &i, sizeof(i));
1184 if (hifn_readramaddr(sc, a, data) < 0)
1185 return (0);
1186 if (memcmp(data, dataexpect, sizeof(data)) != 0)
1187 return (0);
1188 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1189 }
1190
1191 return (0);
1192 }
1193
1194 /*
1195 * XXX For dram boards, one should really try all of the
1196 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1197 * is already set up correctly.
1198 */
1199 static int
1200 hifn_dramsize(struct hifn_softc *sc)
1201 {
1202 u_int32_t cnfg;
1203
1204 if (sc->sc_flags & HIFN_IS_7956) {
1205 /*
1206 * 7955/7956 have a fixed internal ram of only 32K.
1207 */
1208 sc->sc_ramsize = 32768;
1209 } else {
1210 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1211 HIFN_PUCNFG_DRAMMASK;
1212 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1213 }
1214 return (0);
1215 }
1216
/*
 * Reserve one descriptor in each of the four rings (command, source,
 * destination, result) and return the allocated indices through the
 * out parameters.  When a ring's next-free index has reached the end,
 * the terminal JUMP descriptor is re-armed first so the hardware wraps
 * back to slot 0.  The cleanup index ("k") is advanced along with the
 * allocation index ("i") so these slots are not reclaimed by the
 * interrupt handler.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* wrap: arm the jump descriptor back to cmdr[0] */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		/* wrap: arm the jump descriptor back to srcr[0] */
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		/* wrap: arm the jump descriptor back to dstr[0] */
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		/* wrap: arm the jump descriptor back to resr[0] */
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1263
1264 static int
1265 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1266 {
1267 struct hifn_dma *dma = sc->sc_dma;
1268 struct hifn_base_command wc;
1269 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1270 int r, cmdi, resi, srci, dsti;
1271
1272 wc.masks = htole16(3 << 13);
1273 wc.session_num = htole16(addr >> 14);
1274 wc.total_source_count = htole16(8);
1275 wc.total_dest_count = htole16(addr & 0x3fff);
1276
1277 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1278
1279 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1280 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1281 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1282
1283 /* build write command */
1284 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1285 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1286 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1287
1288 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1289 + offsetof(struct hifn_dma, test_src));
1290 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1291 + offsetof(struct hifn_dma, test_dst));
1292
1293 dma->cmdr[cmdi].l = htole32(16 | masks);
1294 dma->srcr[srci].l = htole32(8 | masks);
1295 dma->dstr[dsti].l = htole32(4 | masks);
1296 dma->resr[resi].l = htole32(4 | masks);
1297
1298 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1299 0, sc->sc_dmamap->dm_mapsize,
1300 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1301
1302 for (r = 10000; r >= 0; r--) {
1303 DELAY(10);
1304 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1305 0, sc->sc_dmamap->dm_mapsize,
1306 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1307 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1308 break;
1309 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1310 0, sc->sc_dmamap->dm_mapsize,
1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312 }
1313 if (r == 0) {
1314 printf("%s: writeramaddr -- "
1315 "result[%d](addr %d) still valid\n",
1316 device_xname(sc->sc_dv), resi, addr);
1317 r = -1;
1318 return (-1);
1319 } else
1320 r = 0;
1321
1322 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1323 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1324 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1325
1326 return (r);
1327 }
1328
1329 static int
1330 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1331 {
1332 struct hifn_dma *dma = sc->sc_dma;
1333 struct hifn_base_command rc;
1334 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1335 int r, cmdi, srci, dsti, resi;
1336
1337 rc.masks = htole16(2 << 13);
1338 rc.session_num = htole16(addr >> 14);
1339 rc.total_source_count = htole16(addr & 0x3fff);
1340 rc.total_dest_count = htole16(8);
1341
1342 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1343
1344 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1345 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1346 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1347
1348 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1349 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1350
1351 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1352 offsetof(struct hifn_dma, test_src));
1353 dma->test_src = 0;
1354 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1355 offsetof(struct hifn_dma, test_dst));
1356 dma->test_dst = 0;
1357 dma->cmdr[cmdi].l = htole32(8 | masks);
1358 dma->srcr[srci].l = htole32(8 | masks);
1359 dma->dstr[dsti].l = htole32(8 | masks);
1360 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1361
1362 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1363 0, sc->sc_dmamap->dm_mapsize,
1364 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1365
1366 for (r = 10000; r >= 0; r--) {
1367 DELAY(10);
1368 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1369 0, sc->sc_dmamap->dm_mapsize,
1370 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1371 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1372 break;
1373 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1374 0, sc->sc_dmamap->dm_mapsize,
1375 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1376 }
1377 if (r == 0) {
1378 printf("%s: readramaddr -- "
1379 "result[%d](addr %d) still valid\n",
1380 device_xname(sc->sc_dv), resi, addr);
1381 r = -1;
1382 } else {
1383 r = 0;
1384 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1385 }
1386
1387 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1388 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1389 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1390
1391 return (r);
1392 }
1393
1394 /*
1395 * Initialize the descriptor rings.
1396 */
1397 static void
1398 hifn_init_dma(struct hifn_softc *sc)
1399 {
1400 struct hifn_dma *dma = sc->sc_dma;
1401 int i;
1402
1403 hifn_set_retry(sc);
1404
1405 /* initialize static pointer values */
1406 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1407 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1408 offsetof(struct hifn_dma, command_bufs[i][0]));
1409 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1410 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1411 offsetof(struct hifn_dma, result_bufs[i][0]));
1412
1413 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1414 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1415 offsetof(struct hifn_dma, cmdr[0]));
1416 dma->srcr[HIFN_D_SRC_RSIZE].p =
1417 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1418 offsetof(struct hifn_dma, srcr[0]));
1419 dma->dstr[HIFN_D_DST_RSIZE].p =
1420 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1421 offsetof(struct hifn_dma, dstr[0]));
1422 dma->resr[HIFN_D_RES_RSIZE].p =
1423 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1424 offsetof(struct hifn_dma, resr[0]));
1425
1426 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1427 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1428 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1429 }
1430
/*
 * Writes out the raw command buffer space.  Returns the
 * command buffer size.
 *
 * Layout emitted: base command, then the optional compression, MAC
 * and crypt sub-commands (in that order), followed by any new MAC
 * key, crypt key and IV material the command carries.  If no engine
 * at all is selected, 8 bytes of zero padding are appended.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: the 16 low bits of the source/destination
	 * lengths go into their own fields, the high bits are packed
	 * into session_num alongside the session index.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		/*
		 * Destination length counts the 32-bit slop word in
		 * place of the trailing unaligned bytes.
		 */
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	if (using_comp) {
		/* Compression sub-command: length split the same way. */
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		/* MAC sub-command. */
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		/* Crypt sub-command. */
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key material, if the session supplies one. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key material; length depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 wants exactly 256 bytes of key state:
			 * repeat the user key to fill, then 4 bytes of
			 * zero padding.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV; AES uses a 16-byte IV, the others 8 bytes. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/* No engine selected at all: pad with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1564
1565 static int
1566 hifn_dmamap_aligned(bus_dmamap_t map)
1567 {
1568 int i;
1569
1570 for (i = 0; i < map->dm_nsegs; i++) {
1571 if (map->dm_segs[i].ds_addr & 3)
1572 return (0);
1573 if ((i != (map->dm_nsegs - 1)) &&
1574 (map->dm_segs[i].ds_len & 3))
1575 return (0);
1576 }
1577 return (1);
1578 }
1579
/*
 * Fill destination-ring descriptors for cmd->dst_map, starting at
 * dma->dsti.  All segments but the last are written directly; the
 * final descriptor either covers the last segment (no slop) or points
 * at the per-command 32-bit slop word that absorbs the unaligned tail
 * bytes.  Updates dma->dsti/dstu and returns the next free index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All but the last segment: plain VALID descriptors. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* re-arm the terminal jump descriptor and wrap */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* aligned transfer: last descriptor is the last segment */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* unaligned tail: final descriptor targets the slop word */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* aligned head of the last segment, if any */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				/* re-arm the jump descriptor and wrap */
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* write the final (LAST-flagged) descriptor computed above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1651
/*
 * Fill source-ring descriptors for cmd->src_map, starting at
 * dma->srci.  The final segment gets the LAST flag.  Updates
 * dma->srci/srcu and returns the next free index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* re-arm the terminal jump descriptor and wrap */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1683
/*
 * Submit one crypto operation to the card: map the source data, set
 * up (or allocate) a destination mapping, write the command and the
 * source/destination/result descriptors, and kick the DMA engines.
 * Returns 0 on success or an errno; on error all maps/mbufs allocated
 * here are released via the goto cleanup chain at the bottom.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source: either an mbuf chain or a uio. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/*
		 * Source is DMA-aligned: operate in place, with any
		 * unaligned tail (0-3 bytes) handled via the slop word.
		 */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/*
		 * Unaligned source: we cannot fix up a uio, but for an
		 * mbuf chain we build a fresh (aligned) chain of the
		 * same total length to receive the output.
		 */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* grow the chain until it covers totlen bytes */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination map needed (not operating in place). */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Allocate a command slot, wrapping the ring if necessary. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* remember which result slot owns the slop word */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* arm the watchdog in hifn_tick() */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1950
1951 static void
1952 hifn_tick(void *vsc)
1953 {
1954 struct hifn_softc *sc = vsc;
1955
1956 mutex_spin_enter(&sc->sc_mtx);
1957 if (sc->sc_active == 0) {
1958 struct hifn_dma *dma = sc->sc_dma;
1959 u_int32_t r = 0;
1960
1961 if (dma->cmdu == 0 && sc->sc_c_busy) {
1962 sc->sc_c_busy = 0;
1963 r |= HIFN_DMACSR_C_CTRL_DIS;
1964 CLR_LED(sc, HIFN_MIPSRST_LED0);
1965 }
1966 if (dma->srcu == 0 && sc->sc_s_busy) {
1967 sc->sc_s_busy = 0;
1968 r |= HIFN_DMACSR_S_CTRL_DIS;
1969 CLR_LED(sc, HIFN_MIPSRST_LED1);
1970 }
1971 if (dma->dstu == 0 && sc->sc_d_busy) {
1972 sc->sc_d_busy = 0;
1973 r |= HIFN_DMACSR_D_CTRL_DIS;
1974 }
1975 if (dma->resu == 0 && sc->sc_r_busy) {
1976 sc->sc_r_busy = 0;
1977 r |= HIFN_DMACSR_R_CTRL_DIS;
1978 CLR_LED(sc, HIFN_MIPSRST_LED2);
1979 }
1980 if (r)
1981 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1982 }
1983 else
1984 sc->sc_active--;
1985 #ifdef __OpenBSD__
1986 timeout_add(&sc->sc_tickto, hz);
1987 #else
1988 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1989 #endif
1990 mutex_spin_exit(&sc->sc_mtx);
1991 }
1992
/*
 * Interrupt handler: acknowledge and dispatch DMA-engine events, then
 * reap completed descriptors from the result, source and command rings.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 *
 * Note the ring-walk convention: each ring has RSIZE+1 slots, the
 * extra one being the terminal JUMP descriptor, so the cleanup index
 * wraps at RSIZE+1 and the jump slot neither carries a command nor
 * counts against the in-use ("u") counters.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    device_xname(sc->sc_dv),
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* ack only the bits we were watching */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* processing-unit interrupt: ack by writing PUISR back */
	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	/* public-key unit done: ack its status bit */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	/* any engine abort forces a full reset of the card */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by hardware; stop here */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			/* a real result slot (not the jump descriptor) */
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* wrap past the jump slot; it doesn't count as in-use */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2128
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 *
 * At most one MAC, one cipher, and one compression algorithm may be
 * requested per session, and compression may not be chained with the
 * MAC/crypt engines.  Returns 0 on success, EINVAL on an unsupported
 * combination, ENOMEM when all session slots are in use.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return retval;

	mutex_spin_enter(&sc->sc_mtx);

	/* find a free session slot */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses) {
		retval = ENOMEM;
		goto out;
	}

	/* validate the requested algorithm chain */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac) {
				/* only one MAC per session */
				goto out;
			}
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			cprng_fast(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry) {
				/* only one cipher per session */
				goto out;
			}
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp) {
				/* only one compressor per session */
				goto out;
			}
			comp = 1;
			break;
#endif
		default:
			goto out;
		}
	}
	if (mac == 0 && cry == 0 && comp == 0) {
		goto out;
	}

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry)) {
		goto out;
	}

	*sidp = HIFN_SID(device_unit(sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	retval = 0;
out:
	mutex_spin_exit(&sc->sc_mtx);
	return retval;
}
2224
2225 /*
2226 * Deallocate a session.
2227 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2228 * XXX to blow away any keys already stored there.
2229 */
2230 static int
2231 hifn_freesession(void *arg, u_int64_t tid)
2232 {
2233 struct hifn_softc *sc = arg;
2234 int session;
2235 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2236
2237 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2238 if (sc == NULL)
2239 return (EINVAL);
2240
2241 mutex_spin_enter(&sc->sc_mtx);
2242 session = HIFN_SESSION(sid);
2243 if (session >= sc->sc_maxses) {
2244 mutex_spin_exit(&sc->sc_mtx);
2245 return (EINVAL);
2246 }
2247
2248 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2249 mutex_spin_exit(&sc->sc_mtx);
2250 return (0);
2251 }
2252
2253 static int
2254 hifn_process(void *arg, struct cryptop *crp, int hint)
2255 {
2256 struct hifn_softc *sc = arg;
2257 struct hifn_command *cmd = NULL;
2258 int session, err, ivlen;
2259 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2260
2261 if (crp == NULL || crp->crp_callback == NULL) {
2262 hifnstats.hst_invalid++;
2263 return (EINVAL);
2264 }
2265
2266 mutex_spin_enter(&sc->sc_mtx);
2267 session = HIFN_SESSION(crp->crp_sid);
2268
2269 if (sc == NULL || session >= sc->sc_maxses) {
2270 err = EINVAL;
2271 goto errout;
2272 }
2273
2274 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2275 M_DEVBUF, M_NOWAIT|M_ZERO);
2276 if (cmd == NULL) {
2277 hifnstats.hst_nomem++;
2278 err = ENOMEM;
2279 goto errout;
2280 }
2281
2282 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2283 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2284 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2285 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2286 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2287 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2288 } else {
2289 err = EINVAL;
2290 goto errout; /* XXX we don't handle contiguous buffers! */
2291 }
2292
2293 crd1 = crp->crp_desc;
2294 if (crd1 == NULL) {
2295 err = EINVAL;
2296 goto errout;
2297 }
2298 crd2 = crd1->crd_next;
2299
2300 if (crd2 == NULL) {
2301 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2302 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2303 crd1->crd_alg == CRYPTO_SHA1 ||
2304 crd1->crd_alg == CRYPTO_MD5) {
2305 maccrd = crd1;
2306 enccrd = NULL;
2307 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2308 crd1->crd_alg == CRYPTO_3DES_CBC ||
2309 crd1->crd_alg == CRYPTO_AES_CBC ||
2310 crd1->crd_alg == CRYPTO_ARC4) {
2311 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2312 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2313 maccrd = NULL;
2314 enccrd = crd1;
2315 #ifdef HAVE_CRYPTO_LZS
2316 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2317 return (hifn_compression(sc, crp, cmd));
2318 #endif
2319 } else {
2320 err = EINVAL;
2321 goto errout;
2322 }
2323 } else {
2324 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2325 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2326 crd1->crd_alg == CRYPTO_MD5 ||
2327 crd1->crd_alg == CRYPTO_SHA1) &&
2328 (crd2->crd_alg == CRYPTO_DES_CBC ||
2329 crd2->crd_alg == CRYPTO_3DES_CBC ||
2330 crd2->crd_alg == CRYPTO_AES_CBC ||
2331 crd2->crd_alg == CRYPTO_ARC4) &&
2332 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2333 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2334 maccrd = crd1;
2335 enccrd = crd2;
2336 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2337 crd1->crd_alg == CRYPTO_ARC4 ||
2338 crd1->crd_alg == CRYPTO_3DES_CBC ||
2339 crd1->crd_alg == CRYPTO_AES_CBC) &&
2340 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
2341 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2342 crd2->crd_alg == CRYPTO_MD5 ||
2343 crd2->crd_alg == CRYPTO_SHA1) &&
2344 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2345 enccrd = crd1;
2346 maccrd = crd2;
2347 } else {
2348 /*
2349 * We cannot order the 7751 as requested
2350 */
2351 err = EINVAL;
2352 goto errout;
2353 }
2354 }
2355
2356 if (enccrd) {
2357 cmd->enccrd = enccrd;
2358 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2359 switch (enccrd->crd_alg) {
2360 case CRYPTO_ARC4:
2361 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2362 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2363 != sc->sc_sessions[session].hs_prev_op)
2364 sc->sc_sessions[session].hs_state =
2365 HS_STATE_USED;
2366 break;
2367 case CRYPTO_DES_CBC:
2368 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2369 HIFN_CRYPT_CMD_MODE_CBC |
2370 HIFN_CRYPT_CMD_NEW_IV;
2371 break;
2372 case CRYPTO_3DES_CBC:
2373 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2374 HIFN_CRYPT_CMD_MODE_CBC |
2375 HIFN_CRYPT_CMD_NEW_IV;
2376 break;
2377 case CRYPTO_AES_CBC:
2378 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2379 HIFN_CRYPT_CMD_MODE_CBC |
2380 HIFN_CRYPT_CMD_NEW_IV;
2381 break;
2382 default:
2383 err = EINVAL;
2384 goto errout;
2385 }
2386 if (enccrd->crd_alg != CRYPTO_ARC4) {
2387 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2388 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2389 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2390 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2391 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2392 else
2393 bcopy(sc->sc_sessions[session].hs_iv,
2394 cmd->iv, ivlen);
2395
2396 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2397 == 0) {
2398 if (crp->crp_flags & CRYPTO_F_IMBUF)
2399 m_copyback(cmd->srcu.src_m,
2400 enccrd->crd_inject,
2401 ivlen, cmd->iv);
2402 else if (crp->crp_flags & CRYPTO_F_IOV)
2403 cuio_copyback(cmd->srcu.src_io,
2404 enccrd->crd_inject,
2405 ivlen, cmd->iv);
2406 }
2407 } else {
2408 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2409 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2410 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2411 m_copydata(cmd->srcu.src_m,
2412 enccrd->crd_inject, ivlen, cmd->iv);
2413 else if (crp->crp_flags & CRYPTO_F_IOV)
2414 cuio_copydata(cmd->srcu.src_io,
2415 enccrd->crd_inject, ivlen, cmd->iv);
2416 }
2417 }
2418
2419 cmd->ck = enccrd->crd_key;
2420 cmd->cklen = enccrd->crd_klen >> 3;
2421
2422 /*
2423 * Need to specify the size for the AES key in the masks.
2424 */
2425 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2426 HIFN_CRYPT_CMD_ALG_AES) {
2427 switch (cmd->cklen) {
2428 case 16:
2429 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2430 break;
2431 case 24:
2432 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2433 break;
2434 case 32:
2435 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2436 break;
2437 default:
2438 err = EINVAL;
2439 goto errout;
2440 }
2441 }
2442
2443 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2444 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2445 }
2446
2447 if (maccrd) {
2448 cmd->maccrd = maccrd;
2449 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2450
2451 switch (maccrd->crd_alg) {
2452 case CRYPTO_MD5:
2453 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2454 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2455 HIFN_MAC_CMD_POS_IPSEC;
2456 break;
2457 case CRYPTO_MD5_HMAC_96:
2458 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2459 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2460 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2461 break;
2462 case CRYPTO_SHA1:
2463 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2464 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2465 HIFN_MAC_CMD_POS_IPSEC;
2466 break;
2467 case CRYPTO_SHA1_HMAC_96:
2468 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2469 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2470 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2471 break;
2472 }
2473
2474 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2475 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
2476 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2477 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2478 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
2479 memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
2480 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2481 }
2482 }
2483
2484 cmd->crp = crp;
2485 cmd->session_num = session;
2486 cmd->softc = sc;
2487
2488 err = hifn_crypto(sc, cmd, crp, hint);
2489 if (err == 0) {
2490 if (enccrd)
2491 sc->sc_sessions[session].hs_prev_op =
2492 enccrd->crd_flags & CRD_F_ENCRYPT;
2493 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2494 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2495 mutex_spin_exit(&sc->sc_mtx);
2496 return 0;
2497 } else if (err == ERESTART) {
2498 /*
2499 * There weren't enough resources to dispatch the request
2500 * to the part. Notify the caller so they'll requeue this
2501 * request and resubmit it again soon.
2502 */
2503 #ifdef HIFN_DEBUG
2504 if (hifn_debug)
2505 printf("%s: requeue request\n", device_xname(sc->sc_dv));
2506 #endif
2507 free(cmd, M_DEVBUF);
2508 sc->sc_needwakeup |= CRYPTO_SYMQ;
2509 mutex_spin_exit(&sc->sc_mtx);
2510 return (err);
2511 }
2512
2513 errout:
2514 if (cmd != NULL)
2515 free(cmd, M_DEVBUF);
2516 if (err == EINVAL)
2517 hifnstats.hst_invalid++;
2518 else
2519 hifnstats.hst_nomem++;
2520 crp->crp_etype = err;
2521 mutex_spin_exit(&sc->sc_mtx);
2522 crypto_done(crp);
2523 return (0);
2524 }
2525
/*
 * Abort all outstanding requests after a DMA fault: complete any
 * result-ring entries the chip already finished, fail the rest, force
 * session keys to be re-uploaded, and reset/reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the in-use portion of the result ring. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/*
			 * Descriptor still owned by the chip: sync and
			 * tear down the DMA maps, then fail the request.
			 */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* Distinct dst chain: hand it back to the caller
			 * and release the source chain. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN requests would be resubmitted after the
			 * reset (nothing sets EAGAIN today, see XXX above). */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2599
/*
 * Completion handler for symmetric crypto requests: syncs and tears
 * down the DMA maps, fixes up the output mbuf chain, copies back any
 * "slop" bytes, reaps finished dst-ring descriptors, saves the next
 * IV for encryption, copies out the MAC result, and finishes the
 * request with crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * Output landed in a freshly allocated chain:
			 * trim it to the source length, hand it to the
			 * caller and release the source chain.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy the tail bytes that were DMA'd through the
		 * per-request slop area back into the buffer. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Reap dst-ring descriptors the chip has released. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip; stop here. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, save the last cipher block as the IV for
	 * the next request on this session.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* The MAC digest follows the base (and optional comp)
		 * result structures in the result buffer. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2733
2734 #ifdef HAVE_CRYPTO_LZS
2735
2736 static int
2737 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2738 struct hifn_command *cmd)
2739 {
2740 struct cryptodesc *crd = crp->crp_desc;
2741 int s, err = 0;
2742
2743 cmd->compcrd = crd;
2744 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2745
2746 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2747 /*
2748 * XXX can only handle mbufs right now since we can
2749 * XXX dynamically resize them.
2750 */
2751 err = EINVAL;
2752 return (ENOMEM);
2753 }
2754
2755 if ((crd->crd_flags & CRD_F_COMP) == 0)
2756 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2757 if (crd->crd_alg == CRYPTO_LZS_COMP)
2758 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2759 HIFN_COMP_CMD_CLEARHIST;
2760
2761 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2762 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2763 err = ENOMEM;
2764 goto fail;
2765 }
2766
2767 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2768 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2769 err = ENOMEM;
2770 goto fail;
2771 }
2772
2773 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2774 int len;
2775
2776 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2777 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2778 err = ENOMEM;
2779 goto fail;
2780 }
2781
2782 len = cmd->src_map->dm_mapsize / MCLBYTES;
2783 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2784 len++;
2785 len *= MCLBYTES;
2786
2787 if ((crd->crd_flags & CRD_F_COMP) == 0)
2788 len *= 4;
2789
2790 if (len > HIFN_MAX_DMALEN)
2791 len = HIFN_MAX_DMALEN;
2792
2793 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2794 if (cmd->dstu.dst_m == NULL) {
2795 err = ENOMEM;
2796 goto fail;
2797 }
2798
2799 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2800 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2801 err = ENOMEM;
2802 goto fail;
2803 }
2804 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2805 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2806 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2807 err = ENOMEM;
2808 goto fail;
2809 }
2810 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2811 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2812 err = ENOMEM;
2813 goto fail;
2814 }
2815 }
2816
2817 if (cmd->src_map == cmd->dst_map)
2818 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2819 0, cmd->src_map->dm_mapsize,
2820 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2821 else {
2822 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2823 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2824 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2825 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2826 }
2827
2828 cmd->crp = crp;
2829 /*
2830 * Always use session 0. The modes of compression we use are
2831 * stateless and there is always at least one compression
2832 * context, zero.
2833 */
2834 cmd->session_num = 0;
2835 cmd->softc = sc;
2836
2837 err = hifn_compress_enter(sc, cmd);
2838
2839 if (err != 0)
2840 goto fail;
2841 return (0);
2842
2843 fail:
2844 if (cmd->dst_map != NULL) {
2845 if (cmd->dst_map->dm_nsegs > 0)
2846 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2847 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2848 }
2849 if (cmd->src_map != NULL) {
2850 if (cmd->src_map->dm_nsegs > 0)
2851 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2852 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2853 }
2854 free(cmd, M_DEVBUF);
2855 if (err == EINVAL)
2856 hifnstats.hst_invalid++;
2857 else
2858 hifnstats.hst_nomem++;
2859 crp->crp_etype = err;
2860 crypto_done(crp);
2861 return (0);
2862 }
2863
2864 static int
2865 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2866 {
2867 struct hifn_dma *dma = sc->sc_dma;
2868 int cmdi, resi;
2869 u_int32_t cmdlen;
2870
2871 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2872 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2873 return (ENOMEM);
2874
2875 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2876 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2877 return (ENOMEM);
2878
2879 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2880 dma->cmdi = 0;
2881 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2882 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2883 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2884 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2885 }
2886 cmdi = dma->cmdi++;
2887 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2888 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2889
2890 /* .p for command/result already set */
2891 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2892 HIFN_D_MASKDONEIRQ);
2893 HIFN_CMDR_SYNC(sc, cmdi,
2894 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2895 dma->cmdu++;
2896 if (sc->sc_c_busy == 0) {
2897 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2898 sc->sc_c_busy = 1;
2899 SET_LED(sc, HIFN_MIPSRST_LED0);
2900 }
2901
2902 /*
2903 * We don't worry about missing an interrupt (which a "command wait"
2904 * interrupt salvages us from), unless there is more than one command
2905 * in the queue.
2906 */
2907 if (dma->cmdu > 1) {
2908 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2909 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2910 }
2911
2912 hifnstats.hst_ipackets++;
2913 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2914
2915 hifn_dmamap_load_src(sc, cmd);
2916 if (sc->sc_s_busy == 0) {
2917 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2918 sc->sc_s_busy = 1;
2919 SET_LED(sc, HIFN_MIPSRST_LED1);
2920 }
2921
2922 /*
2923 * Unlike other descriptors, we don't mask done interrupt from
2924 * result descriptor.
2925 */
2926 if (dma->resi == HIFN_D_RES_RSIZE) {
2927 dma->resi = 0;
2928 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2929 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2930 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2931 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2932 }
2933 resi = dma->resi++;
2934 dma->hifn_commands[resi] = cmd;
2935 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2936 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2937 HIFN_D_VALID | HIFN_D_LAST);
2938 HIFN_RESR_SYNC(sc, resi,
2939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2940 dma->resu++;
2941 if (sc->sc_r_busy == 0) {
2942 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2943 sc->sc_r_busy = 1;
2944 SET_LED(sc, HIFN_MIPSRST_LED2);
2945 }
2946
2947 if (cmd->sloplen)
2948 cmd->slopidx = resi;
2949
2950 hifn_dmamap_load_dst(sc, cmd);
2951
2952 if (sc->sc_d_busy == 0) {
2953 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2954 sc->sc_d_busy = 1;
2955 }
2956 sc->sc_active = 5;
2957 cmd->cmd_callback = hifn_callback_comp;
2958 return (0);
2959 }
2960
2961 static void
2962 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2963 u_int8_t *resbuf)
2964 {
2965 struct hifn_base_result baseres;
2966 struct cryptop *crp = cmd->crp;
2967 struct hifn_dma *dma = sc->sc_dma;
2968 struct mbuf *m;
2969 int err = 0, i, u;
2970 u_int32_t olen;
2971 bus_size_t dstsize;
2972
2973 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2974 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2975 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2976 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2977
2978 dstsize = cmd->dst_map->dm_mapsize;
2979 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2980
2981 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2982
2983 i = dma->dstk; u = dma->dstu;
2984 while (u != 0) {
2985 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2986 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2987 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2988 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2989 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2990 offsetof(struct hifn_dma, dstr[i]),
2991 sizeof(struct hifn_desc),
2992 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2993 break;
2994 }
2995 if (++i == (HIFN_D_DST_RSIZE + 1))
2996 i = 0;
2997 else
2998 u--;
2999 }
3000 dma->dstk = i; dma->dstu = u;
3001
3002 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
3003 bus_size_t xlen;
3004
3005 xlen = dstsize;
3006
3007 m_freem(cmd->dstu.dst_m);
3008
3009 if (xlen == HIFN_MAX_DMALEN) {
3010 /* We've done all we can. */
3011 err = E2BIG;
3012 goto out;
3013 }
3014
3015 xlen += MCLBYTES;
3016
3017 if (xlen > HIFN_MAX_DMALEN)
3018 xlen = HIFN_MAX_DMALEN;
3019
3020 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
3021 cmd->srcu.src_m);
3022 if (cmd->dstu.dst_m == NULL) {
3023 err = ENOMEM;
3024 goto out;
3025 }
3026 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
3027 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
3028 err = ENOMEM;
3029 goto out;
3030 }
3031
3032 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3033 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3034 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3035 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
3036
3037 err = hifn_compress_enter(sc, cmd);
3038 if (err != 0)
3039 goto out;
3040 return;
3041 }
3042
3043 olen = dstsize - (letoh16(baseres.dst_cnt) |
3044 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
3045 HIFN_BASE_RES_DSTLEN_S) << 16));
3046
3047 crp->crp_olen = olen - cmd->compcrd->crd_skip;
3048
3049 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3050 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3051 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3052
3053 m = cmd->dstu.dst_m;
3054 if (m->m_flags & M_PKTHDR)
3055 m->m_pkthdr.len = olen;
3056 crp->crp_buf = (void *)m;
3057 for (; m != NULL; m = m->m_next) {
3058 if (olen >= m->m_len)
3059 olen -= m->m_len;
3060 else {
3061 m->m_len = olen;
3062 olen = 0;
3063 }
3064 }
3065
3066 m_freem(cmd->srcu.src_m);
3067 free(cmd, M_DEVBUF);
3068 crp->crp_etype = 0;
3069 crypto_done(crp);
3070 return;
3071
3072 out:
3073 if (cmd->dst_map != NULL) {
3074 if (cmd->src_map->dm_nsegs != 0)
3075 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3076 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3077 }
3078 if (cmd->src_map != NULL) {
3079 if (cmd->src_map->dm_nsegs != 0)
3080 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3081 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3082 }
3083 if (cmd->dstu.dst_m != NULL)
3084 m_freem(cmd->dstu.dst_m);
3085 free(cmd, M_DEVBUF);
3086 crp->crp_etype = err;
3087 crypto_done(crp);
3088 }
3089
3090 static struct mbuf *
3091 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3092 {
3093 int len;
3094 struct mbuf *m, *m0, *mlast;
3095
3096 if (mtemplate->m_flags & M_PKTHDR) {
3097 len = MHLEN;
3098 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3099 } else {
3100 len = MLEN;
3101 MGET(m0, M_DONTWAIT, MT_DATA);
3102 }
3103 if (m0 == NULL)
3104 return (NULL);
3105 if (len == MHLEN)
3106 M_DUP_PKTHDR(m0, mtemplate);
3107 MCLGET(m0, M_DONTWAIT);
3108 if (!(m0->m_flags & M_EXT))
3109 m_freem(m0);
3110 len = MCLBYTES;
3111
3112 totlen -= len;
3113 m0->m_pkthdr.len = m0->m_len = len;
3114 mlast = m0;
3115
3116 while (totlen > 0) {
3117 MGET(m, M_DONTWAIT, MT_DATA);
3118 if (m == NULL) {
3119 m_freem(m0);
3120 return (NULL);
3121 }
3122 MCLGET(m, M_DONTWAIT);
3123 if (!(m->m_flags & M_EXT)) {
3124 m_freem(m0);
3125 return (NULL);
3126 }
3127 len = MCLBYTES;
3128 m->m_len = len;
3129 if (m0->m_flags & M_PKTHDR)
3130 m0->m_pkthdr.len += len;
3131 totlen -= len;
3132
3133 mlast->m_next = m;
3134 mlast = m;
3135 }
3136
3137 return (m0);
3138 }
3139 #endif /* HAVE_CRYPTO_LZS */
3140
3141 static void
3142 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3143 {
3144 /*
3145 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3146 * and Group 1 registers; avoid conditions that could create
3147 * burst writes by doing a read in between the writes.
3148 */
3149 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3150 if (sc->sc_waw_lastgroup == reggrp &&
3151 sc->sc_waw_lastreg == reg - 4) {
3152 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3153 }
3154 sc->sc_waw_lastgroup = reggrp;
3155 sc->sc_waw_lastreg = reg;
3156 }
3157 if (reggrp == 0)
3158 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3159 else
3160 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3161
3162 }
3163
3164 static u_int32_t
3165 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3166 {
3167 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3168 sc->sc_waw_lastgroup = -1;
3169 sc->sc_waw_lastreg = 1;
3170 }
3171 if (reggrp == 0)
3172 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3173 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3174 }
3175