/*	$NetBSD: hifn7751.c,v 1.52.2.1 2014/05/18 17:45:39 rmind Exp $	*/
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.52.2.1 2014/05/18 17:45:39 rmind Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #ifdef __OpenBSD__
65 #include <crypto/crypto.h>
66 #include <dev/rndvar.h>
67 #else
68 #include <opencrypto/cryptodev.h>
69 #include <sys/cprng.h>
70 #include <sys/rnd.h>
71 #include <sys/sha1.h>
72 #endif
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77
78 #include <dev/pci/hifn7751reg.h>
79 #include <dev/pci/hifn7751var.h>
80
81 #undef HIFN_DEBUG
82
83 #ifdef __NetBSD__
84 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
85 #endif
86
87 #ifdef HIFN_DEBUG
88 extern int hifn_debug; /* patchable */
89 int hifn_debug = 1;
90 #endif
91
92 #ifdef __OpenBSD__
93 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
94 #endif
95
96 /*
97 * Prototypes and count for the pci_device structure
98 */
99 #ifdef __OpenBSD__
100 static int hifn_probe((struct device *, void *, void *);
101 #else
102 static int hifn_probe(device_t, cfdata_t, void *);
103 #endif
104 static void hifn_attach(device_t, device_t, void *);
105 #ifdef __NetBSD__
106 static int hifn_detach(device_t, int);
107
108 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
109 hifn_probe, hifn_attach, hifn_detach, NULL);
110 #else
111 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
112 hifn_probe, hifn_attach, NULL, NULL);
113 #endif
114
115 #ifdef __OpenBSD__
116 struct cfdriver hifn_cd = {
117 0, "hifn", DV_DULL
118 };
119 #endif
120
121 static void hifn_reset_board(struct hifn_softc *, int);
122 static void hifn_reset_puc(struct hifn_softc *);
123 static void hifn_puc_wait(struct hifn_softc *);
124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
125 static void hifn_set_retry(struct hifn_softc *);
126 static void hifn_init_dma(struct hifn_softc *);
127 static void hifn_init_pci_registers(struct hifn_softc *);
128 static int hifn_sramsize(struct hifn_softc *);
129 static int hifn_dramsize(struct hifn_softc *);
130 static int hifn_ramtype(struct hifn_softc *);
131 static void hifn_sessions(struct hifn_softc *);
132 static int hifn_intr(void *);
133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
136 static int hifn_freesession(void*, u_int64_t);
137 static int hifn_process(void*, struct cryptop *, int);
138 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
139 u_int8_t *);
140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 struct cryptop*, int);
142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 static int hifn_dmamap_aligned(bus_dmamap_t);
145 static int hifn_dmamap_load_src(struct hifn_softc *,
146 struct hifn_command *);
147 static int hifn_dmamap_load_dst(struct hifn_softc *,
148 struct hifn_command *);
149 static int hifn_init_pubrng(struct hifn_softc *);
150 static void hifn_rng(void *);
151 static void hifn_rng_locked(void *);
152 static void hifn_tick(void *);
153 static void hifn_abort(struct hifn_softc *);
154 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
155 int *);
156 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
157 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
158 #ifdef HAVE_CRYPTO_LZS
159 static int hifn_compression(struct hifn_softc *, struct cryptop *,
160 struct hifn_command *);
161 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
162 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
163 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
164 u_int8_t *);
165 #endif /* HAVE_CRYPTO_LZS */
166
/* Driver-wide statistics counters; a single instance shared by all units. */
struct hifn_stats hifnstats;
168
/*
 * Table of supported chips: PCI vendor/product IDs, per-chip feature
 * flags (HIFN_IS_*, HIFN_HAS_*), and a human-readable name.  The table
 * is terminated by an entry with a NULL hifn_name.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
215
216 static const struct hifn_product *
217 hifn_lookup(const struct pci_attach_args *pa)
218 {
219 const struct hifn_product *hp;
220
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
224 return (hp);
225 }
226 return (NULL);
227 }
228
229 static int
230 hifn_probe(device_t parent, cfdata_t match, void *aux)
231 {
232 struct pci_attach_args *pa = aux;
233
234 if (hifn_lookup(pa) != NULL)
235 return 1;
236
237 return 0;
238 }
239
/*
 * Attach: map both register windows, allocate and load the shared DMA
 * descriptor area, unlock the crypto engine, size the on-board RAM,
 * hook the interrupt, and register our algorithms with opencrypto.
 * On failure each stage unwinds through the goto-cleanup labels below.
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
#ifdef __NetBSD__
	/* NetBSD keeps the mapped sizes in the softc so detach can unmap. */
#define iosize0 sc->sc_iosz0
#define iosize1 sc->sc_iosz1
#else
	bus_size_t iosize0, iosize1;
#endif
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;
	char intrbuf[PCI_INTRSTR_LEN];

	/* probe already matched, so a miss here is a programming error */
	hp = hifn_lookup(pa);
	if (hp == NULL) {
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering for descriptor DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* Map register window 0 (BAR0). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	/* Map register window 1 (BAR1). */
	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* 7811 cannot take back-to-back writes to one register group. */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map, and load one page-aligned chunk holding all
	 * four descriptor rings (struct hifn_dma) in a single segment.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	/* Unlock the crypto engine; returns a capability string. */
	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	/* Wire up the interrupt handler. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* rseg is reused here as a scratch for the human-readable RAM size. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	/* Register our algorithms with the opencrypto framework. */
	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* ENA_2 (strong crypto) implies everything ENA_1 offers. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): hifn_init_pubrng() ends by calling hifn_rng(),
	 * which takes sc_mtx, yet sc_mtx is only initialized below —
	 * looks like a use-before-init ordering problem; confirm.
	 */
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
		sc->sc_rng_need = RND_POOLBITS / NBBY;
	}

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
468
469 #ifdef __NetBSD__
/*
 * Detach: quiesce the hardware, tear down the interrupt, unregister
 * from opencrypto and the entropy subsystem, stop the callouts, and
 * unmap both register windows.  Order roughly mirrors attach reversed.
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	/* Fail all queued requests and silence the chip. */
	hifn_abort(sc);

	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	/*
	 * NOTE(review): called unconditionally, but attach only does
	 * rnd_attach_source() for HIFN_HAS_RNG parts — confirm that
	 * detaching a never-attached source is harmless.
	 */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Halt the callouts; sc_rngto only exists for pubrng-capable parts. */
	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}
501
502 MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");
503
504 #ifdef _MODULE
505 #include "ioconf.c"
506 #endif
507
508 static int
509 hifn_modcmd(modcmd_t cmd, void *data)
510 {
511 int error = 0;
512
513 switch(cmd) {
514 case MODULE_CMD_INIT:
515 #ifdef _MODULE
516 error = config_init_component(cfdriver_ioconf_hifn,
517 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
518 #endif
519 return error;
520 case MODULE_CMD_FINI:
521 #ifdef _MODULE
522 error = config_fini_component(cfdriver_ioconf_hifn,
523 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
524 #endif
525 return error;
526 default:
527 return ENOTTY;
528 }
529 }
530
531 #endif /* ifdef __NetBSD__ */
532
533 static void
534 hifn_rng_get(size_t bytes, void *priv)
535 {
536 struct hifn_softc *sc = priv;
537
538 mutex_enter(&sc->sc_mtx);
539 sc->sc_rng_need = bytes;
540
541 hifn_rng_locked(sc);
542 mutex_exit(&sc->sc_mtx);
543 }
544
/*
 * Bring up the public-key unit and the RNG (where present): reset the
 * PK engine, enable the RNG, attach an entropy source, and prime the
 * pool with one direct RNG read.  Returns 0 on success, 1 if the PK
 * engine failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100 times, 1ms apart, for the bit to clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4ms, not the 0.4s the
		 * comment above calls for — confirm the intended value.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG,
		    RND_FLAG_NO_ESTIMATE|RND_FLAG_HASCB);
#endif

		/* Sample roughly 100 times a second (minimum once a tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
637
638 static void
639 hifn_rng_locked(void *vsc)
640 {
641 struct hifn_softc *sc = vsc;
642 #ifdef __NetBSD__
643 uint32_t num[64];
644 #else
645 uint32_t num[2];
646 #endif
647 uint32_t sts;
648 int i;
649 size_t got, gotent;
650
651 if (sc->sc_rng_need < 1) {
652 callout_stop(&sc->sc_rngto);
653 return;
654 }
655
656 if (sc->sc_flags & HIFN_IS_7811) {
657 for (i = 0; i < 5; i++) { /* XXX why 5? */
658 sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
659 if (sts & HIFN_7811_RNGSTS_UFL) {
660 printf("%s: RNG underflow: disabling\n",
661 device_xname(sc->sc_dv));
662 return;
663 }
664 if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
665 break;
666
667 /*
668 * There are at least two words in the RNG FIFO
669 * at this point.
670 */
671 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
672 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
673 got = 2 * sizeof(num[0]);
674 gotent = (got * NBBY) / HIFN_RNG_BITSPER;
675
676 #ifdef __NetBSD__
677 rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
678 sc->sc_rng_need -= gotent;
679 #else
680 /*
681 * XXX This is a really bad idea.
682 * XXX Hifn estimate as little as 0.06
683 * XXX actual bits of entropy per output
684 * XXX register bit. How can we tell the
685 * XXX kernel RNG subsystem we're handing
686 * XXX it 64 "true" random bits, for any
687 * XXX sane value of "true"?
688 * XXX
689 * XXX The right thing to do here, if we
690 * XXX cannot supply an estimate ourselves,
691 * XXX would be to hash the bits locally.
692 */
693 add_true_randomness(num[0]);
694 add_true_randomness(num[1]);
695 #endif
696
697 }
698 } else {
699 int nwords = 0;
700
701 if (sc->sc_rng_need) {
702 nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
703 }
704
705 if (nwords < 2) {
706 nwords = 2;
707 }
708
709 /*
710 * We must be *extremely* careful here. The Hifn
711 * 795x differ from the published 6500 RNG design
712 * in more ways than the obvious lack of the output
713 * FIFO and LFSR control registers. In fact, there
714 * is only one LFSR, instead of the 6500's two, and
715 * it's 32 bits, not 31.
716 *
717 * Further, a block diagram obtained from Hifn shows
718 * a very curious latching of this register: the LFSR
719 * rotates at a frequency of RNG_Clk / 8, but the
720 * RNG_Data register is latched at a frequency of
721 * RNG_Clk, which means that it is possible for
722 * consecutive reads of the RNG_Data register to read
723 * identical state from the LFSR. The simplest
724 * workaround seems to be to read eight samples from
725 * the register for each one that we use. Since each
726 * read must require at least one PCI cycle, and
727 * RNG_Clk is at least PCI_Clk, this is safe.
728 */
729 for(i = 0 ; i < nwords * 8; i++)
730 {
731 volatile u_int32_t regtmp;
732 regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
733 num[i / 8] = regtmp;
734 }
735
736 got = nwords * sizeof(num[0]);
737 gotent = (got * NBBY) / HIFN_RNG_BITSPER;
738 #ifdef __NetBSD__
739 rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
740 sc->sc_rng_need -= gotent;
741 #else
742 /* XXX a bad idea; see 7811 block above */
743 add_true_randomness(num[0]);
744 #endif
745 }
746
747 #ifdef __OpenBSD__
748 timeout_add(&sc->sc_rngto, sc->sc_rnghz);
749 #else
750 if (sc->sc_rng_need > 0) {
751 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
752 }
753 #endif
754 }
755
756 static void
757 hifn_rng(void *vsc)
758 {
759 struct hifn_softc *sc = vsc;
760
761 mutex_spin_enter(&sc->sc_mtx);
762 hifn_rng_locked(vsc);
763 mutex_spin_exit(&sc->sc_mtx);
764 }
765
766 static void
767 hifn_puc_wait(struct hifn_softc *sc)
768 {
769 int i;
770
771 for (i = 5000; i > 0; i--) {
772 DELAY(1);
773 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
774 break;
775 }
776 if (!i)
777 printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
778 }
779
780 /*
781 * Reset the processing unit.
782 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit: write DMAENA alone (RESET bit clear),
	 * then poll until the RESET bit reads back clear. */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
790
791 static void
792 hifn_set_retry(struct hifn_softc *sc)
793 {
794 u_int32_t r;
795
796 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
797 r &= 0xffff0000;
798 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
799 }
800
801 /*
 * Resets the board.  Values in the registers are left as is
803 * from the reset (i.e. initial values are assigned elsewhere).
804 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		/* Full reset: drop MSTRESET/DMARESET entirely. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: keep master reset asserted, reset the PU. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are garbage after a reset; clear them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
854
855 static u_int32_t
856 hifn_next_signature(u_int32_t a, u_int cnt)
857 {
858 int i;
859 u_int32_t v;
860
861 for (i = 0; i < cnt; i++) {
862
863 /* get the parity */
864 v = a & 0x80080125;
865 v ^= v >> 16;
866 v ^= v >> 8;
867 v ^= v >> 4;
868 v ^= v >> 2;
869 v ^= v >> 1;
870
871 a = (v & 1) ^ (a << 1);
872 }
873
874 return a;
875 }
876
/*
 * Per-device 13-byte unlock keys, indexed by PCI vendor/product, fed
 * through hifn_next_signature() in hifn_enable_crypto().  In this
 * public version of the driver every key is all zeros.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
924
925 /*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
927 * "hifn_enable_crypto" is called to enable it. The check is important,
928 * as enabling crypto twice will lock the board.
929 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find the unlock key for this vendor/product pair. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save registers we clobber; restored at "report" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Unlock sequence: feed the key through the signature LFSR. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	/* Map the enable level to the capability string shown at attach. */
	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
1027
1028 /*
1029 * Give initial values to the registers listed in the "Register Space"
1030 * section of the HIFN Software Development reference manual.
1031 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers (cmd/src/dst/result rings
	 * all live in the single DMA segment allocated at attach) */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable ring control and acknowledge
	 * every stale status bit in all four rings */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* select the interrupt causes we want, then enable them */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* processing-unit config differs between 795x and older parts */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1099
1100 /*
1101 * The maximum number of sessions supported by the card
1102 * is dependent on the amount of context ram, which
1103 * encryption algorithms are enabled, and how compression
1104 * is configured. This should be configured before this
1105 * routine is called.
1106 */
1107 static void
1108 hifn_sessions(struct hifn_softc *sc)
1109 {
1110 u_int32_t pucnfg;
1111 int ctxsize;
1112
1113 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1114
1115 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1116 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1117 ctxsize = 128;
1118 else
1119 ctxsize = 512;
1120 /*
1121 * 7955/7956 has internal context memory of 32K
1122 */
1123 if (sc->sc_flags & HIFN_IS_7956)
1124 sc->sc_maxses = 32768 / ctxsize;
1125 else
1126 sc->sc_maxses = 1 +
1127 ((sc->sc_ramsize - 32768) / ctxsize);
1128 }
1129 else
1130 sc->sc_maxses = sc->sc_ramsize / 16384;
1131
1132 if (sc->sc_maxses > 2048)
1133 sc->sc_maxses = 2048;
1134 }
1135
1136 /*
1137 * Determine ram type (sram or dram). Board should be just out of a reset
1138 * state when this is called.
1139 */
1140 static int
1141 hifn_ramtype(struct hifn_softc *sc)
1142 {
1143 u_int8_t data[8], dataexpect[8];
1144 int i;
1145
1146 for (i = 0; i < sizeof(data); i++)
1147 data[i] = dataexpect[i] = 0x55;
1148 if (hifn_writeramaddr(sc, 0, data))
1149 return (-1);
1150 if (hifn_readramaddr(sc, 0, data))
1151 return (-1);
1152 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1153 sc->sc_drammodel = 1;
1154 return (0);
1155 }
1156
1157 for (i = 0; i < sizeof(data); i++)
1158 data[i] = dataexpect[i] = 0xaa;
1159 if (hifn_writeramaddr(sc, 0, data))
1160 return (-1);
1161 if (hifn_readramaddr(sc, 0, data))
1162 return (-1);
1163 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1164 sc->sc_drammodel = 1;
1165 return (0);
1166 }
1167
1168 return (0);
1169 }
1170
1171 #define HIFN_SRAM_MAX (32 << 20)
1172 #define HIFN_SRAM_STEP_SIZE 16384
1173 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1174
/*
 * Probe the amount of attached SRAM, in HIFN_SRAM_STEP_SIZE steps up
 * to HIFN_SRAM_MAX.  Each step is stamped with its own index, written
 * in descending address order; presumably so that, if the part is
 * smaller than the maximum and addresses alias, the lower-address
 * stamp written last clobbers the aliased higher one and the readback
 * below detects it -- TODO confirm against the hardware manual.
 * sc_ramsize ends up as the highest step that read back correctly.
 * Always returns 0; on a failed or mismatched read the size found so
 * far stands.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/*
	 * Background pattern; the first sizeof(i) bytes of both buffers
	 * are overwritten with the per-step stamp below, the rest stays
	 * as this pattern so the 8-byte compare is meaningful.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Stamp each step with its index, highest address first. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(data, &i, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Read the stamps back in ascending order; stop at the first
	 * one that is missing or wrong. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(dataexpect, &i, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1204
1205 /*
1206 * XXX For dram boards, one should really try all of the
1207 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1208 * is already set up correctly.
1209 */
1210 static int
1211 hifn_dramsize(struct hifn_softc *sc)
1212 {
1213 u_int32_t cnfg;
1214
1215 if (sc->sc_flags & HIFN_IS_7956) {
1216 /*
1217 * 7955/7956 have a fixed internal ram of only 32K.
1218 */
1219 sc->sc_ramsize = 32768;
1220 } else {
1221 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1222 HIFN_PUCNFG_DRAMMASK;
1223 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1224 }
1225 return (0);
1226 }
1227
/*
 * Reserve one descriptor slot in each of the four DMA rings (command,
 * source, destination, result), returning the indices through
 * *cmdp/*srcp/*dstp/*resp.  When a ring's producer index has reached
 * the end, the extra slot at index RSIZE is armed as a "jump"
 * descriptor so the chip wraps back to slot 0, and allocation
 * restarts at 0.  The cleanup index (cmdk/srck/dstk/resk) is advanced
 * to match.  NOTE(review): no free-space check is made here; callers
 * must ensure the rings have room.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* source ring */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* destination ring */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* result ring */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1274
1275 static int
1276 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1277 {
1278 struct hifn_dma *dma = sc->sc_dma;
1279 struct hifn_base_command wc;
1280 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1281 int r, cmdi, resi, srci, dsti;
1282
1283 wc.masks = htole16(3 << 13);
1284 wc.session_num = htole16(addr >> 14);
1285 wc.total_source_count = htole16(8);
1286 wc.total_dest_count = htole16(addr & 0x3fff);
1287
1288 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1289
1290 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1291 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1292 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1293
1294 /* build write command */
1295 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1296 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1297 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1298
1299 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1300 + offsetof(struct hifn_dma, test_src));
1301 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1302 + offsetof(struct hifn_dma, test_dst));
1303
1304 dma->cmdr[cmdi].l = htole32(16 | masks);
1305 dma->srcr[srci].l = htole32(8 | masks);
1306 dma->dstr[dsti].l = htole32(4 | masks);
1307 dma->resr[resi].l = htole32(4 | masks);
1308
1309 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1310 0, sc->sc_dmamap->dm_mapsize,
1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312
1313 for (r = 10000; r >= 0; r--) {
1314 DELAY(10);
1315 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1316 0, sc->sc_dmamap->dm_mapsize,
1317 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1318 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1319 break;
1320 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1321 0, sc->sc_dmamap->dm_mapsize,
1322 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1323 }
1324 if (r == 0) {
1325 printf("%s: writeramaddr -- "
1326 "result[%d](addr %d) still valid\n",
1327 device_xname(sc->sc_dv), resi, addr);
1328 r = -1;
1329 return (-1);
1330 } else
1331 r = 0;
1332
1333 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1334 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1335 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1336
1337 return (r);
1338 }
1339
1340 static int
1341 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1342 {
1343 struct hifn_dma *dma = sc->sc_dma;
1344 struct hifn_base_command rc;
1345 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1346 int r, cmdi, srci, dsti, resi;
1347
1348 rc.masks = htole16(2 << 13);
1349 rc.session_num = htole16(addr >> 14);
1350 rc.total_source_count = htole16(addr & 0x3fff);
1351 rc.total_dest_count = htole16(8);
1352
1353 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1354
1355 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1356 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1357 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1358
1359 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1360 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1361
1362 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1363 offsetof(struct hifn_dma, test_src));
1364 dma->test_src = 0;
1365 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1366 offsetof(struct hifn_dma, test_dst));
1367 dma->test_dst = 0;
1368 dma->cmdr[cmdi].l = htole32(8 | masks);
1369 dma->srcr[srci].l = htole32(8 | masks);
1370 dma->dstr[dsti].l = htole32(8 | masks);
1371 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1372
1373 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1374 0, sc->sc_dmamap->dm_mapsize,
1375 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1376
1377 for (r = 10000; r >= 0; r--) {
1378 DELAY(10);
1379 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1380 0, sc->sc_dmamap->dm_mapsize,
1381 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1382 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1383 break;
1384 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1385 0, sc->sc_dmamap->dm_mapsize,
1386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1387 }
1388 if (r == 0) {
1389 printf("%s: readramaddr -- "
1390 "result[%d](addr %d) still valid\n",
1391 device_xname(sc->sc_dv), resi, addr);
1392 r = -1;
1393 } else {
1394 r = 0;
1395 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1396 }
1397
1398 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1399 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1400 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1401
1402 return (r);
1403 }
1404
1405 /*
1406 * Initialize the descriptor rings.
1407 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/*
	 * initialize static pointer values: command and result
	 * descriptors always point at their fixed per-slot buffers;
	 * source/destination descriptor addresses are filled in
	 * per-operation.
	 */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * The extra descriptor at index RSIZE of each ring is a
	 * permanent jump descriptor whose address points back at slot
	 * 0; only its control word is re-armed at wrap time, the
	 * address set here never changes.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	/* All rings empty: in-use counts, producer and cleanup indices. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1441
1442 /*
1443 * Writes out the raw command buffer space. Returns the
1444 * command buffer size.
1445 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: total source/destination lengths are 16-bit
	 * fields; the upper bits of each length are packed into the
	 * session_num word via the SRCLEN/DSTLEN fields.  If slop
	 * bytes are being diverted (cmd->sloplen), the destination
	 * length counts the full padding word instead of them.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/*
	 * Optional sub-command structures follow the base command in
	 * this fixed order: compression, MAC, crypt.  Each packs the
	 * upper bits of its source length into its masks word.
	 */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key material, if being loaded with this command. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key material; the layout depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 key area is 256 bytes: the key is
			 * repeated to fill it, followed by 4 bytes of
			 * zero padding.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV, if being loaded; AES uses a longer IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * A command selecting no operation at all still needs 8 bytes
	 * of zero padding after the base command.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	/* Return the number of bytes actually written. */
	return (buf_pos - buf);
}
1575
1576 static int
1577 hifn_dmamap_aligned(bus_dmamap_t map)
1578 {
1579 int i;
1580
1581 for (i = 0; i < map->dm_nsegs; i++) {
1582 if (map->dm_segs[i].ds_addr & 3)
1583 return (0);
1584 if ((i != (map->dm_nsegs - 1)) &&
1585 (map->dm_segs[i].ds_len & 3))
1586 return (0);
1587 }
1588 return (1);
1589 }
1590
/*
 * Fill the destination ring with the segments of cmd->dst_map,
 * handing each descriptor to the chip (HIFN_D_VALID) as it is built.
 * If the transfer length is not word-aligned (cmd->sloplen != 0), the
 * final sloplen bytes are redirected into the per-command slop word
 * of the shared DMA buffer so the chip only writes whole 32-bit
 * words; the completion path copies them back.  The last descriptor
 * queued carries HIFN_D_LAST.  Updates dma->dsti/dstu and returns the
 * new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	/* All segments except the last are queued verbatim. */
	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* Wrap: arm the jump descriptor and restart at slot 0. */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: the last segment is the last descriptor. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/*
		 * Unaligned tail: the final descriptor writes one
		 * padded word into the slop area instead.
		 */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Queue whatever aligned part the last segment has. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Queue the final (LAST-flagged) descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1662
/*
 * Fill the source ring with the segments of cmd->src_map, handing
 * each descriptor to the chip (HIFN_D_VALID) as it is built.  The
 * final segment's descriptor carries HIFN_D_LAST.  Updates
 * dma->srci/srcu and returns the new producer index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Wrap: arm the jump descriptor and restart at slot 0. */
		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1694
/*
 * Queue one crypto operation on the card: map the source (mbuf chain
 * or uio), arrange a destination (reusing the source map when it is
 * word-aligned, otherwise allocating a fresh mbuf chain to copy
 * into), serialize the command, and load all four DMA rings.
 * Returns 0 on success or an errno; on error all maps/mbufs acquired
 * here are released.  NOTE(review): the 'hint' argument is unused
 * here.  Caller is assumed to hold sc_mtx -- TODO confirm.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/* Create and load the source DMA map from the request data. */
	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/*
		 * Aligned source: operate in place.  Any sub-word tail
		 * (sloplen) is diverted through the slop area by
		 * hifn_dmamap_load_dst().
		 */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/*
		 * Unaligned source: iovecs cannot be realigned here,
		 * so fail; for mbufs build a fresh chain of the same
		 * total length to serve as the destination.
		 */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Keep appending mbufs until totlen is covered. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination: create and load its own DMA map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* Hand the data buffers to the device. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	/*
	 * Take a command slot (wrapping via the jump descriptor if
	 * needed), serialize the command into it, and hand the
	 * descriptor to the chip.
	 */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Queue the source descriptors and start the source channel. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so the interrupt handler can complete it. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop bytes land in slop[resi]; record where for completion. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the watchdog countdown used by hifn_tick(). */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1961
1962 static void
1963 hifn_tick(void *vsc)
1964 {
1965 struct hifn_softc *sc = vsc;
1966
1967 mutex_spin_enter(&sc->sc_mtx);
1968 if (sc->sc_active == 0) {
1969 struct hifn_dma *dma = sc->sc_dma;
1970 u_int32_t r = 0;
1971
1972 if (dma->cmdu == 0 && sc->sc_c_busy) {
1973 sc->sc_c_busy = 0;
1974 r |= HIFN_DMACSR_C_CTRL_DIS;
1975 CLR_LED(sc, HIFN_MIPSRST_LED0);
1976 }
1977 if (dma->srcu == 0 && sc->sc_s_busy) {
1978 sc->sc_s_busy = 0;
1979 r |= HIFN_DMACSR_S_CTRL_DIS;
1980 CLR_LED(sc, HIFN_MIPSRST_LED1);
1981 }
1982 if (dma->dstu == 0 && sc->sc_d_busy) {
1983 sc->sc_d_busy = 0;
1984 r |= HIFN_DMACSR_D_CTRL_DIS;
1985 }
1986 if (dma->resu == 0 && sc->sc_r_busy) {
1987 sc->sc_r_busy = 0;
1988 r |= HIFN_DMACSR_R_CTRL_DIS;
1989 CLR_LED(sc, HIFN_MIPSRST_LED2);
1990 }
1991 if (r)
1992 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1993 }
1994 else
1995 sc->sc_active--;
1996 #ifdef __OpenBSD__
1997 timeout_add(&sc->sc_tickto, hz);
1998 #else
1999 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2000 #endif
2001 mutex_spin_exit(&sc->sc_mtx);
2002 }
2003
/*
 * Interrupt handler: acknowledge DMA status, handle overruns, illegal
 * accesses (7811) and aborts, then reclaim completed descriptors from
 * the result, source and command rings.  Returns 1 if the interrupt
 * was ours, 0 otherwise.
 *
 * Ring reclaim note: each ring has one extra slot (index RSIZE)
 * holding the jump descriptor; it occupies a ring position but does
 * not count against the in-use counter, hence the "i != RSIZE" /
 * "++i == RSIZE + 1" index arithmetic below.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       device_xname(sc->sc_dv),
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* Acknowledge the conditions we are about to handle. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* Engine interrupt: clear the processing-unit status bits. */
	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	/* Public-key unit completion (boards that have one). */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	/* Overruns are logged but not otherwise acted on here. */
	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	/* Any channel abort forces a full device reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */

	/*
	 * Result ring: complete each finished command (descriptor with
	 * HIFN_D_VALID cleared by the chip) via hifn_callback().
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip: stop here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* Jump slot wraps without consuming a "resu" count. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Source ring: just retire descriptors the chip has consumed. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring: retire descriptors the chip has fetched. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2139
2140 /*
2141 * Allocate a new 'session' and return an encoded session id. 'sidp'
2142 * contains our registration id, and should contain an encoded session
2143 * id on successful allocation.
2144 */
2145 static int
2146 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2147 {
2148 struct cryptoini *c;
2149 struct hifn_softc *sc = arg;
2150 int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2151
2152 KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
2153 if (sidp == NULL || cri == NULL || sc == NULL)
2154 return retval;
2155
2156 mutex_spin_enter(&sc->sc_mtx);
2157
2158 for (i = 0; i < sc->sc_maxses; i++)
2159 if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2160 break;
2161 if (i == sc->sc_maxses) {
2162 retval = ENOMEM;
2163 goto out;
2164 }
2165
2166 for (c = cri; c != NULL; c = c->cri_next) {
2167 switch (c->cri_alg) {
2168 case CRYPTO_MD5:
2169 case CRYPTO_SHA1:
2170 case CRYPTO_MD5_HMAC_96:
2171 case CRYPTO_SHA1_HMAC_96:
2172 if (mac) {
2173 goto out;
2174 }
2175 mac = 1;
2176 break;
2177 case CRYPTO_DES_CBC:
2178 case CRYPTO_3DES_CBC:
2179 case CRYPTO_AES_CBC:
2180 /* Note that this is an initialization
2181 vector, not a cipher key; any function
2182 giving sufficient Hamming distance
2183 between outputs is fine. Use of RC4
2184 to generate IVs has been FIPS140-2
2185 certified by several labs. */
2186 #ifdef __NetBSD__
2187 cprng_fast(sc->sc_sessions[i].hs_iv,
2188 c->cri_alg == CRYPTO_AES_CBC ?
2189 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2190 #else /* FreeBSD and OpenBSD have get_random_bytes */
2191 /* XXX this may read fewer, does it matter? */
2192 get_random_bytes(sc->sc_sessions[i].hs_iv,
2193 c->cri_alg == CRYPTO_AES_CBC ?
2194 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2195 #endif
2196 /*FALLTHROUGH*/
2197 case CRYPTO_ARC4:
2198 if (cry) {
2199 goto out;
2200 }
2201 cry = 1;
2202 break;
2203 #ifdef HAVE_CRYPTO_LZS
2204 case CRYPTO_LZS_COMP:
2205 if (comp) {
2206 goto out;
2207 }
2208 comp = 1;
2209 break;
2210 #endif
2211 default:
2212 goto out;
2213 }
2214 }
2215 if (mac == 0 && cry == 0 && comp == 0) {
2216 goto out;
2217 }
2218
2219 /*
2220 * XXX only want to support compression without chaining to
2221 * MAC/crypt engine right now
2222 */
2223 if ((comp && mac) || (comp && cry)) {
2224 goto out;
2225 }
2226
2227 *sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2228 sc->sc_sessions[i].hs_state = HS_STATE_USED;
2229
2230 retval = 0;
2231 out:
2232 mutex_spin_exit(&sc->sc_mtx);
2233 return retval;
2234 }
2235
2236 /*
2237 * Deallocate a session.
2238 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2239 * XXX to blow away any keys already stored there.
2240 */
2241 static int
2242 hifn_freesession(void *arg, u_int64_t tid)
2243 {
2244 struct hifn_softc *sc = arg;
2245 int session;
2246 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2247
2248 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2249 if (sc == NULL)
2250 return (EINVAL);
2251
2252 mutex_spin_enter(&sc->sc_mtx);
2253 session = HIFN_SESSION(sid);
2254 if (session >= sc->sc_maxses) {
2255 mutex_spin_exit(&sc->sc_mtx);
2256 return (EINVAL);
2257 }
2258
2259 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2260 mutex_spin_exit(&sc->sc_mtx);
2261 return (0);
2262 }
2263
2264 static int
2265 hifn_process(void *arg, struct cryptop *crp, int hint)
2266 {
2267 struct hifn_softc *sc = arg;
2268 struct hifn_command *cmd = NULL;
2269 int session, err, ivlen;
2270 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2271
2272 if (crp == NULL || crp->crp_callback == NULL) {
2273 hifnstats.hst_invalid++;
2274 return (EINVAL);
2275 }
2276
2277 mutex_spin_enter(&sc->sc_mtx);
2278 session = HIFN_SESSION(crp->crp_sid);
2279
2280 if (sc == NULL || session >= sc->sc_maxses) {
2281 err = EINVAL;
2282 goto errout;
2283 }
2284
2285 cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2286 M_DEVBUF, M_NOWAIT|M_ZERO);
2287 if (cmd == NULL) {
2288 hifnstats.hst_nomem++;
2289 err = ENOMEM;
2290 goto errout;
2291 }
2292
2293 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2294 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2295 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2296 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2297 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2298 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2299 } else {
2300 err = EINVAL;
2301 goto errout; /* XXX we don't handle contiguous buffers! */
2302 }
2303
2304 crd1 = crp->crp_desc;
2305 if (crd1 == NULL) {
2306 err = EINVAL;
2307 goto errout;
2308 }
2309 crd2 = crd1->crd_next;
2310
2311 if (crd2 == NULL) {
2312 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2313 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2314 crd1->crd_alg == CRYPTO_SHA1 ||
2315 crd1->crd_alg == CRYPTO_MD5) {
2316 maccrd = crd1;
2317 enccrd = NULL;
2318 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2319 crd1->crd_alg == CRYPTO_3DES_CBC ||
2320 crd1->crd_alg == CRYPTO_AES_CBC ||
2321 crd1->crd_alg == CRYPTO_ARC4) {
2322 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2323 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2324 maccrd = NULL;
2325 enccrd = crd1;
2326 #ifdef HAVE_CRYPTO_LZS
2327 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2328 return (hifn_compression(sc, crp, cmd));
2329 #endif
2330 } else {
2331 err = EINVAL;
2332 goto errout;
2333 }
2334 } else {
2335 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2336 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2337 crd1->crd_alg == CRYPTO_MD5 ||
2338 crd1->crd_alg == CRYPTO_SHA1) &&
2339 (crd2->crd_alg == CRYPTO_DES_CBC ||
2340 crd2->crd_alg == CRYPTO_3DES_CBC ||
2341 crd2->crd_alg == CRYPTO_AES_CBC ||
2342 crd2->crd_alg == CRYPTO_ARC4) &&
2343 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2344 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2345 maccrd = crd1;
2346 enccrd = crd2;
2347 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2348 crd1->crd_alg == CRYPTO_ARC4 ||
2349 crd1->crd_alg == CRYPTO_3DES_CBC ||
2350 crd1->crd_alg == CRYPTO_AES_CBC) &&
2351 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
2352 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2353 crd2->crd_alg == CRYPTO_MD5 ||
2354 crd2->crd_alg == CRYPTO_SHA1) &&
2355 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2356 enccrd = crd1;
2357 maccrd = crd2;
2358 } else {
2359 /*
2360 * We cannot order the 7751 as requested
2361 */
2362 err = EINVAL;
2363 goto errout;
2364 }
2365 }
2366
2367 if (enccrd) {
2368 cmd->enccrd = enccrd;
2369 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2370 switch (enccrd->crd_alg) {
2371 case CRYPTO_ARC4:
2372 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2373 if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2374 != sc->sc_sessions[session].hs_prev_op)
2375 sc->sc_sessions[session].hs_state =
2376 HS_STATE_USED;
2377 break;
2378 case CRYPTO_DES_CBC:
2379 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2380 HIFN_CRYPT_CMD_MODE_CBC |
2381 HIFN_CRYPT_CMD_NEW_IV;
2382 break;
2383 case CRYPTO_3DES_CBC:
2384 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2385 HIFN_CRYPT_CMD_MODE_CBC |
2386 HIFN_CRYPT_CMD_NEW_IV;
2387 break;
2388 case CRYPTO_AES_CBC:
2389 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2390 HIFN_CRYPT_CMD_MODE_CBC |
2391 HIFN_CRYPT_CMD_NEW_IV;
2392 break;
2393 default:
2394 err = EINVAL;
2395 goto errout;
2396 }
2397 if (enccrd->crd_alg != CRYPTO_ARC4) {
2398 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2399 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2400 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2401 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2402 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2403 else
2404 bcopy(sc->sc_sessions[session].hs_iv,
2405 cmd->iv, ivlen);
2406
2407 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2408 == 0) {
2409 if (crp->crp_flags & CRYPTO_F_IMBUF)
2410 m_copyback(cmd->srcu.src_m,
2411 enccrd->crd_inject,
2412 ivlen, cmd->iv);
2413 else if (crp->crp_flags & CRYPTO_F_IOV)
2414 cuio_copyback(cmd->srcu.src_io,
2415 enccrd->crd_inject,
2416 ivlen, cmd->iv);
2417 }
2418 } else {
2419 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2420 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2421 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2422 m_copydata(cmd->srcu.src_m,
2423 enccrd->crd_inject, ivlen, cmd->iv);
2424 else if (crp->crp_flags & CRYPTO_F_IOV)
2425 cuio_copydata(cmd->srcu.src_io,
2426 enccrd->crd_inject, ivlen, cmd->iv);
2427 }
2428 }
2429
2430 cmd->ck = enccrd->crd_key;
2431 cmd->cklen = enccrd->crd_klen >> 3;
2432
2433 /*
2434 * Need to specify the size for the AES key in the masks.
2435 */
2436 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2437 HIFN_CRYPT_CMD_ALG_AES) {
2438 switch (cmd->cklen) {
2439 case 16:
2440 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2441 break;
2442 case 24:
2443 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2444 break;
2445 case 32:
2446 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2447 break;
2448 default:
2449 err = EINVAL;
2450 goto errout;
2451 }
2452 }
2453
2454 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2455 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2456 }
2457
2458 if (maccrd) {
2459 cmd->maccrd = maccrd;
2460 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2461
2462 switch (maccrd->crd_alg) {
2463 case CRYPTO_MD5:
2464 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2465 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2466 HIFN_MAC_CMD_POS_IPSEC;
2467 break;
2468 case CRYPTO_MD5_HMAC_96:
2469 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2470 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2471 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2472 break;
2473 case CRYPTO_SHA1:
2474 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2475 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2476 HIFN_MAC_CMD_POS_IPSEC;
2477 break;
2478 case CRYPTO_SHA1_HMAC_96:
2479 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2480 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2481 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2482 break;
2483 }
2484
2485 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2486 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
2487 sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2488 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2489 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
2490 memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
2491 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2492 }
2493 }
2494
2495 cmd->crp = crp;
2496 cmd->session_num = session;
2497 cmd->softc = sc;
2498
2499 err = hifn_crypto(sc, cmd, crp, hint);
2500 if (err == 0) {
2501 if (enccrd)
2502 sc->sc_sessions[session].hs_prev_op =
2503 enccrd->crd_flags & CRD_F_ENCRYPT;
2504 if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2505 sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2506 mutex_spin_exit(&sc->sc_mtx);
2507 return 0;
2508 } else if (err == ERESTART) {
2509 /*
2510 * There weren't enough resources to dispatch the request
2511 * to the part. Notify the caller so they'll requeue this
2512 * request and resubmit it again soon.
2513 */
2514 #ifdef HIFN_DEBUG
2515 if (hifn_debug)
2516 printf("%s: requeue request\n", device_xname(sc->sc_dv));
2517 #endif
2518 free(cmd, M_DEVBUF);
2519 sc->sc_needwakeup |= CRYPTO_SYMQ;
2520 mutex_spin_exit(&sc->sc_mtx);
2521 return (err);
2522 }
2523
2524 errout:
2525 if (cmd != NULL)
2526 free(cmd, M_DEVBUF);
2527 if (err == EINVAL)
2528 hifnstats.hst_invalid++;
2529 else
2530 hifnstats.hst_nomem++;
2531 crp->crp_etype = err;
2532 mutex_spin_exit(&sc->sc_mtx);
2533 crypto_done(crp);
2534 return (0);
2535 }
2536
/*
 * Abort all commands outstanding on the result ring: complete the
 * ones the device already finished, fail the rest with ENOMEM, then
 * reset the board and reinitialize the DMA rings and PCI registers.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;	/* i: result ring index, u: outstanding count */

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Still owned by the device; tear it down by hand. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* Separate dst chain: free src, return dst to caller. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN requests would be resubmitted, not completed. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2610
/*
 * Completion handler for a crypt/MAC command: sync the DMA maps for
 * the CPU, fix up the destination mbuf chain, copy back any residual
 * "slop" bytes, retire consumed destination ring descriptors, save
 * the last cipher block as the session IV after a CBC encrypt, copy
 * the MAC result into place, release the maps, and complete the
 * request via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	/* DMA is done: sync src (and dst when distinct) for CPU access. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A separate destination chain was allocated:
			 * hand it to the caller, trim it to the source
			 * length and free the source chain.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/*
		 * The transfer tail was DMA'd into a shared bounce
		 * slot; copy it back into the real buffer.
		 */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Retire destination ring descriptors the device has consumed. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor, not a unit. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * After a CBC encrypt, save the last cipher block as the IV
	 * for the next request on this session.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/*
		 * NOTE(review): macbuf is advanced past the base,
		 * optional comp, and mac result headers -- the digest
		 * bytes presumably follow struct hifn_mac_result;
		 * confirm against the result buffer layout.
		 */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest lengths: MD5 16, SHA1 20, HMAC-96 12. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	/* Release DMA resources and hand the request back. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2744
2745 #ifdef HAVE_CRYPTO_LZS
2746
2747 static int
2748 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2749 struct hifn_command *cmd)
2750 {
2751 struct cryptodesc *crd = crp->crp_desc;
2752 int s, err = 0;
2753
2754 cmd->compcrd = crd;
2755 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2756
2757 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2758 /*
2759 * XXX can only handle mbufs right now since we can
2760 * XXX dynamically resize them.
2761 */
2762 err = EINVAL;
2763 return (ENOMEM);
2764 }
2765
2766 if ((crd->crd_flags & CRD_F_COMP) == 0)
2767 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2768 if (crd->crd_alg == CRYPTO_LZS_COMP)
2769 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2770 HIFN_COMP_CMD_CLEARHIST;
2771
2772 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2773 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2774 err = ENOMEM;
2775 goto fail;
2776 }
2777
2778 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2779 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2780 err = ENOMEM;
2781 goto fail;
2782 }
2783
2784 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2785 int len;
2786
2787 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2788 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2789 err = ENOMEM;
2790 goto fail;
2791 }
2792
2793 len = cmd->src_map->dm_mapsize / MCLBYTES;
2794 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2795 len++;
2796 len *= MCLBYTES;
2797
2798 if ((crd->crd_flags & CRD_F_COMP) == 0)
2799 len *= 4;
2800
2801 if (len > HIFN_MAX_DMALEN)
2802 len = HIFN_MAX_DMALEN;
2803
2804 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2805 if (cmd->dstu.dst_m == NULL) {
2806 err = ENOMEM;
2807 goto fail;
2808 }
2809
2810 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2811 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2812 err = ENOMEM;
2813 goto fail;
2814 }
2815 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2816 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2817 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2818 err = ENOMEM;
2819 goto fail;
2820 }
2821 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2822 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2823 err = ENOMEM;
2824 goto fail;
2825 }
2826 }
2827
2828 if (cmd->src_map == cmd->dst_map)
2829 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2830 0, cmd->src_map->dm_mapsize,
2831 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2832 else {
2833 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2834 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2835 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2836 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2837 }
2838
2839 cmd->crp = crp;
2840 /*
2841 * Always use session 0. The modes of compression we use are
2842 * stateless and there is always at least one compression
2843 * context, zero.
2844 */
2845 cmd->session_num = 0;
2846 cmd->softc = sc;
2847
2848 err = hifn_compress_enter(sc, cmd);
2849
2850 if (err != 0)
2851 goto fail;
2852 return (0);
2853
2854 fail:
2855 if (cmd->dst_map != NULL) {
2856 if (cmd->dst_map->dm_nsegs > 0)
2857 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2858 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2859 }
2860 if (cmd->src_map != NULL) {
2861 if (cmd->src_map->dm_nsegs > 0)
2862 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2863 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2864 }
2865 free(cmd, M_DEVBUF);
2866 if (err == EINVAL)
2867 hifnstats.hst_invalid++;
2868 else
2869 hifnstats.hst_nomem++;
2870 crp->crp_etype = err;
2871 crypto_done(crp);
2872 return (0);
2873 }
2874
2875 static int
2876 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2877 {
2878 struct hifn_dma *dma = sc->sc_dma;
2879 int cmdi, resi;
2880 u_int32_t cmdlen;
2881
2882 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2883 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2884 return (ENOMEM);
2885
2886 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2887 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2888 return (ENOMEM);
2889
2890 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2891 dma->cmdi = 0;
2892 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2893 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2894 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2895 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2896 }
2897 cmdi = dma->cmdi++;
2898 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2899 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2900
2901 /* .p for command/result already set */
2902 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2903 HIFN_D_MASKDONEIRQ);
2904 HIFN_CMDR_SYNC(sc, cmdi,
2905 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2906 dma->cmdu++;
2907 if (sc->sc_c_busy == 0) {
2908 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2909 sc->sc_c_busy = 1;
2910 SET_LED(sc, HIFN_MIPSRST_LED0);
2911 }
2912
2913 /*
2914 * We don't worry about missing an interrupt (which a "command wait"
2915 * interrupt salvages us from), unless there is more than one command
2916 * in the queue.
2917 */
2918 if (dma->cmdu > 1) {
2919 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2920 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2921 }
2922
2923 hifnstats.hst_ipackets++;
2924 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2925
2926 hifn_dmamap_load_src(sc, cmd);
2927 if (sc->sc_s_busy == 0) {
2928 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2929 sc->sc_s_busy = 1;
2930 SET_LED(sc, HIFN_MIPSRST_LED1);
2931 }
2932
2933 /*
2934 * Unlike other descriptors, we don't mask done interrupt from
2935 * result descriptor.
2936 */
2937 if (dma->resi == HIFN_D_RES_RSIZE) {
2938 dma->resi = 0;
2939 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2940 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2941 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2942 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2943 }
2944 resi = dma->resi++;
2945 dma->hifn_commands[resi] = cmd;
2946 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2947 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2948 HIFN_D_VALID | HIFN_D_LAST);
2949 HIFN_RESR_SYNC(sc, resi,
2950 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2951 dma->resu++;
2952 if (sc->sc_r_busy == 0) {
2953 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2954 sc->sc_r_busy = 1;
2955 SET_LED(sc, HIFN_MIPSRST_LED2);
2956 }
2957
2958 if (cmd->sloplen)
2959 cmd->slopidx = resi;
2960
2961 hifn_dmamap_load_dst(sc, cmd);
2962
2963 if (sc->sc_d_busy == 0) {
2964 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2965 sc->sc_d_busy = 1;
2966 }
2967 sc->sc_active = 5;
2968 cmd->cmd_callback = hifn_callback_comp;
2969 return (0);
2970 }
2971
2972 static void
2973 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2974 u_int8_t *resbuf)
2975 {
2976 struct hifn_base_result baseres;
2977 struct cryptop *crp = cmd->crp;
2978 struct hifn_dma *dma = sc->sc_dma;
2979 struct mbuf *m;
2980 int err = 0, i, u;
2981 u_int32_t olen;
2982 bus_size_t dstsize;
2983
2984 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2985 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2986 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2987 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2988
2989 dstsize = cmd->dst_map->dm_mapsize;
2990 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2991
2992 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2993
2994 i = dma->dstk; u = dma->dstu;
2995 while (u != 0) {
2996 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2997 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2998 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2999 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
3000 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
3001 offsetof(struct hifn_dma, dstr[i]),
3002 sizeof(struct hifn_desc),
3003 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3004 break;
3005 }
3006 if (++i == (HIFN_D_DST_RSIZE + 1))
3007 i = 0;
3008 else
3009 u--;
3010 }
3011 dma->dstk = i; dma->dstu = u;
3012
3013 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
3014 bus_size_t xlen;
3015
3016 xlen = dstsize;
3017
3018 m_freem(cmd->dstu.dst_m);
3019
3020 if (xlen == HIFN_MAX_DMALEN) {
3021 /* We've done all we can. */
3022 err = E2BIG;
3023 goto out;
3024 }
3025
3026 xlen += MCLBYTES;
3027
3028 if (xlen > HIFN_MAX_DMALEN)
3029 xlen = HIFN_MAX_DMALEN;
3030
3031 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
3032 cmd->srcu.src_m);
3033 if (cmd->dstu.dst_m == NULL) {
3034 err = ENOMEM;
3035 goto out;
3036 }
3037 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
3038 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
3039 err = ENOMEM;
3040 goto out;
3041 }
3042
3043 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3044 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3045 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3046 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
3047
3048 err = hifn_compress_enter(sc, cmd);
3049 if (err != 0)
3050 goto out;
3051 return;
3052 }
3053
3054 olen = dstsize - (letoh16(baseres.dst_cnt) |
3055 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
3056 HIFN_BASE_RES_DSTLEN_S) << 16));
3057
3058 crp->crp_olen = olen - cmd->compcrd->crd_skip;
3059
3060 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3061 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3062 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3063
3064 m = cmd->dstu.dst_m;
3065 if (m->m_flags & M_PKTHDR)
3066 m->m_pkthdr.len = olen;
3067 crp->crp_buf = (void *)m;
3068 for (; m != NULL; m = m->m_next) {
3069 if (olen >= m->m_len)
3070 olen -= m->m_len;
3071 else {
3072 m->m_len = olen;
3073 olen = 0;
3074 }
3075 }
3076
3077 m_freem(cmd->srcu.src_m);
3078 free(cmd, M_DEVBUF);
3079 crp->crp_etype = 0;
3080 crypto_done(crp);
3081 return;
3082
3083 out:
3084 if (cmd->dst_map != NULL) {
3085 if (cmd->src_map->dm_nsegs != 0)
3086 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3087 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3088 }
3089 if (cmd->src_map != NULL) {
3090 if (cmd->src_map->dm_nsegs != 0)
3091 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3092 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3093 }
3094 if (cmd->dstu.dst_m != NULL)
3095 m_freem(cmd->dstu.dst_m);
3096 free(cmd, M_DEVBUF);
3097 crp->crp_etype = err;
3098 crypto_done(crp);
3099 }
3100
3101 static struct mbuf *
3102 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3103 {
3104 int len;
3105 struct mbuf *m, *m0, *mlast;
3106
3107 if (mtemplate->m_flags & M_PKTHDR) {
3108 len = MHLEN;
3109 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3110 } else {
3111 len = MLEN;
3112 MGET(m0, M_DONTWAIT, MT_DATA);
3113 }
3114 if (m0 == NULL)
3115 return (NULL);
3116 if (len == MHLEN)
3117 M_DUP_PKTHDR(m0, mtemplate);
3118 MCLGET(m0, M_DONTWAIT);
3119 if (!(m0->m_flags & M_EXT))
3120 m_freem(m0);
3121 len = MCLBYTES;
3122
3123 totlen -= len;
3124 m0->m_pkthdr.len = m0->m_len = len;
3125 mlast = m0;
3126
3127 while (totlen > 0) {
3128 MGET(m, M_DONTWAIT, MT_DATA);
3129 if (m == NULL) {
3130 m_freem(m0);
3131 return (NULL);
3132 }
3133 MCLGET(m, M_DONTWAIT);
3134 if (!(m->m_flags & M_EXT)) {
3135 m_freem(m0);
3136 return (NULL);
3137 }
3138 len = MCLBYTES;
3139 m->m_len = len;
3140 if (m0->m_flags & M_PKTHDR)
3141 m0->m_pkthdr.len += len;
3142 totlen -= len;
3143
3144 mlast->m_next = m;
3145 mlast = m;
3146 }
3147
3148 return (m0);
3149 }
3150 #endif /* HAVE_CRYPTO_LZS */
3151
3152 static void
3153 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3154 {
3155 /*
3156 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3157 * and Group 1 registers; avoid conditions that could create
3158 * burst writes by doing a read in between the writes.
3159 */
3160 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3161 if (sc->sc_waw_lastgroup == reggrp &&
3162 sc->sc_waw_lastreg == reg - 4) {
3163 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3164 }
3165 sc->sc_waw_lastgroup = reggrp;
3166 sc->sc_waw_lastreg = reg;
3167 }
3168 if (reggrp == 0)
3169 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3170 else
3171 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3172
3173 }
3174
3175 static u_int32_t
3176 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3177 {
3178 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3179 sc->sc_waw_lastgroup = -1;
3180 sc->sc_waw_lastreg = 1;
3181 }
3182 if (reggrp == 0)
3183 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3184 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3185 }
3186