hifn7751.c revision 1.55 1 /* $NetBSD: hifn7751.c,v 1.55 2014/06/03 13:53:28 msaitoh Exp $ */
2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */
4
5 /*
6 * Invertex AEON / Hifn 7751 driver
7 * Copyright (c) 1999 Invertex Inc. All rights reserved.
8 * Copyright (c) 1999 Theo de Raadt
9 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10 * http://www.netsec.net
11 * Copyright (c) 2003 Hifn Inc.
12 *
13 * This driver is based on a previous driver by Invertex, for which they
14 * requested: Please send any comments, feedback, bug-fixes, or feature
15 * requests to software (at) invertex.com.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. The name of the author may not be used to endorse or promote products
27 * derived from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Effort sponsored in part by the Defense Advanced Research Projects
41 * Agency (DARPA) and Air Force Research Laboratory, Air Force
42 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 *
44 */
45
46 /*
47 * Driver for various Hifn pre-HIPP encryption processors.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.55 2014/06/03 13:53:28 msaitoh Exp $");
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 #include <sys/module.h>
63
64 #ifdef __OpenBSD__
65 #include <crypto/crypto.h>
66 #include <dev/rndvar.h>
67 #else
68 #include <opencrypto/cryptodev.h>
69 #include <sys/cprng.h>
70 #include <sys/rnd.h>
71 #include <sys/sha1.h>
72 #endif
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77
78 #include <dev/pci/hifn7751reg.h>
79 #include <dev/pci/hifn7751var.h>
80
81 #undef HIFN_DEBUG
82
83 #ifdef __NetBSD__
84 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */
85 #endif
86
87 #ifdef HIFN_DEBUG
88 extern int hifn_debug; /* patchable */
89 int hifn_debug = 1;
90 #endif
91
92 #ifdef __OpenBSD__
93 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */
94 #endif
95
96 /*
97 * Prototypes and count for the pci_device structure
98 */
99 #ifdef __OpenBSD__
100 static int hifn_probe((struct device *, void *, void *);
101 #else
102 static int hifn_probe(device_t, cfdata_t, void *);
103 #endif
104 static void hifn_attach(device_t, device_t, void *);
105 #ifdef __NetBSD__
106 static int hifn_detach(device_t, int);
107
108 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
109 hifn_probe, hifn_attach, hifn_detach, NULL);
110 #else
111 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
112 hifn_probe, hifn_attach, NULL, NULL);
113 #endif
114
115 #ifdef __OpenBSD__
116 struct cfdriver hifn_cd = {
117 0, "hifn", DV_DULL
118 };
119 #endif
120
121 static void hifn_reset_board(struct hifn_softc *, int);
122 static void hifn_reset_puc(struct hifn_softc *);
123 static void hifn_puc_wait(struct hifn_softc *);
124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
125 static void hifn_set_retry(struct hifn_softc *);
126 static void hifn_init_dma(struct hifn_softc *);
127 static void hifn_init_pci_registers(struct hifn_softc *);
128 static int hifn_sramsize(struct hifn_softc *);
129 static int hifn_dramsize(struct hifn_softc *);
130 static int hifn_ramtype(struct hifn_softc *);
131 static void hifn_sessions(struct hifn_softc *);
132 static int hifn_intr(void *);
133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *);
136 static int hifn_freesession(void*, u_int64_t);
137 static int hifn_process(void*, struct cryptop *, int);
138 static void hifn_callback(struct hifn_softc *, struct hifn_command *,
139 u_int8_t *);
140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 struct cryptop*, int);
142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 static int hifn_dmamap_aligned(bus_dmamap_t);
145 static int hifn_dmamap_load_src(struct hifn_softc *,
146 struct hifn_command *);
147 static int hifn_dmamap_load_dst(struct hifn_softc *,
148 struct hifn_command *);
149 static int hifn_init_pubrng(struct hifn_softc *);
150 static void hifn_rng(void *);
151 static void hifn_rng_locked(void *);
152 static void hifn_tick(void *);
153 static void hifn_abort(struct hifn_softc *);
154 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
155 int *);
156 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
157 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
158 #ifdef HAVE_CRYPTO_LZS
159 static int hifn_compression(struct hifn_softc *, struct cryptop *,
160 struct hifn_command *);
161 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
162 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
163 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
164 u_int8_t *);
165 #endif /* HAVE_CRYPTO_LZS */
166
/* Global driver statistics, shared by all hifn(4) instances. */
struct hifn_stats hifnstats;

/*
 * Table of supported devices: PCI vendor/product IDs, per-chip
 * capability flags (HIFN_*), and the name printed at attach time.
 * Terminated by an entry with a NULL hifn_name.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;	/* PCI vendor ID */
	pci_product_id_t	hifn_product;	/* PCI product ID */
	int			hifn_flags;	/* HIFN_* feature flags */
	const char		*hifn_name;	/* human-readable name */
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	{ 0,			0,
	  0,
	  NULL
	}
};
215
216 static const struct hifn_product *
217 hifn_lookup(const struct pci_attach_args *pa)
218 {
219 const struct hifn_product *hp;
220
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
224 return (hp);
225 }
226 return (NULL);
227 }
228
229 static int
230 hifn_probe(device_t parent, cfdata_t match, void *aux)
231 {
232 struct pci_attach_args *pa = aux;
233
234 if (hifn_lookup(pa) != NULL)
235 return 1;
236
237 return 0;
238 }
239
240 static void
241 hifn_attach(device_t parent, device_t self, void *aux)
242 {
243 struct hifn_softc *sc = device_private(self);
244 struct pci_attach_args *pa = aux;
245 const struct hifn_product *hp;
246 pci_chipset_tag_t pc = pa->pa_pc;
247 pci_intr_handle_t ih;
248 const char *intrstr = NULL;
249 const char *hifncap;
250 char rbase;
251 #ifdef __NetBSD__
252 #define iosize0 sc->sc_iosz0
253 #define iosize1 sc->sc_iosz1
254 #else
255 bus_size_t iosize0, iosize1;
256 #endif
257 u_int32_t cmd;
258 u_int16_t ena;
259 bus_dma_segment_t seg;
260 bus_dmamap_t dmamap;
261 int rseg;
262 void *kva;
263 char intrbuf[PCI_INTRSTR_LEN];
264
265 hp = hifn_lookup(pa);
266 if (hp == NULL) {
267 printf("\n");
268 panic("hifn_attach: impossible");
269 }
270
271 pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);
272
273 sc->sc_dv = self;
274 sc->sc_pci_pc = pa->pa_pc;
275 sc->sc_pci_tag = pa->pa_tag;
276
277 sc->sc_flags = hp->hifn_flags;
278
279 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
280 cmd |= PCI_COMMAND_MASTER_ENABLE;
281 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
282
283 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
284 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
285 aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
286 return;
287 }
288
289 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
290 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
291 aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
292 goto fail_io0;
293 }
294
295 hifn_set_retry(sc);
296
297 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
298 sc->sc_waw_lastgroup = -1;
299 sc->sc_waw_lastreg = 1;
300 }
301
302 sc->sc_dmat = pa->pa_dmat;
303 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
304 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
305 aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
306 goto fail_io1;
307 }
308 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
309 BUS_DMA_NOWAIT)) {
310 aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
311 (u_long)sizeof(*sc->sc_dma));
312 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
313 goto fail_io1;
314 }
315 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
316 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
317 aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
318 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
319 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
320 goto fail_io1;
321 }
322 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
323 NULL, BUS_DMA_NOWAIT)) {
324 aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
325 bus_dmamap_destroy(sc->sc_dmat, dmamap);
326 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
327 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
328 goto fail_io1;
329 }
330 sc->sc_dmamap = dmamap;
331 sc->sc_dma = (struct hifn_dma *)kva;
332 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));
333
334 hifn_reset_board(sc, 0);
335
336 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
337 aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
338 goto fail_mem;
339 }
340 hifn_reset_puc(sc);
341
342 hifn_init_dma(sc);
343 hifn_init_pci_registers(sc);
344
345 /* XXX can't dynamically determine ram type for 795x; force dram */
346 if (sc->sc_flags & HIFN_IS_7956)
347 sc->sc_drammodel = 1;
348 else if (hifn_ramtype(sc))
349 goto fail_mem;
350
351 if (sc->sc_drammodel == 0)
352 hifn_sramsize(sc);
353 else
354 hifn_dramsize(sc);
355
356 /*
357 * Workaround for NetSec 7751 rev A: half ram size because two
358 * of the address lines were left floating
359 */
360 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
361 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
362 PCI_REVISION(pa->pa_class) == 0x61)
363 sc->sc_ramsize >>= 1;
364
365 if (pci_intr_map(pa, &ih)) {
366 aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
367 goto fail_mem;
368 }
369 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
370 #ifdef __OpenBSD__
371 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
372 device_xname(self));
373 #else
374 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
375 #endif
376 if (sc->sc_ih == NULL) {
377 aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
378 if (intrstr != NULL)
379 aprint_error(" at %s", intrstr);
380 aprint_error("\n");
381 goto fail_mem;
382 }
383
384 hifn_sessions(sc);
385
386 rseg = sc->sc_ramsize / 1024;
387 rbase = 'K';
388 if (sc->sc_ramsize >= (1024 * 1024)) {
389 rbase = 'M';
390 rseg /= 1024;
391 }
392 aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
393 hifncap, rseg, rbase,
394 sc->sc_drammodel ? 'D' : 'S', intrstr);
395
396 sc->sc_cid = crypto_get_driverid(0);
397 if (sc->sc_cid < 0) {
398 aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
399 goto fail_intr;
400 }
401
402 WRITE_REG_0(sc, HIFN_0_PUCNFG,
403 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
404 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
405
406 switch (ena) {
407 case HIFN_PUSTAT_ENA_2:
408 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
409 hifn_newsession, hifn_freesession, hifn_process, sc);
410 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
411 hifn_newsession, hifn_freesession, hifn_process, sc);
412 if (sc->sc_flags & HIFN_HAS_AES)
413 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
414 hifn_newsession, hifn_freesession,
415 hifn_process, sc);
416 /*FALLTHROUGH*/
417 case HIFN_PUSTAT_ENA_1:
418 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
419 hifn_newsession, hifn_freesession, hifn_process, sc);
420 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
421 hifn_newsession, hifn_freesession, hifn_process, sc);
422 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
423 hifn_newsession, hifn_freesession, hifn_process, sc);
424 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
425 hifn_newsession, hifn_freesession, hifn_process, sc);
426 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
427 hifn_newsession, hifn_freesession, hifn_process, sc);
428 break;
429 }
430
431 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
432 sc->sc_dmamap->dm_mapsize,
433 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
434
435 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
436 hifn_init_pubrng(sc);
437 sc->sc_rng_need = RND_POOLBITS / NBBY;
438 }
439
440 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
441
442 #ifdef __OpenBSD__
443 timeout_set(&sc->sc_tickto, hifn_tick, sc);
444 timeout_add(&sc->sc_tickto, hz);
445 #else
446 callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
447 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
448 #endif
449 return;
450
451 fail_intr:
452 pci_intr_disestablish(pc, sc->sc_ih);
453 fail_mem:
454 bus_dmamap_unload(sc->sc_dmat, dmamap);
455 bus_dmamap_destroy(sc->sc_dmat, dmamap);
456 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
457 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
458
459 /* Turn off DMA polling */
460 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
461 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
462
463 fail_io1:
464 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
465 fail_io0:
466 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
467 }
468
469 #ifdef __NetBSD__
/*
 * Detach: tear down in roughly the reverse order of attach.
 * Aborts outstanding requests, resets the hardware, removes the
 * interrupt handler, and unregisters from opencrypto and rnd(9).
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	/* Fail any commands still queued on the descriptor rings. */
	hifn_abort(sc);

	/* Full reset: stops the DMA engine and processing unit. */
	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	rnd_detach_source(&sc->sc_rnd_source);

	/*
	 * NOTE(review): callout_halt() is given a NULL interlock while
	 * sc_mtx is held here, yet the RNG callout handler (hifn_rng)
	 * itself takes sc_mtx -- verify this cannot deadlock; passing
	 * &sc->sc_mtx as the interlock may be intended.
	 */
	mutex_enter(&sc->sc_mtx);
	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);
	mutex_exit(&sc->sc_mtx);

	/* Unmap both BARs using the sizes saved in the softc. */
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}
501
502 MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");
503
504 #ifdef _MODULE
505 #include "ioconf.c"
506 #endif
507
508 static int
509 hifn_modcmd(modcmd_t cmd, void *data)
510 {
511 int error = 0;
512
513 switch(cmd) {
514 case MODULE_CMD_INIT:
515 #ifdef _MODULE
516 error = config_init_component(cfdriver_ioconf_hifn,
517 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
518 #endif
519 return error;
520 case MODULE_CMD_FINI:
521 #ifdef _MODULE
522 error = config_fini_component(cfdriver_ioconf_hifn,
523 cfattach_ioconf_hifn, cfdata_ioconf_hifn);
524 #endif
525 return error;
526 default:
527 return ENOTTY;
528 }
529 }
530
531 #endif /* ifdef __NetBSD__ */
532
/*
 * rndsource(9) callback: the kernel entropy pool wants "bytes" more
 * bytes of data.  Record the request and run an RNG pass immediately;
 * hifn_rng_locked() reschedules itself via callout until satisfied.
 */
static void
hifn_rng_get(size_t bytes, void *priv)
{
	struct hifn_softc *sc = priv;

	mutex_enter(&sc->sc_mtx);
	/* Replace (not accumulate) the outstanding request size. */
	sc->sc_rng_need = bytes;

	hifn_rng_locked(sc);
	mutex_exit(&sc->sc_mtx);
}
544
/*
 * Initialize the public-key unit and the random number generator,
 * for chips that have them (HIFN_HAS_PUBLIC / HIFN_HAS_RNG).
 * Returns 0 on success, 1 if the public key unit failed to reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: drop the enable bit if set, configure,
			 * then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4ms, not the 0.4s the
		 * comment above describes -- confirm intended delay.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
				  RND_TYPE_RNG,
				  RND_FLAG_NO_ESTIMATE|RND_FLAG_HASCB);
#endif

		/* Poll roughly every 10ms (but at least once per tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}
637
/*
 * Harvest random words from the chip and feed them to the kernel
 * entropy pool.  Must be called with sc_mtx held.  Stops the polling
 * callout once sc_rng_need is satisfied; otherwise reschedules itself.
 */
static void
hifn_rng_locked(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	/* Larger batch on NetBSD: up to 64 words per pass. */
	uint32_t num[64];
#else
	uint32_t num[2];
#endif
	uint32_t sts;
	int i;
	size_t got, gotent;

	/* Nothing outstanding: stop polling until asked again. */
	if (sc->sc_rng_need < 1) {
		callout_stop(&sc->sc_rngto);
		return;
	}

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* FIFO underflow: hardware fault; give up
				 * permanently (no reschedule). */
				printf("%s: RNG underflow: disabling\n",
				    device_xname(sc->sc_dv));
				return;
			}
			/* FIFO has no data ready; try again later. */
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			got = 2 * sizeof(num[0]);
			/* Credit entropy at the conservative
			 * HIFN_RNG_BITSPER ratio, not 1 bit per bit. */
			gotent = (got * NBBY) / HIFN_RNG_BITSPER;

#ifdef __NetBSD__
			rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
			/*
			 * NOTE(review): sc_rng_need is size_t; if gotent
			 * exceeds the remaining need this subtraction
			 * wraps -- confirm callers keep it non-negative.
			 */
			sc->sc_rng_need -= gotent;
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
		int nwords = 0;

		/* Read just enough words to cover the outstanding need,
		 * capped by the size of the local buffer. */
		if (sc->sc_rng_need) {
			nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
			nwords = MIN(__arraycount(num), nwords);
		}

		if (nwords < 2) {
			nwords = 2;
		}

		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		for (i = 0 ; i < nwords * 8; i++)
		{
			/* volatile: force the 8x reads to really occur */
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}

		got = nwords * sizeof(num[0]);
		gotent = (got * NBBY) / HIFN_RNG_BITSPER;
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
		sc->sc_rng_need -= gotent;
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	/* Keep polling until the outstanding request is satisfied. */
	if (sc->sc_rng_need > 0) {
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif
}
756
/*
 * Callout/timeout entry point for RNG polling: take the lock and run
 * a harvest pass.
 *
 * NOTE(review): this uses mutex_spin_enter() while hifn_rng_get()
 * uses mutex_enter() on the same sc_mtx (initialized MUTEX_DEFAULT/
 * IPL_VM) -- confirm the mixed enter styles are intentional.
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	hifn_rng_locked(vsc);
	mutex_spin_exit(&sc->sc_mtx);
}
766
767 static void
768 hifn_puc_wait(struct hifn_softc *sc)
769 {
770 int i;
771
772 for (i = 5000; i > 0; i--) {
773 DELAY(1);
774 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
775 break;
776 }
777 if (!i)
778 printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
779 }
780
/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit: write PUCTRL with only DMAENA set
	 * (RESET bit clear), then poll until the unit reports done. */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
791
/*
 * Clear the low 16 bits of the chip's TRDY/retry timeout PCI config
 * register, leaving the upper bits untouched.  (Presumably zero means
 * "unlimited retries" -- see the Hifn datasheet to confirm.)
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
801
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * "full" selects a complete reset (DMA unit fully stopped) versus
 * the lighter reset used during attach.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are now stale; clear them. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
855
/*
 * Advance the crypto-unlock signature LFSR by "cnt" steps.
 *
 * Each step computes the even parity of the bits of "a" selected by
 * the tap mask 0x80080125, shifts "a" left one bit, and feeds the
 * parity back into bit 0.  Used by hifn_enable_crypto() to derive
 * the per-card unlock sequence.
 *
 * Fixed: the loop index was a signed "int" compared against the
 * unsigned "cnt" parameter (signed/unsigned mismatch, and broken for
 * cnt > INT_MAX); it is now u_int.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits, folded into bit 0 */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift left and insert the parity bit */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
877
/*
 * Per-card unlock keys consumed by hifn_enable_crypto().  Every key
 * known so far is all zeros; the table is still keyed on PCI
 * vendor/product so a card with a different key can be added later.
 */
static struct pci2id {
	u_short		pci_vendor;	/* PCI vendor ID */
	u_short		pci_prod;	/* PCI product ID */
	char		card_id[13];	/* 13-byte unlock key */
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
925
/*
 * Checks to see if crypto is already enabled.  If crypto isn't
 * enabled, the unlock sequence is written to enable it.  The check is
 * important, as enabling crypto twice will lock the board.  Returns a
 * string describing the enabled capability level, or NULL on failure.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find this card's unlock key in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save register state so it can be restored at "report". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Begin the unlock sequence: put the DMA unit in unlock mode. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/*
	 * Feed the 13-byte card key through the signature LFSR and
	 * write each intermediate signature to the unlock register.
	 */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock worked. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	/* Restore the saved register state. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
1028
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers: point the chip at the
	 * command/source/destination/result rings inside sc_dma */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable all four ring engines and
	 * acknowledge any pending status bits */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* select the interrupt sources we care about */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* RAM configuration differs between 795x and older chips. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1100
1101 /*
1102 * The maximum number of sessions supported by the card
1103 * is dependent on the amount of context ram, which
1104 * encryption algorithms are enabled, and how compression
1105 * is configured. This should be configured before this
1106 * routine is called.
1107 */
1108 static void
1109 hifn_sessions(struct hifn_softc *sc)
1110 {
1111 u_int32_t pucnfg;
1112 int ctxsize;
1113
1114 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1115
1116 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1117 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1118 ctxsize = 128;
1119 else
1120 ctxsize = 512;
1121 /*
1122 * 7955/7956 has internal context memory of 32K
1123 */
1124 if (sc->sc_flags & HIFN_IS_7956)
1125 sc->sc_maxses = 32768 / ctxsize;
1126 else
1127 sc->sc_maxses = 1 +
1128 ((sc->sc_ramsize - 32768) / ctxsize);
1129 }
1130 else
1131 sc->sc_maxses = sc->sc_ramsize / 16384;
1132
1133 if (sc->sc_maxses > 2048)
1134 sc->sc_maxses = 2048;
1135 }
1136
1137 /*
1138 * Determine ram type (sram or dram). Board should be just out of a reset
1139 * state when this is called.
1140 */
1141 static int
1142 hifn_ramtype(struct hifn_softc *sc)
1143 {
1144 u_int8_t data[8], dataexpect[8];
1145 int i;
1146
1147 for (i = 0; i < sizeof(data); i++)
1148 data[i] = dataexpect[i] = 0x55;
1149 if (hifn_writeramaddr(sc, 0, data))
1150 return (-1);
1151 if (hifn_readramaddr(sc, 0, data))
1152 return (-1);
1153 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1154 sc->sc_drammodel = 1;
1155 return (0);
1156 }
1157
1158 for (i = 0; i < sizeof(data); i++)
1159 data[i] = dataexpect[i] = 0xaa;
1160 if (hifn_writeramaddr(sc, 0, data))
1161 return (-1);
1162 if (hifn_readramaddr(sc, 0, data))
1163 return (-1);
1164 if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1165 sc->sc_drammodel = 1;
1166 return (0);
1167 }
1168
1169 return (0);
1170 }
1171
1172 #define HIFN_SRAM_MAX (32 << 20)
1173 #define HIFN_SRAM_STEP_SIZE 16384
1174 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1175
/*
 * Probe how much SRAM is fitted, in HIFN_SRAM_STEP_SIZE (16K) steps.
 * Each step is tagged with the raw bytes of its index (first 4 bytes)
 * plus a fixed pattern (last 4 bytes), writing from the top step down
 * so that address aliasing on smaller parts overwrites higher tags.
 * Reading back from the bottom up, sc_ramsize is bumped for each step
 * that reads back intact.  Always returns 0; result is in sc_ramsize.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* fill pattern; the first sizeof(i) bytes are overwritten below */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* tag every step, top down; write failures are detected on readback */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(data, &i, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* read back bottom up; stop growing sc_ramsize at first mismatch */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(dataexpect, &i, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1205
1206 /*
1207 * XXX For dram boards, one should really try all of the
1208 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1209 * is already set up correctly.
1210 */
1211 static int
1212 hifn_dramsize(struct hifn_softc *sc)
1213 {
1214 u_int32_t cnfg;
1215
1216 if (sc->sc_flags & HIFN_IS_7956) {
1217 /*
1218 * 7955/7956 have a fixed internal ram of only 32K.
1219 */
1220 sc->sc_ramsize = 32768;
1221 } else {
1222 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1223 HIFN_PUCNFG_DRAMMASK;
1224 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1225 }
1226 return (0);
1227 }
1228
/*
 * Reserve one descriptor slot in each of the four rings (command,
 * source, destination, result) and return the chosen indices through
 * the out-parameters.  When a ring's write index has reached the end,
 * the extra terminating descriptor is re-armed as a jump back to slot
 * 0 before the index wraps.  The cleanup index ("k") is dragged along
 * behind the write index ("i"), so the caller owns the slot outright.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring: wrap via the jump descriptor if at the end */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* source ring */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* destination ring */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* result ring */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1275
1276 static int
1277 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1278 {
1279 struct hifn_dma *dma = sc->sc_dma;
1280 struct hifn_base_command wc;
1281 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1282 int r, cmdi, resi, srci, dsti;
1283
1284 wc.masks = htole16(3 << 13);
1285 wc.session_num = htole16(addr >> 14);
1286 wc.total_source_count = htole16(8);
1287 wc.total_dest_count = htole16(addr & 0x3fff);
1288
1289 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1290
1291 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1292 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1293 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1294
1295 /* build write command */
1296 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1297 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1298 memcpy(&dma->test_src, data, sizeof(dma->test_src));
1299
1300 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1301 + offsetof(struct hifn_dma, test_src));
1302 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1303 + offsetof(struct hifn_dma, test_dst));
1304
1305 dma->cmdr[cmdi].l = htole32(16 | masks);
1306 dma->srcr[srci].l = htole32(8 | masks);
1307 dma->dstr[dsti].l = htole32(4 | masks);
1308 dma->resr[resi].l = htole32(4 | masks);
1309
1310 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1311 0, sc->sc_dmamap->dm_mapsize,
1312 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1313
1314 for (r = 10000; r >= 0; r--) {
1315 DELAY(10);
1316 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1317 0, sc->sc_dmamap->dm_mapsize,
1318 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1319 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1320 break;
1321 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1322 0, sc->sc_dmamap->dm_mapsize,
1323 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1324 }
1325 if (r == 0) {
1326 printf("%s: writeramaddr -- "
1327 "result[%d](addr %d) still valid\n",
1328 device_xname(sc->sc_dv), resi, addr);
1329 r = -1;
1330 return (-1);
1331 } else
1332 r = 0;
1333
1334 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1335 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1336 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1337
1338 return (r);
1339 }
1340
1341 static int
1342 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1343 {
1344 struct hifn_dma *dma = sc->sc_dma;
1345 struct hifn_base_command rc;
1346 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1347 int r, cmdi, srci, dsti, resi;
1348
1349 rc.masks = htole16(2 << 13);
1350 rc.session_num = htole16(addr >> 14);
1351 rc.total_source_count = htole16(addr & 0x3fff);
1352 rc.total_dest_count = htole16(8);
1353
1354 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1355
1356 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1357 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1358 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1359
1360 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1361 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1362
1363 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1364 offsetof(struct hifn_dma, test_src));
1365 dma->test_src = 0;
1366 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1367 offsetof(struct hifn_dma, test_dst));
1368 dma->test_dst = 0;
1369 dma->cmdr[cmdi].l = htole32(8 | masks);
1370 dma->srcr[srci].l = htole32(8 | masks);
1371 dma->dstr[dsti].l = htole32(8 | masks);
1372 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1373
1374 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1375 0, sc->sc_dmamap->dm_mapsize,
1376 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1377
1378 for (r = 10000; r >= 0; r--) {
1379 DELAY(10);
1380 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1381 0, sc->sc_dmamap->dm_mapsize,
1382 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1383 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1384 break;
1385 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1386 0, sc->sc_dmamap->dm_mapsize,
1387 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1388 }
1389 if (r == 0) {
1390 printf("%s: readramaddr -- "
1391 "result[%d](addr %d) still valid\n",
1392 device_xname(sc->sc_dv), resi, addr);
1393 r = -1;
1394 } else {
1395 r = 0;
1396 memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1397 }
1398
1399 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1400 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1401 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1402
1403 return (r);
1404 }
1405
1406 /*
1407 * Initialize the descriptor rings.
1408 */
1409 static void
1410 hifn_init_dma(struct hifn_softc *sc)
1411 {
1412 struct hifn_dma *dma = sc->sc_dma;
1413 int i;
1414
1415 hifn_set_retry(sc);
1416
1417 /* initialize static pointer values */
1418 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1419 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1420 offsetof(struct hifn_dma, command_bufs[i][0]));
1421 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1422 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1423 offsetof(struct hifn_dma, result_bufs[i][0]));
1424
1425 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1426 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1427 offsetof(struct hifn_dma, cmdr[0]));
1428 dma->srcr[HIFN_D_SRC_RSIZE].p =
1429 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1430 offsetof(struct hifn_dma, srcr[0]));
1431 dma->dstr[HIFN_D_DST_RSIZE].p =
1432 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1433 offsetof(struct hifn_dma, dstr[0]));
1434 dma->resr[HIFN_D_RES_RSIZE].p =
1435 htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1436 offsetof(struct hifn_dma, resr[0]));
1437
1438 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1439 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1440 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1441 }
1442
1443 /*
1444 * Writes out the raw command buffer space. Returns the
1445 * command buffer size.
1446 */
/*
 * Writes out the raw command buffer space.  Returns the command
 * buffer size.
 *
 * The buffer layout (per the base command's mask bits) is: base
 * command, then optional comp/mac/crypt sub-commands in that order,
 * then optional MAC key, cipher key, and IV.  A trailing 8 zero bytes
 * are emitted when no sub-command is present at all.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: low 16 bits of the source/dest lengths go in the
	 * count fields, the high bits are folded into session_num.
	 * For unaligned requests the slop bytes are rounded up to a
	 * full 32-bit word in the destination length.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* compression sub-command (length split the same way) */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	/* MAC sub-command */
	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	/* crypt sub-command */
	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* new MAC key, if requested */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* new cipher key, if requested; layout depends on the algorithm */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4 wants 256 bytes of key material (the key
			 * repeated to fill) plus 4 bytes of zero pad */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* new IV, if requested; AES uses a 16-byte IV, others 8 */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/* a bare base command gets 8 bytes of zero padding */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1576
1577 static int
1578 hifn_dmamap_aligned(bus_dmamap_t map)
1579 {
1580 int i;
1581
1582 for (i = 0; i < map->dm_nsegs; i++) {
1583 if (map->dm_segs[i].ds_addr & 3)
1584 return (0);
1585 if ((i != (map->dm_nsegs - 1)) &&
1586 (map->dm_segs[i].ds_len & 3))
1587 return (0);
1588 }
1589 return (1);
1590 }
1591
/*
 * Fill the destination ring with descriptors for cmd->dst_map.
 * When the map size is not 32-bit aligned (cmd->sloplen != 0) the
 * final partial word is redirected into the shared slop[] area of
 * struct hifn_dma instead of the caller's buffer; hifn_callback()
 * presumably copies it back out - confirm against the interrupt path.
 * Returns the new ring write index; updates dma->dsti and dma->dstu.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* all segments except the last: full descriptors, no HIFN_D_LAST */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* wrap: re-arm the jump descriptor at the end of the ring */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment is written directly */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* unaligned: final word goes to the slop area instead */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* the aligned prefix of the last segment, if any */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* the final (HIFN_D_LAST) descriptor computed above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1663
/*
 * Fill the source ring with one descriptor per segment of
 * cmd->src_map, marking the final segment HIFN_D_LAST.  Returns the
 * new ring write index; updates dma->srci and dma->srcu.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* wrap: re-arm the jump descriptor at the end of the ring */
		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1695
/*
 * Submit one crypto request to the device: map the source (mbuf or
 * uio), arrange a destination (reusing the source map when aligned,
 * otherwise building a fresh mbuf chain), serialize the command, and
 * queue descriptors on all four rings.  Returns 0 on success or an
 * errno; on failure all maps/mbufs allocated here are released via
 * the goto-cleanup chain at the bottom.  Caller holds sc_mtx -
 * TODO confirm against callers outside this view.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* load the source buffer according to its container type */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* aligned source: operate in place, dst shares the map */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* can't bounce an iovec; reject unaligned uio */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* unaligned mbuf: build a fresh chain to hold
			 * the output, sized to the source mapping */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* grow the chain until it covers totlen */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* separate destination: create and load its own map */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	/* sync the payload buffers for device access */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	/* take a command slot (wrapping via the jump descriptor) */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* queue source descriptors and kick the source engine */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* remember which command owns this result slot for the interrupt */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* arm the watchdog countdown consumed by hifn_tick() */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1962
/*
 * Periodic (1 Hz) housekeeping: once the activity countdown set by
 * hifn_crypto() has expired, disable any DMA engine whose ring has
 * drained, clearing the matching LED.  Reschedules itself.
 */
static void
hifn_tick(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		/* accumulate engine-disable bits for every idle ring */
		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED0);
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED1);
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED2);
		}
		/* one combined CSR write, only if something changed */
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	}
	else
		sc->sc_active--;
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	mutex_spin_exit(&sc->sc_mtx);
}
2004
/*
 * Interrupt handler: acknowledge DMA status, handle error conditions
 * (overrun, illegal access, abort), then reap completed descriptors
 * from the result, source, and command rings.  Completed results are
 * dispatched to hifn_callback().  Returns 1 if the interrupt was
 * ours, 0 otherwise.
 *
 * Ring-walk convention: index HIFN_D_*_RSIZE is the jump descriptor
 * at the end of each ring; it is stepped over without touching the
 * usage count.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       device_xname(sc->sc_dv),
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* acknowledge exactly the conditions we are handling */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	/* any engine abort forces a full reset of the device */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor the device still owns */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* jump descriptor: wrap without decrementing the count */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2140
2141 /*
2142 * Allocate a new 'session' and return an encoded session id. 'sidp'
2143 * contains our registration id, and should contain an encoded session
2144 * id on successful allocation.
2145 */
2146 static int
2147 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2148 {
2149 struct cryptoini *c;
2150 struct hifn_softc *sc = arg;
2151 int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2152
2153 KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
2154 if (sidp == NULL || cri == NULL || sc == NULL)
2155 return retval;
2156
2157 mutex_spin_enter(&sc->sc_mtx);
2158
2159 for (i = 0; i < sc->sc_maxses; i++)
2160 if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2161 break;
2162 if (i == sc->sc_maxses) {
2163 retval = ENOMEM;
2164 goto out;
2165 }
2166
2167 for (c = cri; c != NULL; c = c->cri_next) {
2168 switch (c->cri_alg) {
2169 case CRYPTO_MD5:
2170 case CRYPTO_SHA1:
2171 case CRYPTO_MD5_HMAC_96:
2172 case CRYPTO_SHA1_HMAC_96:
2173 if (mac) {
2174 goto out;
2175 }
2176 mac = 1;
2177 break;
2178 case CRYPTO_DES_CBC:
2179 case CRYPTO_3DES_CBC:
2180 case CRYPTO_AES_CBC:
2181 /* Note that this is an initialization
2182 vector, not a cipher key; any function
2183 giving sufficient Hamming distance
2184 between outputs is fine. Use of RC4
2185 to generate IVs has been FIPS140-2
2186 certified by several labs. */
2187 #ifdef __NetBSD__
2188 cprng_fast(sc->sc_sessions[i].hs_iv,
2189 c->cri_alg == CRYPTO_AES_CBC ?
2190 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2191 #else /* FreeBSD and OpenBSD have get_random_bytes */
2192 /* XXX this may read fewer, does it matter? */
2193 get_random_bytes(sc->sc_sessions[i].hs_iv,
2194 c->cri_alg == CRYPTO_AES_CBC ?
2195 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2196 #endif
2197 /*FALLTHROUGH*/
2198 case CRYPTO_ARC4:
2199 if (cry) {
2200 goto out;
2201 }
2202 cry = 1;
2203 break;
2204 #ifdef HAVE_CRYPTO_LZS
2205 case CRYPTO_LZS_COMP:
2206 if (comp) {
2207 goto out;
2208 }
2209 comp = 1;
2210 break;
2211 #endif
2212 default:
2213 goto out;
2214 }
2215 }
2216 if (mac == 0 && cry == 0 && comp == 0) {
2217 goto out;
2218 }
2219
2220 /*
2221 * XXX only want to support compression without chaining to
2222 * MAC/crypt engine right now
2223 */
2224 if ((comp && mac) || (comp && cry)) {
2225 goto out;
2226 }
2227
2228 *sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2229 sc->sc_sessions[i].hs_state = HS_STATE_USED;
2230
2231 retval = 0;
2232 out:
2233 mutex_spin_exit(&sc->sc_mtx);
2234 return retval;
2235 }
2236
2237 /*
2238 * Deallocate a session.
2239 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2240 * XXX to blow away any keys already stored there.
2241 */
2242 static int
2243 hifn_freesession(void *arg, u_int64_t tid)
2244 {
2245 struct hifn_softc *sc = arg;
2246 int session;
2247 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2248
2249 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2250 if (sc == NULL)
2251 return (EINVAL);
2252
2253 mutex_spin_enter(&sc->sc_mtx);
2254 session = HIFN_SESSION(sid);
2255 if (session >= sc->sc_maxses) {
2256 mutex_spin_exit(&sc->sc_mtx);
2257 return (EINVAL);
2258 }
2259
2260 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2261 mutex_spin_exit(&sc->sc_mtx);
2262 return (0);
2263 }
2264
/*
 * opencrypto "process" entry point: validate the request, build a
 * hifn_command describing the crypt/MAC work and hand it to the
 * hardware via hifn_crypto().
 *
 * Returns 0 when the request has been queued or completed (possibly
 * with crp->crp_etype set and crypto_done() called), or ERESTART when
 * ring resources are exhausted and the caller should requeue.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}

	mutex_spin_enter(&sc->sc_mtx);
	session = HIFN_SESSION(crp->crp_sid);

	/*
	 * NOTE(review): sc was already dereferenced above, so the
	 * sc == NULL test here can never fire; kept as-is.
	 */
	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	/* M_ZERO: all cmd fields (maps, masks, ...) start out clear. */
	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* In-place operation: source and destination are the same buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the (at most two) descriptors into a MAC part and a
	 * cipher part.  With two descriptors the order must match the
	 * hardware pipeline: MAC-then-decrypt or encrypt-then-MAC.
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
			   crd1->crd_alg == CRYPTO_3DES_CBC ||
			   crd1->crd_alg == CRYPTO_AES_CBC ||
			   crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
#ifdef	HAVE_CRYPTO_LZS
		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
			/*
			 * NOTE(review): this path returns without
			 * releasing sc_mtx -- verify hifn_compression's
			 * locking expectations.
			 */
			return (hifn_compression(sc, crp, cmd));
#endif
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     crd1->crd_alg == CRYPTO_MD5 ||
		     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			/* MAC first, then decrypt. */
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
			    crd1->crd_alg == CRYPTO_ARC4 ||
			    crd1->crd_alg == CRYPTO_3DES_CBC ||
			    crd1->crd_alg == CRYPTO_AES_CBC) &&
			   (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
			    crd2->crd_alg == CRYPTO_MD5 ||
			    crd2->crd_alg == CRYPTO_SHA1) &&
			   (crd1->crd_flags & CRD_F_ENCRYPT)) {
			/* Encrypt first, then MAC. */
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/*
			 * RC4 state must be reloaded when the direction
			 * changes relative to the previous operation on
			 * this session.
			 */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/*
				 * Encrypting: use the explicit IV if
				 * supplied, else the per-session IV, and
				 * write it into the buffer unless the
				 * caller already placed it there.
				 */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->srcu.src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->srcu.src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypting: fetch the IV from the buffer
				 * unless supplied explicitly. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->srcu.src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->srcu.src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;	/* bits -> bytes */

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* Session not yet keyed on-chip: upload the key. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC_96:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* Upload the (zero-padded) HMAC key on first use. */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		/* Remember direction for the next RC4 op on this session. */
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		mutex_spin_exit(&sc->sc_mtx);
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part. Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef	HIFN_DEBUG
		if (hifn_debug)
			printf("%s: requeue request\n", device_xname(sc->sc_dv));
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mutex_spin_exit(&sc->sc_mtx);
		return (err);
	}

errout:
	/* Complete the request locally with an error. */
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	mutex_spin_exit(&sc->sc_mtx);
	crypto_done(crp);
	return (0);
}
2537
/*
 * Abort all requests on the result ring after a hardware fault:
 * salvage completed entries via hifn_callback(), fail the rest with
 * ENOMEM, then reset and reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk every outstanding result-ring slot. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/* Still owned by the chip: tear the mapping down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* Separate output chain: free the input, hand the
			 * output back to the caller. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2611
/*
 * Completion handler for a crypt/MAC command: sync and unload the DMA
 * maps, fix up the destination mbuf chain, copy back slop bytes, save
 * the next-IV for CBC chaining, copy out the MAC, and finish the
 * cryptop.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * Output went to a freshly-allocated chain: trim
			 * it to the source length and give it to the
			 * caller, freeing the input chain.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy back the tail bytes that were DMA'd via the
		 * bounce slop area. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/*
	 * Retire finished destination descriptors.  Slot
	 * HIFN_D_DST_RSIZE is the jump descriptor, so the wrap does not
	 * consume a "u" count.
	 */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * After an encrypt (not decrypt), save the last cipher block as
	 * the session IV for CBC chaining of the next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/*
		 * The MAC digest sits in the result buffer after the
		 * base (and optional compression) result structures;
		 * the digest bytes follow the hifn_mac_result header --
		 * presumably; confirm against hifn7751reg.h.
		 */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;	/* truncated HMAC */
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2745
2746 #ifdef HAVE_CRYPTO_LZS
2747
2748 static int
2749 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2750 struct hifn_command *cmd)
2751 {
2752 struct cryptodesc *crd = crp->crp_desc;
2753 int s, err = 0;
2754
2755 cmd->compcrd = crd;
2756 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2757
2758 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2759 /*
2760 * XXX can only handle mbufs right now since we can
2761 * XXX dynamically resize them.
2762 */
2763 err = EINVAL;
2764 return (ENOMEM);
2765 }
2766
2767 if ((crd->crd_flags & CRD_F_COMP) == 0)
2768 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2769 if (crd->crd_alg == CRYPTO_LZS_COMP)
2770 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2771 HIFN_COMP_CMD_CLEARHIST;
2772
2773 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2774 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2775 err = ENOMEM;
2776 goto fail;
2777 }
2778
2779 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2780 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2781 err = ENOMEM;
2782 goto fail;
2783 }
2784
2785 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2786 int len;
2787
2788 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2789 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2790 err = ENOMEM;
2791 goto fail;
2792 }
2793
2794 len = cmd->src_map->dm_mapsize / MCLBYTES;
2795 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2796 len++;
2797 len *= MCLBYTES;
2798
2799 if ((crd->crd_flags & CRD_F_COMP) == 0)
2800 len *= 4;
2801
2802 if (len > HIFN_MAX_DMALEN)
2803 len = HIFN_MAX_DMALEN;
2804
2805 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2806 if (cmd->dstu.dst_m == NULL) {
2807 err = ENOMEM;
2808 goto fail;
2809 }
2810
2811 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2812 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2813 err = ENOMEM;
2814 goto fail;
2815 }
2816 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2817 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2818 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2819 err = ENOMEM;
2820 goto fail;
2821 }
2822 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2823 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2824 err = ENOMEM;
2825 goto fail;
2826 }
2827 }
2828
2829 if (cmd->src_map == cmd->dst_map)
2830 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2831 0, cmd->src_map->dm_mapsize,
2832 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2833 else {
2834 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2835 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2836 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2837 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2838 }
2839
2840 cmd->crp = crp;
2841 /*
2842 * Always use session 0. The modes of compression we use are
2843 * stateless and there is always at least one compression
2844 * context, zero.
2845 */
2846 cmd->session_num = 0;
2847 cmd->softc = sc;
2848
2849 err = hifn_compress_enter(sc, cmd);
2850
2851 if (err != 0)
2852 goto fail;
2853 return (0);
2854
2855 fail:
2856 if (cmd->dst_map != NULL) {
2857 if (cmd->dst_map->dm_nsegs > 0)
2858 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2859 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2860 }
2861 if (cmd->src_map != NULL) {
2862 if (cmd->src_map->dm_nsegs > 0)
2863 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2864 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2865 }
2866 free(cmd, M_DEVBUF);
2867 if (err == EINVAL)
2868 hifnstats.hst_invalid++;
2869 else
2870 hifnstats.hst_nomem++;
2871 crp->crp_etype = err;
2872 crypto_done(crp);
2873 return (0);
2874 }
2875
2876 static int
2877 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2878 {
2879 struct hifn_dma *dma = sc->sc_dma;
2880 int cmdi, resi;
2881 u_int32_t cmdlen;
2882
2883 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2884 (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2885 return (ENOMEM);
2886
2887 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2888 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2889 return (ENOMEM);
2890
2891 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2892 dma->cmdi = 0;
2893 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2894 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2895 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2896 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2897 }
2898 cmdi = dma->cmdi++;
2899 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2900 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2901
2902 /* .p for command/result already set */
2903 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2904 HIFN_D_MASKDONEIRQ);
2905 HIFN_CMDR_SYNC(sc, cmdi,
2906 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2907 dma->cmdu++;
2908 if (sc->sc_c_busy == 0) {
2909 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2910 sc->sc_c_busy = 1;
2911 SET_LED(sc, HIFN_MIPSRST_LED0);
2912 }
2913
2914 /*
2915 * We don't worry about missing an interrupt (which a "command wait"
2916 * interrupt salvages us from), unless there is more than one command
2917 * in the queue.
2918 */
2919 if (dma->cmdu > 1) {
2920 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2921 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2922 }
2923
2924 hifnstats.hst_ipackets++;
2925 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2926
2927 hifn_dmamap_load_src(sc, cmd);
2928 if (sc->sc_s_busy == 0) {
2929 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2930 sc->sc_s_busy = 1;
2931 SET_LED(sc, HIFN_MIPSRST_LED1);
2932 }
2933
2934 /*
2935 * Unlike other descriptors, we don't mask done interrupt from
2936 * result descriptor.
2937 */
2938 if (dma->resi == HIFN_D_RES_RSIZE) {
2939 dma->resi = 0;
2940 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2941 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2942 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2944 }
2945 resi = dma->resi++;
2946 dma->hifn_commands[resi] = cmd;
2947 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2948 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2949 HIFN_D_VALID | HIFN_D_LAST);
2950 HIFN_RESR_SYNC(sc, resi,
2951 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2952 dma->resu++;
2953 if (sc->sc_r_busy == 0) {
2954 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2955 sc->sc_r_busy = 1;
2956 SET_LED(sc, HIFN_MIPSRST_LED2);
2957 }
2958
2959 if (cmd->sloplen)
2960 cmd->slopidx = resi;
2961
2962 hifn_dmamap_load_dst(sc, cmd);
2963
2964 if (sc->sc_d_busy == 0) {
2965 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2966 sc->sc_d_busy = 1;
2967 }
2968 sc->sc_active = 5;
2969 cmd->cmd_callback = hifn_callback_comp;
2970 return (0);
2971 }
2972
2973 static void
2974 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2975 u_int8_t *resbuf)
2976 {
2977 struct hifn_base_result baseres;
2978 struct cryptop *crp = cmd->crp;
2979 struct hifn_dma *dma = sc->sc_dma;
2980 struct mbuf *m;
2981 int err = 0, i, u;
2982 u_int32_t olen;
2983 bus_size_t dstsize;
2984
2985 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2986 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2987 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2988 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2989
2990 dstsize = cmd->dst_map->dm_mapsize;
2991 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2992
2993 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2994
2995 i = dma->dstk; u = dma->dstu;
2996 while (u != 0) {
2997 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2998 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2999 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3000 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
3001 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
3002 offsetof(struct hifn_dma, dstr[i]),
3003 sizeof(struct hifn_desc),
3004 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3005 break;
3006 }
3007 if (++i == (HIFN_D_DST_RSIZE + 1))
3008 i = 0;
3009 else
3010 u--;
3011 }
3012 dma->dstk = i; dma->dstu = u;
3013
3014 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
3015 bus_size_t xlen;
3016
3017 xlen = dstsize;
3018
3019 m_freem(cmd->dstu.dst_m);
3020
3021 if (xlen == HIFN_MAX_DMALEN) {
3022 /* We've done all we can. */
3023 err = E2BIG;
3024 goto out;
3025 }
3026
3027 xlen += MCLBYTES;
3028
3029 if (xlen > HIFN_MAX_DMALEN)
3030 xlen = HIFN_MAX_DMALEN;
3031
3032 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
3033 cmd->srcu.src_m);
3034 if (cmd->dstu.dst_m == NULL) {
3035 err = ENOMEM;
3036 goto out;
3037 }
3038 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
3039 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
3040 err = ENOMEM;
3041 goto out;
3042 }
3043
3044 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3045 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3046 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3047 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
3048
3049 err = hifn_compress_enter(sc, cmd);
3050 if (err != 0)
3051 goto out;
3052 return;
3053 }
3054
3055 olen = dstsize - (letoh16(baseres.dst_cnt) |
3056 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
3057 HIFN_BASE_RES_DSTLEN_S) << 16));
3058
3059 crp->crp_olen = olen - cmd->compcrd->crd_skip;
3060
3061 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3062 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3063 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3064
3065 m = cmd->dstu.dst_m;
3066 if (m->m_flags & M_PKTHDR)
3067 m->m_pkthdr.len = olen;
3068 crp->crp_buf = (void *)m;
3069 for (; m != NULL; m = m->m_next) {
3070 if (olen >= m->m_len)
3071 olen -= m->m_len;
3072 else {
3073 m->m_len = olen;
3074 olen = 0;
3075 }
3076 }
3077
3078 m_freem(cmd->srcu.src_m);
3079 free(cmd, M_DEVBUF);
3080 crp->crp_etype = 0;
3081 crypto_done(crp);
3082 return;
3083
3084 out:
3085 if (cmd->dst_map != NULL) {
3086 if (cmd->src_map->dm_nsegs != 0)
3087 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3088 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3089 }
3090 if (cmd->src_map != NULL) {
3091 if (cmd->src_map->dm_nsegs != 0)
3092 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3093 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3094 }
3095 if (cmd->dstu.dst_m != NULL)
3096 m_freem(cmd->dstu.dst_m);
3097 free(cmd, M_DEVBUF);
3098 crp->crp_etype = err;
3099 crypto_done(crp);
3100 }
3101
3102 static struct mbuf *
3103 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3104 {
3105 int len;
3106 struct mbuf *m, *m0, *mlast;
3107
3108 if (mtemplate->m_flags & M_PKTHDR) {
3109 len = MHLEN;
3110 MGETHDR(m0, M_DONTWAIT, MT_DATA);
3111 } else {
3112 len = MLEN;
3113 MGET(m0, M_DONTWAIT, MT_DATA);
3114 }
3115 if (m0 == NULL)
3116 return (NULL);
3117 if (len == MHLEN)
3118 M_DUP_PKTHDR(m0, mtemplate);
3119 MCLGET(m0, M_DONTWAIT);
3120 if (!(m0->m_flags & M_EXT))
3121 m_freem(m0);
3122 len = MCLBYTES;
3123
3124 totlen -= len;
3125 m0->m_pkthdr.len = m0->m_len = len;
3126 mlast = m0;
3127
3128 while (totlen > 0) {
3129 MGET(m, M_DONTWAIT, MT_DATA);
3130 if (m == NULL) {
3131 m_freem(m0);
3132 return (NULL);
3133 }
3134 MCLGET(m, M_DONTWAIT);
3135 if (!(m->m_flags & M_EXT)) {
3136 m_freem(m0);
3137 return (NULL);
3138 }
3139 len = MCLBYTES;
3140 m->m_len = len;
3141 if (m0->m_flags & M_PKTHDR)
3142 m0->m_pkthdr.len += len;
3143 totlen -= len;
3144
3145 mlast->m_next = m;
3146 mlast = m;
3147 }
3148
3149 return (m0);
3150 }
3151 #endif /* HAVE_CRYPTO_LZS */
3152
3153 static void
3154 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3155 {
3156 /*
3157 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3158 * and Group 1 registers; avoid conditions that could create
3159 * burst writes by doing a read in between the writes.
3160 */
3161 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3162 if (sc->sc_waw_lastgroup == reggrp &&
3163 sc->sc_waw_lastreg == reg - 4) {
3164 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3165 }
3166 sc->sc_waw_lastgroup = reggrp;
3167 sc->sc_waw_lastreg = reg;
3168 }
3169 if (reggrp == 0)
3170 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3171 else
3172 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3173
3174 }
3175
3176 static u_int32_t
3177 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3178 {
3179 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3180 sc->sc_waw_lastgroup = -1;
3181 sc->sc_waw_lastreg = 1;
3182 }
3183 if (reggrp == 0)
3184 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3185 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3186 }
3187