/*	$NetBSD: hifn7751.c,v 1.73 2020/05/17 15:42:10 riastradh Exp $	*/
/*	$OpenBSD: hifn7751.c,v 1.179 2020/01/11 21:34:03 cheloha Exp $	*/

/*
 * Invertex AEON / Hifn 7751 driver
 * Copyright (c) 1999 Invertex Inc. All rights reserved.
 * Copyright (c) 1999 Theo de Raadt
 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 *			http://www.netsec.net
 * Copyright (c) 2003 Hifn Inc.
 *
 * This driver is based on a previous driver by Invertex, for which they
 * requested: Please send any comments, feedback, bug-fixes, or feature
 * requests to software (at) invertex.com.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

/*
 * Driver for various Hifn encryption processors.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.73 2020/05/17 15:42:10 riastradh Exp $");

#include <sys/param.h>
#include <sys/cprng.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/rndsource.h>
#include <sys/sha1.h>
#include <sys/systm.h>

#include <opencrypto/cryptodev.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/hifn7751reg.h>
#include <dev/pci/hifn7751var.h>

#undef HIFN_DEBUG

#ifdef HIFN_DEBUG
extern int hifn_debug;		/* patchable */
int hifn_debug = 1;
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static int hifn_match(device_t, cfdata_t, void *);
static void hifn_attach(device_t, device_t, void *);
static int hifn_detach(device_t, int);

CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
    hifn_match, hifn_attach, hifn_detach, NULL);

static void	hifn_reset_board(struct hifn_softc *, int);
static void	hifn_reset_puc(struct hifn_softc *);
static void	hifn_puc_wait(struct hifn_softc *);
static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
static void	hifn_set_retry(struct hifn_softc *);
static void	hifn_init_dma(struct hifn_softc *);
static void	hifn_init_pci_registers(struct hifn_softc *);
static int	hifn_sramsize(struct hifn_softc *);
static int	hifn_dramsize(struct hifn_softc *);
static int	hifn_ramtype(struct hifn_softc *);
static void	hifn_sessions(struct hifn_softc *);
static int	hifn_intr(void *);
static u_int	hifn_write_command(struct hifn_command *, uint8_t *);
static uint32_t	hifn_next_signature(uint32_t a, u_int cnt);
static int	hifn_newsession(void*, uint32_t *, struct cryptoini *);
static int	hifn_freesession(void*, uint64_t);
static int	hifn_process(void*, struct cryptop *, int);
static void	hifn_callback(struct hifn_softc *, struct hifn_command *,
		    uint8_t *);
static int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
		    struct cryptop*, int);
static int	hifn_readramaddr(struct hifn_softc *, int, uint8_t *);
static int	hifn_writeramaddr(struct hifn_softc *, int, uint8_t *);
static int	hifn_dmamap_aligned(bus_dmamap_t);
static int	hifn_dmamap_load_src(struct hifn_softc *,
		    struct hifn_command *);
static int	hifn_dmamap_load_dst(struct hifn_softc *,
		    struct hifn_command *);
static int	hifn_init_pubrng(struct hifn_softc *);
static void	hifn_rng(struct hifn_softc *);
static void	hifn_rng_intr(void *);
static void	hifn_tick(void *);
static void	hifn_abort(struct hifn_softc *);
static void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
		    int *);
static void	hifn_write_4(struct hifn_softc *, int, bus_size_t, uint32_t);
static uint32_t	hifn_read_4(struct hifn_softc *, int, bus_size_t);
#ifdef CRYPTO_LZS_COMP
static int	hifn_compression(struct hifn_softc *, struct cryptop *,
		    struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
		    uint8_t *);
#endif	/* CRYPTO_LZS_COMP */

struct hifn_stats hifnstats;

static int
hifn_cmd_ctor(void *vsc, void *vcmd, int pflags)
{
	struct hifn_softc *sc = vsc;
	struct hifn_command *cmd = vcmd;
	int bflags = pflags & PR_WAITOK ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT;
	int error;

	memset(cmd, 0, sizeof(*cmd));

	error = bus_dmamap_create(sc->sc_dmat,
	    HIFN_MAX_DMALEN, MAX_SCATTER, HIFN_MAX_SEGLEN,
	    0, bflags, &cmd->src_map);
	if (error)
		goto fail0;

	error = bus_dmamap_create(sc->sc_dmat,
	    HIFN_MAX_SEGLEN*MAX_SCATTER, MAX_SCATTER, HIFN_MAX_SEGLEN,
	    0, bflags, &cmd->dst_map_alloc);
	if (error)
		goto fail1;

	/* Success! */
	cmd->dst_map = NULL;
	return 0;

fail2: __unused
	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map_alloc);
fail1:	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
fail0:	return error;
}

static void
hifn_cmd_dtor(void *vsc, void *vcmd)
{
	struct hifn_softc *sc = vsc;
	struct hifn_command *cmd = vcmd;

	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map_alloc);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
}

static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	{ 0,			0,
	  0,
	  NULL
	}
};
228
229 static const struct hifn_product *
230 hifn_lookup(const struct pci_attach_args *pa)
231 {
232 const struct hifn_product *hp;
233
234 for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
235 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
236 PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
237 return (hp);
238 }
239 return (NULL);
240 }
241
242 static int
243 hifn_match(device_t parent, cfdata_t match, void *aux)
244 {
245 struct pci_attach_args *pa = aux;
246
247 if (hifn_lookup(pa) != NULL)
248 return 1;
249
250 return 0;
251 }
252
253 static void
254 hifn_attach(device_t parent, device_t self, void *aux)
255 {
256 struct hifn_softc *sc = device_private(self);
257 struct pci_attach_args *pa = aux;
258 const struct hifn_product *hp;
259 pci_chipset_tag_t pc = pa->pa_pc;
260 pci_intr_handle_t ih;
261 const char *intrstr = NULL;
262 const char *hifncap;
263 char rbase;
264 uint32_t cmd;
265 uint16_t ena;
266 bus_dma_segment_t seg;
267 bus_dmamap_t dmamap;
268 int rseg;
269 void *kva;
270 char intrbuf[PCI_INTRSTR_LEN];
271
272 hp = hifn_lookup(pa);
273 if (hp == NULL) {
274 printf("\n");
275 panic("hifn_attach: impossible");
276 }
277
278 pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);
279
280 sc->sc_dv = self;
281 sc->sc_pci_pc = pa->pa_pc;
282 sc->sc_pci_tag = pa->pa_tag;
283
284 sc->sc_flags = hp->hifn_flags;
285
286 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
287 cmd |= PCI_COMMAND_MASTER_ENABLE;
288 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
289
290 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
291 &sc->sc_st0, &sc->sc_sh0, NULL, &sc->sc_iosz0)) {
292 aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
293 return;
294 }
295
296 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
297 &sc->sc_st1, &sc->sc_sh1, NULL, &sc->sc_iosz1)) {
298 aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
299 goto fail_io0;
300 }
301
302 hifn_set_retry(sc);
303
304 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
305 sc->sc_waw_lastgroup = -1;
306 sc->sc_waw_lastreg = 1;
307 }
308
309 sc->sc_dmat = pa->pa_dmat;
310 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
311 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
312 aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
313 goto fail_io1;
314 }
315 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
316 BUS_DMA_NOWAIT)) {
317 aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
318 (u_long)sizeof(*sc->sc_dma));
319 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
320 goto fail_io1;
321 }
322 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
323 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
324 aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
325 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
326 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
327 goto fail_io1;
328 }
329 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
330 NULL, BUS_DMA_NOWAIT)) {
331 aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
332 bus_dmamap_destroy(sc->sc_dmat, dmamap);
333 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
334 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
335 goto fail_io1;
336 }
337 sc->sc_dmamap = dmamap;
338 sc->sc_dma = (struct hifn_dma *)kva;
339 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));
340
341 hifn_reset_board(sc, 0);
342
343 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
344 aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
345 goto fail_mem;
346 }
347 hifn_reset_puc(sc);
348
349 hifn_init_dma(sc);
350 hifn_init_pci_registers(sc);
351
352 /* XXX can't dynamically determine ram type for 795x; force dram */
353 if (sc->sc_flags & HIFN_IS_7956)
354 sc->sc_drammodel = 1;
355 else if (hifn_ramtype(sc))
356 goto fail_mem;
357
358 if (sc->sc_drammodel == 0)
359 hifn_sramsize(sc);
360 else
361 hifn_dramsize(sc);
362
363 /*
364 * Workaround for NetSec 7751 rev A: half ram size because two
365 * of the address lines were left floating
366 */
367 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
368 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
369 PCI_REVISION(pa->pa_class) == 0x61)
370 sc->sc_ramsize >>= 1;
371
372 if (pci_intr_map(pa, &ih)) {
373 aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
374 goto fail_mem;
375 }
376 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
377 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, hifn_intr, sc,
378 device_xname(self));
379 if (sc->sc_ih == NULL) {
380 aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
381 if (intrstr != NULL)
382 aprint_error(" at %s", intrstr);
383 aprint_error("\n");
384 goto fail_mem;
385 }
386
387 hifn_sessions(sc);
388
389 rseg = sc->sc_ramsize / 1024;
390 rbase = 'K';
391 if (sc->sc_ramsize >= (1024 * 1024)) {
392 rbase = 'M';
393 rseg /= 1024;
394 }
395 aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
396 hifncap, rseg, rbase,
397 sc->sc_drammodel ? 'D' : 'S', intrstr);
398
399 sc->sc_cid = crypto_get_driverid(0);
400 if (sc->sc_cid < 0) {
401 aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
402 goto fail_intr;
403 }
404
405 sc->sc_cmd_cache = pool_cache_init(sizeof(struct hifn_command),
406 0, 0, 0, "hifncmd", NULL, IPL_VM,
407 &hifn_cmd_ctor, &hifn_cmd_dtor, sc);
408 pool_cache_setlowat(sc->sc_cmd_cache, sc->sc_maxses);
409
410 WRITE_REG_0(sc, HIFN_0_PUCNFG,
411 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
412 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
413
414 switch (ena) {
415 case HIFN_PUSTAT_ENA_2:
416 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
417 hifn_newsession, hifn_freesession, hifn_process, sc);
418 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
419 hifn_newsession, hifn_freesession, hifn_process, sc);
420 if (sc->sc_flags & HIFN_HAS_AES)
421 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
422 hifn_newsession, hifn_freesession,
423 hifn_process, sc);
424 /*FALLTHROUGH*/
425 case HIFN_PUSTAT_ENA_1:
426 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
427 hifn_newsession, hifn_freesession, hifn_process, sc);
428 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
429 hifn_newsession, hifn_freesession, hifn_process, sc);
430 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
431 hifn_newsession, hifn_freesession, hifn_process, sc);
432 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
433 hifn_newsession, hifn_freesession, hifn_process, sc);
434 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
435 hifn_newsession, hifn_freesession, hifn_process, sc);
436 break;
437 }
438
439 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
440 sc->sc_dmamap->dm_mapsize,
441 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
442
443 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
444
445 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
446 hifn_init_pubrng(sc);
447 }
448
449 callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
450 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
451 return;
452
453 fail_intr:
454 pci_intr_disestablish(pc, sc->sc_ih);
455 fail_mem:
456 bus_dmamap_unload(sc->sc_dmat, dmamap);
457 bus_dmamap_destroy(sc->sc_dmat, dmamap);
458 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
459 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
460
461 /* Turn off DMA polling */
462 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
463 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
464
465 fail_io1:
466 bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
467 fail_io0:
468 bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);
469 }

static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	hifn_abort(sc);
	mutex_exit(&sc->sc_mtx);

	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	rnd_detach_source(&sc->sc_rnd_source);

	callout_halt(&sc->sc_tickto, NULL);
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);

	pool_cache_destroy(sc->sc_cmd_cache);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
hifn_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	default:
		return ENOTTY;
	}
}

static void
hifn_rng_get(size_t bytes, void *priv)
{
	struct hifn_softc *sc = priv;
	struct timeval delta = {0, 400000};
	struct timeval now, oktime, wait;

	/*
	 * Wait until 0.4 seconds after we start up the RNG to read
	 * anything out of it. If the time hasn't elapsed, schedule a
	 * callout later on.
	 */
	microtime(&now);

	mutex_enter(&sc->sc_mtx);
	sc->sc_rng_needbits = MAX(sc->sc_rng_needbits, NBBY*bytes);
	timeradd(&sc->sc_rngboottime, &delta, &oktime);
	if (timercmp(&oktime, &now, <=)) {
		hifn_rng(sc);
	} else if (!callout_pending(&sc->sc_rngto)) {
		timersub(&oktime, &now, &wait);
		callout_schedule(&sc->sc_rngto, MAX(1, tvtohz(&wait)));
	}
	mutex_exit(&sc->sc_mtx);
}

static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	uint32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 */
		microtime(&sc->sc_rngboottime);
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_setfunc(&sc->sc_rngto, hifn_rng_intr, sc);
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG, RND_FLAG_DEFAULT|RND_FLAG_HASCB);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

static void
hifn_rng(struct hifn_softc *sc)
{
	uint32_t entropybits;

	KASSERT(mutex_owned(&sc->sc_mtx));

	if (sc->sc_flags & HIFN_IS_7811) {
		while (sc->sc_rng_needbits) {
			uint32_t num[2];
			uint32_t sts;

			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dv, "RNG underflow\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
#ifdef HIFN_DEBUG
			if (hifn_debug >= 2)
				hexdump(printf, "hifn", num, sizeof num);
#endif
			entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
			rnd_add_data(&sc->sc_rnd_source, num, sizeof(num),
			    entropybits);
			entropybits = MAX(entropybits, 1);
			entropybits = MIN(entropybits, sc->sc_rng_needbits);
			sc->sc_rng_needbits -= entropybits;
		}
	} else {
		/*
		 * We must be *extremely* careful here. The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers. In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR. The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use. Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		while (sc->sc_rng_needbits) {
			uint32_t num[64];
			unsigned i;

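			/*
			 * Keep one sample in eight: each num[i/8] slot
			 * is overwritten eight times by the loop below,
			 * so only the final read of each group of
			 * eight survives.
			 */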
			for (i = 0; i < 8*__arraycount(num); i++)
				num[i/8] = READ_REG_1(sc, HIFN_1_RNG_DATA);
#ifdef HIFN_DEBUG
			if (hifn_debug >= 2)
				hexdump(printf, "hifn", num, sizeof num);
#endif
			entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
			rnd_add_data(&sc->sc_rnd_source, num, sizeof num,
			    entropybits);
			entropybits = MAX(entropybits, 1);
			entropybits = MIN(entropybits, sc->sc_rng_needbits);
			sc->sc_rng_needbits -= entropybits;
		}
	}

	/* If we still need more, try again in another second. */
	if (sc->sc_rng_needbits)
		callout_schedule(&sc->sc_rngto, hz);
}

static void
hifn_rng_intr(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	hifn_rng(sc);
	mutex_spin_exit(&sc->sc_mtx);
}

static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

static void
hifn_set_retry(struct hifn_softc *sc)
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board. Values in the registers are left as-is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	uint32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero. 0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

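/*
 * Step the unlock-signature generator: a 32-bit LFSR with taps at the
 * bits of the mask 0x80080125. Each of the cnt iterations computes the
 * parity of the tapped bits and shifts it in as the new low bit.
 */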
static uint32_t
hifn_next_signature(uint32_t a, u_int cnt)
{
	u_int i;
	uint32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * powercrypt, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled. If crypto isn't enabled,
 * the unlock sequence below is run to enable it. The check is important,
 * as enabling crypto twice will lock the board.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	uint32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	for (i = 0; i < __arraycount(pci2id); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock. Two unlocks kill the chip until
	 * the next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

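	/*
	 * Clock the 13-byte card ID through the signature generator,
	 * stepping it offtbl[i] + 0x101 times per byte and writing each
	 * intermediate signature to the unlock register.
	 */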
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured. This should be configured before this
 * routine is called.
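 *
 * As a worked example of the arithmetic below (a sketch with an
 * assumed configuration): a 7751 with 512KB of SRAM and 512-byte
 * contexts gives 1 + (524288 - 32768) / 512 = 961 sessions, which is
 * under the 2048 cap.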
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	uint32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram). Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	uint8_t data[8], dataexpect[8];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

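/*
 * Size the SRAM: tag each 16KB step of the 32MB window from the top
 * down, then read the tags back from address 0 upward. On a smaller
 * part the descending writes alias and clobber the low tags, so the
 * first mismatch on readback marks the end of the RAM.
 */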
static int
hifn_sramsize(struct hifn_softc *sc)
{
	uint32_t a, b;
	uint8_t data[8];
	uint8_t dataexpect[sizeof(data)];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	a = HIFN_SRAM_GRANULARITY * HIFN_SRAM_STEP_SIZE;
	b = HIFN_SRAM_GRANULARITY;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; ++i) {
		a -= HIFN_SRAM_STEP_SIZE;
		b -= 1;
		le32enc(data, b);
		hifn_writeramaddr(sc, a, data);
	}

	a = 0;
	b = 0;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		le32enc(dataexpect, b);
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);

		a += HIFN_SRAM_STEP_SIZE;
		b += 1;
		sc->sc_ramsize = a;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	uint32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

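/*
 * Reserve the next slot in each of the four descriptor rings. When an
 * index wraps, the extra descriptor at the end of the ring is rewritten
 * as a JUMP back to entry 0, so the chip follows each ring as a
 * circular list.
 */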
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command wc;
	const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

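	/*
	 * Base command opcode 3 (bits 14:13 of the masks field) requests
	 * a RAM write; hifn_readramaddr() below uses opcode 2 for the
	 * matching read. (Opcode meaning inferred from these two users.)
	 */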
	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
	memcpy(&dma->test_src, data, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: writeramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    device_xname(sc->sc_dv), resi, addr);
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

static int
hifn_readramaddr(struct hifn_softc *sc, int addr, uint8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command rc;
	const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_src));
	dma->test_src = 0;
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_dst));
	dma->test_dst = 0;
	dma->cmdr[cmdi].l = htole32(8 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(8 | masks);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: readramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    device_xname(sc->sc_dv), resi, addr);
		r = -1;
	} else {
		r = 0;
		memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

/*
 * Initialize the descriptor rings.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}

/*
 * Writes out the raw command buffer space. Returns the
 * command buffer size.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, uint8_t *buf)
{
	uint8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	uint32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

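	/*
	 * Source/destination lengths are split: the low 16 bits go in
	 * total_source_count/total_dest_count, and the overflow bits are
	 * folded into the SRCLEN/DSTLEN fields of session_num below.
	 */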
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(uint32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

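/*
 * The chip can use a DMA map in place only if every segment starts on
 * a 32-bit boundary and every segment but the last is a multiple of
 * four bytes long; otherwise hifn_crypto() bounces the destination
 * through a freshly built mbuf chain.
 */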
static int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

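/*
 * Fill the destination ring. A transfer length that is not a multiple
 * of four leaves 1-3 "slop" bytes; these are steered into a private
 * word in the shared DMA block (slop[cmd->slopidx]) instead of the
 * caller's buffer, keeping every descriptor word-aligned.
 */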
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	uint32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(uint32_t);

		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	uint32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t cmdlen;
	int cmdi, resi, err = 0;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

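			/*
			 * Unaligned mbuf destination: bounce by building
			 * a fresh mbuf chain big enough for the whole
			 * payload and let the chip write into that
			 * instead.
			 */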
1693 totlen = cmd->src_map->dm_mapsize;
1694 if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
1695 len = MHLEN;
1696 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1697 } else {
1698 len = MLEN;
1699 MGET(m0, M_DONTWAIT, MT_DATA);
1700 }
1701 if (m0 == NULL) {
1702 err = ENOMEM;
1703 goto err_srcmap;
1704 }
1705 if (len == MHLEN)
1706 m_copy_pkthdr(m0, cmd->srcu.src_m);
1707 if (totlen >= MINCLSIZE) {
1708 MCLGET(m0, M_DONTWAIT);
1709 if (m0->m_flags & M_EXT)
1710 len = MCLBYTES;
1711 }
1712 totlen -= len;
1713 m0->m_pkthdr.len = m0->m_len = len;
1714 mlast = m0;
1715
1716 while (totlen > 0) {
1717 MGET(m, M_DONTWAIT, MT_DATA);
1718 if (m == NULL) {
1719 err = ENOMEM;
1720 m_freem(m0);
1721 goto err_srcmap;
1722 }
1723 len = MLEN;
1724 if (totlen >= MINCLSIZE) {
1725 MCLGET(m, M_DONTWAIT);
1726 if (m->m_flags & M_EXT)
1727 len = MCLBYTES;
1728 }
1729
1730 m->m_len = len;
1731 if (m0->m_flags & M_PKTHDR)
1732 m0->m_pkthdr.len += len;
1733 totlen -= len;
1734
1735 mlast->m_next = m;
1736 mlast = m;
1737 }
1738 cmd->dstu.dst_m = m0;
1739 }
1740 cmd->dst_map = cmd->dst_map_alloc;
1741 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1742 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1743 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
1744 err = ENOMEM;
1745 goto err_dstmap1;
1746 }
1747 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1748 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1749 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
1750 err = ENOMEM;
1751 goto err_dstmap1;
1752 }
1753 }
1754 }
1755
1756 #ifdef HIFN_DEBUG
1757 if (hifn_debug)
1758 printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1759 device_xname(sc->sc_dv),
1760 READ_REG_1(sc, HIFN_1_DMA_CSR),
1761 READ_REG_1(sc, HIFN_1_DMA_IER),
1762 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1763 cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
1764 #endif
1765
1766 if (cmd->src_map == cmd->dst_map)
1767 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1768 0, cmd->src_map->dm_mapsize,
1769 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1770 else {
1771 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1772 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1773 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1774 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1775 }
1776
1777 /*
1778 * need 1 cmd, and 1 res
1779 * need N src, and N dst
1780 */
1781 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1782 (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1783 err = ENOMEM;
1784 goto err_dstmap;
1785 }
1786 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
1787 (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
1788 err = ENOMEM;
1789 goto err_dstmap;
1790 }
1791
1792 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1793 dma->cmdi = 0;
1794 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1795 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1796 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1797 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1798 }
1799 cmdi = dma->cmdi++;
1800 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
1801 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
1802
1803 /* .p for command/result already set */
1804 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
1805 HIFN_D_MASKDONEIRQ);
1806 HIFN_CMDR_SYNC(sc, cmdi,
1807 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1808 dma->cmdu++;
1809 if (sc->sc_c_busy == 0) {
1810 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1811 sc->sc_c_busy = 1;
1812 SET_LED(sc, HIFN_MIPSRST_LED0);
1813 }
1814
1815 /*
1816 * Always enable the command wait interrupt. We are obviously
1817 * missing an interrupt or two somewhere. Enabling the command wait
1818 * interrupt will guarantee we get called periodically until all
1819 * of the queues are drained and thus work around this.
1820 */
1821 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
1822 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1823
1824 hifnstats.hst_ipackets++;
1825 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
1826
1827 hifn_dmamap_load_src(sc, cmd);
1828 if (sc->sc_s_busy == 0) {
1829 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1830 sc->sc_s_busy = 1;
1831 SET_LED(sc, HIFN_MIPSRST_LED1);
1832 }
1833
1834 /*
1835 * Unlike other descriptors, we don't mask done interrupt from
1836 * result descriptor.
1837 */
1838 #ifdef HIFN_DEBUG
1839 if (hifn_debug)
1840 printf("load res\n");
1841 #endif
1842 if (dma->resi == HIFN_D_RES_RSIZE) {
1843 dma->resi = 0;
1844 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1845 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1846 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1847 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1848 }
1849 resi = dma->resi++;
1850 dma->hifn_commands[resi] = cmd;
1851 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
1852 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
1853 HIFN_D_VALID | HIFN_D_LAST);
1854 HIFN_RESR_SYNC(sc, resi,
1855 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1856 dma->resu++;
1857 if (sc->sc_r_busy == 0) {
1858 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1859 sc->sc_r_busy = 1;
1860 SET_LED(sc, HIFN_MIPSRST_LED2);
1861 }
1862
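	/*
	 * Remember which result slot owns the slop area; any trailing
	 * partial word DMA'd into dma->slop[] is copied back to the
	 * caller's buffer in hifn_callback().
	 */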
1863 if (cmd->sloplen)
1864 cmd->slopidx = resi;
1865
1866 hifn_dmamap_load_dst(sc, cmd);
1867
1868 if (sc->sc_d_busy == 0) {
1869 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1870 sc->sc_d_busy = 1;
1871 }
1872
1873 #ifdef HIFN_DEBUG
1874 if (hifn_debug)
1875 printf("%s: command: stat %8x ier %8x\n",
1876 device_xname(sc->sc_dv),
1877 READ_REG_1(sc, HIFN_1_DMA_CSR),
1878 READ_REG_1(sc, HIFN_1_DMA_IER));
1879 #endif
1880
1881 sc->sc_active = 5;
1882 return (err); /* success */
1883
1884 err_dstmap:
1885 if (cmd->src_map != cmd->dst_map)
1886 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
1887 err_dstmap1:
1888 err_srcmap:
1889 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
1890 err_srcmap1:
1891 return (err);
1892 }
1893
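/*
 * Once-a-second watchdog.  Submissions arm sc_active; once it counts
 * down to zero, any DMA engine whose ring has drained is disabled
 * (and its LED cleared) until new work arrives.
 */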
1894 static void
1895 hifn_tick(void *vsc)
1896 {
1897 struct hifn_softc *sc = vsc;
1898
1899 mutex_spin_enter(&sc->sc_mtx);
1900 if (sc->sc_active == 0) {
1901 struct hifn_dma *dma = sc->sc_dma;
1902 uint32_t r = 0;
1903
1904 if (dma->cmdu == 0 && sc->sc_c_busy) {
1905 sc->sc_c_busy = 0;
1906 r |= HIFN_DMACSR_C_CTRL_DIS;
1907 CLR_LED(sc, HIFN_MIPSRST_LED0);
1908 }
1909 if (dma->srcu == 0 && sc->sc_s_busy) {
1910 sc->sc_s_busy = 0;
1911 r |= HIFN_DMACSR_S_CTRL_DIS;
1912 CLR_LED(sc, HIFN_MIPSRST_LED1);
1913 }
1914 if (dma->dstu == 0 && sc->sc_d_busy) {
1915 sc->sc_d_busy = 0;
1916 r |= HIFN_DMACSR_D_CTRL_DIS;
1917 }
1918 if (dma->resu == 0 && sc->sc_r_busy) {
1919 sc->sc_r_busy = 0;
1920 r |= HIFN_DMACSR_R_CTRL_DIS;
1921 CLR_LED(sc, HIFN_MIPSRST_LED2);
1922 }
1923 if (r)
1924 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1925 } else
1926 sc->sc_active--;
1927 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1928 mutex_spin_exit(&sc->sc_mtx);
1929 }
1930
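/*
 * Interrupt handler: acknowledge whatever the DMA unit asserted, reset
 * the board on aborts, then retire completed descriptors from the
 * result, source and command rings.  The jump slot at index RSIZE
 * carries no command and does not count against a ring's in-use total.
 */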
1931 static int
1932 hifn_intr(void *arg)
1933 {
1934 struct hifn_softc *sc = arg;
1935 struct hifn_dma *dma = sc->sc_dma;
1936 uint32_t dmacsr, restart;
1937 int i, u;
1938
1939 mutex_spin_enter(&sc->sc_mtx);
1940
1941 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
1942
1943 #ifdef HIFN_DEBUG
1944 if (hifn_debug)
1945 printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
1946 device_xname(sc->sc_dv),
1947 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
1948 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1949 #endif
1950
1951 /* Nothing in the DMA unit interrupted */
1952 if ((dmacsr & sc->sc_dmaier) == 0) {
1953 mutex_spin_exit(&sc->sc_mtx);
1954 return (0);
1955 }
1956
1957 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
1958
1959 if (dmacsr & HIFN_DMACSR_ENGINE)
1960 WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));
1961
1962 if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
1963 (dmacsr & HIFN_DMACSR_PUBDONE))
1964 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
1965 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
1966
1967 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
1968 if (restart)
1969 printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);
1970
1971 if (sc->sc_flags & HIFN_IS_7811) {
1972 if (dmacsr & HIFN_DMACSR_ILLR)
1973 printf("%s: illegal read\n", device_xname(sc->sc_dv));
1974 if (dmacsr & HIFN_DMACSR_ILLW)
1975 printf("%s: illegal write\n", device_xname(sc->sc_dv));
1976 }
1977
1978 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
1979 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
1980 if (restart) {
1981 printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
1982 hifnstats.hst_abort++;
1983 hifn_abort(sc);
1984 goto out;
1985 }
1986
1987 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
1988 		/*
1989 		 * If there are no result slots left to process and we
1990 		 * receive a "waiting on command" interrupt, disable
1991 		 * further ones by clearing the enable bit.
1992 		 */
1993 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1994 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1995 }
1996
1997 	/* retire completed descriptors from the rings */
1998 i = dma->resk;
1999 while (dma->resu != 0) {
2000 HIFN_RESR_SYNC(sc, i,
2001 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2002 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2003 HIFN_RESR_SYNC(sc, i,
2004 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2005 break;
2006 }
2007
2008 if (i != HIFN_D_RES_RSIZE) {
2009 struct hifn_command *cmd;
2010
2011 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2012 cmd = dma->hifn_commands[i];
2013 KASSERT(cmd != NULL
2014 /*("hifn_intr: null command slot %u", i)*/);
2015 dma->hifn_commands[i] = NULL;
2016
2017 hifn_callback(sc, cmd, dma->result_bufs[i]);
2018 hifnstats.hst_opackets++;
2019 }
2020
2021 if (++i == (HIFN_D_RES_RSIZE + 1))
2022 i = 0;
2023 else
2024 dma->resu--;
2025 }
2026 dma->resk = i;
2027
2028 i = dma->srck; u = dma->srcu;
2029 while (u != 0) {
2030 HIFN_SRCR_SYNC(sc, i,
2031 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2032 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2033 HIFN_SRCR_SYNC(sc, i,
2034 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2035 break;
2036 }
2037 if (++i == (HIFN_D_SRC_RSIZE + 1))
2038 i = 0;
2039 else
2040 u--;
2041 }
2042 dma->srck = i; dma->srcu = u;
2043
2044 i = dma->cmdk; u = dma->cmdu;
2045 while (u != 0) {
2046 HIFN_CMDR_SYNC(sc, i,
2047 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2048 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2049 HIFN_CMDR_SYNC(sc, i,
2050 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2051 break;
2052 }
2053 if (i != HIFN_D_CMD_RSIZE) {
2054 u--;
2055 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2056 }
2057 if (++i == (HIFN_D_CMD_RSIZE + 1))
2058 i = 0;
2059 }
2060 dma->cmdk = i; dma->cmdu = u;
2061
2062 out:
2063 mutex_spin_exit(&sc->sc_mtx);
2064 return (1);
2065 }
2066
2067 /*
2068  * Allocate a new 'session' and return an encoded session id.  On entry
2069  * 'sidp' holds our driver registration id; on successful return it is
2070  * overwritten with the encoded session id.
2071  */
2072 static int
2073 hifn_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
2074 {
2075 struct cryptoini *c;
2076 struct hifn_softc *sc = arg;
2077 int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2078
2079 KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
2080 if (sidp == NULL || cri == NULL || sc == NULL)
2081 return retval;
2082
2083 mutex_spin_enter(&sc->sc_mtx);
2084 for (i = 0; i < sc->sc_maxses; i++)
2085 if (isclr(sc->sc_sessions, i))
2086 break;
2087 if (i == sc->sc_maxses) {
2088 retval = ENOMEM;
2089 goto out;
2090 }
2091
2092 for (c = cri; c != NULL; c = c->cri_next) {
2093 switch (c->cri_alg) {
2094 case CRYPTO_MD5:
2095 case CRYPTO_SHA1:
2096 case CRYPTO_MD5_HMAC_96:
2097 case CRYPTO_SHA1_HMAC_96:
2098 if (mac) {
2099 goto out;
2100 }
2101 mac = 1;
2102 break;
2103 case CRYPTO_DES_CBC:
2104 case CRYPTO_3DES_CBC:
2105 case CRYPTO_AES_CBC:
2106 case CRYPTO_ARC4:
2107 if (cry) {
2108 goto out;
2109 }
2110 cry = 1;
2111 break;
2112 #ifdef CRYPTO_LZS_COMP
2113 case CRYPTO_LZS_COMP:
2114 if (comp) {
2115 goto out;
2116 }
2117 comp = 1;
2118 break;
2119 #endif
2120 default:
2121 goto out;
2122 }
2123 }
2124 if (mac == 0 && cry == 0 && comp == 0) {
2125 goto out;
2126 }
2127
2128 /*
2129 * XXX only want to support compression without chaining to
2130 * MAC/crypt engine right now
2131 */
2132 if ((comp && mac) || (comp && cry)) {
2133 goto out;
2134 }
2135
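	/*
	 * Encode the device unit and the session index into the id
	 * handed back to opencrypto (see HIFN_SID in hifn7751var.h).
	 */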
2136 *sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2137 setbit(sc->sc_sessions, i);
2138
2139 retval = 0;
2140 out:
2141 mutex_spin_exit(&sc->sc_mtx);
2142 return retval;
2143 }
2144
2145 /*
2146 * Deallocate a session.
2147 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2148 * XXX to blow away any keys already stored there.
2149 */
2150 static int
2151 hifn_freesession(void *arg, uint64_t tid)
2152 {
2153 struct hifn_softc *sc = arg;
2154 int session;
2155 	uint32_t sid = (uint32_t)tid;
2156
2157 KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2158 if (sc == NULL)
2159 return (EINVAL);
2160
2161 mutex_spin_enter(&sc->sc_mtx);
2162 session = HIFN_SESSION(sid);
2163 if (session >= sc->sc_maxses) {
2164 mutex_spin_exit(&sc->sc_mtx);
2165 return (EINVAL);
2166 }
2167 clrbit(sc->sc_sessions, session);
2168 mutex_spin_exit(&sc->sc_mtx);
2169 return (0);
2170 }
2171
2172 static int
2173 hifn_process(void *arg, struct cryptop *crp, int hint)
2174 {
2175 struct hifn_softc *sc = arg;
2176 struct hifn_command *cmd = NULL;
2177 int session, err = 0, ivlen;
2178 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2179
2180 if (crp == NULL || crp->crp_callback == NULL) {
2181 hifnstats.hst_invalid++;
2182 return (EINVAL);
2183 }
2184
2185 if ((cmd = pool_cache_get(sc->sc_cmd_cache, PR_NOWAIT)) == NULL) {
2186 hifnstats.hst_nomem++;
2187 return (ENOMEM);
2188 }
2189
2190 mutex_spin_enter(&sc->sc_mtx);
2191 session = HIFN_SESSION(crp->crp_sid);
2192 if (session >= sc->sc_maxses) {
2193 err = EINVAL;
2194 goto errout;
2195 }
2196
2197 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2198 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2199 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2200 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2201 cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2202 cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2203 } else {
2204 err = EINVAL;
2205 goto errout; /* XXX we don't handle contiguous buffers! */
2206 }
2207
2208 crd1 = crp->crp_desc;
2209 if (crd1 == NULL) {
2210 err = EINVAL;
2211 goto errout;
2212 }
2213 crd2 = crd1->crd_next;
2214
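	/*
	 * Sort the descriptors into at most one MAC op and one crypt
	 * op.  The chip's ordering is fixed: it can MAC then decrypt
	 * (inbound), or encrypt then MAC (outbound), but not the
	 * reverse combinations, which are rejected below.
	 */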
2215 if (crd2 == NULL) {
2216 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2217 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2218 crd1->crd_alg == CRYPTO_SHA1 ||
2219 crd1->crd_alg == CRYPTO_MD5) {
2220 maccrd = crd1;
2221 enccrd = NULL;
2222 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2223 crd1->crd_alg == CRYPTO_3DES_CBC ||
2224 crd1->crd_alg == CRYPTO_AES_CBC ||
2225 crd1->crd_alg == CRYPTO_ARC4) {
2226 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2227 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2228 maccrd = NULL;
2229 enccrd = crd1;
2230 #ifdef CRYPTO_LZS_COMP
2231 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2232 err = hifn_compression(sc, crp, cmd);
2233 mutex_spin_exit(&sc->sc_mtx);
2234 return err;
2235 #endif
2236 } else {
2237 err = EINVAL;
2238 goto errout;
2239 }
2240 } else {
2241 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2242 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2243 crd1->crd_alg == CRYPTO_MD5 ||
2244 crd1->crd_alg == CRYPTO_SHA1) &&
2245 (crd2->crd_alg == CRYPTO_DES_CBC ||
2246 crd2->crd_alg == CRYPTO_3DES_CBC ||
2247 crd2->crd_alg == CRYPTO_AES_CBC ||
2248 crd2->crd_alg == CRYPTO_ARC4) &&
2249 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2250 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2251 maccrd = crd1;
2252 enccrd = crd2;
2253 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2254 crd1->crd_alg == CRYPTO_ARC4 ||
2255 crd1->crd_alg == CRYPTO_3DES_CBC ||
2256 crd1->crd_alg == CRYPTO_AES_CBC) &&
2257 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
2258 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2259 crd2->crd_alg == CRYPTO_MD5 ||
2260 crd2->crd_alg == CRYPTO_SHA1) &&
2261 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2262 enccrd = crd1;
2263 maccrd = crd2;
2264 } else {
2265 			/*
2266 			 * The 7751 cannot process the ops in this order.
2267 			 */
2268 err = EINVAL;
2269 goto errout;
2270 }
2271 }
2272
2273 if (enccrd) {
2274 cmd->enccrd = enccrd;
2275 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2276 switch (enccrd->crd_alg) {
2277 case CRYPTO_ARC4:
2278 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2279 break;
2280 case CRYPTO_DES_CBC:
2281 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2282 HIFN_CRYPT_CMD_MODE_CBC |
2283 HIFN_CRYPT_CMD_NEW_IV;
2284 break;
2285 case CRYPTO_3DES_CBC:
2286 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2287 HIFN_CRYPT_CMD_MODE_CBC |
2288 HIFN_CRYPT_CMD_NEW_IV;
2289 break;
2290 case CRYPTO_AES_CBC:
2291 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2292 HIFN_CRYPT_CMD_MODE_CBC |
2293 HIFN_CRYPT_CMD_NEW_IV;
2294 break;
2295 default:
2296 err = EINVAL;
2297 goto errout;
2298 }
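		/*
		 * CBC modes need an IV.  Encrypting: use the caller's
		 * explicit IV or generate a fresh one, and write it
		 * into the buffer at crd_inject unless the caller says
		 * it is already there.  Decrypting: use the explicit
		 * IV or read it from the buffer at crd_inject.
		 */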
2299 if (enccrd->crd_alg != CRYPTO_ARC4) {
2300 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2301 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2302 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2303 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2304 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2305 else
2306 cprng_fast(cmd->iv, ivlen);
2307
2308 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2309 == 0) {
2310 if (crp->crp_flags & CRYPTO_F_IMBUF)
2311 m_copyback(cmd->srcu.src_m,
2312 enccrd->crd_inject,
2313 ivlen, cmd->iv);
2314 else if (crp->crp_flags & CRYPTO_F_IOV)
2315 cuio_copyback(cmd->srcu.src_io,
2316 enccrd->crd_inject,
2317 ivlen, cmd->iv);
2318 }
2319 } else {
2320 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2321 memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2322 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2323 m_copydata(cmd->srcu.src_m,
2324 enccrd->crd_inject, ivlen, cmd->iv);
2325 else if (crp->crp_flags & CRYPTO_F_IOV)
2326 cuio_copydata(cmd->srcu.src_io,
2327 enccrd->crd_inject,
2328 ivlen, cmd->iv);
2329 }
2330 }
2331
2332 cmd->ck = enccrd->crd_key;
2333 cmd->cklen = enccrd->crd_klen >> 3;
2334 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2335
2336 /*
2337 * Need to specify the size for the AES key in the masks.
2338 */
2339 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2340 HIFN_CRYPT_CMD_ALG_AES) {
2341 switch (cmd->cklen) {
2342 case 16:
2343 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2344 break;
2345 case 24:
2346 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2347 break;
2348 case 32:
2349 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2350 break;
2351 default:
2352 err = EINVAL;
2353 goto errout;
2354 }
2355 }
2356 }
2357
2358 if (maccrd) {
2359 cmd->maccrd = maccrd;
2360 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2361
2362 switch (maccrd->crd_alg) {
2363 case CRYPTO_MD5:
2364 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2365 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2366 HIFN_MAC_CMD_POS_IPSEC;
2367 break;
2368 case CRYPTO_MD5_HMAC_96:
2369 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2370 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2371 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2372 break;
2373 case CRYPTO_SHA1:
2374 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2375 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2376 HIFN_MAC_CMD_POS_IPSEC;
2377 break;
2378 case CRYPTO_SHA1_HMAC_96:
2379 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2380 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2381 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2382 break;
2383 }
2384
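		/*
		 * The chip expects a fixed-size key block: copy the
		 * HMAC key in and zero-pad to HIFN_MAC_KEY_LENGTH.
		 */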
2385 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2386 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) {
2387 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2388 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
2389 memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
2390 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2391 }
2392 }
2393
2394 cmd->crp = crp;
2395 cmd->session_num = session;
2396 cmd->softc = sc;
2397
2398 err = hifn_crypto(sc, cmd, crp, hint);
2399 if (err == 0) {
2400 		mutex_spin_exit(&sc->sc_mtx);
2401 return 0;
2402 } else if (err == ERESTART) {
2403 		/*
2404 		 * There weren't enough resources to dispatch the request
2405 		 * to the chip.  Notify the caller so the request will be
2406 		 * requeued and resubmitted later.
2407 		 */
2408 #ifdef HIFN_DEBUG
2409 if (hifn_debug)
2410 printf("%s: requeue request\n", device_xname(sc->sc_dv));
2411 #endif
2412 sc->sc_needwakeup |= CRYPTO_SYMQ;
2413 mutex_spin_exit(&sc->sc_mtx);
2414 pool_cache_put(sc->sc_cmd_cache, cmd);
2415 return (err);
2416 }
2417
2418 errout:
2419 if (err == EINVAL)
2420 hifnstats.hst_invalid++;
2421 else
2422 hifnstats.hst_nomem++;
2423 crp->crp_etype = err;
2424 mutex_spin_exit(&sc->sc_mtx);
2425 if (cmd != NULL) {
2426 if (crp->crp_flags & CRYPTO_F_IMBUF &&
2427 cmd->srcu.src_m != cmd->dstu.dst_m)
2428 m_freem(cmd->dstu.dst_m);
2429 cmd->dst_map = NULL;
2430 pool_cache_put(sc->sc_cmd_cache, cmd);
2431 }
2432 crypto_done(crp);
2433 return (0);
2434 }
2435
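/*
 * Recover from a DMA abort: salvage any results the chip already
 * completed, fail the remaining requests back to opencrypto, then
 * reset and reinitialize the board.
 */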
2436 static void
2437 hifn_abort(struct hifn_softc *sc)
2438 {
2439 struct hifn_dma *dma = sc->sc_dma;
2440 struct hifn_command *cmd;
2441 struct cryptop *crp;
2442 int i, u;
2443
2444 KASSERT(mutex_owned(&sc->sc_mtx));
2445
2446 i = dma->resk; u = dma->resu;
2447 while (u != 0) {
2448 cmd = dma->hifn_commands[i];
2449 KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
2450 dma->hifn_commands[i] = NULL;
2451 crp = cmd->crp;
2452
2453 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2454 /* Salvage what we can. */
2455 hifnstats.hst_opackets++;
2456 hifn_callback(sc, cmd, dma->result_bufs[i]);
2457 } else {
2458 if (cmd->src_map == cmd->dst_map) {
2459 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2460 0, cmd->src_map->dm_mapsize,
2461 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2462 } else {
2463 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2464 0, cmd->src_map->dm_mapsize,
2465 BUS_DMASYNC_POSTWRITE);
2466 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2467 0, cmd->dst_map->dm_mapsize,
2468 BUS_DMASYNC_POSTREAD);
2469 }
2470
2471 if (cmd->srcu.src_m != cmd->dstu.dst_m) {
2472 m_freem(cmd->srcu.src_m);
2473 crp->crp_buf = (void *)cmd->dstu.dst_m;
2474 }
2475
2476 /* non-shared buffers cannot be restarted */
2477 if (cmd->src_map != cmd->dst_map) {
2478 /*
2479 * XXX should be EAGAIN, delayed until
2480 * after the reset.
2481 */
2482 crp->crp_etype = ENOMEM;
2483 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2484 } else
2485 crp->crp_etype = ENOMEM;
2486
2487 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2488
2489 cmd->dst_map = NULL;
2490 pool_cache_put(sc->sc_cmd_cache, cmd);
2491
2492 if (crp->crp_etype != EAGAIN)
2493 crypto_done(crp);
2494 }
2495
2496 if (++i == HIFN_D_RES_RSIZE)
2497 i = 0;
2498 u--;
2499 }
2500 dma->resk = i; dma->resu = u;
2501
2502 hifn_reset_board(sc, 1);
2503 hifn_init_dma(sc);
2504 hifn_init_pci_registers(sc);
2505 }
2506
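/*
 * Completion path for crypto requests: sync and unload the DMA maps,
 * trim the destination mbuf chain to the payload length, copy back any
 * slop bytes and the MAC result, and hand the request to crypto_done().
 */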
2507 static void
2508 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, uint8_t *resbuf)
2509 {
2510 struct hifn_dma *dma = sc->sc_dma;
2511 struct cryptop *crp = cmd->crp;
2512 struct cryptodesc *crd;
2513 struct mbuf *m;
2514 int totlen, i, u;
2515
2516 KASSERT(mutex_owned(&sc->sc_mtx));
2517
2518 if (cmd->src_map == cmd->dst_map)
2519 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2520 0, cmd->src_map->dm_mapsize,
2521 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2522 else {
2523 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2524 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2525 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2526 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2527 }
2528
2529 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2530 if (cmd->srcu.src_m != cmd->dstu.dst_m) {
2531 crp->crp_buf = (void *)cmd->dstu.dst_m;
2532 totlen = cmd->src_map->dm_mapsize;
2533 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
2534 if (totlen < m->m_len) {
2535 m->m_len = totlen;
2536 totlen = 0;
2537 } else
2538 totlen -= m->m_len;
2539 }
2540 cmd->dstu.dst_m->m_pkthdr.len =
2541 cmd->srcu.src_m->m_pkthdr.len;
2542 m_freem(cmd->srcu.src_m);
2543 }
2544 }
2545
2546 if (cmd->sloplen != 0) {
2547 if (crp->crp_flags & CRYPTO_F_IMBUF)
2548 m_copyback((struct mbuf *)crp->crp_buf,
2549 cmd->src_map->dm_mapsize - cmd->sloplen,
2550 cmd->sloplen, &dma->slop[cmd->slopidx]);
2551 else if (crp->crp_flags & CRYPTO_F_IOV)
2552 cuio_copyback((struct uio *)crp->crp_buf,
2553 cmd->src_map->dm_mapsize - cmd->sloplen,
2554 cmd->sloplen, &dma->slop[cmd->slopidx]);
2555 }
2556
2557 i = dma->dstk; u = dma->dstu;
2558 while (u != 0) {
2559 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2560 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2561 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2562 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2563 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2564 offsetof(struct hifn_dma, dstr[i]),
2565 sizeof(struct hifn_desc),
2566 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2567 break;
2568 }
2569 if (++i == (HIFN_D_DST_RSIZE + 1))
2570 i = 0;
2571 else
2572 u--;
2573 }
2574 dma->dstk = i; dma->dstu = u;
2575
2576 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;
2577
2578 if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2579 uint8_t *macbuf;
2580
2581 macbuf = resbuf + sizeof(struct hifn_base_result);
2582 if (cmd->base_masks & HIFN_BASE_CMD_COMP)
2583 macbuf += sizeof(struct hifn_comp_result);
2584 macbuf += sizeof(struct hifn_mac_result);
2585
2586 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2587 int len;
2588
2589 if (crd->crd_alg == CRYPTO_MD5)
2590 len = 16;
2591 else if (crd->crd_alg == CRYPTO_SHA1)
2592 len = 20;
2593 else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
2594 crd->crd_alg == CRYPTO_SHA1_HMAC_96)
2595 len = 12;
2596 else
2597 continue;
2598
2599 if (crp->crp_flags & CRYPTO_F_IMBUF)
2600 m_copyback((struct mbuf *)crp->crp_buf,
2601 crd->crd_inject, len, macbuf);
2602 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
2603 memcpy(crp->crp_mac, (void *)macbuf, len);
2604 break;
2605 }
2606 }
2607
2608 if (cmd->src_map != cmd->dst_map)
2609 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2610 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2611 cmd->dst_map = NULL;
2612 pool_cache_put(sc->sc_cmd_cache, cmd);
2613 crypto_done(crp);
2614 }
2615
2616 #ifdef CRYPTO_LZS_COMP
2617
2618 static int
2619 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2620 struct hifn_command *cmd)
2621 {
2622 struct cryptodesc *crd = crp->crp_desc;
2623 	int err = 0;
2624
2625 cmd->compcrd = crd;
2626 cmd->base_masks |= HIFN_BASE_CMD_COMP;
2627
2628 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2629 /*
2630 * XXX can only handle mbufs right now since we can
2631 * XXX dynamically resize them.
2632 */
2633 		return (EINVAL);
2635 }
2636
2637 if ((crd->crd_flags & CRD_F_COMP) == 0)
2638 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2639 if (crd->crd_alg == CRYPTO_LZS_COMP)
2640 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2641 HIFN_COMP_CMD_CLEARHIST;
2642
2643 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2644 int len;
2645
2646 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2647 cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2648 err = ENOMEM;
2649 goto fail;
2650 }
2651
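		/*
		 * Estimate the output size: round the input up to whole
		 * clusters, allow 4x expansion when decompressing, and
		 * clamp to HIFN_MAX_DMALEN.  If the estimate proves too
		 * small, hifn_callback_comp() grows the chain and
		 * resubmits.
		 */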
2652 len = cmd->src_map->dm_mapsize / MCLBYTES;
2653 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2654 len++;
2655 len *= MCLBYTES;
2656
2657 if ((crd->crd_flags & CRD_F_COMP) == 0)
2658 len *= 4;
2659
2660 if (len > HIFN_MAX_DMALEN)
2661 len = HIFN_MAX_DMALEN;
2662
2663 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2664 if (cmd->dstu.dst_m == NULL) {
2665 err = ENOMEM;
2666 goto fail;
2667 }
2668
2669 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2670 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2671 err = ENOMEM;
2672 goto fail;
2673 }
2674 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2675 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2676 cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2677 err = ENOMEM;
2678 goto fail;
2679 }
2680 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2681 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2682 err = ENOMEM;
2683 goto fail;
2684 }
2685 }
2686
2687 if (cmd->src_map == cmd->dst_map)
2688 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2689 0, cmd->src_map->dm_mapsize,
2690 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2691 else {
2692 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2693 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2694 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2695 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2696 }
2697
2698 cmd->crp = crp;
2699 /*
2700 * Always use session 0. The modes of compression we use are
2701 * stateless and there is always at least one compression
2702 * context, zero.
2703 */
2704 cmd->session_num = 0;
2705 cmd->softc = sc;
2706
2707 err = hifn_compress_enter(sc, cmd);
2708
2709 if (err != 0)
2710 goto fail;
2711 return (0);
2712
2713 fail:
2714 if (cmd->dst_map != NULL) {
2715 if (cmd->dst_map->dm_nsegs > 0)
2716 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2717 }
2718 if (cmd->src_map != NULL) {
2719 if (cmd->src_map->dm_nsegs > 0)
2720 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2721 }
2722 cmd->dst_map = NULL;
2723 pool_cache_put(sc->sc_cmd_cache, cmd);
2724 if (err == EINVAL)
2725 hifnstats.hst_invalid++;
2726 else
2727 hifnstats.hst_nomem++;
2728 crp->crp_etype = err;
2729 crypto_done(crp);
2730 return (0);
2731 }
2732
2733 static int
2734 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2735 {
2736 struct hifn_dma *dma = sc->sc_dma;
2737 int cmdi, resi;
2738 uint32_t cmdlen;
2739
2740 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2741 	    (dma->resu + 1) > HIFN_D_RES_RSIZE)
2742 return (ENOMEM);
2743
2744 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2745 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2746 return (ENOMEM);
2747
2748 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2749 dma->cmdi = 0;
2750 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2751 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2752 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2753 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2754 }
2755 cmdi = dma->cmdi++;
2756 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2757 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2758
2759 /* .p for command/result already set */
2760 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2761 HIFN_D_MASKDONEIRQ);
2762 HIFN_CMDR_SYNC(sc, cmdi,
2763 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2764 dma->cmdu++;
2765 if (sc->sc_c_busy == 0) {
2766 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2767 sc->sc_c_busy = 1;
2768 SET_LED(sc, HIFN_MIPSRST_LED0);
2769 }
2770
2771 /*
2772 * Always enable the command wait interrupt. We are obviously
2773 * missing an interrupt or two somewhere. Enabling the command wait
2774 * interrupt will guarantee we get called periodically until all
2775 * of the queues are drained and thus work around this.
2776 */
2777 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2778 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2779
2780 hifnstats.hst_ipackets++;
2781 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2782
2783 hifn_dmamap_load_src(sc, cmd);
2784 if (sc->sc_s_busy == 0) {
2785 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2786 sc->sc_s_busy = 1;
2787 SET_LED(sc, HIFN_MIPSRST_LED1);
2788 }
2789
2790 /*
2791 * Unlike other descriptors, we don't mask done interrupt from
2792 * result descriptor.
2793 */
2794 if (dma->resi == HIFN_D_RES_RSIZE) {
2795 dma->resi = 0;
2796 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2797 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2798 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2799 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2800 }
2801 resi = dma->resi++;
2802 dma->hifn_commands[resi] = cmd;
2803 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2804 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2805 HIFN_D_VALID | HIFN_D_LAST);
2806 HIFN_RESR_SYNC(sc, resi,
2807 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2808 dma->resu++;
2809 if (sc->sc_r_busy == 0) {
2810 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2811 sc->sc_r_busy = 1;
2812 SET_LED(sc, HIFN_MIPSRST_LED2);
2813 }
2814
2815 if (cmd->sloplen)
2816 cmd->slopidx = resi;
2817
2818 hifn_dmamap_load_dst(sc, cmd);
2819
2820 if (sc->sc_d_busy == 0) {
2821 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2822 sc->sc_d_busy = 1;
2823 }
2824 sc->sc_active = 5;
2825 cmd->cmd_callback = hifn_callback_comp;
2826 return (0);
2827 }
2828
2829 static void
2830 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2831 uint8_t *resbuf)
2832 {
2833 struct hifn_base_result baseres;
2834 struct cryptop *crp = cmd->crp;
2835 struct hifn_dma *dma = sc->sc_dma;
2836 struct mbuf *m;
2837 int err = 0, i, u;
2838 uint32_t olen;
2839 bus_size_t dstsize;
2840
2841 KASSERT(mutex_owned(&sc->sc_mtx));
2842
2843 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2844 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2845 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2846 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2847
2848 dstsize = cmd->dst_map->dm_mapsize;
2849 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2850
2851 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2852
2853 i = dma->dstk; u = dma->dstu;
2854 while (u != 0) {
2855 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2856 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2857 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2858 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2859 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2860 offsetof(struct hifn_dma, dstr[i]),
2861 sizeof(struct hifn_desc),
2862 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2863 break;
2864 }
2865 if (++i == (HIFN_D_DST_RSIZE + 1))
2866 i = 0;
2867 else
2868 u--;
2869 }
2870 dma->dstk = i; dma->dstu = u;
2871
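	/*
	 * Destination overrun: the output did not fit.  Grow the
	 * destination chain by one cluster, up to HIFN_MAX_DMALEN, and
	 * resubmit the command; if the maximum was already tried, give
	 * up with E2BIG.
	 */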
2872 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2873 bus_size_t xlen;
2874
2875 xlen = dstsize;
2876
2877 m_freem(cmd->dstu.dst_m);
2878
2879 if (xlen == HIFN_MAX_DMALEN) {
2880 /* We've done all we can. */
2881 err = E2BIG;
2882 goto out;
2883 }
2884
2885 xlen += MCLBYTES;
2886
2887 if (xlen > HIFN_MAX_DMALEN)
2888 xlen = HIFN_MAX_DMALEN;
2889
2890 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2891 cmd->srcu.src_m);
2892 if (cmd->dstu.dst_m == NULL) {
2893 err = ENOMEM;
2894 goto out;
2895 }
2896 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2897 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2898 err = ENOMEM;
2899 goto out;
2900 }
2901
2902 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2903 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2904 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2905 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2906
2907 err = hifn_compress_enter(sc, cmd);
2908 if (err != 0)
2909 goto out;
2910 return;
2911 }
2912
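	/*
	 * The chip reports the unused destination space split between
	 * dst_cnt and the DSTLEN bits of the session word; the output
	 * length is the mapped size minus that residue.
	 */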
2913 	olen = dstsize - (le16toh(baseres.dst_cnt) |
2914 	    (((le16toh(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2915 HIFN_BASE_RES_DSTLEN_S) << 16));
2916
2917 crp->crp_olen = olen - cmd->compcrd->crd_skip;
2918
2919 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2920
2921 m = cmd->dstu.dst_m;
2922 if (m->m_flags & M_PKTHDR)
2923 m->m_pkthdr.len = olen;
2924 crp->crp_buf = (void *)m;
2925 for (; m != NULL; m = m->m_next) {
2926 if (olen >= m->m_len)
2927 olen -= m->m_len;
2928 else {
2929 m->m_len = olen;
2930 olen = 0;
2931 }
2932 }
2933
2934 m_freem(cmd->srcu.src_m);
2935 cmd->dst_map = NULL;
2936 pool_cache_put(sc->sc_cmd_cache, cmd);
2937 crp->crp_etype = 0;
2938 crypto_done(crp);
2939 return;
2940
2941 out:
2942 	if (cmd->dst_map != NULL) {
2943 		if (cmd->dst_map->dm_nsegs != 0)
2944 			bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2945 }
2946 if (cmd->src_map != NULL) {
2947 if (cmd->src_map->dm_nsegs != 0)
2948 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2949 }
2950 m_freem(cmd->dstu.dst_m);
2951 cmd->dst_map = NULL;
2952 pool_cache_put(sc->sc_cmd_cache, cmd);
2953 crp->crp_etype = err;
2954 crypto_done(crp);
2955 }
2956
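/*
 * Build a cluster-backed mbuf chain holding at least totlen bytes,
 * copying the packet header (if any) from mtemplate.
 */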
2957 static struct mbuf *
2958 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2959 {
2960 int len;
2961 struct mbuf *m, *m0, *mlast;
2962
2963 if (mtemplate->m_flags & M_PKTHDR) {
2964 len = MHLEN;
2965 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2966 } else {
2967 len = MLEN;
2968 MGET(m0, M_DONTWAIT, MT_DATA);
2969 }
2970 if (m0 == NULL)
2971 return (NULL);
2972 if (len == MHLEN)
2973 m_copy_pkthdr(m0, mtemplate);
2974 MCLGET(m0, M_DONTWAIT);
2975 if (!(m0->m_flags & M_EXT)) {
2976 m_freem(m0);
2977 return (NULL);
2978 }
2979 len = MCLBYTES;
2980
2981 totlen -= len;
2982 	m0->m_len = len;
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len = len;
2983 mlast = m0;
2984
2985 while (totlen > 0) {
2986 MGET(m, M_DONTWAIT, MT_DATA);
2987 if (m == NULL) {
2988 m_freem(m0);
2989 return (NULL);
2990 }
2991 MCLGET(m, M_DONTWAIT);
2992 if (!(m->m_flags & M_EXT)) {
2993 m_free(m);
2994 m_freem(m0);
2995 return (NULL);
2996 }
2997 len = MCLBYTES;
2998 m->m_len = len;
2999 if (m0->m_flags & M_PKTHDR)
3000 m0->m_pkthdr.len += len;
3001 totlen -= len;
3002
3003 mlast->m_next = m;
3004 mlast = m;
3005 }
3006
3007 return (m0);
3008 }
3009 #endif /* CRYPTO_LZS_COMP */
3010
3011 static void
3012 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, uint32_t val)
3013 {
3014 /*
3015 	 * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
3016 * and Group 1 registers; avoid conditions that could create
3017 * burst writes by doing a read in between the writes.
3018 */
3019 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3020 if (sc->sc_waw_lastgroup == reggrp &&
3021 sc->sc_waw_lastreg == reg - 4) {
3022 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3023 }
3024 sc->sc_waw_lastgroup = reggrp;
3025 sc->sc_waw_lastreg = reg;
3026 }
3027 if (reggrp == 0)
3028 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3029 else
3030 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3032 }
3033
3034 static uint32_t
3035 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3036 {
3037 if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
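		/* A read breaks any burst, so forget the last write. */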
3038 sc->sc_waw_lastgroup = -1;
3039 sc->sc_waw_lastreg = 1;
3040 }
3041 if (reggrp == 0)
3042 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3043 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3044 }
3045