1 /* $NetBSD: hifn7751.c,v 1.82 2023/08/04 07:38:53 riastradh Exp $ */ 2 /* $OpenBSD: hifn7751.c,v 1.179 2020/01/11 21:34:03 cheloha Exp $ */ 3 4 /* 5 * Invertex AEON / Hifn 7751 driver 6 * Copyright (c) 1999 Invertex Inc. All rights reserved. 7 * Copyright (c) 1999 Theo de Raadt 8 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 9 * http://www.netsec.net 10 * Copyright (c) 2003 Hifn Inc. 11 * 12 * This driver is based on a previous driver by Invertex, for which they 13 * requested: Please send any comments, feedback, bug-fixes, or feature 14 * requests to software (at) invertex.com. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. The name of the author may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 * 39 * Effort sponsored in part by the Defense Advanced Research Projects 40 * Agency (DARPA) and Air Force Research Laboratory, Air Force 41 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 42 * 43 */ 44 45 /* 46 * Driver for various Hifn encryption processors. 47 */ 48 49 #include <sys/cdefs.h> 50 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.82 2023/08/04 07:38:53 riastradh Exp $"); 51 52 #include <sys/param.h> 53 #include <sys/cprng.h> 54 #include <sys/device.h> 55 #include <sys/endian.h> 56 #include <sys/errno.h> 57 #include <sys/kernel.h> 58 #include <sys/mbuf.h> 59 #include <sys/module.h> 60 #include <sys/mutex.h> 61 #include <sys/pool.h> 62 #include <sys/proc.h> 63 #include <sys/rndsource.h> 64 #include <sys/sha1.h> 65 #include <sys/systm.h> 66 67 #include <opencrypto/cryptodev.h> 68 69 #include <dev/pci/pcireg.h> 70 #include <dev/pci/pcivar.h> 71 #include <dev/pci/pcidevs.h> 72 73 #include <dev/pci/hifn7751reg.h> 74 #include <dev/pci/hifn7751var.h> 75 76 #undef HIFN_DEBUG 77 78 #ifdef HIFN_DEBUG 79 extern int hifn_debug; /* patchable */ 80 int hifn_debug = 1; 81 #endif 82 83 /* 84 * Prototypes and count for the pci_device structure 85 */ 86 static int hifn_match(device_t, cfdata_t, void *); 87 static void hifn_attach(device_t, device_t, void *); 88 static int hifn_detach(device_t, int); 89 90 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc), 91 hifn_match, hifn_attach, hifn_detach, NULL); 92 93 static void 
hifn_reset_board(struct hifn_softc *, int); 94 static void hifn_reset_puc(struct hifn_softc *); 95 static void hifn_puc_wait(struct hifn_softc *); 96 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); 97 static void hifn_set_retry(struct hifn_softc *); 98 static void hifn_init_dma(struct hifn_softc *); 99 static void hifn_init_pci_registers(struct hifn_softc *); 100 static int hifn_sramsize(struct hifn_softc *); 101 static int hifn_dramsize(struct hifn_softc *); 102 static int hifn_ramtype(struct hifn_softc *); 103 static void hifn_sessions(struct hifn_softc *); 104 static int hifn_intr(void *); 105 static u_int hifn_write_command(struct hifn_command *, uint8_t *); 106 static uint32_t hifn_next_signature(uint32_t a, u_int cnt); 107 static int hifn_newsession(void*, uint32_t *, struct cryptoini *); 108 static void hifn_freesession(void*, uint64_t); 109 static int hifn_process(void*, struct cryptop *, int); 110 static void hifn_callback(struct hifn_softc *, struct hifn_command *, 111 uint8_t *); 112 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, 113 struct cryptop*, int); 114 static int hifn_readramaddr(struct hifn_softc *, int, uint8_t *); 115 static int hifn_writeramaddr(struct hifn_softc *, int, uint8_t *); 116 static int hifn_dmamap_aligned(bus_dmamap_t); 117 static int hifn_dmamap_load_src(struct hifn_softc *, 118 struct hifn_command *); 119 static int hifn_dmamap_load_dst(struct hifn_softc *, 120 struct hifn_command *); 121 static int hifn_init_pubrng(struct hifn_softc *); 122 static void hifn_rng(struct hifn_softc *); 123 static void hifn_rng_intr(void *); 124 static void hifn_tick(void *); 125 static void hifn_abort(struct hifn_softc *); 126 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, 127 int *); 128 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, uint32_t); 129 static uint32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); 130 #ifdef CRYPTO_LZS_COMP 131 static void 
	hifn_compression(struct hifn_softc *, struct cryptop *,
	    struct hifn_command *);
static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
		    uint8_t *);
#endif	/* CRYPTO_LZS_COMP */

/* Driver-wide statistics, shared by all hifn(4) instances. */
struct hifn_stats hifnstats;

/*
 * hifn_cmd_ctor: pool_cache(9) constructor for struct hifn_command.
 *
 * Zeroes the command and creates its source and destination DMA maps.
 * dst_map is left NULL; dst_map_alloc holds the preallocated map.
 * Returns 0 on success or a bus_dma error code.
 */
static int
hifn_cmd_ctor(void *vsc, void *vcmd, int pflags)
{
	struct hifn_softc *sc = vsc;
	struct hifn_command *cmd = vcmd;
	/* Translate pool wait semantics into bus_dma wait semantics.  */
	int bflags = pflags & PR_WAITOK ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT;
	int error;

	memset(cmd, 0, sizeof(*cmd));

	error = bus_dmamap_create(sc->sc_dmat,
	    HIFN_MAX_DMALEN, MAX_SCATTER, HIFN_MAX_SEGLEN,
	    0, bflags, &cmd->src_map);
	if (error)
		goto fail0;

	error = bus_dmamap_create(sc->sc_dmat,
	    HIFN_MAX_SEGLEN*MAX_SCATTER, MAX_SCATTER, HIFN_MAX_SEGLEN,
	    0, bflags, &cmd->dst_map_alloc);
	if (error)
		goto fail1;

	/* Success! */
	cmd->dst_map = NULL;
	return 0;

	/*
	 * NOTE(review): the fail2 label is unreferenced (hence __unused);
	 * the dst_map_alloc destroy below is currently dead code kept for
	 * symmetry with future error paths.
	 */
fail2: __unused
	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map_alloc);
fail1:	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
fail0:	return error;
}

/*
 * hifn_cmd_dtor: pool_cache(9) destructor for struct hifn_command.
 * Destroys the two DMA maps created by hifn_cmd_ctor.
 */
static void
hifn_cmd_dtor(void *vsc, void *vcmd)
{
	struct hifn_softc *sc = vsc;
	struct hifn_command *cmd = vcmd;

	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map_alloc);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
}

/*
 * Table of supported chips: PCI vendor/product, per-chip feature flags,
 * and a human-readable name.  Terminated by a NULL name entry.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;
	pci_product_id_t	hifn_product;
	int			hifn_flags;
	const char		*hifn_name;
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},

	{ 0,			0,
	  0,
	  NULL
	}
};

/*
 * hifn_lookup: find the hifn_products entry matching the PCI ID in the
 * attach args, or NULL if this is not a device we drive.
 */
static const struct hifn_product *
hifn_lookup(const struct pci_attach_args *pa)
{
	const struct hifn_product *hp;

	for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
		if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
		    PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
			return (hp);
	}
	return (NULL);
}

/*
 * hifn_match: autoconf match — claim the device iff it is in our
 * product table.
 */
static int
hifn_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (hifn_lookup(pa) != NULL)
		return 1;

	return 0;
}

/*
 * hifn_attach: autoconf attach.
 *
 * Maps both BARs, allocates and wires the shared DMA descriptor area,
 * unlocks the crypto engine, sizes the on-board RAM, hooks the
 * interrupt, registers with opencrypto, and starts the RNG/public-key
 * engines and the watchdog tick.  On failure, tears down whatever was
 * set up so far (goto-cleanup chain at the bottom).
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	uint32_t cmd;
	uint16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;
	char intrbuf[PCI_INTRSTR_LEN];

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering so the chip can DMA the rings. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &sc->sc_iosz0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &sc->sc_iosz1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/* Allocate, map, and load the shared descriptor-ring page. */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/*
	 * NOTE(review): rseg is reused here as a scratch variable for
	 * the RAM-size printout; if we later take fail_intr, the
	 * bus_dmamem_free() in the cleanup path receives this clobbered
	 * value rather than the original segment count — verify.
	 */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	sc->sc_cmd_cache = pool_cache_init(sizeof(struct hifn_command),
	    0, 0, 0, "hifncmd", NULL, IPL_VM,
	    &hifn_cmd_ctor, &hifn_cmd_dtor, sc);
	pool_cache_prime(sc->sc_cmd_cache, sc->sc_maxses);

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Register the algorithms this chip's enable level permits.
	 * ENA_2 (strong crypto) also gets everything ENA_1 offers.
	 */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
	}

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);
}

/*
 * hifn_detach: autoconf detach.
 *
 * Aborts in-flight work under the softc lock, resets the board,
 * tears down the interrupt, unregisters from opencrypto and the
 * entropy pool, stops the callouts, and unmaps register space.
 */
static int
hifn_detach(device_t self, int flags)
{
	struct hifn_softc *sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	hifn_abort(sc);
	mutex_exit(&sc->sc_mtx);

	hifn_reset_board(sc, 1);

	pci_intr_disestablish(sc->sc_pci_pc, sc->sc_ih);

	crypto_unregister_all(sc->sc_cid);

	rnd_detach_source(&sc->sc_rnd_source);

	callout_halt(&sc->sc_tickto, NULL);
	/* sc_rngto only exists if hifn_init_pubrng() ran at attach. */
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		callout_halt(&sc->sc_rngto, NULL);

	pool_cache_destroy(sc->sc_cmd_cache);

	bus_space_unmap(sc->sc_st1, sc->sc_sh1, sc->sc_iosz1);
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, sc->sc_iosz0);

	/*
	 * XXX It's not clear if any additional buffers have been
	 * XXX allocated and require free()ing
	 */

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, hifn, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif

/*
 * hifn_modcmd: module(9) entry point — wire up / tear down the
 * autoconf glue when built as a loadable module.
 */
static int
hifn_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_hifn,
		    cfattach_ioconf_hifn, cfdata_ioconf_hifn);
#endif
		return error;
	default:
		return ENOTTY;
	}
}

/*
 * hifn_rng_get: rndsource(9) callback — the entropy pool wants
 * `bytes` more bytes.  Records the request and either harvests
 * immediately or defers until the RNG warm-up period has passed.
 */
static void
hifn_rng_get(size_t bytes, void *priv)
{
	struct hifn_softc *sc = priv;
	struct timeval delta = {0, 400000};
	struct timeval now, oktime, wait;

	/*
	 * Wait until 0.4 seconds after we start up the RNG to read
	 * anything out of it.  If the time hasn't elapsed, schedule a
	 * callout later on.
	 */
	microtime(&now);

	mutex_enter(&sc->sc_mtx);
	sc->sc_rng_needbits = MAX(sc->sc_rng_needbits, NBBY*bytes);
	timeradd(&sc->sc_rngboottime, &delta, &oktime);
	if (timercmp(&oktime, &now, <=)) {
		hifn_rng(sc);
	} else if (!callout_pending(&sc->sc_rngto)) {
		timersub(&oktime, &now, &wait);
		callout_schedule(&sc->sc_rngto, MAX(1, tvtohz(&wait)));
	}
	mutex_exit(&sc->sc_mtx);
}

/*
 * hifn_init_pubrng: reset and enable the public-key and RNG engines
 * (as advertised by sc_flags) and attach the rndsource.  Returns 0 on
 * success, 1 if the public-key engine failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	uint32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 */
		microtime(&sc->sc_rngboottime);
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_setfunc(&sc->sc_rngto, hifn_rng_intr, sc);
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
		    RND_TYPE_RNG, RND_FLAG_DEFAULT|RND_FLAG_HASCB);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

/*
 * hifn_rng: harvest RNG output into the entropy pool until
 * sc_rng_needbits is satisfied or the hardware runs dry; reschedules
 * itself via sc_rngto if bits are still owed.  Caller holds sc_mtx.
 */
static void
hifn_rng(struct hifn_softc *sc)
{
	uint32_t entropybits;

	KASSERT(mutex_owned(&sc->sc_mtx));

	if (sc->sc_flags & HIFN_IS_7811) {
		while (sc->sc_rng_needbits) {
			uint32_t num[2];
			uint32_t sts;

			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dv, "RNG underflow\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
#ifdef HIFN_DEBUG
			if (hifn_debug >= 2)
				hexdump(printf, "hifn", num, sizeof num);
#endif
			/* Credit only HIFN_RNG_BITSPER-derated entropy. */
			entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
			rnd_add_data_intr(&sc->sc_rnd_source, num, sizeof(num),
			    entropybits);
			entropybits = MAX(entropybits, 1);
			entropybits = MIN(entropybits, sc->sc_rng_needbits);
			sc->sc_rng_needbits -= entropybits;
		}
	} else {
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */
		while (sc->sc_rng_needbits) {
			uint32_t num[64];
			unsigned i;

			/* Keep every 8th sample; see comment above. */
			for (i = 0; i < 8*__arraycount(num); i++)
				num[i/8] = READ_REG_1(sc, HIFN_1_RNG_DATA);
#ifdef HIFN_DEBUG
			if (hifn_debug >= 2)
				hexdump(printf, "hifn", num, sizeof num);
#endif
			entropybits = NBBY*sizeof(num)/HIFN_RNG_BITSPER;
			rnd_add_data_intr(&sc->sc_rnd_source, num, sizeof num,
			    entropybits);
			entropybits = MAX(entropybits, 1);
			entropybits = MIN(entropybits, sc->sc_rng_needbits);
			sc->sc_rng_needbits -= entropybits;
		}
	}

	/* If we still need more, try again in another second. */
	if (sc->sc_rng_needbits)
		callout_schedule(&sc->sc_rngto, hz);
}

/*
 * hifn_rng_intr: callout(9) wrapper that takes the softc lock around
 * hifn_rng().
 */
static void
hifn_rng_intr(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	hifn_rng(sc);
	mutex_spin_exit(&sc->sc_mtx);
}

/*
 * hifn_puc_wait: spin up to ~5ms for the processing unit's RESET bit
 * to clear; complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

/*
 * hifn_set_retry: clear the low half of the PCI TRDY/retry timeout
 * register (disables the retry/TRDY timeouts).
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	uint32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.
0x7 avoids 765 * resetting the board and zeros out the other fields. 766 */ 767 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 768 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 769 770 /* 771 * Now that polling has been disabled, we have to wait 1 ms 772 * before resetting the board. 773 */ 774 DELAY(1000); 775 776 /* Reset the DMA unit */ 777 if (full) { 778 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 779 DELAY(1000); 780 } else { 781 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 782 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 783 hifn_reset_puc(sc); 784 } 785 786 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma)); 787 788 /* Bring dma unit out of reset */ 789 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 790 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 791 792 hifn_puc_wait(sc); 793 794 hifn_set_retry(sc); 795 796 if (sc->sc_flags & HIFN_IS_7811) { 797 for (reg = 0; reg < 1000; reg++) { 798 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 799 HIFN_MIPSRST_CRAMINIT) 800 break; 801 DELAY(1000); 802 } 803 if (reg == 1000) 804 printf(": cram init timeout\n"); 805 } 806 } 807 808 static uint32_t 809 hifn_next_signature(uint32_t a, u_int cnt) 810 { 811 u_int i; 812 uint32_t v; 813 814 for (i = 0; i < cnt; i++) { 815 816 /* get the parity */ 817 v = a & 0x80080125; 818 v ^= v >> 16; 819 v ^= v >> 8; 820 v ^= v >> 4; 821 v ^= v >> 2; 822 v ^= v >> 1; 823 824 a = (v & 1) ^ (a << 1); 825 } 826 827 return a; 828 } 829 830 static struct pci2id { 831 u_short pci_vendor; 832 u_short pci_prod; 833 char card_id[13]; 834 } const pci2id[] = { 835 { 836 PCI_VENDOR_HIFN, 837 PCI_PRODUCT_HIFN_7951, 838 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 839 0x00, 0x00, 0x00, 0x00, 0x00 } 840 }, { 841 PCI_VENDOR_HIFN, 842 PCI_PRODUCT_HIFN_7955, 843 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 844 0x00, 0x00, 0x00, 0x00, 0x00 } 845 }, { 846 PCI_VENDOR_HIFN, 847 PCI_PRODUCT_HIFN_7956, 848 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 849 0x00, 0x00, 0x00, 0x00, 0x00 } 850 }, { 851 
PCI_VENDOR_NETSEC, 852 PCI_PRODUCT_NETSEC_7751, 853 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 854 0x00, 0x00, 0x00, 0x00, 0x00 } 855 }, { 856 PCI_VENDOR_INVERTEX, 857 PCI_PRODUCT_INVERTEX_AEON, 858 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 859 0x00, 0x00, 0x00, 0x00, 0x00 } 860 }, { 861 PCI_VENDOR_HIFN, 862 PCI_PRODUCT_HIFN_7811, 863 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 864 0x00, 0x00, 0x00, 0x00, 0x00 } 865 }, { 866 /* 867 * Other vendors share this PCI ID as well, such as 868 * powercrypt, and obviously they also 869 * use the same key. 870 */ 871 PCI_VENDOR_HIFN, 872 PCI_PRODUCT_HIFN_7751, 873 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 874 0x00, 0x00, 0x00, 0x00, 0x00 } 875 }, 876 }; 877 878 /* 879 * Checks to see if crypto is already enabled. If crypto isn't enable, 880 * "hifn_enable_crypto" is called to enable it. The check is important, 881 * as enabling crypto twice will lock the board. 882 */ 883 static const char * 884 hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid) 885 { 886 uint32_t dmacfg, ramcfg, encl, addr, i; 887 const char *offtbl = NULL; 888 889 for (i = 0; i < __arraycount(pci2id); i++) { 890 if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) && 891 pci2id[i].pci_prod == PCI_PRODUCT(pciid)) { 892 offtbl = pci2id[i].card_id; 893 break; 894 } 895 } 896 897 if (offtbl == NULL) { 898 #ifdef HIFN_DEBUG 899 aprint_debug_dev(sc->sc_dv, "Unknown card!\n"); 900 #endif 901 return (NULL); 902 } 903 904 ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); 905 dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); 906 907 /* 908 * The RAM config register's encrypt level bit needs to be set before 909 * every read performed on the encryption level register. 910 */ 911 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 912 913 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 914 915 /* 916 * Make sure we don't re-unlock. Two unlocks kills chip until the 917 * next reboot. 
918 */ 919 if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { 920 #ifdef HIFN_DEBUG 921 aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n"); 922 #endif 923 goto report; 924 } 925 926 if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { 927 #ifdef HIFN_DEBUG 928 aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n"); 929 #endif 930 return (NULL); 931 } 932 933 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | 934 HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 935 DELAY(1000); 936 addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1); 937 DELAY(1000); 938 WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0); 939 DELAY(1000); 940 941 for (i = 0; i <= 12; i++) { 942 addr = hifn_next_signature(addr, offtbl[i] + 0x101); 943 WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr); 944 945 DELAY(1000); 946 } 947 948 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 949 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 950 951 #ifdef HIFN_DEBUG 952 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) 953 aprint_debug("Encryption engine is permanently locked until next system reset."); 954 else 955 aprint_debug("Encryption engine enabled successfully!"); 956 #endif 957 958 report: 959 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); 960 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); 961 962 switch (encl) { 963 case HIFN_PUSTAT_ENA_0: 964 return ("LZS-only (no encr/auth)"); 965 966 case HIFN_PUSTAT_ENA_1: 967 return ("DES"); 968 969 case HIFN_PUSTAT_ENA_2: 970 if (sc->sc_flags & HIFN_HAS_AES) 971 return ("3DES/AES"); 972 else 973 return ("3DES"); 974 975 default: 976 return ("disabled"); 977 } 978 /* NOTREACHED */ 979 } 980 981 /* 982 * Give initial values to the registers listed in the "Register Space" 983 * section of the HIFN Software Development reference manual. 
984 */ 985 static void 986 hifn_init_pci_registers(struct hifn_softc *sc) 987 { 988 /* write fixed values needed by the Initialization registers */ 989 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); 990 WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); 991 WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); 992 993 /* write all 4 ring address registers */ 994 WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr + 995 offsetof(struct hifn_dma, cmdr[0])); 996 WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr + 997 offsetof(struct hifn_dma, srcr[0])); 998 WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr + 999 offsetof(struct hifn_dma, dstr[0])); 1000 WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr + 1001 offsetof(struct hifn_dma, resr[0])); 1002 1003 DELAY(2000); 1004 1005 /* write status register */ 1006 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1007 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | 1008 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | 1009 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | 1010 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | 1011 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | 1012 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | 1013 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | 1014 HIFN_DMACSR_S_WAIT | 1015 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | 1016 HIFN_DMACSR_C_WAIT | 1017 HIFN_DMACSR_ENGINE | 1018 ((sc->sc_flags & HIFN_HAS_PUBLIC) ? 1019 HIFN_DMACSR_PUBDONE : 0) | 1020 ((sc->sc_flags & HIFN_IS_7811) ? 1021 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); 1022 1023 sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; 1024 sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | 1025 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | 1026 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | 1027 HIFN_DMAIER_ENGINE | 1028 ((sc->sc_flags & HIFN_IS_7811) ? 
1029 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); 1030 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 1031 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1032 CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2); 1033 1034 if (sc->sc_flags & HIFN_IS_7956) { 1035 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | 1036 HIFN_PUCNFG_TCALLPHASES | 1037 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); 1038 WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956); 1039 } else { 1040 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | 1041 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | 1042 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | 1043 (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); 1044 } 1045 1046 WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); 1047 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 1048 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | 1049 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | 1050 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); 1051 } 1052 1053 /* 1054 * The maximum number of sessions supported by the card 1055 * is dependent on the amount of context ram, which 1056 * encryption algorithms are enabled, and how compression 1057 * is configured. This should be configured before this 1058 * routine is called. 
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	uint32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		/* smaller per-session context when ENCCNFG is set */
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* hard cap regardless of available context RAM */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 *
 * Writes a constant pattern (0x55 then 0xaa) to RAM address 0 and reads it
 * back; if either pattern does not survive the round trip, the board is
 * assumed to carry DRAM (sc_drammodel = 1).  Returns -1 only if the
 * read/write commands themselves fail.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	uint8_t data[8], dataexpect[8];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size the external SRAM: tag every 16KB step (top down) with a distinct
 * 32-bit counter value, then verify the tags bottom up.  Aliasing or
 * missing memory makes the verify loop stop early, so sc_ramsize ends up
 * as the highest address that read back correctly.  Always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	uint32_t a, b;
	uint8_t data[8];
	uint8_t dataexpect[sizeof(data)];
	size_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	a = HIFN_SRAM_GRANULARITY * HIFN_SRAM_STEP_SIZE;
	b = HIFN_SRAM_GRANULARITY;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; ++i) {
		a -= HIFN_SRAM_STEP_SIZE;
		b -= 1;
		le32enc(data, b);
		hifn_writeramaddr(sc, a, data);
	}

	a = 0;
	b = 0;
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		le32enc(dataexpect, b);
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);

		a += HIFN_SRAM_STEP_SIZE;
		b += 1;
		sc->sc_ramsize = a;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	uint32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		/* decode the DRAM size field: 256KB << field value */
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Reserve the next slot in each of the four descriptor rings, returning
 * the indices through the out-parameters.  When a ring index reaches the
 * end, the extra JUMP descriptor is (re)armed and the index wraps to 0.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
dma->dsti = 0; 1215 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1216 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1217 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1218 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1219 } 1220 *dstp = dma->dsti++; 1221 dma->dstk = dma->dsti; 1222 1223 if (dma->resi == HIFN_D_RES_RSIZE) { 1224 dma->resi = 0; 1225 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1226 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1227 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1228 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1229 } 1230 *resp = dma->resi++; 1231 dma->resk = dma->resi; 1232 } 1233 1234 static int 1235 hifn_writeramaddr(struct hifn_softc *sc, int addr, uint8_t *data) 1236 { 1237 struct hifn_dma *dma = sc->sc_dma; 1238 struct hifn_base_command wc; 1239 const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1240 int r, cmdi, resi, srci, dsti; 1241 1242 wc.masks = htole16(3 << 13); 1243 wc.session_num = htole16(addr >> 14); 1244 wc.total_source_count = htole16(8); 1245 wc.total_dest_count = htole16(addr & 0x3fff); 1246 1247 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1248 1249 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1250 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1251 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1252 1253 /* build write command */ 1254 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND); 1255 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc; 1256 memcpy(&dma->test_src, data, sizeof(dma->test_src)); 1257 1258 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1259 + offsetof(struct hifn_dma, test_src)); 1260 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1261 + offsetof(struct hifn_dma, test_dst)); 1262 1263 dma->cmdr[cmdi].l = htole32(16 | masks); 1264 dma->srcr[srci].l = htole32(8 | masks); 1265 dma->dstr[dsti].l = htole32(4 | masks); 1266 dma->resr[resi].l = htole32(4 | masks); 1267 1268 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1269 0, sc->sc_dmamap->dm_mapsize, 1270 
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1271 1272 for (r = 10000; r >= 0; r--) { 1273 DELAY(10); 1274 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1275 0, sc->sc_dmamap->dm_mapsize, 1276 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1277 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1278 break; 1279 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1280 0, sc->sc_dmamap->dm_mapsize, 1281 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1282 } 1283 if (r == 0) { 1284 printf("%s: writeramaddr -- " 1285 "result[%d](addr %d) still valid\n", 1286 device_xname(sc->sc_dv), resi, addr); 1287 return (-1); 1288 } else 1289 r = 0; 1290 1291 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1292 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1293 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1294 1295 return (r); 1296 } 1297 1298 static int 1299 hifn_readramaddr(struct hifn_softc *sc, int addr, uint8_t *data) 1300 { 1301 struct hifn_dma *dma = sc->sc_dma; 1302 struct hifn_base_command rc; 1303 const uint32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1304 int r, cmdi, srci, dsti, resi; 1305 1306 rc.masks = htole16(2 << 13); 1307 rc.session_num = htole16(addr >> 14); 1308 rc.total_source_count = htole16(addr & 0x3fff); 1309 rc.total_dest_count = htole16(8); 1310 1311 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1312 1313 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1314 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1315 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1316 1317 memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND); 1318 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc; 1319 1320 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1321 offsetof(struct hifn_dma, test_src)); 1322 dma->test_src = 0; 1323 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1324 offsetof(struct hifn_dma, test_dst)); 1325 dma->test_dst = 0; 1326 dma->cmdr[cmdi].l = htole32(8 | masks); 1327 dma->srcr[srci].l = htole32(8 | masks); 1328 dma->dstr[dsti].l 
= htole32(8 | masks); 1329 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1330 1331 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1332 0, sc->sc_dmamap->dm_mapsize, 1333 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1334 1335 for (r = 10000; r >= 0; r--) { 1336 DELAY(10); 1337 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1338 0, sc->sc_dmamap->dm_mapsize, 1339 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1340 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1341 break; 1342 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1343 0, sc->sc_dmamap->dm_mapsize, 1344 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1345 } 1346 if (r == 0) { 1347 printf("%s: readramaddr -- " 1348 "result[%d](addr %d) still valid\n", 1349 device_xname(sc->sc_dv), resi, addr); 1350 r = -1; 1351 } else { 1352 r = 0; 1353 memcpy(data, &dma->test_dst, sizeof(dma->test_dst)); 1354 } 1355 1356 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1357 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1358 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1359 1360 return (r); 1361 } 1362 1363 /* 1364 * Initialize the descriptor rings. 
1365 */ 1366 static void 1367 hifn_init_dma(struct hifn_softc *sc) 1368 { 1369 struct hifn_dma *dma = sc->sc_dma; 1370 int i; 1371 1372 hifn_set_retry(sc); 1373 1374 /* initialize static pointer values */ 1375 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1376 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1377 offsetof(struct hifn_dma, command_bufs[i][0])); 1378 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1379 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1380 offsetof(struct hifn_dma, result_bufs[i][0])); 1381 1382 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1383 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1384 offsetof(struct hifn_dma, cmdr[0])); 1385 dma->srcr[HIFN_D_SRC_RSIZE].p = 1386 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1387 offsetof(struct hifn_dma, srcr[0])); 1388 dma->dstr[HIFN_D_DST_RSIZE].p = 1389 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1390 offsetof(struct hifn_dma, dstr[0])); 1391 dma->resr[HIFN_D_RES_RSIZE].p = 1392 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1393 offsetof(struct hifn_dma, resr[0])); 1394 1395 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1396 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1397 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1398 } 1399 1400 /* 1401 * Writes out the raw command buffer space. Returns the 1402 * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, uint8_t *buf)
{
	uint8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	uint32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: the low 16 bits of the source/destination
	 * lengths go in the count fields, the high bits are packed
	 * into the session_num field.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(uint32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* optional sub-commands follow in fixed order: comp, mac, crypt */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* key material follows the sub-commands when NEW_KEY is set */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 key area is a fixed 256 bytes: a short key
			 * is copied repeatedly from its start to fill it,
			 * followed by 4 bytes of zero padding.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/* a no-op command still needs an 8-byte zero payload */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * A map is usable in place only when every segment is 4-byte aligned
 * and every segment except the last is a multiple of 4 bytes long.
 * Returns 1 when aligned, 0 otherwise.
 */
static int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Fill the destination ring from cmd->dst_map.  The final (or slop)
 * descriptor carries HIFN_D_LAST; a trailing partial word is redirected
 * into the per-request slop area indexed by cmd->slopidx.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	uint32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma,
		    slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(uint32_t);

		/* the aligned part of the final segment, if any */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* final descriptor (direct or slop), carries HIFN_D_LAST */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Fill the source ring from cmd->src_map, one descriptor per DMA
 * segment; the last segment is tagged HIFN_D_LAST.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	uint32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}

	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

/*
 * Queue one crypto operation on the hardware: load the source (and if
 * necessary a separately-allocated destination) DMA map, reserve ring
 * slots, write the command buffer and kick the four ring engines.
 * Returns 0 on success; on failure returns ENOMEM/EINVAL with any maps
 * loaded here unloaded again via the goto cleanup chain.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t cmdlen;
	int cmdi, resi, err = 0;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* operate in place; remember any trailing partial word */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/* misaligned source: build a fresh mbuf chain to land
		 * the output in (iovecs cannot be realigned here) */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				m_copy_pkthdr(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
		cmd->dst_map = cmd->dst_map_alloc;
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * Always enable the command wait interrupt.  We are obviously
	 * missing an interrupt or two somewhere.  Enabling the command wait
	 * interrupt will guarantee we get called periodically until all
	 * of the queues are drained and thus work around this.
	 */
	sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* hifn_tick() idles the engines after ~5 seconds of quiet */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
err_srcmap:
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	return (err);
}

/*
 * Once-a-second callout: when no request has been queued for ~5 ticks
 * and a ring has drained, disable its engine (and LED) again.
 */
static void
hifn_tick(void *vsc)
{
	struct hifn_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		uint32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED0);
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r
			    |= HIFN_DMACSR_S_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED1);
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED2);
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	/* re-arm ourselves for one second from now */
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
	mutex_spin_exit(&sc->sc_mtx);
}

/*
 * Interrupt handler: acknowledge DMA status, recover from aborts, and
 * reap completed result/source/command descriptors, calling back into
 * opencrypto for each finished request.  Returns 1 if the interrupt
 * was ours, 0 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	uint32_t dmacsr, restart;
	int i, u;

	mutex_spin_enter(&sc->sc_mtx);

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    device_xname(sc->sc_dv),
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* acknowledge only the sources we have enabled */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings: walk each ring from its consumer index until
	 * a still-VALID (i.e. not yet completed) descriptor is found.
	 * Index RSIZE is the jump descriptor and carries no request. */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
			    /*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;

	mutex_spin_enter(&sc->sc_mtx);
	/* find the first free session bit */
	for (i = 0; i < sc->sc_maxses; i++)
		if (isclr(sc->sc_sessions, i))
			break;
	if (i == sc->sc_maxses) {
		retval = ENOMEM;
		goto out;
	}

	/* at most one MAC, one cipher and one compressor per session */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac) {
				goto out;
			}
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
		case CRYPTO_ARC4:
			if (cry) {
				goto out;
			}
			cry = 1;
			break;
#ifdef CRYPTO_LZS_COMP
		case CRYPTO_LZS_COMP:
			if (comp) {
				goto out;
			}
			comp = 1;
			break;
#endif
		default:
			goto out;
		}
	}
	if (mac == 0 && cry == 0 && comp == 0) {
		goto out;
	}

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry)) {
		goto out;
	}

	*sidp = HIFN_SID(device_unit(sc->sc_dv), i);
	setbit(sc->sc_sessions, i);

	retval = 0;
out:
	mutex_spin_exit(&sc->sc_mtx);
	return retval;
}

/*
 * Deallocate a
session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static void
hifn_freesession(void *arg, uint64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	uint32_t sid = ((uint32_t) tid) & 0xffffffff;

	mutex_spin_enter(&sc->sc_mtx);
	session = HIFN_SESSION(sid);
	KASSERTMSG(session >= 0, "session=%d", session);
	KASSERTMSG(session < sc->sc_maxses, "session=%d maxses=%d",
	    session, sc->sc_maxses);
	KASSERT(isset(sc->sc_sessions, session));
	clrbit(sc->sc_sessions, session);
	mutex_spin_exit(&sc->sc_mtx);
}

/*
 * opencrypto "process" entry point: allocate a command structure from
 * the pool cache, attach the request buffer (mbuf or uio only), then
 * classify the descriptor chain into MAC/cipher(/compression) roles
 * before handing the work to the engine.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err = 0, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if ((cmd = pool_cache_get(sc->sc_cmd_cache, PR_NOWAIT)) == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	mutex_spin_enter(&sc->sc_mtx);
	session = HIFN_SESSION(crp->crp_sid);
	KASSERTMSG(session < sc->sc_maxses, "session=%d maxses=%d",
	    session, sc->sc_maxses);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers!
*/ 2191 } 2192 2193 crd1 = crp->crp_desc; 2194 if (crd1 == NULL) { 2195 err = EINVAL; 2196 goto errout; 2197 } 2198 crd2 = crd1->crd_next; 2199 2200 if (crd2 == NULL) { 2201 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 2202 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || 2203 crd1->crd_alg == CRYPTO_SHA1 || 2204 crd1->crd_alg == CRYPTO_MD5) { 2205 maccrd = crd1; 2206 enccrd = NULL; 2207 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2208 crd1->crd_alg == CRYPTO_3DES_CBC || 2209 crd1->crd_alg == CRYPTO_AES_CBC || 2210 crd1->crd_alg == CRYPTO_ARC4) { 2211 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2212 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2213 maccrd = NULL; 2214 enccrd = crd1; 2215 #ifdef CRYPTO_LZS_COMP 2216 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { 2217 hifn_compression(sc, crp, cmd); 2218 mutex_spin_exit(&sc->sc_mtx); 2219 return 0; 2220 #endif 2221 } else { 2222 err = EINVAL; 2223 goto errout; 2224 } 2225 } else { 2226 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 2227 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || 2228 crd1->crd_alg == CRYPTO_MD5 || 2229 crd1->crd_alg == CRYPTO_SHA1) && 2230 (crd2->crd_alg == CRYPTO_DES_CBC || 2231 crd2->crd_alg == CRYPTO_3DES_CBC || 2232 crd2->crd_alg == CRYPTO_AES_CBC || 2233 crd2->crd_alg == CRYPTO_ARC4) && 2234 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2235 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2236 maccrd = crd1; 2237 enccrd = crd2; 2238 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2239 crd1->crd_alg == CRYPTO_ARC4 || 2240 crd1->crd_alg == CRYPTO_3DES_CBC || 2241 crd1->crd_alg == CRYPTO_AES_CBC) && 2242 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 || 2243 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 || 2244 crd2->crd_alg == CRYPTO_MD5 || 2245 crd2->crd_alg == CRYPTO_SHA1) && 2246 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2247 enccrd = crd1; 2248 maccrd = crd2; 2249 } else { 2250 /* 2251 * We cannot order the 7751 as requested 2252 */ 2253 err = EINVAL; 2254 goto errout; 2255 } 2256 } 2257 2258 if (enccrd) { 2259 cmd->enccrd = enccrd; 2260 
cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2261 switch (enccrd->crd_alg) { 2262 case CRYPTO_ARC4: 2263 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2264 break; 2265 case CRYPTO_DES_CBC: 2266 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2267 HIFN_CRYPT_CMD_MODE_CBC | 2268 HIFN_CRYPT_CMD_NEW_IV; 2269 break; 2270 case CRYPTO_3DES_CBC: 2271 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2272 HIFN_CRYPT_CMD_MODE_CBC | 2273 HIFN_CRYPT_CMD_NEW_IV; 2274 break; 2275 case CRYPTO_AES_CBC: 2276 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2277 HIFN_CRYPT_CMD_MODE_CBC | 2278 HIFN_CRYPT_CMD_NEW_IV; 2279 break; 2280 default: 2281 err = EINVAL; 2282 goto errout; 2283 } 2284 if (enccrd->crd_alg != CRYPTO_ARC4) { 2285 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 2286 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2287 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2288 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2289 memcpy(cmd->iv, enccrd->crd_iv, ivlen); 2290 else 2291 cprng_fast(cmd->iv, ivlen); 2292 2293 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2294 == 0) { 2295 if (crp->crp_flags & CRYPTO_F_IMBUF) 2296 m_copyback(cmd->srcu.src_m, 2297 enccrd->crd_inject, 2298 ivlen, cmd->iv); 2299 else if (crp->crp_flags & CRYPTO_F_IOV) 2300 cuio_copyback(cmd->srcu.src_io, 2301 enccrd->crd_inject, 2302 ivlen, cmd->iv); 2303 } 2304 } else { 2305 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2306 memcpy(cmd->iv, enccrd->crd_iv, ivlen); 2307 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2308 m_copydata(cmd->srcu.src_m, 2309 enccrd->crd_inject, ivlen, cmd->iv); 2310 else if (crp->crp_flags & CRYPTO_F_IOV) 2311 cuio_copydata(cmd->srcu.src_io, 2312 enccrd->crd_inject, 2313 ivlen, cmd->iv); 2314 } 2315 } 2316 2317 cmd->ck = enccrd->crd_key; 2318 cmd->cklen = enccrd->crd_klen >> 3; 2319 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2320 2321 /* 2322 * Need to specify the size for the AES key in the masks. 
2323 */ 2324 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2325 HIFN_CRYPT_CMD_ALG_AES) { 2326 switch (cmd->cklen) { 2327 case 16: 2328 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2329 break; 2330 case 24: 2331 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2332 break; 2333 case 32: 2334 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2335 break; 2336 default: 2337 err = EINVAL; 2338 goto errout; 2339 } 2340 } 2341 } 2342 2343 if (maccrd) { 2344 cmd->maccrd = maccrd; 2345 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2346 2347 switch (maccrd->crd_alg) { 2348 case CRYPTO_MD5: 2349 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2350 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2351 HIFN_MAC_CMD_POS_IPSEC; 2352 break; 2353 case CRYPTO_MD5_HMAC_96: 2354 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2355 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2356 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2357 break; 2358 case CRYPTO_SHA1: 2359 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2360 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2361 HIFN_MAC_CMD_POS_IPSEC; 2362 break; 2363 case CRYPTO_SHA1_HMAC_96: 2364 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2365 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2366 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2367 break; 2368 } 2369 2370 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 || 2371 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) { 2372 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2373 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3); 2374 memset(cmd->mac + (maccrd->crd_klen >> 3), 0, 2375 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2376 } 2377 } 2378 2379 cmd->crp = crp; 2380 cmd->session_num = session; 2381 cmd->softc = sc; 2382 2383 err = hifn_crypto(sc, cmd, crp, hint); 2384 if (err == 0) { 2385 mutex_exit(&sc->sc_mtx); 2386 return 0; 2387 } else if (err == ERESTART) { 2388 /* 2389 * There weren't enough resources to dispatch the request 2390 * to the part. Notify the caller so they'll requeue this 2391 * request and resubmit it again soon. 
2392 */ 2393 #ifdef HIFN_DEBUG 2394 if (hifn_debug) 2395 printf("%s: requeue request\n", device_xname(sc->sc_dv)); 2396 #endif 2397 sc->sc_needwakeup |= CRYPTO_SYMQ; 2398 mutex_spin_exit(&sc->sc_mtx); 2399 pool_cache_put(sc->sc_cmd_cache, cmd); 2400 return ERESTART; 2401 } 2402 2403 errout: 2404 if (err == EINVAL) 2405 hifnstats.hst_invalid++; 2406 else 2407 hifnstats.hst_nomem++; 2408 crp->crp_etype = err; 2409 mutex_spin_exit(&sc->sc_mtx); 2410 if (cmd != NULL) { 2411 if (crp->crp_flags & CRYPTO_F_IMBUF && 2412 cmd->srcu.src_m != cmd->dstu.dst_m) 2413 m_freem(cmd->dstu.dst_m); 2414 cmd->dst_map = NULL; 2415 pool_cache_put(sc->sc_cmd_cache, cmd); 2416 } 2417 crypto_done(crp); 2418 return 0; 2419 } 2420 2421 static void 2422 hifn_abort(struct hifn_softc *sc) 2423 { 2424 struct hifn_dma *dma = sc->sc_dma; 2425 struct hifn_command *cmd; 2426 struct cryptop *crp; 2427 int i, u; 2428 2429 KASSERT(mutex_owned(&sc->sc_mtx)); 2430 2431 i = dma->resk; u = dma->resu; 2432 while (u != 0) { 2433 cmd = dma->hifn_commands[i]; 2434 KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/); 2435 dma->hifn_commands[i] = NULL; 2436 crp = cmd->crp; 2437 2438 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2439 /* Salvage what we can. 
*/ 2440 hifnstats.hst_opackets++; 2441 hifn_callback(sc, cmd, dma->result_bufs[i]); 2442 } else { 2443 if (cmd->src_map == cmd->dst_map) { 2444 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2445 0, cmd->src_map->dm_mapsize, 2446 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2447 } else { 2448 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2449 0, cmd->src_map->dm_mapsize, 2450 BUS_DMASYNC_POSTWRITE); 2451 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2452 0, cmd->dst_map->dm_mapsize, 2453 BUS_DMASYNC_POSTREAD); 2454 } 2455 2456 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2457 m_freem(cmd->srcu.src_m); 2458 crp->crp_buf = (void *)cmd->dstu.dst_m; 2459 } 2460 2461 /* non-shared buffers cannot be restarted */ 2462 if (cmd->src_map != cmd->dst_map) { 2463 /* 2464 * XXX should be EAGAIN, delayed until 2465 * after the reset. 2466 */ 2467 crp->crp_etype = ENOMEM; 2468 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2469 } else 2470 crp->crp_etype = ENOMEM; 2471 2472 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2473 2474 cmd->dst_map = NULL; 2475 pool_cache_put(sc->sc_cmd_cache, cmd); 2476 2477 if (crp->crp_etype != EAGAIN) 2478 crypto_done(crp); 2479 } 2480 2481 if (++i == HIFN_D_RES_RSIZE) 2482 i = 0; 2483 u--; 2484 } 2485 dma->resk = i; dma->resu = u; 2486 2487 hifn_reset_board(sc, 1); 2488 hifn_init_dma(sc); 2489 hifn_init_pci_registers(sc); 2490 } 2491 2492 static void 2493 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, uint8_t *resbuf) 2494 { 2495 struct hifn_dma *dma = sc->sc_dma; 2496 struct cryptop *crp = cmd->crp; 2497 struct cryptodesc *crd; 2498 struct mbuf *m; 2499 int totlen, i, u; 2500 2501 KASSERT(mutex_owned(&sc->sc_mtx)); 2502 2503 if (cmd->src_map == cmd->dst_map) 2504 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2505 0, cmd->src_map->dm_mapsize, 2506 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2507 else { 2508 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2509 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2510 bus_dmamap_sync(sc->sc_dmat, 
cmd->dst_map, 2511 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2512 } 2513 2514 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2515 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2516 crp->crp_buf = (void *)cmd->dstu.dst_m; 2517 totlen = cmd->src_map->dm_mapsize; 2518 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) { 2519 if (totlen < m->m_len) { 2520 m->m_len = totlen; 2521 totlen = 0; 2522 } else 2523 totlen -= m->m_len; 2524 } 2525 cmd->dstu.dst_m->m_pkthdr.len = 2526 cmd->srcu.src_m->m_pkthdr.len; 2527 m_freem(cmd->srcu.src_m); 2528 } 2529 } 2530 2531 if (cmd->sloplen != 0) { 2532 if (crp->crp_flags & CRYPTO_F_IMBUF) 2533 m_copyback((struct mbuf *)crp->crp_buf, 2534 cmd->src_map->dm_mapsize - cmd->sloplen, 2535 cmd->sloplen, &dma->slop[cmd->slopidx]); 2536 else if (crp->crp_flags & CRYPTO_F_IOV) 2537 cuio_copyback((struct uio *)crp->crp_buf, 2538 cmd->src_map->dm_mapsize - cmd->sloplen, 2539 cmd->sloplen, &dma->slop[cmd->slopidx]); 2540 } 2541 2542 i = dma->dstk; u = dma->dstu; 2543 while (u != 0) { 2544 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2545 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2546 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2547 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2548 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2549 offsetof(struct hifn_dma, dstr[i]), 2550 sizeof(struct hifn_desc), 2551 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2552 break; 2553 } 2554 if (++i == (HIFN_D_DST_RSIZE + 1)) 2555 i = 0; 2556 else 2557 u--; 2558 } 2559 dma->dstk = i; dma->dstu = u; 2560 2561 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize; 2562 2563 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2564 uint8_t *macbuf; 2565 2566 macbuf = resbuf + sizeof(struct hifn_base_result); 2567 if (cmd->base_masks & HIFN_BASE_CMD_COMP) 2568 macbuf += sizeof(struct hifn_comp_result); 2569 macbuf += sizeof(struct hifn_mac_result); 2570 2571 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2572 int len; 2573 2574 if (crd->crd_alg == CRYPTO_MD5) 
2575 len = 16; 2576 else if (crd->crd_alg == CRYPTO_SHA1) 2577 len = 20; 2578 else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 || 2579 crd->crd_alg == CRYPTO_SHA1_HMAC_96) 2580 len = 12; 2581 else 2582 continue; 2583 2584 if (crp->crp_flags & CRYPTO_F_IMBUF) 2585 m_copyback((struct mbuf *)crp->crp_buf, 2586 crd->crd_inject, len, macbuf); 2587 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2588 memcpy(crp->crp_mac, (void *)macbuf, len); 2589 break; 2590 } 2591 } 2592 2593 if (cmd->src_map != cmd->dst_map) 2594 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2595 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2596 cmd->dst_map = NULL; 2597 pool_cache_put(sc->sc_cmd_cache, cmd); 2598 crypto_done(crp); 2599 } 2600 2601 #ifdef CRYPTO_LZS_COMP 2602 2603 static void 2604 hifn_compression(struct hifn_softc *sc, struct cryptop *crp, 2605 struct hifn_command *cmd) 2606 { 2607 struct cryptodesc *crd = crp->crp_desc; 2608 int s, err = 0; 2609 2610 cmd->compcrd = crd; 2611 cmd->base_masks |= HIFN_BASE_CMD_COMP; 2612 2613 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { 2614 /* 2615 * XXX can only handle mbufs right now since we can 2616 * XXX dynamically resize them. 
2617 */ 2618 err = EINVAL; 2619 goto fail; 2620 } 2621 2622 if ((crd->crd_flags & CRD_F_COMP) == 0) 2623 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2624 if (crd->crd_alg == CRYPTO_LZS_COMP) 2625 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | 2626 HIFN_COMP_CMD_CLEARHIST; 2627 2628 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2629 int len; 2630 2631 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 2632 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 2633 err = ENOMEM; 2634 goto fail; 2635 } 2636 2637 len = cmd->src_map->dm_mapsize / MCLBYTES; 2638 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) 2639 len++; 2640 len *= MCLBYTES; 2641 2642 if ((crd->crd_flags & CRD_F_COMP) == 0) 2643 len *= 4; 2644 2645 if (len > HIFN_MAX_DMALEN) 2646 len = HIFN_MAX_DMALEN; 2647 2648 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); 2649 if (cmd->dstu.dst_m == NULL) { 2650 err = ENOMEM; 2651 goto fail; 2652 } 2653 2654 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2655 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2656 err = ENOMEM; 2657 goto fail; 2658 } 2659 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2660 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 2661 cmd->srcu.src_io, BUS_DMA_NOWAIT)) { 2662 err = ENOMEM; 2663 goto fail; 2664 } 2665 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 2666 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { 2667 err = ENOMEM; 2668 goto fail; 2669 } 2670 } 2671 2672 if (cmd->src_map == cmd->dst_map) 2673 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2674 0, cmd->src_map->dm_mapsize, 2675 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2676 else { 2677 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2678 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2679 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2680 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2681 } 2682 2683 cmd->crp = crp; 2684 /* 2685 * Always use session 0. The modes of compression we use are 2686 * stateless and there is always at least one compression 2687 * context, zero. 
2688 */ 2689 cmd->session_num = 0; 2690 cmd->softc = sc; 2691 2692 err = hifn_compress_enter(sc, cmd); 2693 if (err) 2694 goto fail; 2695 2696 return; 2697 2698 fail: 2699 if (cmd->dst_map != NULL) { 2700 if (cmd->dst_map->dm_nsegs > 0) 2701 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2702 } 2703 if (cmd->src_map != NULL) { 2704 if (cmd->src_map->dm_nsegs > 0) 2705 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2706 } 2707 cmd->dst_map = NULL; 2708 pool_cache_put(sc->sc_cmd_cache, cmd); 2709 if (err == EINVAL) 2710 hifnstats.hst_invalid++; 2711 else 2712 hifnstats.hst_nomem++; 2713 crp->crp_etype = err; 2714 crypto_done(crp); 2715 } 2716 2717 static int 2718 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd) 2719 { 2720 struct hifn_dma *dma = sc->sc_dma; 2721 int cmdi, resi; 2722 uint32_t cmdlen; 2723 2724 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 2725 (dma->resu + 1) > HIFN_D_CMD_RSIZE) 2726 return (ENOMEM); 2727 2728 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || 2729 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE) 2730 return (ENOMEM); 2731 2732 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 2733 dma->cmdi = 0; 2734 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 2735 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2736 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 2737 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2738 } 2739 cmdi = dma->cmdi++; 2740 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 2741 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 2742 2743 /* .p for command/result already set */ 2744 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 2745 HIFN_D_MASKDONEIRQ); 2746 HIFN_CMDR_SYNC(sc, cmdi, 2747 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2748 dma->cmdu++; 2749 if (sc->sc_c_busy == 0) { 2750 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 2751 sc->sc_c_busy = 1; 2752 SET_LED(sc, HIFN_MIPSRST_LED0); 2753 } 2754 2755 /* 2756 * Always enable the command wait interrupt. 
We are obviously 2757 * missing an interrupt or two somewhere. Enabling the command wait 2758 * interrupt will guarantee we get called periodically until all 2759 * of the queues are drained and thus work around this. 2760 */ 2761 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 2762 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2763 2764 hifnstats.hst_ipackets++; 2765 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; 2766 2767 hifn_dmamap_load_src(sc, cmd); 2768 if (sc->sc_s_busy == 0) { 2769 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 2770 sc->sc_s_busy = 1; 2771 SET_LED(sc, HIFN_MIPSRST_LED1); 2772 } 2773 2774 /* 2775 * Unlike other descriptors, we don't mask done interrupt from 2776 * result descriptor. 2777 */ 2778 if (dma->resi == HIFN_D_RES_RSIZE) { 2779 dma->resi = 0; 2780 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2781 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2782 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2783 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2784 } 2785 resi = dma->resi++; 2786 dma->hifn_commands[resi] = cmd; 2787 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2788 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2789 HIFN_D_VALID | HIFN_D_LAST); 2790 HIFN_RESR_SYNC(sc, resi, 2791 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2792 dma->resu++; 2793 if (sc->sc_r_busy == 0) { 2794 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 2795 sc->sc_r_busy = 1; 2796 SET_LED(sc, HIFN_MIPSRST_LED2); 2797 } 2798 2799 if (cmd->sloplen) 2800 cmd->slopidx = resi; 2801 2802 hifn_dmamap_load_dst(sc, cmd); 2803 2804 if (sc->sc_d_busy == 0) { 2805 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 2806 sc->sc_d_busy = 1; 2807 } 2808 sc->sc_active = 5; 2809 cmd->cmd_callback = hifn_callback_comp; 2810 return (0); 2811 } 2812 2813 static void 2814 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd, 2815 uint8_t *resbuf) 2816 { 2817 struct hifn_base_result baseres; 2818 struct cryptop *crp = cmd->crp; 2819 struct hifn_dma *dma = 
sc->sc_dma; 2820 struct mbuf *m; 2821 int err = 0, i, u; 2822 uint32_t olen; 2823 bus_size_t dstsize; 2824 2825 KASSERT(mutex_owned(&sc->sc_mtx)); 2826 2827 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2828 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2829 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2830 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2831 2832 dstsize = cmd->dst_map->dm_mapsize; 2833 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2834 2835 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result)); 2836 2837 i = dma->dstk; u = dma->dstu; 2838 while (u != 0) { 2839 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2840 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2841 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2842 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2843 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2844 offsetof(struct hifn_dma, dstr[i]), 2845 sizeof(struct hifn_desc), 2846 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2847 break; 2848 } 2849 if (++i == (HIFN_D_DST_RSIZE + 1)) 2850 i = 0; 2851 else 2852 u--; 2853 } 2854 dma->dstk = i; dma->dstu = u; 2855 2856 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) { 2857 bus_size_t xlen; 2858 2859 xlen = dstsize; 2860 2861 m_freem(cmd->dstu.dst_m); 2862 2863 if (xlen == HIFN_MAX_DMALEN) { 2864 /* We've done all we can. 
*/ 2865 err = E2BIG; 2866 goto out; 2867 } 2868 2869 xlen += MCLBYTES; 2870 2871 if (xlen > HIFN_MAX_DMALEN) 2872 xlen = HIFN_MAX_DMALEN; 2873 2874 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen, 2875 cmd->srcu.src_m); 2876 if (cmd->dstu.dst_m == NULL) { 2877 err = ENOMEM; 2878 goto out; 2879 } 2880 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2881 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2882 err = ENOMEM; 2883 goto out; 2884 } 2885 2886 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2887 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2888 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2889 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2890 2891 err = hifn_compress_enter(sc, cmd); 2892 if (err != 0) 2893 goto out; 2894 return; 2895 } 2896 2897 olen = dstsize - (letoh16(baseres.dst_cnt) | 2898 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> 2899 HIFN_BASE_RES_DSTLEN_S) << 16)); 2900 2901 crp->crp_olen = olen - cmd->compcrd->crd_skip; 2902 2903 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2904 2905 m = cmd->dstu.dst_m; 2906 if (m->m_flags & M_PKTHDR) 2907 m->m_pkthdr.len = olen; 2908 crp->crp_buf = (void *)m; 2909 for (; m != NULL; m = m->m_next) { 2910 if (olen >= m->m_len) 2911 olen -= m->m_len; 2912 else { 2913 m->m_len = olen; 2914 olen = 0; 2915 } 2916 } 2917 2918 m_freem(cmd->srcu.src_m); 2919 cmd->dst_map = NULL; 2920 pool_cache_put(sc->sc_cmd_cache, cmd); 2921 crp->crp_etype = 0; 2922 crypto_done(crp); 2923 return; 2924 2925 out: 2926 if (cmd->dst_map != NULL) { 2927 if (cmd->src_map->dm_nsegs != 0) 2928 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2929 } 2930 if (cmd->src_map != NULL) { 2931 if (cmd->src_map->dm_nsegs != 0) 2932 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2933 } 2934 m_freem(cmd->dstu.dst_m); 2935 cmd->dst_map = NULL; 2936 pool_cache_put(sc->sc_cmd_cache, cmd); 2937 crp->crp_etype = err; 2938 crypto_done(crp); 2939 } 2940 2941 static struct mbuf * 2942 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) 2943 { 2944 int len; 
2945 struct mbuf *m, *m0, *mlast; 2946 2947 if (mtemplate->m_flags & M_PKTHDR) { 2948 len = MHLEN; 2949 MGETHDR(m0, M_DONTWAIT, MT_DATA); 2950 } else { 2951 len = MLEN; 2952 MGET(m0, M_DONTWAIT, MT_DATA); 2953 } 2954 if (m0 == NULL) 2955 return (NULL); 2956 if (len == MHLEN) 2957 m_copy_pkthdr(m0, mtemplate); 2958 MCLGET(m0, M_DONTWAIT); 2959 if (!(m0->m_flags & M_EXT)) { 2960 m_freem(m0); 2961 return (NULL); 2962 } 2963 len = MCLBYTES; 2964 2965 totlen -= len; 2966 m0->m_pkthdr.len = m0->m_len = len; 2967 mlast = m0; 2968 2969 while (totlen > 0) { 2970 MGET(m, M_DONTWAIT, MT_DATA); 2971 if (m == NULL) { 2972 m_freem(m0); 2973 return (NULL); 2974 } 2975 MCLGET(m, M_DONTWAIT); 2976 if (!(m->m_flags & M_EXT)) { 2977 m_free(m); 2978 m_freem(m0); 2979 return (NULL); 2980 } 2981 len = MCLBYTES; 2982 m->m_len = len; 2983 if (m0->m_flags & M_PKTHDR) 2984 m0->m_pkthdr.len += len; 2985 totlen -= len; 2986 2987 mlast->m_next = m; 2988 mlast = m; 2989 } 2990 2991 return (m0); 2992 } 2993 #endif /* CRYPTO_LZS_COMP */ 2994 2995 static void 2996 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, uint32_t val) 2997 { 2998 /* 2999 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 3000 * and Group 1 registers; avoid conditions that could create 3001 * burst writes by doing a read in between the writes. 
3002 */ 3003 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 3004 if (sc->sc_waw_lastgroup == reggrp && 3005 sc->sc_waw_lastreg == reg - 4) { 3006 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 3007 } 3008 sc->sc_waw_lastgroup = reggrp; 3009 sc->sc_waw_lastreg = reg; 3010 } 3011 if (reggrp == 0) 3012 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 3013 else 3014 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 3015 3016 } 3017 3018 static uint32_t 3019 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg) 3020 { 3021 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 3022 sc->sc_waw_lastgroup = -1; 3023 sc->sc_waw_lastreg = 1; 3024 } 3025 if (reggrp == 0) 3026 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg)); 3027 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg)); 3028 } 3029