/*	$NetBSD: ubsec.c,v 1.65 2024/02/23 22:03:45 andvar Exp $	*/
/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */
/*	$OpenBSD: ubsec.c,v 1.143 2009/03/27 13:31:30 reyk Exp$	*/

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.65 2024/02/23 22:03:45 andvar Exp $");

#undef UBSEC_DEBUG

/*
 * uBsec 5[56]01, 58xx hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#include <sys/cprng.h>
#include <sys/md5.h>
#include <sys/rndsource.h>
#include <sys/sha1.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/ubsecreg.h>
#include <dev/pci/ubsecvar.h>

#define UBSEC_NO_RNG	/* hangs on attach */
#define letoh16 htole16
#define letoh32 htole32

/*
 * Prototypes and count for the pci_device structure
 */
static	int  ubsec_probe(device_t, cfdata_t, void *);
static	void ubsec_attach(device_t, device_t, void *);
static	int  ubsec_detach(device_t, int);
static	void ubsec_reset_board(struct ubsec_softc *);
static	void ubsec_init_board(struct ubsec_softc *);
static	void ubsec_init_pciregs(struct pci_attach_args *pa);
static	void ubsec_cleanchip(struct ubsec_softc *);
static	void ubsec_totalreset(struct ubsec_softc *);
static	int  ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);

CFATTACH_DECL_NEW(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
	      ubsec_detach, NULL);
extern struct cfdriver ubsec_cd;

/* patchable */
#ifdef UBSEC_DEBUG
extern int ubsec_debug;
int ubsec_debug=1;
#endif

static	int	ubsec_intr(void *);
static	int	ubsec_newsession(void*, u_int32_t *, struct cryptoini *);
static	void	ubsec_freesession(void*, u_int64_t);
static	int	ubsec_process(void*, struct cryptop *, int hint);
static	void	ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void	ubsec_feed(struct ubsec_softc *);
static	void	ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void	ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	void	ubsec_feed2(struct ubsec_softc *);
static	void	ubsec_feed4(struct ubsec_softc *);
#ifndef UBSEC_NO_RNG
static	void	ubsec_rng(void *);
static	void	ubsec_rng_locked(void *);
static	void	ubsec_rng_get(size_t, void *);
#endif /* UBSEC_NO_RNG */
static	int	ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
				 struct ubsec_dma_alloc *, int);
static	void	ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int	ubsec_dmamap_aligned(bus_dmamap_t);

static	int	ubsec_kprocess(void*, struct cryptkop *, int);
static	void	ubsec_kprocess_modexp_sw(struct ubsec_softc *,
					 struct cryptkop *, int);
static	void	ubsec_kprocess_modexp_hw(struct ubsec_softc *,
					 struct cryptkop *, int);
static	void	ubsec_kprocess_rsapriv(struct ubsec_softc *,
				       struct cryptkop *, int);
static	void	ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int	ubsec_ksigbits(struct crparam *);
static	void	ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void	ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

#ifdef UBSEC_DEBUG
static	void	ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void	ubsec_dump_mcr(struct ubsec_mcr *);
static	void	ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *);
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#ifndef HTOLE32
#define	HTOLE32(x) (x) = htole32(x)
#endif

struct ubsec_stats ubsecstats;

/*
 * ubsec_maxbatch controls the number of crypto ops to voluntarily
 * collect into one submission to the hardware.  This batching happens
 * when ops are dispatched from the crypto subsystem with a hint that
 * more are to follow immediately.  These ops must also not be marked
 * with a ``no delay'' flag.
 */
static	int ubsec_maxbatch = 1;

/*
 * ubsec_maxaggr controls the number of crypto ops to submit to the
 * hardware as a unit.  This aggregation reduces the number of interrupts
 * to the host at the expense of increased latency (for all but the last
 * operation).  For network traffic setting this to one yields the highest
 * performance but at the expense of more interrupt processing.
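 *
 * Both values can be adjusted at run time through the hw.ubsec sysctl
 * node created in ubsec_sysctl_init() below (hw.ubsec.maxbatch and
 * hw.ubsec.maxaggr).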
 */
static	int ubsec_maxaggr = 1;

static const struct ubsec_product {
	pci_vendor_id_t		ubsec_vendor;
	pci_product_id_t	ubsec_product;
	int			ubsec_flags;
	int			ubsec_statmask;
	int			ubsec_maxaggr;
	const char		*ubsec_name;
} ubsec_products[] = {
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5501,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Bluesteel 5501"
	},
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5601,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Bluesteel 5601"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5801,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5801"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5802,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5802"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5805,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5805"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5820,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5820"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5821"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_SCA1K,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Sun Crypto Accelerator 1000"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5821 (Sun)"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5822,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5822"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5823,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5823"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5825,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  UBS_MIN_AGGR,
	  "Broadcom BCM5825"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5860,
	  UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
	      UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_RNG | UBS_FLAGS_RNG4 |
	      UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY |
	      BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY,
	  UBS_MAX_AGGR,
	  "Broadcom BCM5860"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5861,
	  UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
	      UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_RNG | UBS_FLAGS_RNG4 |
	      UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY |
	      BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY,
	  UBS_MAX_AGGR,
	  "Broadcom BCM5861"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5862,
	  UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
	      UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_RNG | UBS_FLAGS_RNG4 |
	      UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY |
	      BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY,
	  UBS_MAX_AGGR,
	  "Broadcom BCM5862"
	},

	{ 0,			0,
	  0,
	  0,
	  0,
	  NULL
	}
};

static const struct ubsec_product *
ubsec_lookup(const struct pci_attach_args *pa)
{
	const struct ubsec_product *up;

	for (up = ubsec_products; up->ubsec_name != NULL; up++) {
		if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor &&
		    PCI_PRODUCT(pa->pa_id) == up->ubsec_product)
			return (up);
	}
	return (NULL);
}

static int
ubsec_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (ubsec_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
ubsec_attach(device_t parent, device_t self, void *aux)
{
	struct ubsec_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct ubsec_product *up;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pcireg_t memtype;
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pct = pc;

	up = ubsec_lookup(pa);
	if (up == NULL) {
		printf("\n");
		panic("ubsec_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", up->ubsec_name, 1);

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_queue4);
	SIMPLEQ_INIT(&sc->sc_qchip4);
	SIMPLEQ_INIT(&sc->sc_q2free);

	sc->sc_flags = up->ubsec_flags;
	sc->sc_statmask = up->ubsec_statmask;
	sc->sc_maxaggr = up->ubsec_maxaggr;

	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BS_BAR);
	if (pci_mapreg_map(pa, BS_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize)) {
		aprint_error_dev(self, "can't find mem space");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, ubsec_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(self, "couldn't get crypto driver id\n");
		pci_intr_disestablish(pc, sc->sc_ih);
		return;
	}

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = malloc(sizeof(struct ubsec_q), M_DEVBUF, M_ZERO|M_WAITOK);

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			aprint_error_dev(self, "can't allocate dma buffers\n");
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	if (sc->sc_flags & UBS_FLAGS_AES) {
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
		    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	}

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		if (sc->sc_flags & UBS_FLAGS_RNG4)
			sc->sc_statmask |= BS_STAT_MCR4_DONE;
		else
			sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, 0);
		callout_setfunc(&sc->sc_rngto, ubsec_rng, sc);
		rndsource_setcb(&sc->sc_rnd_source, ubsec_rng_get, sc);
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
				  RND_TYPE_RNG,
				  RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);

 skip_rng:
		if (sc->sc_rnghz)
			aprint_normal_dev(self,
			    "random number generator enabled\n");
		else
			aprint_error_dev(self,
			    "WARNING: random number generator disabled\n");
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
}

static int
ubsec_detach(device_t self, int flags)
{
	struct ubsec_softc *sc = device_private(self);
	struct ubsec_q *q, *qtmp;
	volatile u_int32_t ctrl;

	/* disable interrupts */
	/* XXX wait/abort current ops? where is DMAERR enabled? */
	ctrl = READ_REG(sc, BS_CTRL);

	ctrl &= ~(BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR);
	if (sc->sc_flags & UBS_FLAGS_MULTIMCR)
		ctrl &= ~BS_CTRL_MCR4INT;

	WRITE_REG(sc, BS_CTRL, ctrl);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		callout_halt(&sc->sc_rngto, NULL);
		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
		rnd_detach_source(&sc->sc_rnd_source);
	}
#endif /* UBSEC_NO_RNG */

	crypto_unregister_all(sc->sc_cid);

	mutex_spin_enter(&sc->sc_mtx);

	ubsec_totalreset(sc);  /* XXX leaves the chip running */

	SIMPLEQ_FOREACH_SAFE(q, &sc->sc_freequeue, q_next, qtmp) {
		ubsec_dma_free(sc, &q->q_dma->d_alloc);
		if (q->q_src_map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		if (q->q_cached_dst_map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, q->q_cached_dst_map);
		free(q, M_DEVBUF);
	}

	mutex_spin_exit(&sc->sc_mtx);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if (sc->sc_memsize != 0) {
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
		sc->sc_memsize = 0;
	}

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, ubsec, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
ubsec_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_ubsec,
		    cfattach_ioconf_ubsec, cfdata_ioconf_ubsec);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_ubsec,
		    cfattach_ioconf_ubsec, cfdata_ioconf_ubsec);
#endif
		return error;
	default:
		return ENOTTY;
	}
}

SYSCTL_SETUP(ubsec_sysctl_init, "ubsec sysctl")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ubsec",
	    SYSCTL_DESCR("ubsec options"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxbatch",
	    SYSCTL_DESCR("max ops to batch w/o interrupt"),
	    NULL, 0, &ubsec_maxbatch, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxaggr",
	    SYSCTL_DESCR("max ops to aggregate under one interrupt"),
	    NULL, 0, &ubsec_maxaggr, 0,
	    CTL_CREATE, CTL_EOL);

	return;
}

/*
 * UBSEC Interrupt routine
 */
static int
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int flags;
	int npkts = 0, i;

	mutex_spin_enter(&sc->sc_mtx);
	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags
			    & htole16(UBS_MCR_DONE)) == 0)
				break;

			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too; they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if(q->q_stacked_mcr[i])
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has been
		 * a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

			/* A bug in new devices requires us to swap this field */
			if (sc->sc_flags & UBS_FLAGS_MULTIMCR)
				flags = htole16(mcr->mcr_flags);
			else
				flags = mcr->mcr_flags;
			if ((flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}
	if ((sc->sc_flags & UBS_FLAGS_RNG4) && (stat & BS_STAT_MCR4_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip4)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip4);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

			/* A bug in new devices requires us to swap this field */
			flags = htole16(mcr->mcr_flags);

			if ((flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip4, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed4(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("%s: dmaerr %s@%08x\n", device_xname(sc->sc_dev),
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("%s: wakeup crypto (%x)\n",
			    device_xname(sc->sc_dev), sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wkeup;
		crypto_unblock(sc->sc_cid, wkeup);
	}
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 * OpenBSD comments:
 *	  It is assumed that the caller set splnet()
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;
#ifdef UBSEC_DEBUG
	static int max;
#endif /* UBSEC_DEBUG */

	npkts = sc->sc_nqueue;
	if (npkts > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = npkts;
	if (npkts < 2)
		goto feed1;

	/*
	 * Decide how many ops to combine in a single MCR.  We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure.  Otherwise we clamp
	 * based on the tunable parameter ubsec_maxaggr.  Note that
	 * aggregation can happen in two ways: either by batching ops
	 * from above or because the h/w backs up and throttles us.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
	if (npkts > sc->sc_maxaggr)
		npkts = sc->sc_maxaggr;
	if (npkts > ubsec_maxaggr)
		npkts = ubsec_maxaggr;
	if (npkts > ubsecstats.hst_maxbatch)
		ubsecstats.hst_maxbatch = npkts;
	if (npkts < 2)
		goto feed1;
	ubsecstats.hst_totbatch += npkts-1;

	if ((stat = READ_REG(sc, BS_STAT))
	    & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else {
			ubsecstats.hst_mcr1full++;
		}
		return;
	}

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		printf("%s: new max aggregate %d\n", device_xname(sc->sc_dev),
		    max);
	}
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		q2= SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next);
		--sc->sc_nqueue;

		v = ((void *)&q2->q_dma->d_dma->d_mcr);
		v = (char*)v + (sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		memcpy(&q->q_dma->d_dma->d_mcradd[i], v,
		    sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

 feed1:
	while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
		if ((stat = READ_REG(sc, BS_STAT))
		    & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			} else {
				ubsecstats.hst_mcr1full++;
			}
			break;
		}

		q = SIMPLEQ_FIRST(&sc->sc_queue);

		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("feed: q->chip %p %08x stat %08x\n",
			    q, (u_int32_t)q->q_dma->d_alloc.dma_paddr,
			    stat);
#endif /* UBSEC_DEBUG */
		q = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
		--sc->sc_nqueue;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
		sc->sc_nqchip++;
	}
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
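 *
 * The encoded id packs the device unit and the session index together
 * via UBSEC_SID(); UBSEC_SESSION() later recovers the index in
 * ubsec_process() and ubsec_freesession().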
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = arg;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC_96 ||
		    c->cri_alg == CRYPTO_SHA1_HMAC_96) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (encini && encini->cri_alg == CRYPTO_AES_CBC) {
		switch (encini->cri_klen) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (EINVAL);
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)malloc((sesn + 1) *
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			memcpy(ses, sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			memset(sc->sc_sessions, 0, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	memset(ses, 0, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_AES_CBC) {
			memcpy(ses->ses_key, encini->cri_key,
			    encini->cri_klen / 8);
		}
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			memcpy(&ses->ses_key[0], encini->cri_key, 8);
			memcpy(&ses->ses_key[2], encini->cri_key, 8);
			memcpy(&ses->ses_key[4], encini->cri_key, 8);
		} else
			memcpy(ses->ses_key, encini->cri_key, 24);

		SWAP32(ses->ses_key[0]);
		SWAP32(ses->ses_key[1]);
		SWAP32(ses->ses_key[2]);
		SWAP32(ses->ses_key[3]);
		SWAP32(ses->ses_key[4]);
		SWAP32(ses->ses_key[5]);
	}

	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(device_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
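 * The slot is simply zeroed; ubsec_newsession() treats any entry with
 * ses_used == 0 as free and may recycle it.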
 */
static void
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	session = UBSEC_SESSION(sid);
	KASSERTMSG(session >= 0, "session=%d", session);
	KASSERTMSG(session < sc->sc_nsessions, "session=%d nsessions=%d",
	    session, sc->sc_nsessions);

	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
}

#ifdef __FreeBSD__ /* Ugly gratuitous changes to bus_dma */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
    int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER
		/*, ("Too many DMA segments returned when mapping operand")*/);
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
			(u_int) mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	memcpy(op->segs, seg, nsegs * sizeof (seg[0]));
}
#endif

static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
	int err = 0, i, j, nicealign;
	struct ubsec_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses, key;
	struct ubsec_dma *dmap = NULL;
	u_int16_t flags = 0;
	int ivlen = 0, keylen = 0;

	KASSERTMSG(UBSEC_SESSION(crp->crp_sid) < sc->sc_nsessions,
	    "invalid session id 0x%"PRIx64", nsessions=%d",
	    crp->crp_sid, sc->sc_nsessions);

	mutex_spin_enter(&sc->sc_mtx);
	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		mutex_spin_exit(&sc->sc_mtx);
		err = ERESTART;
		goto errout;
	}
	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next);
	mutex_spin_exit(&sc->sc_mtx);

	dmap = q->q_dma; /* Save dma pointer */
	/* don't lose the cached dmamaps q_src_map and q_cached_dst_map */
	memset(q, 0, offsetof(struct ubsec_q, q_src_map));
	memset(&key, 0, sizeof(key));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	memset(&dmap->d_dma->d_mcr, 0, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			if ((sc->sc_flags & UBS_FLAGS_AES) == 0) {
				/*
				 * We cannot order the ubsec as requested
				 */
				ubsecstats.hst_badalg++;
				err = EINVAL;
				goto errout;
			}
			flags |= htole16(UBS_PKTCTX_ENC_AES);
			switch (enccrd->crd_klen) {
			case 128:
			case 192:
			case 256:
				keylen = enccrd->crd_klen / 8;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
			ivlen = 16;
		} else {
			flags |= htole16(UBS_PKTCTX_ENC_3DES);
			ivlen = 8;
			keylen = 24;
		}

		encoffset = enccrd->crd_skip;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(key.ses_iv, enccrd->crd_iv, ivlen);
			else
				cprng_fast(key.ses_iv, ivlen);

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    ivlen, (void *)key.ses_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    ivlen, (void *)key.ses_iv);
			}
		} else {
			flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(key.ses_iv, enccrd->crd_iv, ivlen);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    ivlen, (void *)key.ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (void *)key.ses_iv);
		}

		for (i = 0; i < (keylen / 4); i++)
			key.ses_key[i] = ses->ses_key[i];
		for (i = 0; i < (ivlen / 4); i++)
			SWAP32(key.ses_iv[i]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC_96)
			flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			key.ses_hminner[i] = ses->ses_hminner[i];
			key.ses_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(key.ses_hminner[i]);
			HTOLE32(key.ses_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}

	if (q->q_src_map == NULL) {
		/* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */
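		/*
		 * (A guess, not verified against the chip documentation:
		 * it presumably keeps each DMA segment below the 16-bit
		 * pb_len limit; compare the packl > 0xfffc check in the
		 * scatter loop further down.)
		 */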
*/ 1373 1.39 bad if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER, 1374 1.39 bad 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) { 1375 1.39 bad err = ENOMEM; 1376 1.39 bad goto errout; 1377 1.39 bad } 1378 1.1 jonathan } 1379 1.1 jonathan if (crp->crp_flags & CRYPTO_F_IMBUF) { 1380 1.1 jonathan if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, 1381 1.1 jonathan q->q_src_m, BUS_DMA_NOWAIT) != 0) { 1382 1.1 jonathan ubsecstats.hst_noload++; 1383 1.1 jonathan err = ENOMEM; 1384 1.1 jonathan goto errout; 1385 1.1 jonathan } 1386 1.1 jonathan } else if (crp->crp_flags & CRYPTO_F_IOV) { 1387 1.1 jonathan if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, 1388 1.1 jonathan q->q_src_io, BUS_DMA_NOWAIT) != 0) { 1389 1.1 jonathan ubsecstats.hst_noload++; 1390 1.1 jonathan err = ENOMEM; 1391 1.1 jonathan goto errout; 1392 1.1 jonathan } 1393 1.1 jonathan } 1394 1.1 jonathan nicealign = ubsec_dmamap_aligned(q->q_src_map); 1395 1.1 jonathan 1396 1.1 jonathan dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); 1397 1.1 jonathan 1398 1.1 jonathan #ifdef UBSEC_DEBUG 1399 1.1 jonathan if (ubsec_debug) 1400 1.1 jonathan printf("src skip: %d nicealign: %u\n", sskip, nicealign); 1401 1.1 jonathan #endif 1402 1.1 jonathan for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) { 1403 1.1 jonathan struct ubsec_pktbuf *pb; 1404 1.1 jonathan bus_size_t packl = q->q_src_map->dm_segs[i].ds_len; 1405 1.1 jonathan bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr; 1406 1.1 jonathan 1407 1.1 jonathan if (sskip >= packl) { 1408 1.1 jonathan sskip -= packl; 1409 1.1 jonathan continue; 1410 1.1 jonathan } 1411 1.1 jonathan 1412 1.1 jonathan packl -= sskip; 1413 1.1 jonathan packp += sskip; 1414 1.1 jonathan sskip = 0; 1415 1.1 jonathan 1416 1.1 jonathan if (packl > 0xfffc) { 1417 1.1 jonathan err = EIO; 1418 1.1 jonathan goto errout; 1419 1.1 jonathan } 1420 1.1 jonathan 1421 1.1 jonathan if (j == 0) 1422 1.1 jonathan pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; 1423 1.1 jonathan else 1424 1.1 jonathan pb = &dmap->d_dma->d_sbuf[j - 1]; 1425 1.1 jonathan 1426 1.1 jonathan pb->pb_addr = htole32(packp); 1427 1.1 jonathan 1428 1.1 jonathan if (stheend) { 1429 1.1 jonathan if (packl > stheend) { 1430 1.1 jonathan pb->pb_len = htole32(stheend); 1431 1.1 jonathan stheend = 0; 1432 1.1 jonathan } else { 1433 1.1 jonathan pb->pb_len = htole32(packl); 1434 1.1 jonathan stheend -= packl; 1435 1.1 jonathan } 1436 1.1 jonathan } else 1437 1.1 jonathan pb->pb_len = htole32(packl); 1438 1.1 jonathan 1439 1.1 jonathan if ((i + 1) == q->q_src_map->dm_nsegs) 1440 1.1 jonathan pb->pb_next = 0; 1441 1.1 jonathan else 1442 1.1 jonathan pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1443 1.1 jonathan offsetof(struct ubsec_dmachunk, d_sbuf[j])); 1444 1.1 jonathan j++; 1445 1.1 jonathan } 1446 1.1 jonathan 1447 1.1 jonathan if (enccrd == NULL && maccrd != NULL) { 1448 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; 1449 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; 1450 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + 1451 1.1 jonathan offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1452 1.1 jonathan #ifdef UBSEC_DEBUG 1453 1.1 jonathan if (ubsec_debug) 1454 1.1 jonathan printf("opkt: %x %x %x\n", 1455 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, 1456 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, 1457 1.1 jonathan dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); 1458 1.1 jonathan 1459 1.1 jonathan #endif 1460 1.1 jonathan } else { 1461 1.1 jonathan if (crp->crp_flags & CRYPTO_F_IOV) 
{
1462 1.1 jonathan if (!nicealign) {
1463 1.1 jonathan ubsecstats.hst_iovmisaligned++;
1464 1.1 jonathan err = EINVAL;
1465 1.1 jonathan goto errout;
1466 1.1 jonathan }
1467 1.39 bad if (q->q_dst_map == NULL) {
1468 1.39 bad if (q->q_cached_dst_map == NULL) {
1469 1.43 msaitoh /*
1470 1.43 msaitoh * XXX: ``what the heck's that''
1471 1.43 msaitoh * 0xfff0?
1472 1.43 msaitoh */
1473 1.43 msaitoh if (bus_dmamap_create(sc->sc_dmat,
1474 1.43 msaitoh 0xfff0, UBS_MAX_SCATTER, 0xfff0, 0,
1475 1.43 msaitoh BUS_DMA_NOWAIT,
1476 1.39 bad &q->q_cached_dst_map) != 0) {
1477 1.39 bad ubsecstats.hst_nomap++;
1478 1.39 bad err = ENOMEM;
1479 1.39 bad goto errout;
1480 1.39 bad }
1481 1.39 bad }
1482 1.39 bad q->q_dst_map = q->q_cached_dst_map;
1483 1.1 jonathan }
1484 1.1 jonathan if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
1485 1.1 jonathan q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
1486 1.1 jonathan ubsecstats.hst_noload++;
1487 1.1 jonathan err = ENOMEM;
1488 1.1 jonathan goto errout;
1489 1.1 jonathan }
1490 1.1 jonathan } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1491 1.1 jonathan if (nicealign) {
1492 1.1 jonathan q->q_dst_m = q->q_src_m;
1493 1.1 jonathan q->q_dst_map = q->q_src_map;
1494 1.1 jonathan } else {
1495 1.1 jonathan int totlen, len;
1496 1.1 jonathan struct mbuf *m, *top, **mp;
1497 1.1 jonathan
1498 1.1 jonathan ubsecstats.hst_unaligned++;
1499 1.1 jonathan totlen = q->q_src_map->dm_mapsize;
1500 1.1 jonathan if (q->q_src_m->m_flags & M_PKTHDR) {
1501 1.1 jonathan len = MHLEN;
1502 1.1 jonathan MGETHDR(m, M_DONTWAIT, MT_DATA);
1503 1.1 jonathan /*XXX FIXME: m_dup_pkthdr */
1504 1.1 jonathan if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) {
1505 1.1 jonathan m_free(m);
1506 1.1 jonathan m = NULL;
1507 1.1 jonathan }
1508 1.1 jonathan } else {
1509 1.1 jonathan len = MLEN;
1510 1.1 jonathan MGET(m, M_DONTWAIT, MT_DATA);
1511 1.1 jonathan }
1512 1.1 jonathan if (m == NULL) {
1513 1.1 jonathan ubsecstats.hst_nombuf++;
1514 1.1 jonathan err = sc->sc_nqueue ? ERESTART : ENOMEM;
1515 1.1 jonathan goto errout;
1516 1.1 jonathan }
1517 1.1 jonathan if (len == MHLEN)
1518 1.1 jonathan /*XXX was M_DUP_PKTHDR*/
1519 1.46 maxv m_copy_pkthdr(m, q->q_src_m);
1520 1.1 jonathan if (totlen >= MINCLSIZE) {
1521 1.1 jonathan MCLGET(m, M_DONTWAIT);
1522 1.1 jonathan if ((m->m_flags & M_EXT) == 0) {
1523 1.1 jonathan m_free(m);
1524 1.1 jonathan ubsecstats.hst_nomcl++;
1525 1.43 msaitoh err = sc->sc_nqueue
1526 1.43 msaitoh ? ERESTART : ENOMEM;
1527 1.1 jonathan goto errout;
1528 1.1 jonathan }
1529 1.1 jonathan len = MCLBYTES;
1530 1.1 jonathan }
1531 1.1 jonathan m->m_len = len;
1532 1.1 jonathan top = NULL;
1533 1.1 jonathan mp = &top;
1534 1.1 jonathan
1535 1.1 jonathan while (totlen > 0) {
1536 1.1 jonathan if (top) {
1537 1.1 jonathan MGET(m, M_DONTWAIT, MT_DATA);
1538 1.1 jonathan if (m == NULL) {
1539 1.1 jonathan m_freem(top);
1540 1.1 jonathan ubsecstats.hst_nombuf++;
1541 1.1 jonathan err = sc->sc_nqueue ? ERESTART : ENOMEM;
1542 1.1 jonathan goto errout;
1543 1.1 jonathan }
1544 1.1 jonathan len = MLEN;
1545 1.1 jonathan }
1546 1.1 jonathan if (top && totlen >= MINCLSIZE) {
1547 1.1 jonathan MCLGET(m, M_DONTWAIT);
1548 1.1 jonathan if ((m->m_flags & M_EXT) == 0) {
1549 1.1 jonathan *mp = m;
1550 1.1 jonathan m_freem(top);
1551 1.1 jonathan ubsecstats.hst_nomcl++;
1552 1.1 jonathan err = sc->sc_nqueue ?
ERESTART : ENOMEM; 1553 1.1 jonathan goto errout; 1554 1.1 jonathan } 1555 1.1 jonathan len = MCLBYTES; 1556 1.1 jonathan } 1557 1.44 riastrad m->m_len = len = uimin(totlen, len); 1558 1.1 jonathan totlen -= len; 1559 1.1 jonathan *mp = m; 1560 1.1 jonathan mp = &m->m_next; 1561 1.1 jonathan } 1562 1.1 jonathan q->q_dst_m = top; 1563 1.1 jonathan ubsec_mcopy(q->q_src_m, q->q_dst_m, 1564 1.1 jonathan cpskip, cpoffset); 1565 1.39 bad if (q->q_dst_map == NULL) { 1566 1.39 bad if (q->q_cached_dst_map == NULL) { 1567 1.39 bad /* XXX again, what the heck is that 0xfff0? */ 1568 1.39 bad if (bus_dmamap_create(sc->sc_dmat, 0xfff0, 1569 1.39 bad UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, 1570 1.39 bad &q->q_cached_dst_map) != 0) { 1571 1.39 bad ubsecstats.hst_nomap++; 1572 1.39 bad err = ENOMEM; 1573 1.39 bad goto errout; 1574 1.39 bad } 1575 1.39 bad } 1576 1.39 bad q->q_dst_map = q->q_cached_dst_map; 1577 1.1 jonathan } 1578 1.1 jonathan if (bus_dmamap_load_mbuf(sc->sc_dmat, 1579 1.1 jonathan q->q_dst_map, q->q_dst_m, 1580 1.1 jonathan BUS_DMA_NOWAIT) != 0) { 1581 1.1 jonathan ubsecstats.hst_noload++; 1582 1.1 jonathan err = ENOMEM; 1583 1.1 jonathan goto errout; 1584 1.1 jonathan } 1585 1.1 jonathan } 1586 1.1 jonathan } else { 1587 1.1 jonathan ubsecstats.hst_badflags++; 1588 1.1 jonathan err = EINVAL; 1589 1.1 jonathan goto errout; 1590 1.1 jonathan } 1591 1.1 jonathan 1592 1.1 jonathan #ifdef UBSEC_DEBUG 1593 1.1 jonathan if (ubsec_debug) 1594 1.1 jonathan printf("dst skip: %d\n", dskip); 1595 1.1 jonathan #endif 1596 1.1 jonathan for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) { 1597 1.1 jonathan struct ubsec_pktbuf *pb; 1598 1.1 jonathan bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len; 1599 1.1 jonathan bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr; 1600 1.1 jonathan 1601 1.1 jonathan if (dskip >= packl) { 1602 1.1 jonathan dskip -= packl; 1603 1.1 jonathan continue; 1604 1.1 jonathan } 1605 1.1 jonathan 1606 1.1 jonathan packl -= dskip; 1607 1.1 jonathan packp += dskip; 1608 1.1 jonathan dskip = 0; 1609 1.1 jonathan 1610 1.1 jonathan if (packl > 0xfffc) { 1611 1.1 jonathan err = EIO; 1612 1.1 jonathan goto errout; 1613 1.1 jonathan } 1614 1.1 jonathan 1615 1.1 jonathan if (j == 0) 1616 1.1 jonathan pb = &dmap->d_dma->d_mcr.mcr_opktbuf; 1617 1.1 jonathan else 1618 1.1 jonathan pb = &dmap->d_dma->d_dbuf[j - 1]; 1619 1.1 jonathan 1620 1.1 jonathan pb->pb_addr = htole32(packp); 1621 1.1 jonathan 1622 1.1 jonathan if (dtheend) { 1623 1.1 jonathan if (packl > dtheend) { 1624 1.1 jonathan pb->pb_len = htole32(dtheend); 1625 1.1 jonathan dtheend = 0; 1626 1.1 jonathan } else { 1627 1.1 jonathan pb->pb_len = htole32(packl); 1628 1.1 jonathan dtheend -= packl; 1629 1.1 jonathan } 1630 1.1 jonathan } else 1631 1.1 jonathan pb->pb_len = htole32(packl); 1632 1.1 jonathan 1633 1.1 jonathan if ((i + 1) == q->q_dst_map->dm_nsegs) { 1634 1.1 jonathan if (maccrd) 1635 1.1 jonathan pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1636 1.1 jonathan offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1637 1.1 jonathan else 1638 1.1 jonathan pb->pb_next = 0; 1639 1.1 jonathan } else 1640 1.1 jonathan pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1641 1.1 jonathan offsetof(struct ubsec_dmachunk, d_dbuf[j])); 1642 1.1 jonathan j++; 1643 1.1 jonathan } 1644 1.1 jonathan } 1645 1.1 jonathan 1646 1.1 jonathan dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + 1647 1.1 jonathan offsetof(struct ubsec_dmachunk, d_ctx)); 1648 1.1 jonathan 1649 1.40 bad if (enccrd && enccrd->crd_alg == 
CRYPTO_AES_CBC) { 1650 1.40 bad struct ubsec_pktctx_aes128 *aes128; 1651 1.40 bad struct ubsec_pktctx_aes192 *aes192; 1652 1.40 bad struct ubsec_pktctx_aes256 *aes256; 1653 1.40 bad struct ubsec_pktctx_hdr *ph; 1654 1.40 bad u_int8_t *ctx; 1655 1.40 bad 1656 1.40 bad ctx = (u_int8_t *)(dmap->d_alloc.dma_vaddr) + 1657 1.40 bad offsetof(struct ubsec_dmachunk, d_ctx); 1658 1.40 bad 1659 1.40 bad ph = (struct ubsec_pktctx_hdr *)ctx; 1660 1.40 bad ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES); 1661 1.40 bad ph->ph_flags = flags; 1662 1.40 bad ph->ph_offset = htole16(coffset >> 2); 1663 1.40 bad 1664 1.40 bad switch (enccrd->crd_klen) { 1665 1.40 bad case 128: 1666 1.40 bad aes128 = (struct ubsec_pktctx_aes128 *)ctx; 1667 1.40 bad ph->ph_len = htole16(sizeof(*aes128)); 1668 1.40 bad ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_128); 1669 1.40 bad for (i = 0; i < 4; i++) 1670 1.40 bad aes128->pc_aeskey[i] = key.ses_key[i]; 1671 1.40 bad for (i = 0; i < 5; i++) 1672 1.40 bad aes128->pc_hminner[i] = key.ses_hminner[i]; 1673 1.40 bad for (i = 0; i < 5; i++) 1674 1.61 skrll aes128->pc_hmouter[i] = key.ses_hmouter[i]; 1675 1.40 bad for (i = 0; i < 4; i++) 1676 1.40 bad aes128->pc_iv[i] = key.ses_iv[i]; 1677 1.40 bad break; 1678 1.40 bad case 192: 1679 1.40 bad aes192 = (struct ubsec_pktctx_aes192 *)ctx; 1680 1.40 bad ph->ph_len = htole16(sizeof(*aes192)); 1681 1.40 bad ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_192); 1682 1.40 bad for (i = 0; i < 6; i++) 1683 1.40 bad aes192->pc_aeskey[i] = key.ses_key[i]; 1684 1.40 bad for (i = 0; i < 5; i++) 1685 1.40 bad aes192->pc_hminner[i] = key.ses_hminner[i]; 1686 1.40 bad for (i = 0; i < 5; i++) 1687 1.61 skrll aes192->pc_hmouter[i] = key.ses_hmouter[i]; 1688 1.40 bad for (i = 0; i < 4; i++) 1689 1.40 bad aes192->pc_iv[i] = key.ses_iv[i]; 1690 1.40 bad break; 1691 1.40 bad case 256: 1692 1.40 bad aes256 = (struct ubsec_pktctx_aes256 *)ctx; 1693 1.40 bad ph->ph_len = htole16(sizeof(*aes256)); 1694 1.40 bad ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_256); 1695 1.40 bad for (i = 0; i < 8; i++) 1696 1.40 bad aes256->pc_aeskey[i] = key.ses_key[i]; 1697 1.40 bad for (i = 0; i < 5; i++) 1698 1.40 bad aes256->pc_hminner[i] = key.ses_hminner[i]; 1699 1.40 bad for (i = 0; i < 5; i++) 1700 1.61 skrll aes256->pc_hmouter[i] = key.ses_hmouter[i]; 1701 1.40 bad for (i = 0; i < 4; i++) 1702 1.40 bad aes256->pc_iv[i] = key.ses_iv[i]; 1703 1.40 bad break; 1704 1.40 bad } 1705 1.40 bad } else if (sc->sc_flags & UBS_FLAGS_LONGCTX) { 1706 1.40 bad struct ubsec_pktctx_3des *ctx; 1707 1.40 bad struct ubsec_pktctx_hdr *ph; 1708 1.1 jonathan 1709 1.40 bad ctx = (struct ubsec_pktctx_3des *) 1710 1.40 bad ((u_int8_t *)(dmap->d_alloc.dma_vaddr) + 1711 1.1 jonathan offsetof(struct ubsec_dmachunk, d_ctx)); 1712 1.5 perry 1713 1.40 bad ph = (struct ubsec_pktctx_hdr *)ctx; 1714 1.40 bad ph->ph_len = htole16(sizeof(*ctx)); 1715 1.40 bad ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_3DES); 1716 1.40 bad ph->ph_flags = flags; 1717 1.40 bad ph->ph_offset = htole16(coffset >> 2); 1718 1.40 bad 1719 1.1 jonathan for (i = 0; i < 6; i++) 1720 1.40 bad ctx->pc_deskey[i] = key.ses_key[i]; 1721 1.1 jonathan for (i = 0; i < 5; i++) 1722 1.40 bad ctx->pc_hminner[i] = key.ses_hminner[i]; 1723 1.1 jonathan for (i = 0; i < 5; i++) 1724 1.40 bad ctx->pc_hmouter[i] = key.ses_hmouter[i]; 1725 1.40 bad for (i = 0; i < 2; i++) 1726 1.40 bad ctx->pc_iv[i] = key.ses_iv[i]; 1727 1.40 bad } else { 1728 1.40 bad struct ubsec_pktctx *ctx = (struct ubsec_pktctx *) 1729 1.40 bad ((u_int8_t *)dmap->d_alloc.dma_vaddr + 
1730 1.40 bad offsetof(struct ubsec_dmachunk, d_ctx)); 1731 1.40 bad 1732 1.40 bad ctx->pc_flags = flags; 1733 1.40 bad ctx->pc_offset = htole16(coffset >> 2); 1734 1.40 bad for (i = 0; i < 6; i++) 1735 1.40 bad ctx->pc_deskey[i] = key.ses_key[i]; 1736 1.40 bad for (i = 0; i < 5; i++) 1737 1.40 bad ctx->pc_hminner[i] = key.ses_hminner[i]; 1738 1.40 bad for (i = 0; i < 5; i++) 1739 1.61 skrll ctx->pc_hmouter[i] = key.ses_hmouter[i]; 1740 1.40 bad for (i = 0; i < 2; i++) 1741 1.40 bad ctx->pc_iv[i] = key.ses_iv[i]; 1742 1.40 bad } 1743 1.1 jonathan 1744 1.29 tls mutex_spin_enter(&sc->sc_mtx); 1745 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); 1746 1.1 jonathan sc->sc_nqueue++; 1747 1.1 jonathan ubsecstats.hst_ipackets++; 1748 1.1 jonathan ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize; 1749 1.1 jonathan if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch) 1750 1.1 jonathan ubsec_feed(sc); 1751 1.29 tls mutex_spin_exit(&sc->sc_mtx); 1752 1.57 riastrad return 0; 1753 1.1 jonathan 1754 1.1 jonathan errout: 1755 1.1 jonathan if (q != NULL) { 1756 1.1 jonathan if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1757 1.1 jonathan bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1758 1.1 jonathan } 1759 1.1 jonathan if (q->q_src_map != NULL) { 1760 1.1 jonathan bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1761 1.1 jonathan } 1762 1.1 jonathan 1763 1.60 rin if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1764 1.60 rin m_freem(q->q_dst_m); 1765 1.60 rin 1766 1.29 tls mutex_spin_enter(&sc->sc_mtx); 1767 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1768 1.29 tls mutex_spin_exit(&sc->sc_mtx); 1769 1.1 jonathan } 1770 1.57 riastrad if (err == ERESTART) { 1771 1.57 riastrad mutex_spin_enter(&sc->sc_mtx); 1772 1.1 jonathan sc->sc_needwakeup |= CRYPTO_SYMQ; 1773 1.57 riastrad mutex_spin_exit(&sc->sc_mtx); 1774 1.57 riastrad return ERESTART; 1775 1.1 jonathan } 1776 1.57 riastrad crp->crp_etype = err; 1777 1.57 riastrad crypto_done(crp); 1778 1.57 riastrad return 0; 1779 1.1 jonathan } 1780 1.1 jonathan 1781 1.7 thorpej static void 1782 1.7 thorpej ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) 1783 1.1 jonathan { 1784 1.1 jonathan struct cryptop *crp = (struct cryptop *)q->q_crp; 1785 1.1 jonathan struct cryptodesc *crd; 1786 1.1 jonathan struct ubsec_dma *dmap = q->q_dma; 1787 1.1 jonathan 1788 1.1 jonathan ubsecstats.hst_opackets++; 1789 1.1 jonathan ubsecstats.hst_obytes += dmap->d_alloc.dma_size; 1790 1.1 jonathan 1791 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0, 1792 1.1 jonathan dmap->d_alloc.dma_map->dm_mapsize, 1793 1.1 jonathan BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1794 1.1 jonathan if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1795 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 1796 1.1 jonathan 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1797 1.1 jonathan bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1798 1.1 jonathan } 1799 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 1800 1.1 jonathan 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1801 1.1 jonathan bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1802 1.1 jonathan 1803 1.1 jonathan if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { 1804 1.1 jonathan m_freem(q->q_src_m); 1805 1.12 christos crp->crp_buf = (void *)q->q_dst_m; 1806 1.1 jonathan } 1807 1.1 jonathan 1808 1.1 jonathan for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1809 1.15 tls if (crd->crd_alg != 
CRYPTO_MD5_HMAC_96 && 1810 1.15 tls crd->crd_alg != CRYPTO_SHA1_HMAC_96) 1811 1.1 jonathan continue; 1812 1.1 jonathan if (crp->crp_flags & CRYPTO_F_IMBUF) 1813 1.1 jonathan m_copyback((struct mbuf *)crp->crp_buf, 1814 1.1 jonathan crd->crd_inject, 12, 1815 1.12 christos (void *)dmap->d_dma->d_macbuf); 1816 1.1 jonathan else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) 1817 1.12 christos bcopy((void *)dmap->d_dma->d_macbuf, 1818 1.1 jonathan crp->crp_mac, 12); 1819 1.1 jonathan break; 1820 1.1 jonathan } 1821 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1822 1.1 jonathan crypto_done(crp); 1823 1.1 jonathan } 1824 1.1 jonathan 1825 1.1 jonathan static void 1826 1.1 jonathan ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) 1827 1.1 jonathan { 1828 1.1 jonathan int i, j, dlen, slen; 1829 1.12 christos char *dptr, *sptr; 1830 1.1 jonathan 1831 1.1 jonathan j = 0; 1832 1.1 jonathan sptr = srcm->m_data; 1833 1.1 jonathan slen = srcm->m_len; 1834 1.1 jonathan dptr = dstm->m_data; 1835 1.1 jonathan dlen = dstm->m_len; 1836 1.1 jonathan 1837 1.1 jonathan while (1) { 1838 1.44 riastrad for (i = 0; i < uimin(slen, dlen); i++) { 1839 1.1 jonathan if (j < hoffset || j >= toffset) 1840 1.1 jonathan *dptr++ = *sptr++; 1841 1.1 jonathan slen--; 1842 1.1 jonathan dlen--; 1843 1.1 jonathan j++; 1844 1.1 jonathan } 1845 1.1 jonathan if (slen == 0) { 1846 1.1 jonathan srcm = srcm->m_next; 1847 1.1 jonathan if (srcm == NULL) 1848 1.1 jonathan return; 1849 1.1 jonathan sptr = srcm->m_data; 1850 1.1 jonathan slen = srcm->m_len; 1851 1.1 jonathan } 1852 1.1 jonathan if (dlen == 0) { 1853 1.1 jonathan dstm = dstm->m_next; 1854 1.1 jonathan if (dstm == NULL) 1855 1.1 jonathan return; 1856 1.1 jonathan dptr = dstm->m_data; 1857 1.1 jonathan dlen = dstm->m_len; 1858 1.1 jonathan } 1859 1.1 jonathan } 1860 1.1 jonathan } 1861 1.1 jonathan 1862 1.1 jonathan /* 1863 1.1 jonathan * feed the key generator, must be called at splnet() or higher. 
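 * In this port that means with sc->sc_mtx held.  Queued MCR2 operations
 * are pushed to the chip while BS_STAT_MCR2_FULL is clear and moved from
 * sc_queue2 to sc_qchip2, to be completed later via ubsec_callback2().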
1864 1.1 jonathan */ 1865 1.1 jonathan static void 1866 1.1 jonathan ubsec_feed2(struct ubsec_softc *sc) 1867 1.1 jonathan { 1868 1.1 jonathan struct ubsec_q2 *q; 1869 1.1 jonathan 1870 1.1 jonathan while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { 1871 1.1 jonathan if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) 1872 1.1 jonathan break; 1873 1.1 jonathan q = SIMPLEQ_FIRST(&sc->sc_queue2); 1874 1.1 jonathan 1875 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, 1876 1.1 jonathan q->q_mcr.dma_map->dm_mapsize, 1877 1.1 jonathan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1878 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1879 1.1 jonathan q->q_ctx.dma_map->dm_mapsize, 1880 1.1 jonathan BUS_DMASYNC_PREWRITE); 1881 1.1 jonathan 1882 1.1 jonathan WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); 1883 1.1 jonathan q = SIMPLEQ_FIRST(&sc->sc_queue2); 1884 1.1 jonathan SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next); 1885 1.1 jonathan --sc->sc_nqueue2; 1886 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); 1887 1.1 jonathan } 1888 1.1 jonathan } 1889 1.1 jonathan 1890 1.1 jonathan /* 1891 1.34 bad * feed the RNG (used instead of ubsec_feed2() on 5827+ devices) 1892 1.34 bad */ 1893 1.34 bad void 1894 1.34 bad ubsec_feed4(struct ubsec_softc *sc) 1895 1.34 bad { 1896 1.34 bad struct ubsec_q2 *q; 1897 1.34 bad 1898 1.34 bad while (!SIMPLEQ_EMPTY(&sc->sc_queue4)) { 1899 1.34 bad if (READ_REG(sc, BS_STAT) & BS_STAT_MCR4_FULL) 1900 1.34 bad break; 1901 1.34 bad q = SIMPLEQ_FIRST(&sc->sc_queue4); 1902 1.34 bad 1903 1.34 bad bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, 1904 1.34 bad q->q_mcr.dma_map->dm_mapsize, 1905 1.34 bad BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1906 1.34 bad bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1907 1.34 bad q->q_ctx.dma_map->dm_mapsize, 1908 1.34 bad BUS_DMASYNC_PREWRITE); 1909 1.34 bad 1910 1.34 bad WRITE_REG(sc, BS_MCR4, q->q_mcr.dma_paddr); 1911 1.34 bad SIMPLEQ_REMOVE_HEAD(&sc->sc_queue4, q_next); 1912 1.34 bad --sc->sc_nqueue4; 1913 1.34 bad SIMPLEQ_INSERT_TAIL(&sc->sc_qchip4, q, q_next); 1914 1.34 bad } 1915 1.34 bad } 1916 1.34 bad 1917 1.34 bad /* 1918 1.1 jonathan * Callback for handling random numbers 1919 1.1 jonathan */ 1920 1.1 jonathan static void 1921 1.1 jonathan ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) 1922 1.1 jonathan { 1923 1.1 jonathan struct cryptkop *krp; 1924 1.1 jonathan struct ubsec_ctx_keyop *ctx; 1925 1.1 jonathan 1926 1.1 jonathan ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; 1927 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1928 1.1 jonathan q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1929 1.1 jonathan 1930 1.1 jonathan switch (q->q_type) { 1931 1.1 jonathan #ifndef UBSEC_NO_RNG 1932 1.1 jonathan case UBS_CTXOP_RNGSHA1: 1933 1.1 jonathan case UBS_CTXOP_RNGBYPASS: { 1934 1.1 jonathan struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; 1935 1.1 jonathan u_int32_t *p; 1936 1.1 jonathan int i; 1937 1.1 jonathan 1938 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 1939 1.1 jonathan rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1940 1.1 jonathan p = (u_int32_t *)rng->rng_buf.dma_vaddr; 1941 1.29 tls i = UBSEC_RNG_BUFSIZ * sizeof(u_int32_t); 1942 1.64 riastrad rnd_add_data_intr(&sc->sc_rnd_source, (char *)p, i, i * NBBY); 1943 1.29 tls sc->sc_rng_need -= i; 1944 1.29 tls rng->rng_used = 0; 1945 1.29 tls if (sc->sc_rng_need > 0) { 1946 1.30 bad callout_schedule(&sc->sc_rngto, sc->sc_rnghz); 1947 1.29 tls } 1948 1.1 jonathan 
break; 1949 1.1 jonathan } 1950 1.1 jonathan #endif 1951 1.1 jonathan case UBS_CTXOP_MODEXP: { 1952 1.1 jonathan struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 1953 1.1 jonathan u_int rlen, clen; 1954 1.1 jonathan 1955 1.1 jonathan krp = me->me_krp; 1956 1.1 jonathan rlen = (me->me_modbits + 7) / 8; 1957 1.1 jonathan clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; 1958 1.1 jonathan 1959 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 1960 1.1 jonathan 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1961 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 1962 1.1 jonathan 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1963 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 1964 1.1 jonathan 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1965 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 1966 1.1 jonathan 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1967 1.1 jonathan 1968 1.1 jonathan if (clen < rlen) 1969 1.1 jonathan krp->krp_status = E2BIG; 1970 1.1 jonathan else { 1971 1.1 jonathan if (sc->sc_flags & UBS_FLAGS_HWNORM) { 1972 1.18 cegger memset(krp->krp_param[krp->krp_iparams].crp_p, 0, 1973 1.1 jonathan (krp->krp_param[krp->krp_iparams].crp_nbits 1974 1.1 jonathan + 7) / 8); 1975 1.1 jonathan bcopy(me->me_C.dma_vaddr, 1976 1.1 jonathan krp->krp_param[krp->krp_iparams].crp_p, 1977 1.1 jonathan (me->me_modbits + 7) / 8); 1978 1.1 jonathan } else 1979 1.1 jonathan ubsec_kshift_l(me->me_shiftbits, 1980 1.1 jonathan me->me_C.dma_vaddr, me->me_normbits, 1981 1.1 jonathan krp->krp_param[krp->krp_iparams].crp_p, 1982 1.1 jonathan krp->krp_param[krp->krp_iparams].crp_nbits); 1983 1.1 jonathan } 1984 1.1 jonathan 1985 1.1 jonathan crypto_kdone(krp); 1986 1.1 jonathan 1987 1.1 jonathan /* bzero all potentially sensitive data */ 1988 1.18 cegger memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 1989 1.18 cegger memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 1990 1.18 cegger memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 1991 1.18 cegger memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 1992 1.1 jonathan 1993 1.1 jonathan /* Can't free here, so put us on the free list. */ 1994 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); 1995 1.1 jonathan break; 1996 1.1 jonathan } 1997 1.1 jonathan case UBS_CTXOP_RSAPRIV: { 1998 1.1 jonathan struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 1999 1.1 jonathan u_int len; 2000 1.1 jonathan 2001 1.1 jonathan krp = rp->rpr_krp; 2002 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0, 2003 1.1 jonathan rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2004 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0, 2005 1.1 jonathan rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2006 1.1 jonathan 2007 1.43 msaitoh len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) 2008 1.43 msaitoh / 8; 2009 1.1 jonathan bcopy(rp->rpr_msgout.dma_vaddr, 2010 1.1 jonathan krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); 2011 1.1 jonathan 2012 1.1 jonathan crypto_kdone(krp); 2013 1.1 jonathan 2014 1.18 cegger memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size); 2015 1.18 cegger memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size); 2016 1.18 cegger memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size); 2017 1.1 jonathan 2018 1.1 jonathan /* Can't free here, so put us on the free list. 
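		 * The DMA resources are torn down later by ubsec_kfree() when
		 * ubsec_kprocess() next drains sc_q2free.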
*/ 2019 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); 2020 1.1 jonathan break; 2021 1.1 jonathan } 2022 1.1 jonathan default: 2023 1.28 chs printf("%s: unknown ctx op: %x\n", device_xname(sc->sc_dev), 2024 1.1 jonathan letoh16(ctx->ctx_op)); 2025 1.1 jonathan break; 2026 1.1 jonathan } 2027 1.1 jonathan } 2028 1.1 jonathan 2029 1.1 jonathan #ifndef UBSEC_NO_RNG 2030 1.29 tls 2031 1.29 tls static void 2032 1.29 tls ubsec_rng_get(size_t bytes, void *vsc) 2033 1.29 tls { 2034 1.29 tls struct ubsec_softc *sc = vsc; 2035 1.29 tls 2036 1.29 tls mutex_spin_enter(&sc->sc_mtx); 2037 1.29 tls sc->sc_rng_need = bytes; 2038 1.29 tls ubsec_rng_locked(sc); 2039 1.29 tls mutex_spin_exit(&sc->sc_mtx); 2040 1.29 tls 2041 1.29 tls } 2042 1.29 tls 2043 1.1 jonathan static void 2044 1.1 jonathan ubsec_rng(void *vsc) 2045 1.1 jonathan { 2046 1.1 jonathan struct ubsec_softc *sc = vsc; 2047 1.29 tls mutex_spin_enter(&sc->sc_mtx); 2048 1.29 tls ubsec_rng_locked(sc); 2049 1.29 tls mutex_spin_exit(&sc->sc_mtx); 2050 1.29 tls } 2051 1.29 tls 2052 1.29 tls static void 2053 1.29 tls ubsec_rng_locked(void *vsc) 2054 1.29 tls { 2055 1.29 tls struct ubsec_softc *sc = vsc; 2056 1.1 jonathan struct ubsec_q2_rng *rng = &sc->sc_rng; 2057 1.1 jonathan struct ubsec_mcr *mcr; 2058 1.1 jonathan struct ubsec_ctx_rngbypass *ctx; 2059 1.34 bad int *nqueue; 2060 1.1 jonathan 2061 1.31 bad /* Caller is responsible to lock and release sc_mtx. */ 2062 1.31 bad KASSERT(mutex_owned(&sc->sc_mtx)); 2063 1.31 bad 2064 1.1 jonathan if (rng->rng_used) { 2065 1.1 jonathan return; 2066 1.1 jonathan } 2067 1.29 tls 2068 1.29 tls if (sc->sc_rng_need < 1) { 2069 1.29 tls callout_stop(&sc->sc_rngto); 2070 1.29 tls return; 2071 1.29 tls } 2072 1.29 tls 2073 1.34 bad if (sc->sc_flags & UBS_FLAGS_RNG4) 2074 1.34 bad nqueue = &sc->sc_nqueue4; 2075 1.34 bad else 2076 1.34 bad nqueue = &sc->sc_nqueue2; 2077 1.34 bad 2078 1.34 bad (*nqueue)++; 2079 1.34 bad if (*nqueue >= UBS_MAX_NQUEUE) 2080 1.34 bad goto out; 2081 1.1 jonathan 2082 1.1 jonathan mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; 2083 1.1 jonathan ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; 2084 1.1 jonathan 2085 1.1 jonathan mcr->mcr_pkts = htole16(1); 2086 1.1 jonathan mcr->mcr_flags = 0; 2087 1.1 jonathan mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); 2088 1.1 jonathan mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; 2089 1.1 jonathan mcr->mcr_ipktbuf.pb_len = 0; 2090 1.1 jonathan mcr->mcr_reserved = mcr->mcr_pktlen = 0; 2091 1.1 jonathan mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); 2092 1.1 jonathan mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & 2093 1.1 jonathan UBS_PKTBUF_LEN); 2094 1.1 jonathan mcr->mcr_opktbuf.pb_next = 0; 2095 1.1 jonathan 2096 1.1 jonathan ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); 2097 1.1 jonathan ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1); 2098 1.1 jonathan rng->rng_q.q_type = UBS_CTXOP_RNGSHA1; 2099 1.1 jonathan 2100 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 2101 1.1 jonathan rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2102 1.1 jonathan 2103 1.34 bad if (sc->sc_flags & UBS_FLAGS_RNG4) { 2104 1.34 bad SIMPLEQ_INSERT_TAIL(&sc->sc_queue4, &rng->rng_q, q_next); 2105 1.34 bad ubsec_feed4(sc); 2106 1.34 bad } else { 2107 1.34 bad SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); 2108 1.34 bad ubsec_feed2(sc); 2109 1.34 bad } 2110 1.1 jonathan rng->rng_used = 1; 2111 1.1 jonathan 
ubsecstats.hst_rng++; 2112 1.1 jonathan 2113 1.1 jonathan return; 2114 1.1 jonathan 2115 1.1 jonathan out: 2116 1.1 jonathan /* 2117 1.1 jonathan * Something weird happened, generate our own call back. 2118 1.1 jonathan */ 2119 1.34 bad (*nqueue)--; 2120 1.30 bad callout_schedule(&sc->sc_rngto, sc->sc_rnghz); 2121 1.1 jonathan } 2122 1.1 jonathan #endif /* UBSEC_NO_RNG */ 2123 1.1 jonathan 2124 1.1 jonathan static int 2125 1.1 jonathan ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size, 2126 1.1 jonathan struct ubsec_dma_alloc *dma,int mapflags) 2127 1.1 jonathan { 2128 1.1 jonathan int r; 2129 1.1 jonathan 2130 1.1 jonathan if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, 2131 1.1 jonathan &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0) 2132 1.1 jonathan goto fail_0; 2133 1.1 jonathan 2134 1.1 jonathan if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, 2135 1.1 jonathan size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) 2136 1.1 jonathan goto fail_1; 2137 1.1 jonathan 2138 1.1 jonathan if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2139 1.1 jonathan BUS_DMA_NOWAIT, &dma->dma_map)) != 0) 2140 1.1 jonathan goto fail_2; 2141 1.1 jonathan 2142 1.1 jonathan if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, 2143 1.1 jonathan size, NULL, BUS_DMA_NOWAIT)) != 0) 2144 1.1 jonathan goto fail_3; 2145 1.1 jonathan 2146 1.1 jonathan dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; 2147 1.1 jonathan dma->dma_size = size; 2148 1.1 jonathan return (0); 2149 1.1 jonathan 2150 1.1 jonathan fail_3: 2151 1.1 jonathan bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 2152 1.1 jonathan fail_2: 2153 1.1 jonathan bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); 2154 1.1 jonathan fail_1: 2155 1.1 jonathan bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 2156 1.1 jonathan fail_0: 2157 1.1 jonathan dma->dma_map = NULL; 2158 1.1 jonathan return (r); 2159 1.1 jonathan } 2160 1.1 jonathan 2161 1.1 jonathan static void 2162 1.1 jonathan ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) 2163 1.1 jonathan { 2164 1.1 jonathan bus_dmamap_unload(sc->sc_dmat, dma->dma_map); 2165 1.1 jonathan bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size); 2166 1.1 jonathan bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 2167 1.1 jonathan bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 2168 1.1 jonathan } 2169 1.1 jonathan 2170 1.1 jonathan /* 2171 1.63 andvar * Resets the board. Values in the registers are left as is 2172 1.1 jonathan * from the reset (i.e. initial values are assigned elsewhere). 2173 1.1 jonathan */ 2174 1.1 jonathan static void 2175 1.1 jonathan ubsec_reset_board(struct ubsec_softc *sc) 2176 1.1 jonathan { 2177 1.34 bad volatile u_int32_t ctrl; 2178 1.34 bad 2179 1.34 bad ctrl = READ_REG(sc, BS_CTRL); 2180 1.34 bad ctrl |= BS_CTRL_RESET; 2181 1.34 bad WRITE_REG(sc, BS_CTRL, ctrl); 2182 1.34 bad 2183 1.34 bad /* 2184 1.62 skrll * Wait approx. 
30 PCI clocks = 900 ns = 0.9 us 2185 1.34 bad */ 2186 1.34 bad DELAY(10); 2187 1.1 jonathan 2188 1.34 bad /* Enable RNG and interrupts on newer devices */ 2189 1.34 bad if (sc->sc_flags & UBS_FLAGS_MULTIMCR) { 2190 1.34 bad #ifndef UBSEC_NO_RNG 2191 1.34 bad WRITE_REG(sc, BS_CFG, BS_CFG_RNG); 2192 1.34 bad #endif 2193 1.34 bad WRITE_REG(sc, BS_INT, BS_INT_DMAINT); 2194 1.34 bad } 2195 1.1 jonathan } 2196 1.1 jonathan 2197 1.1 jonathan /* 2198 1.1 jonathan * Init Broadcom registers 2199 1.1 jonathan */ 2200 1.1 jonathan static void 2201 1.1 jonathan ubsec_init_board(struct ubsec_softc *sc) 2202 1.1 jonathan { 2203 1.1 jonathan u_int32_t ctrl; 2204 1.1 jonathan 2205 1.1 jonathan ctrl = READ_REG(sc, BS_CTRL); 2206 1.1 jonathan ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); 2207 1.1 jonathan ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; 2208 1.1 jonathan 2209 1.1 jonathan /* 2210 1.1 jonathan * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)). 2211 1.1 jonathan * anyone got hw docs? 2212 1.1 jonathan */ 2213 1.1 jonathan if (sc->sc_flags & UBS_FLAGS_KEY) 2214 1.1 jonathan ctrl |= BS_CTRL_MCR2INT; 2215 1.1 jonathan else 2216 1.1 jonathan ctrl &= ~BS_CTRL_MCR2INT; 2217 1.1 jonathan 2218 1.1 jonathan if (sc->sc_flags & UBS_FLAGS_HWNORM) 2219 1.1 jonathan ctrl &= ~BS_CTRL_SWNORM; 2220 1.1 jonathan 2221 1.34 bad if (sc->sc_flags & UBS_FLAGS_MULTIMCR) { 2222 1.34 bad ctrl |= BS_CTRL_BSIZE240; 2223 1.34 bad ctrl &= ~BS_CTRL_MCR3INT; /* MCR3 is reserved for SSL */ 2224 1.34 bad 2225 1.34 bad if (sc->sc_flags & UBS_FLAGS_RNG4) 2226 1.34 bad ctrl |= BS_CTRL_MCR4INT; 2227 1.34 bad else 2228 1.34 bad ctrl &= ~BS_CTRL_MCR4INT; 2229 1.34 bad } 2230 1.34 bad 2231 1.1 jonathan WRITE_REG(sc, BS_CTRL, ctrl); 2232 1.1 jonathan } 2233 1.1 jonathan 2234 1.1 jonathan /* 2235 1.1 jonathan * Init Broadcom PCI registers 2236 1.1 jonathan */ 2237 1.1 jonathan static void 2238 1.7 thorpej ubsec_init_pciregs(struct pci_attach_args *pa) 2239 1.1 jonathan { 2240 1.1 jonathan pci_chipset_tag_t pc = pa->pa_pc; 2241 1.1 jonathan u_int32_t misc; 2242 1.1 jonathan 2243 1.1 jonathan /* 2244 1.1 jonathan * This will set the cache line size to 1, this will 2245 1.1 jonathan * force the BCM58xx chip just to do burst read/writes. 2246 1.1 jonathan * Cache line read/writes are to slow 2247 1.1 jonathan */ 2248 1.1 jonathan misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG); 2249 1.1 jonathan misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT)) 2250 1.1 jonathan | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT); 2251 1.1 jonathan pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc); 2252 1.1 jonathan } 2253 1.1 jonathan 2254 1.1 jonathan /* 2255 1.1 jonathan * Clean up after a chip crash. 
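 * Every request still sitting on sc_qchip is pulled off and completed
 * with EFAULT through ubsec_free_q().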
2256 1.1 jonathan * It is assumed that the caller in splnet() 2257 1.1 jonathan */ 2258 1.1 jonathan static void 2259 1.1 jonathan ubsec_cleanchip(struct ubsec_softc *sc) 2260 1.1 jonathan { 2261 1.1 jonathan struct ubsec_q *q; 2262 1.1 jonathan 2263 1.1 jonathan while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 2264 1.1 jonathan q = SIMPLEQ_FIRST(&sc->sc_qchip); 2265 1.1 jonathan SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); 2266 1.1 jonathan ubsec_free_q(sc, q); 2267 1.1 jonathan } 2268 1.1 jonathan sc->sc_nqchip = 0; 2269 1.1 jonathan } 2270 1.1 jonathan 2271 1.1 jonathan /* 2272 1.1 jonathan * free a ubsec_q 2273 1.1 jonathan * It is assumed that the caller is within splnet() 2274 1.1 jonathan */ 2275 1.1 jonathan static int 2276 1.1 jonathan ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) 2277 1.1 jonathan { 2278 1.1 jonathan struct ubsec_q *q2; 2279 1.1 jonathan struct cryptop *crp; 2280 1.1 jonathan int npkts; 2281 1.1 jonathan int i; 2282 1.1 jonathan 2283 1.1 jonathan npkts = q->q_nstacked_mcrs; 2284 1.1 jonathan 2285 1.1 jonathan for (i = 0; i < npkts; i++) { 2286 1.1 jonathan if(q->q_stacked_mcr[i]) { 2287 1.1 jonathan q2 = q->q_stacked_mcr[i]; 2288 1.1 jonathan 2289 1.43 msaitoh if ((q2->q_dst_m != NULL) 2290 1.43 msaitoh && (q2->q_src_m != q2->q_dst_m)) 2291 1.1 jonathan m_freem(q2->q_dst_m); 2292 1.1 jonathan 2293 1.1 jonathan crp = (struct cryptop *)q2->q_crp; 2294 1.5 perry 2295 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); 2296 1.5 perry 2297 1.1 jonathan crp->crp_etype = EFAULT; 2298 1.1 jonathan crypto_done(crp); 2299 1.1 jonathan } else { 2300 1.1 jonathan break; 2301 1.1 jonathan } 2302 1.1 jonathan } 2303 1.1 jonathan 2304 1.1 jonathan /* 2305 1.1 jonathan * Free header MCR 2306 1.1 jonathan */ 2307 1.1 jonathan if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 2308 1.1 jonathan m_freem(q->q_dst_m); 2309 1.1 jonathan 2310 1.1 jonathan crp = (struct cryptop *)q->q_crp; 2311 1.5 perry 2312 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 2313 1.5 perry 2314 1.1 jonathan crp->crp_etype = EFAULT; 2315 1.1 jonathan crypto_done(crp); 2316 1.1 jonathan return(0); 2317 1.1 jonathan } 2318 1.1 jonathan 2319 1.1 jonathan /* 2320 1.1 jonathan * Routine to reset the chip and clean up. 
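 * In order: ubsec_reset_board(), ubsec_init_board(), then
 * ubsec_cleanchip(), so any work the chip still had queued is failed
 * with EFAULT.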
2321 1.1 jonathan * It is assumed that the caller is in splnet() 2322 1.1 jonathan */ 2323 1.1 jonathan static void 2324 1.1 jonathan ubsec_totalreset(struct ubsec_softc *sc) 2325 1.1 jonathan { 2326 1.1 jonathan ubsec_reset_board(sc); 2327 1.1 jonathan ubsec_init_board(sc); 2328 1.1 jonathan ubsec_cleanchip(sc); 2329 1.1 jonathan } 2330 1.1 jonathan 2331 1.1 jonathan static int 2332 1.1 jonathan ubsec_dmamap_aligned(bus_dmamap_t map) 2333 1.1 jonathan { 2334 1.1 jonathan int i; 2335 1.1 jonathan 2336 1.1 jonathan for (i = 0; i < map->dm_nsegs; i++) { 2337 1.1 jonathan if (map->dm_segs[i].ds_addr & 3) 2338 1.1 jonathan return (0); 2339 1.1 jonathan if ((i != (map->dm_nsegs - 1)) && 2340 1.1 jonathan (map->dm_segs[i].ds_len & 3)) 2341 1.1 jonathan return (0); 2342 1.1 jonathan } 2343 1.1 jonathan return (1); 2344 1.1 jonathan } 2345 1.1 jonathan 2346 1.1 jonathan static void 2347 1.1 jonathan ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) 2348 1.1 jonathan { 2349 1.1 jonathan switch (q->q_type) { 2350 1.1 jonathan case UBS_CTXOP_MODEXP: { 2351 1.1 jonathan struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 2352 1.1 jonathan 2353 1.1 jonathan ubsec_dma_free(sc, &me->me_q.q_mcr); 2354 1.1 jonathan ubsec_dma_free(sc, &me->me_q.q_ctx); 2355 1.1 jonathan ubsec_dma_free(sc, &me->me_M); 2356 1.1 jonathan ubsec_dma_free(sc, &me->me_E); 2357 1.1 jonathan ubsec_dma_free(sc, &me->me_C); 2358 1.1 jonathan ubsec_dma_free(sc, &me->me_epb); 2359 1.1 jonathan free(me, M_DEVBUF); 2360 1.1 jonathan break; 2361 1.1 jonathan } 2362 1.1 jonathan case UBS_CTXOP_RSAPRIV: { 2363 1.1 jonathan struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 2364 1.1 jonathan 2365 1.1 jonathan ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2366 1.1 jonathan ubsec_dma_free(sc, &rp->rpr_q.q_ctx); 2367 1.1 jonathan ubsec_dma_free(sc, &rp->rpr_msgin); 2368 1.1 jonathan ubsec_dma_free(sc, &rp->rpr_msgout); 2369 1.1 jonathan free(rp, M_DEVBUF); 2370 1.1 jonathan break; 2371 1.1 jonathan } 2372 1.1 jonathan default: 2373 1.28 chs printf("%s: invalid kfree 0x%x\n", device_xname(sc->sc_dev), 2374 1.1 jonathan q->q_type); 2375 1.1 jonathan break; 2376 1.1 jonathan } 2377 1.1 jonathan } 2378 1.1 jonathan 2379 1.1 jonathan static int 2380 1.1 jonathan ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) 2381 1.1 jonathan { 2382 1.53 riastrad struct ubsec_softc *sc = arg; 2383 1.1 jonathan 2384 1.1 jonathan while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { 2385 1.1 jonathan struct ubsec_q2 *q; 2386 1.1 jonathan 2387 1.1 jonathan q = SIMPLEQ_FIRST(&sc->sc_q2free); 2388 1.1 jonathan SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next); 2389 1.1 jonathan ubsec_kfree(sc, q); 2390 1.1 jonathan } 2391 1.1 jonathan 2392 1.1 jonathan switch (krp->krp_op) { 2393 1.1 jonathan case CRK_MOD_EXP: 2394 1.1 jonathan if (sc->sc_flags & UBS_FLAGS_HWNORM) 2395 1.55 riastrad ubsec_kprocess_modexp_hw(sc, krp, hint); 2396 1.1 jonathan else 2397 1.55 riastrad ubsec_kprocess_modexp_sw(sc, krp, hint); 2398 1.1 jonathan break; 2399 1.1 jonathan case CRK_MOD_EXP_CRT: 2400 1.55 riastrad ubsec_kprocess_rsapriv(sc, krp, hint); 2401 1.1 jonathan break; 2402 1.1 jonathan default: 2403 1.1 jonathan printf("%s: kprocess: invalid op 0x%x\n", 2404 1.28 chs device_xname(sc->sc_dev), krp->krp_op); 2405 1.1 jonathan krp->krp_status = EOPNOTSUPP; 2406 1.1 jonathan crypto_kdone(krp); 2407 1.1 jonathan } 2408 1.55 riastrad return 0; 2409 1.1 jonathan } 2410 1.1 jonathan 2411 1.1 jonathan /* 2412 1.1 jonathan * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw 
normalization) 2413 1.1 jonathan */ 2414 1.55 riastrad static void 2415 1.1 jonathan ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, 2416 1.11 christos int hint) 2417 1.1 jonathan { 2418 1.1 jonathan struct ubsec_q2_modexp *me; 2419 1.1 jonathan struct ubsec_mcr *mcr; 2420 1.1 jonathan struct ubsec_ctx_modexp *ctx; 2421 1.1 jonathan struct ubsec_pktbuf *epb; 2422 1.29 tls int err = 0; 2423 1.1 jonathan u_int nbits, normbits, mbits, shiftbits, ebits; 2424 1.1 jonathan 2425 1.1 jonathan me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2426 1.1 jonathan if (me == NULL) { 2427 1.1 jonathan err = ENOMEM; 2428 1.1 jonathan goto errout; 2429 1.1 jonathan } 2430 1.18 cegger memset(me, 0, sizeof *me); 2431 1.1 jonathan me->me_krp = krp; 2432 1.1 jonathan me->me_q.q_type = UBS_CTXOP_MODEXP; 2433 1.1 jonathan 2434 1.1 jonathan nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2435 1.1 jonathan if (nbits <= 512) 2436 1.1 jonathan normbits = 512; 2437 1.1 jonathan else if (nbits <= 768) 2438 1.1 jonathan normbits = 768; 2439 1.1 jonathan else if (nbits <= 1024) 2440 1.1 jonathan normbits = 1024; 2441 1.1 jonathan else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2442 1.1 jonathan normbits = 1536; 2443 1.1 jonathan else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2444 1.1 jonathan normbits = 2048; 2445 1.1 jonathan else { 2446 1.1 jonathan err = E2BIG; 2447 1.1 jonathan goto errout; 2448 1.1 jonathan } 2449 1.1 jonathan 2450 1.1 jonathan shiftbits = normbits - nbits; 2451 1.1 jonathan 2452 1.1 jonathan me->me_modbits = nbits; 2453 1.1 jonathan me->me_shiftbits = shiftbits; 2454 1.1 jonathan me->me_normbits = normbits; 2455 1.1 jonathan 2456 1.1 jonathan /* Sanity check: result bits must be >= true modulus bits. 
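	 * The result is written back into krp_param[krp_iparams], so that
	 * parameter must be declared wide enough to hold an nbits-wide
	 * value; otherwise fail with ERANGE rather than overrun it.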
*/ 2457 1.1 jonathan if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2458 1.1 jonathan err = ERANGE; 2459 1.1 jonathan goto errout; 2460 1.1 jonathan } 2461 1.1 jonathan 2462 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2463 1.1 jonathan &me->me_q.q_mcr, 0)) { 2464 1.1 jonathan err = ENOMEM; 2465 1.1 jonathan goto errout; 2466 1.1 jonathan } 2467 1.1 jonathan mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2468 1.1 jonathan 2469 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2470 1.1 jonathan &me->me_q.q_ctx, 0)) { 2471 1.1 jonathan err = ENOMEM; 2472 1.1 jonathan goto errout; 2473 1.1 jonathan } 2474 1.1 jonathan 2475 1.1 jonathan mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2476 1.1 jonathan if (mbits > nbits) { 2477 1.1 jonathan err = E2BIG; 2478 1.1 jonathan goto errout; 2479 1.1 jonathan } 2480 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2481 1.1 jonathan err = ENOMEM; 2482 1.1 jonathan goto errout; 2483 1.1 jonathan } 2484 1.1 jonathan ubsec_kshift_r(shiftbits, 2485 1.1 jonathan krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2486 1.1 jonathan me->me_M.dma_vaddr, normbits); 2487 1.1 jonathan 2488 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2489 1.1 jonathan err = ENOMEM; 2490 1.1 jonathan goto errout; 2491 1.1 jonathan } 2492 1.18 cegger memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2493 1.1 jonathan 2494 1.1 jonathan ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2495 1.1 jonathan if (ebits > nbits) { 2496 1.1 jonathan err = E2BIG; 2497 1.1 jonathan goto errout; 2498 1.1 jonathan } 2499 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2500 1.1 jonathan err = ENOMEM; 2501 1.1 jonathan goto errout; 2502 1.1 jonathan } 2503 1.1 jonathan ubsec_kshift_r(shiftbits, 2504 1.1 jonathan krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2505 1.1 jonathan me->me_E.dma_vaddr, normbits); 2506 1.1 jonathan 2507 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2508 1.1 jonathan &me->me_epb, 0)) { 2509 1.1 jonathan err = ENOMEM; 2510 1.1 jonathan goto errout; 2511 1.1 jonathan } 2512 1.1 jonathan epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2513 1.1 jonathan epb->pb_addr = htole32(me->me_E.dma_paddr); 2514 1.1 jonathan epb->pb_next = 0; 2515 1.1 jonathan epb->pb_len = htole32(normbits / 8); 2516 1.1 jonathan 2517 1.1 jonathan #ifdef UBSEC_DEBUG 2518 1.1 jonathan if (ubsec_debug) { 2519 1.1 jonathan printf("Epb "); 2520 1.1 jonathan ubsec_dump_pb(epb); 2521 1.1 jonathan } 2522 1.1 jonathan #endif 2523 1.1 jonathan 2524 1.1 jonathan mcr->mcr_pkts = htole16(1); 2525 1.1 jonathan mcr->mcr_flags = 0; 2526 1.1 jonathan mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2527 1.1 jonathan mcr->mcr_reserved = 0; 2528 1.1 jonathan mcr->mcr_pktlen = 0; 2529 1.1 jonathan 2530 1.1 jonathan mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2531 1.1 jonathan mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2532 1.1 jonathan mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2533 1.1 jonathan 2534 1.1 jonathan mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2535 1.1 jonathan mcr->mcr_opktbuf.pb_next = 0; 2536 1.1 jonathan mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2537 1.1 jonathan 2538 1.1 jonathan #ifdef DIAGNOSTIC 2539 1.1 jonathan /* Misaligned output buffer will hang the chip. 
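	 * Both the address and the length of the output packet buffer have
	 * to be 32-bit aligned (the same constraint ubsec_dmamap_aligned()
	 * checks on the symmetric path), hence the panics below.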
*/ 2540 1.1 jonathan if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2541 1.43 msaitoh panic("%s: modexp invalid addr 0x%x", device_xname(sc->sc_dev), 2542 1.43 msaitoh letoh32(mcr->mcr_opktbuf.pb_addr)); 2543 1.1 jonathan if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2544 1.43 msaitoh panic("%s: modexp invalid len 0x%x", device_xname(sc->sc_dev), 2545 1.43 msaitoh letoh32(mcr->mcr_opktbuf.pb_len)); 2546 1.1 jonathan #endif 2547 1.1 jonathan 2548 1.1 jonathan ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2549 1.18 cegger memset(ctx, 0, sizeof(*ctx)); 2550 1.1 jonathan ubsec_kshift_r(shiftbits, 2551 1.1 jonathan krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2552 1.1 jonathan ctx->me_N, normbits); 2553 1.1 jonathan ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2554 1.1 jonathan ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2555 1.1 jonathan ctx->me_E_len = htole16(nbits); 2556 1.1 jonathan ctx->me_N_len = htole16(nbits); 2557 1.1 jonathan 2558 1.1 jonathan #ifdef UBSEC_DEBUG 2559 1.1 jonathan if (ubsec_debug) { 2560 1.1 jonathan ubsec_dump_mcr(mcr); 2561 1.1 jonathan ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2562 1.1 jonathan } 2563 1.1 jonathan #endif 2564 1.1 jonathan 2565 1.1 jonathan /* 2566 1.1 jonathan * ubsec_feed2 will sync mcr and ctx, we just need to sync 2567 1.1 jonathan * everything else. 2568 1.1 jonathan */ 2569 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2570 1.1 jonathan 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2571 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2572 1.1 jonathan 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2573 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2574 1.1 jonathan 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2575 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2576 1.1 jonathan 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2577 1.1 jonathan 2578 1.1 jonathan /* Enqueue and we're done... 
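	 * take sc_mtx, put the request on sc_queue2, and let ubsec_feed2()
	 * push it at the chip if MCR2 has room.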
*/ 2579 1.29 tls mutex_spin_enter(&sc->sc_mtx); 2580 1.1 jonathan SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2581 1.1 jonathan ubsec_feed2(sc); 2582 1.1 jonathan ubsecstats.hst_modexp++; 2583 1.29 tls mutex_spin_exit(&sc->sc_mtx); 2584 1.1 jonathan 2585 1.55 riastrad return; 2586 1.1 jonathan 2587 1.1 jonathan errout: 2588 1.1 jonathan if (me != NULL) { 2589 1.1 jonathan if (me->me_q.q_mcr.dma_map != NULL) 2590 1.1 jonathan ubsec_dma_free(sc, &me->me_q.q_mcr); 2591 1.1 jonathan if (me->me_q.q_ctx.dma_map != NULL) { 2592 1.43 msaitoh memset(me->me_q.q_ctx.dma_vaddr, 0, 2593 1.43 msaitoh me->me_q.q_ctx.dma_size); 2594 1.1 jonathan ubsec_dma_free(sc, &me->me_q.q_ctx); 2595 1.1 jonathan } 2596 1.1 jonathan if (me->me_M.dma_map != NULL) { 2597 1.18 cegger memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 2598 1.1 jonathan ubsec_dma_free(sc, &me->me_M); 2599 1.1 jonathan } 2600 1.1 jonathan if (me->me_E.dma_map != NULL) { 2601 1.18 cegger memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 2602 1.1 jonathan ubsec_dma_free(sc, &me->me_E); 2603 1.1 jonathan } 2604 1.1 jonathan if (me->me_C.dma_map != NULL) { 2605 1.18 cegger memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2606 1.1 jonathan ubsec_dma_free(sc, &me->me_C); 2607 1.1 jonathan } 2608 1.1 jonathan if (me->me_epb.dma_map != NULL) 2609 1.1 jonathan ubsec_dma_free(sc, &me->me_epb); 2610 1.1 jonathan free(me, M_DEVBUF); 2611 1.1 jonathan } 2612 1.1 jonathan krp->krp_status = err; 2613 1.1 jonathan crypto_kdone(krp); 2614 1.1 jonathan } 2615 1.1 jonathan 2616 1.1 jonathan /* 2617 1.1 jonathan * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2618 1.1 jonathan */ 2619 1.55 riastrad static void 2620 1.1 jonathan ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, 2621 1.11 christos int hint) 2622 1.1 jonathan { 2623 1.1 jonathan struct ubsec_q2_modexp *me; 2624 1.1 jonathan struct ubsec_mcr *mcr; 2625 1.1 jonathan struct ubsec_ctx_modexp *ctx; 2626 1.1 jonathan struct ubsec_pktbuf *epb; 2627 1.29 tls int err = 0; 2628 1.1 jonathan u_int nbits, normbits, mbits, shiftbits, ebits; 2629 1.1 jonathan 2630 1.1 jonathan me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2631 1.1 jonathan if (me == NULL) { 2632 1.1 jonathan err = ENOMEM; 2633 1.1 jonathan goto errout; 2634 1.1 jonathan } 2635 1.18 cegger memset(me, 0, sizeof *me); 2636 1.1 jonathan me->me_krp = krp; 2637 1.1 jonathan me->me_q.q_type = UBS_CTXOP_MODEXP; 2638 1.1 jonathan 2639 1.1 jonathan nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2640 1.1 jonathan if (nbits <= 512) 2641 1.1 jonathan normbits = 512; 2642 1.1 jonathan else if (nbits <= 768) 2643 1.1 jonathan normbits = 768; 2644 1.1 jonathan else if (nbits <= 1024) 2645 1.1 jonathan normbits = 1024; 2646 1.1 jonathan else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2647 1.1 jonathan normbits = 1536; 2648 1.1 jonathan else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2649 1.1 jonathan normbits = 2048; 2650 1.1 jonathan else { 2651 1.1 jonathan err = E2BIG; 2652 1.1 jonathan goto errout; 2653 1.1 jonathan } 2654 1.1 jonathan 2655 1.1 jonathan shiftbits = normbits - nbits; 2656 1.1 jonathan 2657 1.1 jonathan /* XXX ??? */ 2658 1.1 jonathan me->me_modbits = nbits; 2659 1.1 jonathan me->me_shiftbits = shiftbits; 2660 1.1 jonathan me->me_normbits = normbits; 2661 1.1 jonathan 2662 1.1 jonathan /* Sanity check: result bits must be >= true modulus bits. 
*/ 2663 1.1 jonathan if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2664 1.1 jonathan err = ERANGE; 2665 1.1 jonathan goto errout; 2666 1.1 jonathan } 2667 1.1 jonathan 2668 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2669 1.1 jonathan &me->me_q.q_mcr, 0)) { 2670 1.1 jonathan err = ENOMEM; 2671 1.1 jonathan goto errout; 2672 1.1 jonathan } 2673 1.1 jonathan mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2674 1.1 jonathan 2675 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2676 1.1 jonathan &me->me_q.q_ctx, 0)) { 2677 1.1 jonathan err = ENOMEM; 2678 1.1 jonathan goto errout; 2679 1.1 jonathan } 2680 1.1 jonathan 2681 1.1 jonathan mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2682 1.1 jonathan if (mbits > nbits) { 2683 1.1 jonathan err = E2BIG; 2684 1.1 jonathan goto errout; 2685 1.1 jonathan } 2686 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2687 1.1 jonathan err = ENOMEM; 2688 1.1 jonathan goto errout; 2689 1.1 jonathan } 2690 1.18 cegger memset(me->me_M.dma_vaddr, 0, normbits / 8); 2691 1.1 jonathan bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2692 1.1 jonathan me->me_M.dma_vaddr, (mbits + 7) / 8); 2693 1.1 jonathan 2694 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2695 1.1 jonathan err = ENOMEM; 2696 1.1 jonathan goto errout; 2697 1.1 jonathan } 2698 1.18 cegger memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2699 1.1 jonathan 2700 1.1 jonathan ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2701 1.1 jonathan if (ebits > nbits) { 2702 1.1 jonathan err = E2BIG; 2703 1.1 jonathan goto errout; 2704 1.1 jonathan } 2705 1.1 jonathan if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2706 1.1 jonathan err = ENOMEM; 2707 1.1 jonathan goto errout; 2708 1.1 jonathan } 2709 1.18 cegger memset(me->me_E.dma_vaddr, 0, normbits / 8); 2710 1.1 jonathan bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2711 1.1 jonathan me->me_E.dma_vaddr, (ebits + 7) / 8); 2712 1.1 jonathan 2713 1.1 jonathan if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2714 1.1 jonathan &me->me_epb, 0)) { 2715 1.1 jonathan err = ENOMEM; 2716 1.1 jonathan goto errout; 2717 1.1 jonathan } 2718 1.1 jonathan epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2719 1.1 jonathan epb->pb_addr = htole32(me->me_E.dma_paddr); 2720 1.1 jonathan epb->pb_next = 0; 2721 1.1 jonathan epb->pb_len = htole32((ebits + 7) / 8); 2722 1.1 jonathan 2723 1.1 jonathan #ifdef UBSEC_DEBUG 2724 1.1 jonathan if (ubsec_debug) { 2725 1.1 jonathan printf("Epb "); 2726 1.1 jonathan ubsec_dump_pb(epb); 2727 1.1 jonathan } 2728 1.1 jonathan #endif 2729 1.1 jonathan 2730 1.1 jonathan mcr->mcr_pkts = htole16(1); 2731 1.1 jonathan mcr->mcr_flags = 0; 2732 1.1 jonathan mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2733 1.1 jonathan mcr->mcr_reserved = 0; 2734 1.1 jonathan mcr->mcr_pktlen = 0; 2735 1.1 jonathan 2736 1.1 jonathan mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2737 1.1 jonathan mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2738 1.1 jonathan mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2739 1.1 jonathan 2740 1.1 jonathan mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2741 1.1 jonathan mcr->mcr_opktbuf.pb_next = 0; 2742 1.1 jonathan mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2743 1.1 jonathan 2744 1.1 jonathan #ifdef DIAGNOSTIC 2745 1.1 jonathan /* Misaligned output buffer will hang the chip. 
*/ 2746 1.1 jonathan if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2747 1.43 msaitoh panic("%s: modexp invalid addr 0x%x", device_xname(sc->sc_dev), 2748 1.43 msaitoh letoh32(mcr->mcr_opktbuf.pb_addr)); 2749 1.1 jonathan if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2750 1.43 msaitoh panic("%s: modexp invalid len 0x%x", device_xname(sc->sc_dev), 2751 1.43 msaitoh letoh32(mcr->mcr_opktbuf.pb_len)); 2752 1.1 jonathan #endif 2753 1.1 jonathan 2754 1.1 jonathan ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2755 1.18 cegger memset(ctx, 0, sizeof(*ctx)); 2756 1.20 tsutsui memcpy(ctx->me_N, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, 2757 1.1 jonathan (nbits + 7) / 8); 2758 1.1 jonathan ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2759 1.1 jonathan ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2760 1.1 jonathan ctx->me_E_len = htole16(ebits); 2761 1.1 jonathan ctx->me_N_len = htole16(nbits); 2762 1.1 jonathan 2763 1.1 jonathan #ifdef UBSEC_DEBUG 2764 1.1 jonathan if (ubsec_debug) { 2765 1.1 jonathan ubsec_dump_mcr(mcr); 2766 1.1 jonathan ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2767 1.1 jonathan } 2768 1.1 jonathan #endif 2769 1.1 jonathan 2770 1.1 jonathan /* 2771 1.1 jonathan * ubsec_feed2 will sync mcr and ctx, we just need to sync 2772 1.1 jonathan * everything else. 2773 1.1 jonathan */ 2774 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2775 1.1 jonathan 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2776 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2777 1.1 jonathan 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2778 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2779 1.1 jonathan 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2780 1.1 jonathan bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2781 1.1 jonathan 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2782 1.1 jonathan 2783 1.1 jonathan /* Enqueue and we're done... 
	mutex_spin_enter(&sc->sc_mtx);
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	mutex_spin_exit(&sc->sc_mtx);

	return;

errout:
	if (me != NULL) {
		if (me->me_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_q.q_mcr);
		if (me->me_q.q_ctx.dma_map != NULL) {
			memset(me->me_q.q_ctx.dma_vaddr, 0,
			    me->me_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &me->me_q.q_ctx);
		}
		if (me->me_M.dma_map != NULL) {
			memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size);
			ubsec_dma_free(sc, &me->me_M);
		}
		if (me->me_E.dma_map != NULL) {
			memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size);
			ubsec_dma_free(sc, &me->me_E);
		}
		if (me->me_C.dma_map != NULL) {
			memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);
			ubsec_dma_free(sc, &me->me_C);
		}
		if (me->me_epb.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
}

static void
ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp,
    int hint)
{
	struct ubsec_q2_rsapriv *rp = NULL;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rsapriv *ctx;
	int err = 0;
	u_int padlen, msglen;

	msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
	padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
	if (msglen > padlen)
		padlen = msglen;

	if (padlen <= 256)
		padlen = 256;
	else if (padlen <= 384)
		padlen = 384;
	else if (padlen <= 512)
		padlen = 512;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
		padlen = 768;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
		padlen = 1024;
	else {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	rp = malloc(sizeof *rp, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (rp == NULL) {
		err = ENOMEM;
		goto errout;
	}
	rp->rpr_krp = krp;
	rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;

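	/*
	 * The chip computes the private-key operation via the Chinese
	 * Remainder Theorem: rpr_buf in the context below holds p, q,
	 * dp, dq and pinv back to back, each padded to padlen bits, and
	 * the message buffers are 2 * padlen bits.  padlen is the larger
	 * of the two prime sizes rounded up to an operand width the key
	 * unit supports (256/384/512 bits, or 768/1024 on parts with
	 * UBS_FLAGS_BIGKEY).  For example, with a 2048-bit modulus p and
	 * q are about 1024 bits each, so padlen becomes 1024 (BIGKEY
	 * parts only) and msglen 2048 bits.
	 */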
	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &rp->rpr_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
	    &rp->rpr_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof *ctx);

	/* Copy in p */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
	    &ctx->rpr_buf[0 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);

	/* Copy in q */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
	    &ctx->rpr_buf[1 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);

	/* Copy in dp */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
	    &ctx->rpr_buf[2 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);

	/* Copy in dq */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
	    &ctx->rpr_buf[3 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);

	/* Copy in pinv */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
	    &ctx->rpr_buf[4 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);

	msglen = padlen * 2;

	/* Copy in input message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgin.dma_vaddr, 0, (msglen + 7) / 8);
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
	    rp->rpr_msgin.dma_vaddr,
	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);

	/* Prepare space for output message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
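	/*
	 * The hardware apparently always writes a full 2 * padlen-bit
	 * result, so the caller's MSGOUT parameter must be able to hold
	 * msglen bits.  rpr_msgout is allocated and zeroed below; the
	 * completion path is expected to copy it back into
	 * krp_param[UBS_RSAPRIV_PAR_MSGOUT].
	 */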
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgout.dma_vaddr, 0, (msglen + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
	mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = htole16(msglen);
	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);

#ifdef DIAGNOSTIC
	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
		panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)",
		    device_xname(sc->sc_dev), (u_long) rp->rpr_msgin.dma_paddr,
		    (u_long) rp->rpr_msgin.dma_size);
	}
	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
		panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)",
		    device_xname(sc->sc_dev), (u_long) rp->rpr_msgout.dma_paddr,
		    (u_long) rp->rpr_msgout.dma_size);
	}
#endif

	ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8));
	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
	ctx->rpr_q_len = htole16(padlen);
	ctx->rpr_p_len = htole16(padlen);

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map,
	    0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map,
	    0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Enqueue and we're done... */
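	/*
	 * Same enqueue and completion dance as the modexp path above;
	 * ubsecstats.hst_modexpcrt below counts RSA-CRT operations
	 * handed to the chip.
	 */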
	mutex_spin_enter(&sc->sc_mtx);
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexpcrt++;
	mutex_spin_exit(&sc->sc_mtx);
	return;

errout:
	if (rp != NULL) {
		if (rp->rpr_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		if (rp->rpr_msgin.dma_map != NULL) {
			memset(rp->rpr_msgin.dma_vaddr, 0,
			    rp->rpr_msgin.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgin);
		}
		if (rp->rpr_msgout.dma_map != NULL) {
			memset(rp->rpr_msgout.dma_vaddr, 0,
			    rp->rpr_msgout.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgout);
		}
		free(rp, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
}

#ifdef UBSEC_DEBUG
static void
ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
{
	printf("addr 0x%x (0x%x) next 0x%x\n",
	    pb->pb_addr, pb->pb_len, pb->pb_next);
}

static void
ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c)
{
	printf("CTX (0x%x):\n", c->ctx_len);
	switch (letoh16(c->ctx_op)) {
	case UBS_CTXOP_RNGBYPASS:
	case UBS_CTXOP_RNGSHA1:
		break;
	case UBS_CTXOP_MODEXP:
	{
		struct ubsec_ctx_modexp *cx = (void *)c;
		int i, len;

		printf(" Elen %u, Nlen %u\n",
		    letoh16(cx->me_E_len), letoh16(cx->me_N_len));
		len = (cx->me_N_len + 7)/8;
		for (i = 0; i < len; i++)
			printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
		printf("\n");
		break;
	}
	default:
		printf("unknown context: %x\n", c->ctx_op);
	}
	printf("END CTX\n");
}

static void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
	volatile struct ubsec_mcr_add *ma;
	int i;

	printf("MCR:\n");
	printf(" pkts: %u, flags 0x%x\n",
	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
	ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
		printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
		    letoh16(ma->mcr_reserved));
		printf(" %d: ipkt ", i);
		ubsec_dump_pb(&ma->mcr_ipktbuf);
		printf(" %d: opkt ", i);
		ubsec_dump_pb(&ma->mcr_opktbuf);
		ma++;
	}
	printf("END MCR\n");
}
#endif /* UBSEC_DEBUG */

/*
 * Return the number of significant bits of a big number.
 */
static int
ubsec_ksigbits(struct crparam *cr)
{
	u_int plen = (cr->crp_nbits + 7) / 8;
	int i, sig = plen * 8;
	u_int8_t c, *p = cr->crp_p;

	for (i = plen - 1; i >= 0; i--) {
		c = p[i];
		if (c != 0) {
			while ((c & 0x80) == 0) {
				sig--;
				c <<= 1;
			}
			break;
		}
		sig -= 8;
	}
	return (sig);
}

static void
ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits,
    u_int8_t *dst, u_int dstbits)
{
	u_int slen, dlen;
	int i, si, di, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	for (i = 0; i < slen; i++)
		dst[i] = src[i];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits / 8;
	if (n != 0) {
		si = dlen - n - 1;
		di = dlen - 1;
		while (si >= 0)
			dst[di--] = dst[si--];
		while (di >= 0)
			dst[di--] = 0;
	}

	n = shiftbits % 8;
	if (n != 0) {
		for (i = dlen - 1; i > 0; i--)
			dst[i] = (dst[i] << n) |
			    (dst[i - 1] >> (8 - n));
		dst[0] = dst[0] << n;
	}
}

static void
ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits,
    u_int8_t *dst, u_int dstbits)
{
	int slen, dlen, i, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	n = shiftbits / 8;
	for (i = 0; i < slen; i++)
		dst[i] = src[i + n];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits % 8;
	if (n != 0) {
		for (i = 0; i < (dlen - 1); i++)
			dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
		dst[dlen - 1] = dst[dlen - 1] >> n;
	}
}
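
/*
 * The helpers above all use the crparam bignum convention: crp_p points
 * at a least-significant-byte-first array and crp_nbits gives its
 * nominal width.  ubsec_ksigbits() trims that width down to the highest
 * set bit; ubsec_kshift_r() moves the value toward the most significant
 * end (multiply by 2^shiftbits, e.g. {0x01, 0x00} shifted by 4 becomes
 * {0x10, 0x00}, i.e. 1 -> 16) and ubsec_kshift_l() shifts it back down.
 * The compiled-out sketch below is illustrative only and not part of
 * the driver; the function name is made up for the example.
 */
#if 0
static void
ubsec_ksigbits_example(void)
{
	/* 0x0105 == 261, stored least significant byte first. */
	static u_int8_t num[2] = { 0x05, 0x01 };
	struct crparam cr;

	cr.crp_p = (void *)num;
	cr.crp_nbits = 16;

	/* 261 needs 9 bits, so this prints "sigbits=9". */
	printf("sigbits=%d\n", ubsec_ksigbits(&cr));
}
#endif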