1 1.20 andvar /* $NetBSD: mvxpsec.c,v 1.20 2024/02/09 22:08:35 andvar Exp $ */ 2 1.1 hsuenaga /* 3 1.1 hsuenaga * Copyright (c) 2015 Internet Initiative Japan Inc. 4 1.1 hsuenaga * All rights reserved. 5 1.1 hsuenaga * 6 1.1 hsuenaga * Redistribution and use in source and binary forms, with or without 7 1.1 hsuenaga * modification, are permitted provided that the following conditions 8 1.1 hsuenaga * are met: 9 1.1 hsuenaga * 1. Redistributions of source code must retain the above copyright 10 1.1 hsuenaga * notice, this list of conditions and the following disclaimer. 11 1.1 hsuenaga * 2. Redistributions in binary form must reproduce the above copyright 12 1.1 hsuenaga * notice, this list of conditions and the following disclaimer in the 13 1.1 hsuenaga * documentation and/or other materials provided with the distribution. 14 1.1 hsuenaga * 15 1.1 hsuenaga * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 1.1 hsuenaga * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 1.1 hsuenaga * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 1.1 hsuenaga * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 1.1 hsuenaga * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 1.1 hsuenaga * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 1.1 hsuenaga * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 1.1 hsuenaga * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 1.1 hsuenaga * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 1.1 hsuenaga * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 1.1 hsuenaga * POSSIBILITY OF SUCH DAMAGE. 26 1.1 hsuenaga */ 27 1.7 riastrad 28 1.7 riastrad #ifdef _KERNEL_OPT 29 1.7 riastrad #include "opt_ipsec.h" 30 1.7 riastrad #endif 31 1.7 riastrad 32 1.1 hsuenaga /* 33 1.1 hsuenaga * Cryptographic Engine and Security Accelerator(MVXPSEC) 34 1.1 hsuenaga */ 35 1.1 hsuenaga #include <sys/cdefs.h> 36 1.1 hsuenaga #include <sys/param.h> 37 1.1 hsuenaga #include <sys/types.h> 38 1.1 hsuenaga #include <sys/kernel.h> 39 1.1 hsuenaga #include <sys/queue.h> 40 1.1 hsuenaga #include <sys/conf.h> 41 1.1 hsuenaga #include <sys/proc.h> 42 1.1 hsuenaga #include <sys/bus.h> 43 1.1 hsuenaga #include <sys/evcnt.h> 44 1.1 hsuenaga #include <sys/device.h> 45 1.1 hsuenaga #include <sys/endian.h> 46 1.1 hsuenaga #include <sys/errno.h> 47 1.1 hsuenaga #include <sys/kmem.h> 48 1.1 hsuenaga #include <sys/mbuf.h> 49 1.1 hsuenaga #include <sys/callout.h> 50 1.1 hsuenaga #include <sys/pool.h> 51 1.1 hsuenaga #include <sys/cprng.h> 52 1.1 hsuenaga #include <sys/syslog.h> 53 1.1 hsuenaga #include <sys/mutex.h> 54 1.1 hsuenaga #include <sys/kthread.h> 55 1.1 hsuenaga #include <sys/atomic.h> 56 1.1 hsuenaga #include <sys/sha1.h> 57 1.1 hsuenaga #include <sys/md5.h> 58 1.1 hsuenaga 59 1.1 hsuenaga #include <uvm/uvm_extern.h> 60 1.1 hsuenaga 61 1.1 hsuenaga #include <opencrypto/cryptodev.h> 62 1.1 hsuenaga #include <opencrypto/xform.h> 63 1.1 hsuenaga 64 1.1 hsuenaga #include <net/net_stats.h> 65 1.1 hsuenaga 66 1.1 hsuenaga #include <netinet/in_systm.h> 67 1.1 hsuenaga #include <netinet/in.h> 68 1.1 hsuenaga #include <netinet/ip.h> 69 1.1 hsuenaga #include <netinet/ip6.h> 70 1.1 hsuenaga 71 1.7 riastrad #if NIPSEC > 0 72 1.1 hsuenaga #include <netipsec/esp_var.h> 73 1.7 riastrad #endif 74 1.1 hsuenaga 75 1.1 hsuenaga #include <arm/cpufunc.h> 76 1.1 hsuenaga #include <arm/marvell/mvsocvar.h> 77 1.1 hsuenaga #include <arm/marvell/armadaxpreg.h> 78 1.1 hsuenaga #include <dev/marvell/marvellreg.h> 79 1.1 hsuenaga #include <dev/marvell/marvellvar.h> 80 1.1 hsuenaga #include <dev/marvell/mvxpsecreg.h> 81 1.1 hsuenaga #include <dev/marvell/mvxpsecvar.h> 82 1.1 hsuenaga 83 1.1 hsuenaga #ifdef DEBUG 84 1.1 hsuenaga #define STATIC __attribute__ ((noinline)) extern 85 1.1 hsuenaga #define _STATIC __attribute__ ((noinline)) extern 86 1.1 hsuenaga #define INLINE __attribute__ ((noinline)) extern 87 1.1 hsuenaga #define _INLINE __attribute__ ((noinline)) extern 88 1.1 hsuenaga #else 89 1.1 hsuenaga #define STATIC static 90 1.1 hsuenaga #define _STATIC __attribute__ ((unused)) static 91 1.1 hsuenaga #define INLINE static inline 92 1.1 hsuenaga #define _INLINE __attribute__ ((unused)) static inline 93 1.1 hsuenaga #endif 94 1.1 hsuenaga 95 1.1 hsuenaga /* 96 1.1 hsuenaga * IRQ and SRAM spaces for each of unit 97 1.1 hsuenaga * XXX: move to attach_args 98 1.1 hsuenaga */ 99 1.1 hsuenaga struct { 100 1.1 hsuenaga int err_int; 101 1.1 hsuenaga } mvxpsec_config[] = { 102 1.1 hsuenaga { .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */ 103 1.1 hsuenaga { .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */ 104 1.1 hsuenaga }; 105 1.1 hsuenaga #define MVXPSEC_ERR_INT(sc) \ 106 1.1 hsuenaga mvxpsec_config[device_unit((sc)->sc_dev)].err_int 107 1.1 hsuenaga 108 1.1 hsuenaga /* 109 1.1 hsuenaga * AES 110 1.1 hsuenaga */ 111 1.1 hsuenaga #define MAXBC (128/32) 112 1.1 hsuenaga #define MAXKC (256/32) 113 1.1 hsuenaga #define MAXROUNDS 14 114 1.1 hsuenaga STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int, 115 1.1 hsuenaga uint8_t[MAXROUNDS+1][4][MAXBC]); 116 1.1 hsuenaga STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int); 117 1.1 hsuenaga 118 1.1 hsuenaga /* 119 1.1 hsuenaga * device driver autoconf interface 120 1.1 hsuenaga */ 121 1.1 hsuenaga STATIC int mvxpsec_match(device_t, cfdata_t, void *); 122 1.1 hsuenaga STATIC void mvxpsec_attach(device_t, device_t, void *); 123 1.1 hsuenaga STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *); 124 1.1 hsuenaga 125 1.1 hsuenaga /* 126 1.1 hsuenaga * register setup 127 1.1 hsuenaga */ 128 1.1 hsuenaga STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *); 129 1.1 hsuenaga 130 1.1 hsuenaga /* 131 1.1 hsuenaga * timer(callout) interface 132 1.1 hsuenaga * 133 1.1 hsuenaga * XXX: callout is not MP safe... 134 1.1 hsuenaga */ 135 1.1 hsuenaga STATIC void mvxpsec_timer(void *); 136 1.1 hsuenaga 137 1.1 hsuenaga /* 138 1.1 hsuenaga * interrupt interface 139 1.1 hsuenaga */ 140 1.1 hsuenaga STATIC int mvxpsec_intr(void *); 141 1.1 hsuenaga INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *); 142 1.1 hsuenaga STATIC int mvxpsec_eintr(void *); 143 1.1 hsuenaga STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *); 144 1.1 hsuenaga STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *); 145 1.1 hsuenaga INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int); 146 1.1 hsuenaga 147 1.1 hsuenaga /* 148 1.1 hsuenaga * memory allocators and VM management 149 1.1 hsuenaga */ 150 1.1 hsuenaga STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *, 151 1.1 hsuenaga paddr_t, int); 152 1.1 hsuenaga STATIC int mvxpsec_init_sram(struct mvxpsec_softc *); 153 1.1 hsuenaga 154 1.1 hsuenaga /* 155 1.1 hsuenaga * Low-level DMA interface 156 1.1 hsuenaga */ 157 1.1 hsuenaga STATIC int mvxpsec_init_dma(struct mvxpsec_softc *, 158 1.1 hsuenaga struct marvell_attach_args *); 159 1.1 hsuenaga INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *); 160 1.1 hsuenaga INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *); 161 1.1 hsuenaga INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *); 162 1.1 hsuenaga _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *); 163 1.1 hsuenaga INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *, 164 1.1 hsuenaga uint32_t, uint32_t, uint32_t); 165 1.1 hsuenaga INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *, 166 1.1 hsuenaga struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *); 167 1.1 hsuenaga 168 1.1 hsuenaga /* 169 1.1 hsuenaga * High-level DMA interface 170 1.1 hsuenaga */ 171 1.1 hsuenaga INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *, 172 1.1 hsuenaga mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t); 173 1.1 hsuenaga INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *, 174 1.1 hsuenaga mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t); 175 1.1 hsuenaga INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *, 176 1.1 hsuenaga mvxpsec_dma_ring *); 177 1.1 hsuenaga INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *, 178 1.1 hsuenaga mvxpsec_dma_ring *); 179 1.1 hsuenaga INLINE void mvxpsec_dma_free(struct mvxpsec_softc *, 180 1.1 hsuenaga mvxpsec_dma_ring *); 181 1.1 hsuenaga INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *); 182 1.1 hsuenaga INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *); 183 1.1 hsuenaga 184 1.1 hsuenaga /* 185 1.1 hsuenaga * Session management interface (OpenCrypto) 186 1.1 hsuenaga */ 187 1.1 hsuenaga #define MVXPSEC_SESSION(sid) ((sid) & 0x0fffffff) 188 1.1 hsuenaga #define MVXPSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff)) 189 1.1 hsuenaga /* pool management */ 190 1.1 hsuenaga STATIC int mvxpsec_session_ctor(void *, void *, int); 191 1.1 hsuenaga STATIC void mvxpsec_session_dtor(void *, void *); 192 1.1 hsuenaga STATIC int mvxpsec_packet_ctor(void *, void *, int); 193 1.1 hsuenaga STATIC void mvxpsec_packet_dtor(void *, void *); 194 1.1 hsuenaga 195 1.1 hsuenaga /* session management */ 196 1.1 hsuenaga STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *); 197 1.1 hsuenaga STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *); 198 1.1 hsuenaga INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int); 199 1.1 hsuenaga INLINE int mvxpsec_session_ref(struct mvxpsec_session *); 200 1.1 hsuenaga INLINE void mvxpsec_session_unref(struct mvxpsec_session *); 201 1.1 hsuenaga 202 1.1 hsuenaga /* packet management */ 203 1.1 hsuenaga STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *); 204 1.1 hsuenaga INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *); 205 1.1 hsuenaga STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *); 206 1.1 hsuenaga STATIC int mvxpsec_done_packet(struct mvxpsec_packet *); 207 1.1 hsuenaga 208 1.1 hsuenaga /* session header manegement */ 209 1.1 hsuenaga STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *); 210 1.1 hsuenaga 211 1.1 hsuenaga /* packet queue management */ 212 1.1 hsuenaga INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int); 213 1.1 hsuenaga STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *); 214 1.1 hsuenaga 215 1.4 msaitoh /* opencrypto operation */ 216 1.1 hsuenaga INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *); 217 1.1 hsuenaga INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *); 218 1.1 hsuenaga 219 1.1 hsuenaga /* payload data management */ 220 1.1 hsuenaga INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *); 221 1.1 hsuenaga STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t); 222 1.1 hsuenaga STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *); 223 1.1 hsuenaga STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *); 224 1.1 hsuenaga STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *); 225 1.1 hsuenaga _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *); 226 1.1 hsuenaga STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int); 227 1.1 hsuenaga STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int); 228 1.1 hsuenaga 229 1.1 hsuenaga /* key pre-computation */ 230 1.1 hsuenaga STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *); 231 1.1 hsuenaga STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *); 232 1.1 hsuenaga 233 1.1 hsuenaga /* crypto operation management */ 234 1.1 hsuenaga INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *); 235 1.1 hsuenaga INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int); 236 1.1 hsuenaga 237 1.1 hsuenaga /* 238 1.1 hsuenaga * parameter converters 239 1.1 hsuenaga */ 240 1.1 hsuenaga INLINE uint32_t mvxpsec_alg2acc(uint32_t alg); 241 1.1 hsuenaga INLINE uint32_t mvxpsec_aesklen(int klen); 242 1.1 hsuenaga 243 1.1 hsuenaga /* 244 1.1 hsuenaga * string formatters 245 1.1 hsuenaga */ 246 1.1 hsuenaga _STATIC const char *s_ctrlreg(uint32_t); 247 1.1 hsuenaga _STATIC const char *s_winreg(uint32_t); 248 1.1 hsuenaga _STATIC const char *s_errreg(uint32_t); 249 1.1 hsuenaga _STATIC const char *s_xpsecintr(uint32_t); 250 1.1 hsuenaga _STATIC const char *s_ctlalg(uint32_t); 251 1.1 hsuenaga _STATIC const char *s_xpsec_op(uint32_t); 252 1.1 hsuenaga _STATIC const char *s_xpsec_enc(uint32_t); 253 1.1 hsuenaga _STATIC const char *s_xpsec_mac(uint32_t); 254 1.1 hsuenaga _STATIC const char *s_xpsec_frag(uint32_t); 255 1.1 hsuenaga 256 1.1 hsuenaga /* 257 1.1 hsuenaga * debugging supports 258 1.1 hsuenaga */ 259 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 260 1.1 hsuenaga _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *); 261 1.1 hsuenaga _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *); 262 1.1 hsuenaga _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t); 263 1.1 hsuenaga _STATIC void mvxpsec_dump_data(const char *, void *, size_t); 264 1.1 hsuenaga 265 1.1 hsuenaga _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *); 266 1.1 hsuenaga _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *); 267 1.1 hsuenaga _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *); 268 1.1 hsuenaga 269 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t); 270 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t); 271 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t); 272 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t); 273 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t); 274 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t); 275 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t); 276 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t); 277 1.1 hsuenaga #endif 278 1.1 hsuenaga 279 1.1 hsuenaga /* 280 1.1 hsuenaga * global configurations, params, work spaces, ... 281 1.1 hsuenaga * 282 1.1 hsuenaga * XXX: use sysctl for global configurations 283 1.1 hsuenaga */ 284 1.1 hsuenaga /* waiting for device */ 285 1.1 hsuenaga static int mvxpsec_wait_interval = 10; /* usec */ 286 1.1 hsuenaga static int mvxpsec_wait_retry = 100; /* times = wait for 1 [msec] */ 287 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 288 1.1 hsuenaga static uint32_t mvxpsec_debug = MVXPSEC_DEBUG; /* debug level */ 289 1.1 hsuenaga #endif 290 1.1 hsuenaga 291 1.1 hsuenaga /* 292 1.1 hsuenaga * Register accessors 293 1.1 hsuenaga */ 294 1.1 hsuenaga #define MVXPSEC_WRITE(sc, off, val) \ 295 1.1 hsuenaga bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val)) 296 1.1 hsuenaga #define MVXPSEC_READ(sc, off) \ 297 1.1 hsuenaga bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off)) 298 1.1 hsuenaga 299 1.1 hsuenaga /* 300 1.1 hsuenaga * device driver autoconf interface 301 1.1 hsuenaga */ 302 1.1 hsuenaga CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc), 303 1.1 hsuenaga mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL); 304 1.1 hsuenaga 305 1.1 hsuenaga STATIC int 306 1.1 hsuenaga mvxpsec_match(device_t dev, cfdata_t match, void *aux) 307 1.1 hsuenaga { 308 1.1 hsuenaga struct marvell_attach_args *mva = aux; 309 1.1 hsuenaga uint32_t tag; 310 1.1 hsuenaga int window; 311 1.1 hsuenaga 312 1.1 hsuenaga if (strcmp(mva->mva_name, match->cf_name) != 0) 313 1.1 hsuenaga return 0; 314 1.1 hsuenaga if (mva->mva_offset == MVA_OFFSET_DEFAULT) 315 1.1 hsuenaga return 0; 316 1.1 hsuenaga 317 1.1 hsuenaga switch (mva->mva_unit) { 318 1.1 hsuenaga case 0: 319 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT0; 320 1.1 hsuenaga break; 321 1.1 hsuenaga case 1: 322 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT1; 323 1.1 hsuenaga break; 324 1.1 hsuenaga default: 325 1.1 hsuenaga aprint_error_dev(dev, 326 1.1 hsuenaga "unit %d is not supported\n", mva->mva_unit); 327 1.1 hsuenaga return 0; 328 1.1 hsuenaga } 329 1.1 hsuenaga 330 1.1 hsuenaga window = mvsoc_target(tag, NULL, NULL, NULL, NULL); 331 1.1 hsuenaga if (window >= nwindow) { 332 1.1 hsuenaga aprint_error_dev(dev, 333 1.1 hsuenaga "Security Accelerator SRAM is not configured.\n"); 334 1.1 hsuenaga return 0; 335 1.1 hsuenaga } 336 1.1 hsuenaga 337 1.1 hsuenaga return 1; 338 1.1 hsuenaga } 339 1.1 hsuenaga 340 1.1 hsuenaga STATIC void 341 1.1 hsuenaga mvxpsec_attach(device_t parent, device_t self, void *aux) 342 1.1 hsuenaga { 343 1.1 hsuenaga struct marvell_attach_args *mva = aux; 344 1.1 hsuenaga struct mvxpsec_softc *sc = device_private(self); 345 1.1 hsuenaga int v; 346 1.1 hsuenaga int i; 347 1.1 hsuenaga 348 1.1 hsuenaga sc->sc_dev = self; 349 1.1 hsuenaga 350 1.1 hsuenaga aprint_normal(": Marvell Crypto Engines and Security Accelerator\n"); 351 1.1 hsuenaga aprint_naive("\n"); 352 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET 353 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n"); 354 1.1 hsuenaga #else 355 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n"); 356 1.1 hsuenaga #endif 357 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, 358 1.1 hsuenaga "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS); 359 1.1 hsuenaga 360 1.1 hsuenaga /* mutex */ 361 1.1 hsuenaga mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET); 362 1.1 hsuenaga mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET); 363 1.1 hsuenaga mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET); 364 1.1 hsuenaga 365 1.1 hsuenaga /* Packet queue */ 366 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_wait_queue); 367 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_run_queue); 368 1.1 hsuenaga SLIST_INIT(&sc->sc_free_list); 369 1.1 hsuenaga sc->sc_wait_qlen = 0; 370 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET 371 1.1 hsuenaga sc->sc_wait_qlimit = 16; 372 1.1 hsuenaga #else 373 1.1 hsuenaga sc->sc_wait_qlimit = 0; 374 1.1 hsuenaga #endif 375 1.1 hsuenaga sc->sc_free_qlen = 0; 376 1.1 hsuenaga 377 1.1 hsuenaga /* Timer */ 378 1.1 hsuenaga callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */ 379 1.1 hsuenaga callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc); 380 1.1 hsuenaga 381 1.1 hsuenaga /* I/O */ 382 1.1 hsuenaga sc->sc_iot = mva->mva_iot; 383 1.1 hsuenaga if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 384 1.1 hsuenaga mva->mva_offset, mva->mva_size, &sc->sc_ioh)) { 385 1.1 hsuenaga aprint_error_dev(self, "Cannot map registers\n"); 386 1.1 hsuenaga return; 387 1.1 hsuenaga } 388 1.1 hsuenaga 389 1.1 hsuenaga /* DMA */ 390 1.1 hsuenaga sc->sc_dmat = mva->mva_dmat; 391 1.1 hsuenaga if (mvxpsec_init_dma(sc, mva) < 0) 392 1.1 hsuenaga return; 393 1.1 hsuenaga 394 1.1 hsuenaga /* SRAM */ 395 1.1 hsuenaga if (mvxpsec_init_sram(sc) < 0) 396 1.1 hsuenaga return; 397 1.1 hsuenaga 398 1.1 hsuenaga /* Registers */ 399 1.1 hsuenaga mvxpsec_wininit(sc, mva->mva_tags); 400 1.1 hsuenaga 401 1.1 hsuenaga /* INTR */ 402 1.1 hsuenaga MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT); 403 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR); 404 1.12 riastrad sc->sc_done_ih = 405 1.1 hsuenaga marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc); 406 1.14 andvar /* XXX: should pass error IRQ using mva */ 407 1.1 hsuenaga sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc), 408 1.1 hsuenaga IPL_NET, mvxpsec_eintr, sc); 409 1.1 hsuenaga aprint_normal_dev(self, 410 1.1 hsuenaga "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc)); 411 1.1 hsuenaga 412 1.1 hsuenaga /* Initialize TDMA (It's enabled here, but waiting for SA) */ 413 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0) 414 1.1 hsuenaga panic("%s: DMA DEVICE not responding\n", __func__); 415 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0); 416 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0); 417 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_DST, 0); 418 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0); 419 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0); 420 1.1 hsuenaga v = MVXPSEC_READ(sc, MV_TDMA_CONTROL); 421 1.1 hsuenaga v |= MV_TDMA_CONTROL_ENABLE; 422 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v); 423 1.1 hsuenaga 424 1.1 hsuenaga /* Initialize SA */ 425 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0) 426 1.1 hsuenaga panic("%s: MVXPSEC not responding\n", __func__); 427 1.1 hsuenaga v = MVXPSEC_READ(sc, MV_ACC_CONFIG); 428 1.1 hsuenaga v &= ~MV_ACC_CONFIG_STOP_ON_ERR; 429 1.1 hsuenaga v |= MV_ACC_CONFIG_MULT_PKT; 430 1.1 hsuenaga v |= MV_ACC_CONFIG_WAIT_TDMA; 431 1.1 hsuenaga v |= MV_ACC_CONFIG_ACT_TDMA; 432 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v); 433 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_DESC, 0); 434 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP); 435 1.1 hsuenaga 436 1.1 hsuenaga /* Session */ 437 1.12 riastrad sc->sc_session_pool = 438 1.1 hsuenaga pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0, 439 1.1 hsuenaga "mvxpsecpl", NULL, IPL_NET, 440 1.1 hsuenaga mvxpsec_session_ctor, mvxpsec_session_dtor, sc); 441 1.1 hsuenaga pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS); 442 1.1 hsuenaga pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2); 443 1.1 hsuenaga sc->sc_last_session = NULL; 444 1.1 hsuenaga 445 1.18 andvar /* Packet */ 446 1.1 hsuenaga sc->sc_packet_pool = 447 1.1 hsuenaga pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0, 448 1.1 hsuenaga "mvxpsec_pktpl", NULL, IPL_NET, 449 1.1 hsuenaga mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc); 450 1.1 hsuenaga pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS); 451 1.1 hsuenaga pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2); 452 1.1 hsuenaga 453 1.1 hsuenaga /* Register to EVCNT framework */ 454 1.1 hsuenaga mvxpsec_evcnt_attach(sc); 455 1.1 hsuenaga 456 1.1 hsuenaga /* Register to Opencrypto */ 457 1.1 hsuenaga for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) { 458 1.1 hsuenaga sc->sc_sessions[i] = NULL; 459 1.1 hsuenaga } 460 1.1 hsuenaga if (mvxpsec_register(sc)) 461 1.1 hsuenaga panic("cannot initialize OpenCrypto module.\n"); 462 1.1 hsuenaga 463 1.1 hsuenaga return; 464 1.1 hsuenaga } 465 1.1 hsuenaga 466 1.1 hsuenaga STATIC void 467 1.1 hsuenaga mvxpsec_evcnt_attach(struct mvxpsec_softc *sc) 468 1.1 hsuenaga { 469 1.1 hsuenaga struct mvxpsec_evcnt *sc_ev = &sc->sc_ev; 470 1.1 hsuenaga 471 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR, 472 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Main Intr."); 473 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR, 474 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Auth Intr."); 475 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR, 476 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "DES Intr."); 477 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR, 478 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr."); 479 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR, 480 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr."); 481 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR, 482 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Crypto Intr."); 483 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR, 484 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "SA Intr."); 485 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR, 486 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AccTDMA Intr."); 487 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR, 488 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr."); 489 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR, 490 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr."); 491 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR, 492 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr."); 493 1.1 hsuenaga 494 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC, 495 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "New-Session"); 496 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC, 497 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Free-Session"); 498 1.1 hsuenaga 499 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC, 500 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-OK"); 501 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC, 502 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-ERR"); 503 1.1 hsuenaga 504 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC, 505 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-Dispatch"); 506 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC, 507 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Queue-Dispatch"); 508 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC, 509 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Queue-Full"); 510 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC, 511 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Max-Dispatch"); 512 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC, 513 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Max-Done"); 514 1.1 hsuenaga } 515 1.1 hsuenaga 516 1.1 hsuenaga /* 517 1.1 hsuenaga * Register setup 518 1.1 hsuenaga */ 519 1.1 hsuenaga STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags) 520 1.1 hsuenaga { 521 1.1 hsuenaga device_t pdev = device_parent(sc->sc_dev); 522 1.1 hsuenaga uint64_t base; 523 1.1 hsuenaga uint32_t size, reg; 524 1.1 hsuenaga int window, target, attr, rv, i; 525 1.1 hsuenaga 526 1.1 hsuenaga /* disable all window */ 527 1.1 hsuenaga for (window = 0; window < MV_TDMA_NWINDOW; window++) 528 1.1 hsuenaga { 529 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0); 530 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0); 531 1.1 hsuenaga } 532 1.1 hsuenaga 533 1.1 hsuenaga for (window = 0, i = 0; 534 1.1 hsuenaga tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) { 535 1.1 hsuenaga rv = marvell_winparams_by_tag(pdev, tags[i], 536 1.1 hsuenaga &target, &attr, &base, &size); 537 1.1 hsuenaga if (rv != 0 || size == 0) 538 1.1 hsuenaga continue; 539 1.1 hsuenaga 540 1.1 hsuenaga if (base > 0xffffffffULL) { 541 1.1 hsuenaga aprint_error_dev(sc->sc_dev, 542 1.1 hsuenaga "can't remap window %d\n", window); 543 1.1 hsuenaga continue; 544 1.1 hsuenaga } 545 1.1 hsuenaga 546 1.1 hsuenaga reg = MV_TDMA_BAR_BASE(base); 547 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg); 548 1.1 hsuenaga 549 1.1 hsuenaga reg = MV_TDMA_ATTR_TARGET(target); 550 1.1 hsuenaga reg |= MV_TDMA_ATTR_ATTR(attr); 551 1.1 hsuenaga reg |= MV_TDMA_ATTR_SIZE(size); 552 1.1 hsuenaga reg |= MV_TDMA_ATTR_ENABLE; 553 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg); 554 1.1 hsuenaga 555 1.1 hsuenaga window++; 556 1.1 hsuenaga } 557 1.1 hsuenaga 558 1.1 hsuenaga return 0; 559 1.1 hsuenaga } 560 1.1 hsuenaga 561 1.1 hsuenaga /* 562 1.1 hsuenaga * Timer handling 563 1.1 hsuenaga */ 564 1.1 hsuenaga STATIC void 565 1.1 hsuenaga mvxpsec_timer(void *aux) 566 1.1 hsuenaga { 567 1.1 hsuenaga struct mvxpsec_softc *sc = aux; 568 1.1 hsuenaga struct mvxpsec_packet *mv_p; 569 1.1 hsuenaga uint32_t reg; 570 1.1 hsuenaga int ndone; 571 1.1 hsuenaga int refill; 572 1.1 hsuenaga int s; 573 1.1 hsuenaga 574 1.1 hsuenaga /* IPL_SOFTCLOCK */ 575 1.1 hsuenaga 576 1.1 hsuenaga log(LOG_ERR, "%s: device timeout.\n", __func__); 577 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 578 1.1 hsuenaga mvxpsec_dump_reg(sc); 579 1.1 hsuenaga #endif 580 1.12 riastrad 581 1.1 hsuenaga s = splnet(); 582 1.1 hsuenaga /* stop security accelerator */ 583 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP); 584 1.1 hsuenaga 585 1.1 hsuenaga /* stop TDMA */ 586 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0); 587 1.1 hsuenaga 588 1.1 hsuenaga /* cleanup packet queue */ 589 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx); 590 1.1 hsuenaga ndone = 0; 591 1.1 hsuenaga while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) { 592 1.1 hsuenaga SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue); 593 1.1 hsuenaga 594 1.1 hsuenaga mv_p->crp->crp_etype = EINVAL; 595 1.1 hsuenaga mvxpsec_done_packet(mv_p); 596 1.1 hsuenaga ndone++; 597 1.1 hsuenaga } 598 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_done, ndone); 599 1.1 hsuenaga sc->sc_flags &= ~HW_RUNNING; 600 1.1 hsuenaga refill = (sc->sc_wait_qlen > 0) ? 1 : 0; 601 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 602 1.1 hsuenaga 603 1.1 hsuenaga /* reenable TDMA */ 604 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0) 605 1.1 hsuenaga panic("%s: failed to reset DMA DEVICE. give up.", __func__); 606 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0); 607 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0); 608 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_DST, 0); 609 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0); 610 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0); 611 1.1 hsuenaga reg = MV_TDMA_DEFAULT_CONTROL; 612 1.1 hsuenaga reg |= MV_TDMA_CONTROL_ENABLE; 613 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg); 614 1.1 hsuenaga 615 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0) 616 1.1 hsuenaga panic("%s: failed to reset MVXPSEC. give up.", __func__); 617 1.1 hsuenaga reg = MV_ACC_CONFIG_MULT_PKT; 618 1.1 hsuenaga reg |= MV_ACC_CONFIG_WAIT_TDMA; 619 1.1 hsuenaga reg |= MV_ACC_CONFIG_ACT_TDMA; 620 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg); 621 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_DESC, 0); 622 1.1 hsuenaga 623 1.1 hsuenaga if (refill) { 624 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx); 625 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 626 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 627 1.1 hsuenaga } 628 1.1 hsuenaga 629 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ); 630 1.1 hsuenaga splx(s); 631 1.1 hsuenaga } 632 1.1 hsuenaga 633 1.1 hsuenaga /* 634 1.1 hsuenaga * DMA handling 635 1.1 hsuenaga */ 636 1.1 hsuenaga 637 1.1 hsuenaga /* 638 1.1 hsuenaga * Allocate kernel devmem and DMA safe memory with bus_dma API 639 1.1 hsuenaga * used for DMA descriptors. 640 1.1 hsuenaga * 641 1.1 hsuenaga * if phys != 0, assume phys is a DMA safe memory and bypass 642 1.1 hsuenaga * allocator. 643 1.1 hsuenaga */ 644 1.1 hsuenaga STATIC struct mvxpsec_devmem * 645 1.1 hsuenaga mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size) 646 1.1 hsuenaga { 647 1.1 hsuenaga struct mvxpsec_devmem *devmem; 648 1.1 hsuenaga bus_dma_segment_t seg; 649 1.1 hsuenaga int rseg; 650 1.1 hsuenaga int err; 651 1.1 hsuenaga 652 1.1 hsuenaga if (sc == NULL) 653 1.1 hsuenaga return NULL; 654 1.1 hsuenaga 655 1.3 chs devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP); 656 1.1 hsuenaga devmem->size = size; 657 1.1 hsuenaga 658 1.1 hsuenaga if (phys) { 659 1.1 hsuenaga seg.ds_addr = phys; 660 1.1 hsuenaga seg.ds_len = devmem->size; 661 1.1 hsuenaga rseg = 1; 662 1.1 hsuenaga err = 0; 663 1.1 hsuenaga } 664 1.1 hsuenaga else { 665 1.1 hsuenaga err = bus_dmamem_alloc(sc->sc_dmat, 666 1.1 hsuenaga devmem->size, PAGE_SIZE, 0, 667 1.1 hsuenaga &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT); 668 1.1 hsuenaga } 669 1.1 hsuenaga if (err) { 670 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n"); 671 1.1 hsuenaga goto fail_kmem_free; 672 1.1 hsuenaga } 673 1.1 hsuenaga 674 1.1 hsuenaga err = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 675 1.1 hsuenaga devmem->size, &devmem->kva, BUS_DMA_NOWAIT); 676 1.1 hsuenaga if (err) { 677 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n"); 678 1.1 hsuenaga goto fail_dmamem_free; 679 1.1 hsuenaga } 680 1.1 hsuenaga 681 1.1 hsuenaga err = bus_dmamap_create(sc->sc_dmat, 682 1.1 hsuenaga size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map); 683 1.1 hsuenaga if (err) { 684 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't create DMA map\n"); 685 1.1 hsuenaga goto fail_unmap; 686 1.1 hsuenaga } 687 1.1 hsuenaga 688 1.1 hsuenaga err = bus_dmamap_load(sc->sc_dmat, 689 1.1 hsuenaga devmem->map, devmem->kva, devmem->size, NULL, 690 1.1 hsuenaga BUS_DMA_NOWAIT); 691 1.1 hsuenaga if (err) { 692 1.1 hsuenaga aprint_error_dev(sc->sc_dev, 693 1.1 hsuenaga "can't load DMA buffer VA:%p PA:0x%08x\n", 694 1.1 hsuenaga devmem->kva, (int)seg.ds_addr); 695 1.1 hsuenaga goto fail_destroy; 696 1.1 hsuenaga } 697 1.1 hsuenaga 698 1.1 hsuenaga return devmem; 699 1.1 hsuenaga 700 1.1 hsuenaga fail_destroy: 701 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, devmem->map); 702 1.1 hsuenaga fail_unmap: 703 1.1 hsuenaga bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size); 704 1.1 hsuenaga fail_dmamem_free: 705 1.1 hsuenaga bus_dmamem_free(sc->sc_dmat, &seg, rseg); 706 1.1 hsuenaga fail_kmem_free: 707 1.1 hsuenaga kmem_free(devmem, sizeof(*devmem)); 708 1.1 hsuenaga 709 1.1 hsuenaga return NULL; 710 1.1 hsuenaga } 711 1.1 hsuenaga 712 1.1 hsuenaga /* 713 1.1 hsuenaga * Get DMA Descriptor from (DMA safe) descriptor pool. 714 1.1 hsuenaga */ 715 1.1 hsuenaga INLINE struct mvxpsec_descriptor_handle * 716 1.1 hsuenaga mvxpsec_dma_getdesc(struct mvxpsec_softc *sc) 717 1.1 hsuenaga { 718 1.1 hsuenaga struct mvxpsec_descriptor_handle *entry; 719 1.1 hsuenaga 720 1.1 hsuenaga /* must called with sc->sc_dma_mtx held */ 721 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx)); 722 1.1 hsuenaga 723 1.1 hsuenaga if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons) 724 1.1 hsuenaga return NULL; 725 1.1 hsuenaga 726 1.1 hsuenaga entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod]; 727 1.1 hsuenaga sc->sc_desc_ring_prod++; 728 1.1 hsuenaga if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size) 729 1.1 hsuenaga sc->sc_desc_ring_prod -= sc->sc_desc_ring_size; 730 1.1 hsuenaga 731 1.1 hsuenaga return entry; 732 1.1 hsuenaga } 733 1.1 hsuenaga 734 1.1 hsuenaga /* 735 1.1 hsuenaga * Put DMA Descriptor to descriptor pool. 736 1.1 hsuenaga */ 737 1.1 hsuenaga _INLINE void 738 1.1 hsuenaga mvxpsec_dma_putdesc(struct mvxpsec_softc *sc, 739 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh) 740 1.1 hsuenaga { 741 1.1 hsuenaga /* must called with sc->sc_dma_mtx held */ 742 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx)); 743 1.1 hsuenaga 744 1.1 hsuenaga sc->sc_desc_ring_cons++; 745 1.1 hsuenaga if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size) 746 1.1 hsuenaga sc->sc_desc_ring_cons -= sc->sc_desc_ring_size; 747 1.1 hsuenaga 748 1.1 hsuenaga return; 749 1.1 hsuenaga } 750 1.1 hsuenaga 751 1.1 hsuenaga /* 752 1.1 hsuenaga * Setup DMA Descriptor 753 1.1 hsuenaga * copy from 'src' to 'dst' by 'size' bytes. 754 1.1 hsuenaga * 'src' or 'dst' must be SRAM address. 755 1.1 hsuenaga */ 756 1.1 hsuenaga INLINE void 757 1.1 hsuenaga mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh, 758 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size) 759 1.1 hsuenaga { 760 1.1 hsuenaga struct mvxpsec_descriptor *desc; 761 1.1 hsuenaga 762 1.1 hsuenaga desc = (struct mvxpsec_descriptor *)dh->_desc; 763 1.1 hsuenaga 764 1.1 hsuenaga desc->tdma_dst = dst; 765 1.1 hsuenaga desc->tdma_src = src; 766 1.1 hsuenaga desc->tdma_word0 = size; 767 1.1 hsuenaga if (size != 0) 768 1.1 hsuenaga desc->tdma_word0 |= MV_TDMA_CNT_OWN; 769 1.1 hsuenaga /* size == 0 is owned by ACC, not TDMA */ 770 1.1 hsuenaga 771 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 772 1.1 hsuenaga mvxpsec_dump_dmaq(dh); 773 1.1 hsuenaga #endif 774 1.1 hsuenaga } 775 1.1 hsuenaga 776 1.1 hsuenaga /* 777 1.1 hsuenaga * Concat 2 DMA 778 1.1 hsuenaga */ 779 1.1 hsuenaga INLINE void 780 1.1 hsuenaga mvxpsec_dma_cat(struct mvxpsec_softc *sc, 781 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh1, 782 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh2) 783 1.1 hsuenaga { 784 1.1 hsuenaga ((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr; 785 1.1 hsuenaga MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE); 786 1.1 hsuenaga } 787 1.1 hsuenaga 788 1.1 hsuenaga /* 789 1.1 hsuenaga * Schedule DMA Copy 790 1.1 hsuenaga */ 791 1.1 hsuenaga INLINE int 792 1.1 hsuenaga mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r, 793 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size) 794 1.1 hsuenaga { 795 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh; 796 1.1 hsuenaga 797 1.1 hsuenaga dh = mvxpsec_dma_getdesc(sc); 798 1.1 hsuenaga if (dh == NULL) { 799 1.1 hsuenaga log(LOG_ERR, "%s: descriptor full\n", __func__); 800 1.1 hsuenaga return -1; 801 1.1 hsuenaga } 802 1.1 hsuenaga 803 1.1 hsuenaga mvxpsec_dma_setup(dh, dst, src, size); 804 1.1 hsuenaga if (r->dma_head == NULL) { 805 1.1 hsuenaga r->dma_head = dh; 806 1.1 hsuenaga r->dma_last = dh; 807 1.1 hsuenaga r->dma_size = 1; 808 1.1 hsuenaga } 809 1.1 hsuenaga else { 810 1.1 hsuenaga mvxpsec_dma_cat(sc, r->dma_last, dh); 811 1.1 hsuenaga r->dma_last = dh; 812 1.1 hsuenaga r->dma_size++; 813 1.1 hsuenaga } 814 1.1 hsuenaga 815 1.1 hsuenaga return 0; 816 1.1 hsuenaga } 817 1.1 hsuenaga 818 1.1 hsuenaga INLINE int 819 1.1 hsuenaga mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r, 820 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size) 821 1.1 hsuenaga { 822 1.1 hsuenaga if (size == 0) /* 0 is very special descriptor */ 823 1.1 hsuenaga return 0; 824 1.1 hsuenaga 825 1.1 hsuenaga return mvxpsec_dma_copy0(sc, r, dst, src, size); 826 1.1 hsuenaga } 827 1.1 hsuenaga 828 1.1 hsuenaga /* 829 1.1 hsuenaga * Schedule ACC Activate 830 1.1 hsuenaga */ 831 1.1 hsuenaga INLINE int 832 1.1 hsuenaga mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r) 833 1.1 hsuenaga { 834 1.1 hsuenaga return mvxpsec_dma_copy0(sc, r, 0, 0, 0); 835 1.1 hsuenaga } 836 1.1 hsuenaga 837 1.1 hsuenaga /* 838 1.1 hsuenaga * Finalize DMA setup 839 1.1 hsuenaga */ 840 1.1 hsuenaga INLINE void 841 1.1 hsuenaga mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r) 842 1.1 hsuenaga { 843 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh; 844 1.1 hsuenaga 845 1.1 hsuenaga dh = r->dma_last; 846 1.1 hsuenaga ((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0; 847 1.1 hsuenaga MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE); 848 1.1 hsuenaga } 849 1.1 hsuenaga 850 1.1 hsuenaga /* 851 1.1 hsuenaga * Free entire DMA ring 852 1.1 hsuenaga */ 853 1.1 hsuenaga INLINE void 854 1.1 hsuenaga mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r) 855 1.1 hsuenaga { 856 1.1 hsuenaga sc->sc_desc_ring_cons += r->dma_size; 857 1.1 hsuenaga if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size) 858 1.1 hsuenaga sc->sc_desc_ring_cons -= sc->sc_desc_ring_size; 859 1.1 hsuenaga r->dma_head = NULL; 860 1.1 hsuenaga r->dma_last = NULL; 861 1.1 hsuenaga r->dma_size = 0; 862 1.1 hsuenaga } 863 1.1 hsuenaga 864 1.1 hsuenaga /* 865 1.1 hsuenaga * create DMA descriptor chain for the packet 866 1.1 hsuenaga */ 867 1.1 hsuenaga INLINE int 868 1.1 hsuenaga mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p) 869 1.1 hsuenaga { 870 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 871 1.1 hsuenaga uint32_t src, dst, len; 872 1.1 hsuenaga uint32_t pkt_off, pkt_off_r; 873 1.1 hsuenaga int err; 874 1.1 hsuenaga int i; 875 1.1 hsuenaga 876 1.1 hsuenaga /* must called with sc->sc_dma_mtx held */ 877 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx)); 878 1.1 hsuenaga 879 1.1 hsuenaga /* 880 1.1 hsuenaga * set offset for mem->device copy 881 1.1 hsuenaga * 882 1.1 hsuenaga * typical packet image: 883 1.1 hsuenaga * 884 1.1 hsuenaga * enc_ivoff 885 1.1 hsuenaga * mac_off 886 1.1 hsuenaga * | 887 1.1 hsuenaga * | enc_off 888 1.1 hsuenaga * | | 889 1.1 hsuenaga * v v 890 1.1 hsuenaga * +----+--------... 891 1.12 riastrad * |IV |DATA 892 1.1 hsuenaga * +----+--------... 893 1.1 hsuenaga */ 894 1.1 hsuenaga pkt_off = 0; 895 1.1 hsuenaga if (mv_p->mac_off > 0) 896 1.1 hsuenaga pkt_off = mv_p->mac_off; 897 1.1 hsuenaga if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff) 898 1.1 hsuenaga pkt_off = mv_p->enc_ivoff; 899 1.1 hsuenaga if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off) 900 1.1 hsuenaga pkt_off = mv_p->enc_off; 901 1.1 hsuenaga pkt_off_r = pkt_off; 902 1.1 hsuenaga 903 1.1 hsuenaga /* make DMA descriptors to copy packet header: DRAM -> SRAM */ 904 1.1 hsuenaga dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc); 905 1.1 hsuenaga src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr; 906 1.1 hsuenaga len = sizeof(mv_p->pkt_header); 907 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len); 908 1.1 hsuenaga if (__predict_false(err)) 909 1.1 hsuenaga return err; 910 1.1 hsuenaga 911 1.12 riastrad /* 912 1.1 hsuenaga * make DMA descriptors to copy session header: DRAM -> SRAM 913 1.1 hsuenaga * we can reuse session header on SRAM if session is not changed. 914 1.1 hsuenaga */ 915 1.1 hsuenaga if (sc->sc_last_session != mv_s) { 916 1.1 hsuenaga dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc); 917 1.1 hsuenaga src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr; 918 1.1 hsuenaga len = sizeof(mv_s->session_header); 919 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len); 920 1.1 hsuenaga if (__predict_false(err)) 921 1.1 hsuenaga return err; 922 1.1 hsuenaga sc->sc_last_session = mv_s; 923 1.1 hsuenaga } 924 1.1 hsuenaga 925 1.1 hsuenaga /* make DMA descriptor to copy payload data: DRAM -> SRAM */ 926 1.1 hsuenaga dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0); 927 1.1 hsuenaga for (i = 0; i < mv_p->data_map->dm_nsegs; i++) { 928 1.1 hsuenaga src = mv_p->data_map->dm_segs[i].ds_addr; 929 1.1 hsuenaga len = mv_p->data_map->dm_segs[i].ds_len; 930 1.1 hsuenaga if (pkt_off) { 931 1.1 hsuenaga if (len <= pkt_off) { 932 1.1 hsuenaga /* ignore the segment */ 933 1.1 hsuenaga dst += len; 934 1.1 hsuenaga pkt_off -= len; 935 1.1 hsuenaga continue; 936 1.1 hsuenaga } 937 1.1 hsuenaga /* copy from the middle of the segment */ 938 1.1 hsuenaga dst += pkt_off; 939 1.1 hsuenaga src += pkt_off; 940 1.1 hsuenaga len -= pkt_off; 941 1.1 hsuenaga pkt_off = 0; 942 1.1 hsuenaga } 943 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len); 944 1.1 hsuenaga if (__predict_false(err)) 945 1.1 hsuenaga return err; 946 1.1 hsuenaga dst += len; 947 1.1 hsuenaga } 948 1.1 hsuenaga 949 1.1 hsuenaga /* make special descriptor to activate security accelerator */ 950 1.1 hsuenaga err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring); 951 1.1 hsuenaga if (__predict_false(err)) 952 1.1 hsuenaga return err; 953 1.1 hsuenaga 954 1.1 hsuenaga /* make DMA descriptors to copy payload: SRAM -> DRAM */ 955 1.1 hsuenaga src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0); 956 1.1 hsuenaga for (i = 0; i < mv_p->data_map->dm_nsegs; i++) { 957 1.1 hsuenaga dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr; 958 1.1 hsuenaga len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len; 959 1.1 hsuenaga if (pkt_off_r) { 960 1.1 hsuenaga if (len <= pkt_off_r) { 961 1.1 hsuenaga /* ignore the segment */ 962 1.1 hsuenaga src += len; 963 1.1 hsuenaga pkt_off_r -= len; 964 1.1 hsuenaga continue; 965 1.1 hsuenaga } 966 1.1 hsuenaga /* copy from the middle of the segment */ 967 1.1 hsuenaga src += pkt_off_r; 968 1.1 hsuenaga dst += pkt_off_r; 969 1.1 hsuenaga len -= pkt_off_r; 970 1.1 hsuenaga pkt_off_r = 0; 971 1.1 hsuenaga } 972 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len); 973 1.1 hsuenaga if (__predict_false(err)) 974 1.1 hsuenaga return err; 975 1.1 hsuenaga src += len; 976 1.1 hsuenaga } 977 1.1 hsuenaga KASSERT(pkt_off == 0); 978 1.1 hsuenaga KASSERT(pkt_off_r == 0); 979 1.1 hsuenaga 980 1.1 hsuenaga /* 981 1.1 hsuenaga * make DMA descriptors to copy packet header: SRAM->DRAM 982 1.1 hsuenaga * if IV is present in the payload, no need to copy. 983 1.1 hsuenaga */ 984 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) { 985 1.1 hsuenaga dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr; 986 1.1 hsuenaga src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc); 987 1.1 hsuenaga len = sizeof(mv_p->pkt_header); 988 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len); 989 1.1 hsuenaga if (__predict_false(err)) 990 1.1 hsuenaga return err; 991 1.1 hsuenaga } 992 1.1 hsuenaga 993 1.1 hsuenaga return 0; 994 1.1 hsuenaga } 995 1.1 hsuenaga 996 1.1 hsuenaga INLINE int 997 1.1 hsuenaga mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p) 998 1.1 hsuenaga { 999 1.1 hsuenaga /* sync packet header */ 1000 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, 1001 1.1 hsuenaga mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header), 1002 1.1 hsuenaga BUS_DMASYNC_PREWRITE); 1003 1.1 hsuenaga 1004 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 1005 1.1 hsuenaga /* sync session header */ 1006 1.1 hsuenaga if (mvxpsec_debug != 0) { 1007 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 1008 1.1 hsuenaga 1009 1.1 hsuenaga /* only debug code touch the session header after newsession */ 1010 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, 1011 1.1 hsuenaga mv_s->session_header_map, 1012 1.1 hsuenaga 0, sizeof(mv_s->session_header), 1013 1.1 hsuenaga BUS_DMASYNC_PREWRITE); 1014 1.1 hsuenaga } 1015 1.1 hsuenaga #endif 1016 1.1 hsuenaga 1017 1.1 hsuenaga /* sync packet buffer */ 1018 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, 1019 1.1 hsuenaga mv_p->data_map, 0, mv_p->data_len, 1020 1.1 hsuenaga BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1021 1.1 hsuenaga 1022 1.1 hsuenaga return 0; 1023 1.1 hsuenaga } 1024 1.1 hsuenaga 1025 1.1 hsuenaga /* 1026 1.1 hsuenaga * Initialize MVXPSEC Internal SRAM 1027 1.1 hsuenaga * 1028 1.18 andvar * - must be called after DMA initialization. 1029 1.1 hsuenaga * - make VM mapping for SRAM area on MBus. 1030 1.1 hsuenaga */ 1031 1.1 hsuenaga STATIC int 1032 1.1 hsuenaga mvxpsec_init_sram(struct mvxpsec_softc *sc) 1033 1.1 hsuenaga { 1034 1.1 hsuenaga uint32_t tag, target, attr, base, size; 1035 1.1 hsuenaga vaddr_t va; 1036 1.1 hsuenaga int window; 1037 1.1 hsuenaga 1038 1.10 riastrad switch (device_unit(sc->sc_dev)) { 1039 1.1 hsuenaga case 0: 1040 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT0; 1041 1.1 hsuenaga break; 1042 1.1 hsuenaga case 1: 1043 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT1; 1044 1.1 hsuenaga break; 1045 1.1 hsuenaga default: 1046 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n"); 1047 1.1 hsuenaga return -1; 1048 1.1 hsuenaga } 1049 1.1 hsuenaga 1050 1.1 hsuenaga window = mvsoc_target(tag, &target, &attr, &base, &size); 1051 1.1 hsuenaga if (window >= nwindow) { 1052 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n"); 1053 1.1 hsuenaga return -1; 1054 1.1 hsuenaga } 1055 1.1 hsuenaga 1056 1.1 hsuenaga if (sizeof(struct mvxpsec_crypt_sram) > size) { 1057 1.1 hsuenaga aprint_error_dev(sc->sc_dev, 1058 1.1 hsuenaga "SRAM Data Structure Excceeds SRAM window size.\n"); 1059 1.1 hsuenaga return -1; 1060 1.1 hsuenaga } 1061 1.1 hsuenaga 1062 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, 1063 1.1 hsuenaga "internal SRAM window at 0x%08x-0x%08x", 1064 1.1 hsuenaga base, base + size - 1); 1065 1.1 hsuenaga sc->sc_sram_pa = base; 1066 1.1 hsuenaga 1067 1.1 hsuenaga /* get vmspace to read/write device internal SRAM */ 1068 1.1 hsuenaga va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE, 1069 1.1 hsuenaga UVM_KMF_VAONLY | UVM_KMF_NOWAIT); 1070 1.1 hsuenaga if (va == 0) { 1071 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n"); 1072 1.1 hsuenaga sc->sc_sram_va = NULL; 1073 1.1 hsuenaga aprint_normal("\n"); 1074 1.1 hsuenaga return 0; 1075 1.1 hsuenaga } 1076 1.1 hsuenaga /* XXX: not working. PMAP_NOCACHE is not affected? */ 1077 1.1 hsuenaga pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE); 1078 1.1 hsuenaga pmap_update(pmap_kernel()); 1079 1.1 hsuenaga sc->sc_sram_va = (void *)va; 1080 1.1 hsuenaga aprint_normal(" va %p\n", sc->sc_sram_va); 1081 1.1 hsuenaga memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE); 1082 1.1 hsuenaga 1083 1.1 hsuenaga return 0; 1084 1.1 hsuenaga } 1085 1.1 hsuenaga 1086 1.1 hsuenaga /* 1087 1.1 hsuenaga * Initialize TDMA engine. 1088 1.1 hsuenaga */ 1089 1.1 hsuenaga STATIC int 1090 1.1 hsuenaga mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva) 1091 1.1 hsuenaga { 1092 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh; 1093 1.1 hsuenaga uint8_t *va; 1094 1.1 hsuenaga paddr_t pa; 1095 1.1 hsuenaga off_t va_off, pa_off; 1096 1.1 hsuenaga int i, n, seg, ndh; 1097 1.1 hsuenaga 1098 1.1 hsuenaga /* Init Deviced's control parameters (disabled yet) */ 1099 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL); 1100 1.1 hsuenaga 1101 1.1 hsuenaga /* Init Software DMA Handlers */ 1102 1.1 hsuenaga sc->sc_devmem_desc = 1103 1.1 hsuenaga mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES); 1104 1.1 hsuenaga ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor)) 1105 1.1 hsuenaga * MVXPSEC_DMA_DESC_PAGES; 1106 1.1 hsuenaga sc->sc_desc_ring = 1107 1.1 hsuenaga kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh, 1108 1.3 chs KM_SLEEP); 1109 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n", 1110 1.1 hsuenaga ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh); 1111 1.1 hsuenaga 1112 1.1 hsuenaga ndh = 0; 1113 1.1 hsuenaga for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) { 1114 1.1 hsuenaga va = devmem_va(sc->sc_devmem_desc); 1115 1.1 hsuenaga pa = devmem_pa(sc->sc_devmem_desc, seg); 1116 1.1 hsuenaga n = devmem_palen(sc->sc_devmem_desc, seg) / 1117 1.1 hsuenaga sizeof(struct mvxpsec_descriptor); 1118 1.1 hsuenaga va_off = (PAGE_SIZE * seg); 1119 1.1 hsuenaga pa_off = 0; 1120 1.1 hsuenaga for (i = 0; i < n; i++) { 1121 1.1 hsuenaga dh = &sc->sc_desc_ring[ndh]; 1122 1.1 hsuenaga dh->map = devmem_map(sc->sc_devmem_desc); 1123 1.1 hsuenaga dh->off = va_off + pa_off; 1124 1.1 hsuenaga dh->_desc = (void *)(va + va_off + pa_off); 1125 1.1 hsuenaga dh->phys_addr = pa + pa_off; 1126 1.1 hsuenaga pa_off += sizeof(struct mvxpsec_descriptor); 1127 1.1 hsuenaga ndh++; 1128 1.1 hsuenaga } 1129 1.1 hsuenaga } 1130 1.1 hsuenaga sc->sc_desc_ring_size = ndh; 1131 1.1 hsuenaga sc->sc_desc_ring_prod = 0; 1132 1.1 hsuenaga sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1; 1133 1.1 hsuenaga 1134 1.1 hsuenaga return 0; 1135 1.1 hsuenaga } 1136 1.1 hsuenaga 1137 1.1 hsuenaga /* 1138 1.1 hsuenaga * Wait for TDMA controller become idle 1139 1.1 hsuenaga */ 1140 1.1 hsuenaga INLINE int 1141 1.1 hsuenaga mvxpsec_dma_wait(struct mvxpsec_softc *sc) 1142 1.1 hsuenaga { 1143 1.1 hsuenaga int retry = 0; 1144 1.1 hsuenaga 1145 1.1 hsuenaga while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) { 1146 1.1 hsuenaga delay(mvxpsec_wait_interval); 1147 1.1 hsuenaga if (retry++ >= mvxpsec_wait_retry) 1148 1.1 hsuenaga return -1; 1149 1.1 hsuenaga } 1150 1.1 hsuenaga return 0; 1151 1.1 hsuenaga } 1152 1.1 hsuenaga 1153 1.1 hsuenaga /* 1154 1.1 hsuenaga * Wait for Security Accelerator become idle 1155 1.1 hsuenaga */ 1156 1.1 hsuenaga INLINE int 1157 1.1 hsuenaga mvxpsec_acc_wait(struct mvxpsec_softc *sc) 1158 1.1 hsuenaga { 1159 1.1 hsuenaga int retry = 0; 1160 1.1 hsuenaga 1161 1.1 hsuenaga while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) { 1162 1.1 hsuenaga delay(mvxpsec_wait_interval); 1163 1.1 hsuenaga if (++retry >= mvxpsec_wait_retry) 1164 1.1 hsuenaga return -1; 1165 1.1 hsuenaga } 1166 1.1 hsuenaga return 0; 1167 1.1 hsuenaga } 1168 1.1 hsuenaga 1169 1.1 hsuenaga /* 1170 1.1 hsuenaga * Entry of interrupt handler 1171 1.1 hsuenaga * 1172 1.1 hsuenaga * register this to kernel via marvell_intr_establish() 1173 1.1 hsuenaga */ 1174 1.1 hsuenaga int 1175 1.1 hsuenaga mvxpsec_intr(void *arg) 1176 1.1 hsuenaga { 1177 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1178 1.1 hsuenaga uint32_t v; 1179 1.1 hsuenaga 1180 1.1 hsuenaga /* IPL_NET */ 1181 1.1 hsuenaga while ((v = mvxpsec_intr_ack(sc)) != 0) { 1182 1.1 hsuenaga mvxpsec_intr_cnt(sc, v); 1183 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v); 1184 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v)); 1185 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 1186 1.1 hsuenaga mvxpsec_dump_reg(sc); 1187 1.1 hsuenaga #endif 1188 1.1 hsuenaga 1189 1.1 hsuenaga /* call high-level handlers */ 1190 1.1 hsuenaga if (v & MVXPSEC_INT_ACCTDMA) 1191 1.1 hsuenaga mvxpsec_done(sc); 1192 1.1 hsuenaga } 1193 1.1 hsuenaga 1194 1.1 hsuenaga return 0; 1195 1.1 hsuenaga } 1196 1.1 hsuenaga 1197 1.1 hsuenaga INLINE void 1198 1.1 hsuenaga mvxpsec_intr_cleanup(struct mvxpsec_softc *sc) 1199 1.1 hsuenaga { 1200 1.1 hsuenaga struct mvxpsec_packet *mv_p; 1201 1.1 hsuenaga 1202 1.1 hsuenaga /* must called with sc->sc_dma_mtx held */ 1203 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx)); 1204 1.1 hsuenaga 1205 1.1 hsuenaga /* 1206 1.1 hsuenaga * there is only one intr for run_queue. 1207 1.1 hsuenaga * no one touch sc_run_queue. 1208 1.1 hsuenaga */ 1209 1.1 hsuenaga SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue) 1210 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring); 1211 1.1 hsuenaga } 1212 1.1 hsuenaga 1213 1.1 hsuenaga /* 1214 1.1 hsuenaga * Acknowledge to interrupt 1215 1.1 hsuenaga * 1216 1.1 hsuenaga * read cause bits, clear it, and return it. 1217 1.1 hsuenaga * NOTE: multiple cause bits may be returned at once. 1218 1.1 hsuenaga */ 1219 1.1 hsuenaga STATIC uint32_t 1220 1.1 hsuenaga mvxpsec_intr_ack(struct mvxpsec_softc *sc) 1221 1.1 hsuenaga { 1222 1.1 hsuenaga uint32_t reg; 1223 1.1 hsuenaga 1224 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE); 1225 1.1 hsuenaga reg &= MVXPSEC_DEFAULT_INT; 1226 1.1 hsuenaga MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg); 1227 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg)); 1228 1.1 hsuenaga 1229 1.1 hsuenaga return reg; 1230 1.1 hsuenaga } 1231 1.1 hsuenaga 1232 1.1 hsuenaga /* 1233 1.1 hsuenaga * Entry of TDMA error interrupt handler 1234 1.1 hsuenaga * 1235 1.1 hsuenaga * register this to kernel via marvell_intr_establish() 1236 1.1 hsuenaga */ 1237 1.1 hsuenaga int 1238 1.1 hsuenaga mvxpsec_eintr(void *arg) 1239 1.1 hsuenaga { 1240 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1241 1.1 hsuenaga uint32_t err; 1242 1.1 hsuenaga 1243 1.1 hsuenaga /* IPL_NET */ 1244 1.1 hsuenaga again: 1245 1.1 hsuenaga err = mvxpsec_eintr_ack(sc); 1246 1.1 hsuenaga if (err == 0) 1247 1.1 hsuenaga goto done; 1248 1.1 hsuenaga 1249 1.1 hsuenaga log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__, 1250 1.1 hsuenaga s_errreg(err)); 1251 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 1252 1.1 hsuenaga mvxpsec_dump_reg(sc); 1253 1.1 hsuenaga #endif 1254 1.1 hsuenaga 1255 1.1 hsuenaga goto again; 1256 1.1 hsuenaga done: 1257 1.1 hsuenaga return 0; 1258 1.1 hsuenaga } 1259 1.1 hsuenaga 1260 1.1 hsuenaga /* 1261 1.1 hsuenaga * Acknowledge to TDMA error interrupt 1262 1.1 hsuenaga * 1263 1.1 hsuenaga * read cause bits, clear it, and return it. 1264 1.1 hsuenaga * NOTE: multiple cause bits may be returned at once. 1265 1.1 hsuenaga */ 1266 1.1 hsuenaga STATIC uint32_t 1267 1.1 hsuenaga mvxpsec_eintr_ack(struct mvxpsec_softc *sc) 1268 1.1 hsuenaga { 1269 1.1 hsuenaga uint32_t reg; 1270 1.12 riastrad 1271 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE); 1272 1.1 hsuenaga reg &= MVXPSEC_DEFAULT_ERR; 1273 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg); 1274 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg)); 1275 1.1 hsuenaga 1276 1.1 hsuenaga return reg; 1277 1.1 hsuenaga } 1278 1.1 hsuenaga 1279 1.1 hsuenaga /* 1280 1.1 hsuenaga * Interrupt statistics 1281 1.1 hsuenaga * 1282 1.9 andvar * this is NOT a statistics of how many times the events 'occurred'. 1283 1.1 hsuenaga * this ONLY means how many times the events 'handled'. 1284 1.1 hsuenaga */ 1285 1.1 hsuenaga INLINE void 1286 1.1 hsuenaga mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause) 1287 1.1 hsuenaga { 1288 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_all); 1289 1.1 hsuenaga if (cause & MVXPSEC_INT_AUTH) 1290 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_auth); 1291 1.1 hsuenaga if (cause & MVXPSEC_INT_DES) 1292 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_des); 1293 1.1 hsuenaga if (cause & MVXPSEC_INT_AES_ENC) 1294 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_aes_enc); 1295 1.1 hsuenaga if (cause & MVXPSEC_INT_AES_DEC) 1296 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_aes_dec); 1297 1.1 hsuenaga if (cause & MVXPSEC_INT_ENC) 1298 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_enc); 1299 1.1 hsuenaga if (cause & MVXPSEC_INT_SA) 1300 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_sa); 1301 1.1 hsuenaga if (cause & MVXPSEC_INT_ACCTDMA) 1302 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_acctdma); 1303 1.1 hsuenaga if (cause & MVXPSEC_INT_TDMA_COMP) 1304 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_comp); 1305 1.1 hsuenaga if (cause & MVXPSEC_INT_TDMA_OWN) 1306 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_own); 1307 1.1 hsuenaga if (cause & MVXPSEC_INT_ACCTDMA_CONT) 1308 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont); 1309 1.1 hsuenaga } 1310 1.1 hsuenaga 1311 1.1 hsuenaga /* 1312 1.1 hsuenaga * Setup MVXPSEC header structure. 1313 1.1 hsuenaga * 1314 1.1 hsuenaga * the header contains descriptor of security accelerator, 1315 1.18 andvar * key material of ciphers, iv of ciphers and macs, ... 1316 1.1 hsuenaga * 1317 1.5 msaitoh * the header is transferred to MVXPSEC Internal SRAM by TDMA, 1318 1.1 hsuenaga * and parsed by MVXPSEC H/W. 1319 1.1 hsuenaga */ 1320 1.1 hsuenaga STATIC int 1321 1.1 hsuenaga mvxpsec_header_finalize(struct mvxpsec_packet *mv_p) 1322 1.1 hsuenaga { 1323 1.1 hsuenaga struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc; 1324 1.1 hsuenaga int enc_start, enc_len, iv_offset; 1325 1.1 hsuenaga int mac_start, mac_len, mac_offset; 1326 1.1 hsuenaga 1327 1.1 hsuenaga /* offset -> device address */ 1328 1.1 hsuenaga enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off); 1329 1.1 hsuenaga enc_len = mv_p->enc_len; 1330 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) 1331 1.1 hsuenaga iv_offset = mv_p->enc_ivoff; 1332 1.1 hsuenaga else 1333 1.1 hsuenaga iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff); 1334 1.1 hsuenaga mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off); 1335 1.1 hsuenaga mac_len = mv_p->mac_len; 1336 1.1 hsuenaga mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst); 1337 1.1 hsuenaga 1338 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1339 1.1 hsuenaga "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF); 1340 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1341 1.1 hsuenaga "ENC from 0x%08x\n", enc_start); 1342 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1343 1.1 hsuenaga "MAC from 0x%08x\n", mac_start); 1344 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1345 1.1 hsuenaga "MAC to 0x%08x\n", mac_offset); 1346 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1347 1.1 hsuenaga "ENC IV at 0x%08x\n", iv_offset); 1348 1.1 hsuenaga 1349 1.1 hsuenaga /* setup device addresses in Security Accelerator Descriptors */ 1350 1.1 hsuenaga desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start); 1351 1.1 hsuenaga desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len); 1352 1.1 hsuenaga if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT) 1353 1.1 hsuenaga desc->acc_enckey = 1354 1.1 hsuenaga MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA); 1355 1.1 hsuenaga else 1356 1.1 hsuenaga desc->acc_enckey = 1357 1.1 hsuenaga MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA); 1358 1.1 hsuenaga desc->acc_enciv = 1359 1.1 hsuenaga MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset); 1360 1.1 hsuenaga 1361 1.1 hsuenaga desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len); 1362 1.1 hsuenaga desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len); 1363 1.1 hsuenaga desc->acc_maciv = 1364 1.1 hsuenaga MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA, 1365 1.1 hsuenaga MVXPSEC_SRAM_MIV_OUT_DA); 1366 1.1 hsuenaga 1367 1.1 hsuenaga return 0; 1368 1.1 hsuenaga } 1369 1.1 hsuenaga 1370 1.1 hsuenaga /* 1371 1.1 hsuenaga * constractor of session structure. 1372 1.1 hsuenaga * 1373 1.1 hsuenaga * this constrator will be called by pool_cache framework. 1374 1.1 hsuenaga */ 1375 1.1 hsuenaga STATIC int 1376 1.1 hsuenaga mvxpsec_session_ctor(void *arg, void *obj, int flags) 1377 1.1 hsuenaga { 1378 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1379 1.1 hsuenaga struct mvxpsec_session *mv_s = obj; 1380 1.1 hsuenaga 1381 1.1 hsuenaga /* pool is owned by softc */ 1382 1.1 hsuenaga mv_s->sc = sc; 1383 1.1 hsuenaga 1384 1.1 hsuenaga /* Create and load DMA map for session header */ 1385 1.1 hsuenaga mv_s->session_header_map = 0; 1386 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat, 1387 1.1 hsuenaga sizeof(mv_s->session_header), 1, 1388 1.1 hsuenaga sizeof(mv_s->session_header), 0, 1389 1.1 hsuenaga BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1390 1.1 hsuenaga &mv_s->session_header_map)) { 1391 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__); 1392 1.1 hsuenaga goto fail; 1393 1.1 hsuenaga } 1394 1.1 hsuenaga if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map, 1395 1.1 hsuenaga &mv_s->session_header, sizeof(mv_s->session_header), 1396 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) { 1397 1.1 hsuenaga log(LOG_ERR, "%s: cannot load header\n", __func__); 1398 1.1 hsuenaga goto fail; 1399 1.1 hsuenaga } 1400 1.1 hsuenaga 1401 1.1 hsuenaga return 0; 1402 1.1 hsuenaga fail: 1403 1.1 hsuenaga if (mv_s->session_header_map) 1404 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map); 1405 1.1 hsuenaga return ENOMEM; 1406 1.1 hsuenaga } 1407 1.1 hsuenaga 1408 1.1 hsuenaga /* 1409 1.1 hsuenaga * destractor of session structure. 1410 1.1 hsuenaga * 1411 1.1 hsuenaga * this destrator will be called by pool_cache framework. 1412 1.1 hsuenaga */ 1413 1.1 hsuenaga STATIC void 1414 1.1 hsuenaga mvxpsec_session_dtor(void *arg, void *obj) 1415 1.1 hsuenaga { 1416 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1417 1.1 hsuenaga struct mvxpsec_session *mv_s = obj; 1418 1.1 hsuenaga 1419 1.1 hsuenaga if (mv_s->sc != sc) 1420 1.1 hsuenaga panic("inconsitent context\n"); 1421 1.1 hsuenaga 1422 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map); 1423 1.1 hsuenaga } 1424 1.1 hsuenaga 1425 1.1 hsuenaga /* 1426 1.1 hsuenaga * constructor of packet structure. 1427 1.1 hsuenaga */ 1428 1.1 hsuenaga STATIC int 1429 1.1 hsuenaga mvxpsec_packet_ctor(void *arg, void *obj, int flags) 1430 1.1 hsuenaga { 1431 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1432 1.1 hsuenaga struct mvxpsec_packet *mv_p = obj; 1433 1.1 hsuenaga 1434 1.1 hsuenaga mv_p->dma_ring.dma_head = NULL; 1435 1.1 hsuenaga mv_p->dma_ring.dma_last = NULL; 1436 1.1 hsuenaga mv_p->dma_ring.dma_size = 0; 1437 1.1 hsuenaga 1438 1.1 hsuenaga /* Create and load DMA map for packet header */ 1439 1.1 hsuenaga mv_p->pkt_header_map = 0; 1440 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat, 1441 1.1 hsuenaga sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0, 1442 1.1 hsuenaga BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1443 1.1 hsuenaga &mv_p->pkt_header_map)) { 1444 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__); 1445 1.1 hsuenaga goto fail; 1446 1.1 hsuenaga } 1447 1.12 riastrad if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map, 1448 1.1 hsuenaga &mv_p->pkt_header, sizeof(mv_p->pkt_header), 1449 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) { 1450 1.1 hsuenaga log(LOG_ERR, "%s: cannot load header\n", __func__); 1451 1.1 hsuenaga goto fail; 1452 1.1 hsuenaga } 1453 1.1 hsuenaga 1454 1.1 hsuenaga /* Create DMA map for session data. */ 1455 1.1 hsuenaga mv_p->data_map = 0; 1456 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat, 1457 1.1 hsuenaga MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE, 1458 1.1 hsuenaga 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) { 1459 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__); 1460 1.1 hsuenaga goto fail; 1461 1.1 hsuenaga } 1462 1.1 hsuenaga 1463 1.1 hsuenaga return 0; 1464 1.1 hsuenaga fail: 1465 1.1 hsuenaga if (mv_p->pkt_header_map) 1466 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map); 1467 1.1 hsuenaga if (mv_p->data_map) 1468 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map); 1469 1.1 hsuenaga return ENOMEM; 1470 1.1 hsuenaga } 1471 1.1 hsuenaga 1472 1.1 hsuenaga /* 1473 1.1 hsuenaga * destractor of packet structure. 1474 1.1 hsuenaga */ 1475 1.1 hsuenaga STATIC void 1476 1.1 hsuenaga mvxpsec_packet_dtor(void *arg, void *obj) 1477 1.1 hsuenaga { 1478 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1479 1.1 hsuenaga struct mvxpsec_packet *mv_p = obj; 1480 1.1 hsuenaga 1481 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx); 1482 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring); 1483 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx); 1484 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map); 1485 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map); 1486 1.1 hsuenaga } 1487 1.1 hsuenaga 1488 1.1 hsuenaga /* 1489 1.8 andvar * allocate new session structure. 1490 1.1 hsuenaga */ 1491 1.1 hsuenaga STATIC struct mvxpsec_session * 1492 1.1 hsuenaga mvxpsec_session_alloc(struct mvxpsec_softc *sc) 1493 1.1 hsuenaga { 1494 1.1 hsuenaga struct mvxpsec_session *mv_s; 1495 1.1 hsuenaga 1496 1.2 christos mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT); 1497 1.1 hsuenaga if (mv_s == NULL) { 1498 1.1 hsuenaga log(LOG_ERR, "%s: cannot allocate memory\n", __func__); 1499 1.1 hsuenaga return NULL; 1500 1.1 hsuenaga } 1501 1.17 skrll mv_s->refs = 1; /* 0 means session is already invalid */ 1502 1.1 hsuenaga mv_s->sflags = 0; 1503 1.1 hsuenaga 1504 1.1 hsuenaga return mv_s; 1505 1.1 hsuenaga } 1506 1.1 hsuenaga 1507 1.1 hsuenaga /* 1508 1.1 hsuenaga * deallocate session structure. 1509 1.1 hsuenaga */ 1510 1.1 hsuenaga STATIC void 1511 1.1 hsuenaga mvxpsec_session_dealloc(struct mvxpsec_session *mv_s) 1512 1.1 hsuenaga { 1513 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 1514 1.1 hsuenaga 1515 1.1 hsuenaga mv_s->sflags |= DELETED; 1516 1.1 hsuenaga mvxpsec_session_unref(mv_s); 1517 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ); 1518 1.1 hsuenaga 1519 1.1 hsuenaga return; 1520 1.1 hsuenaga } 1521 1.1 hsuenaga 1522 1.1 hsuenaga STATIC int 1523 1.1 hsuenaga mvxpsec_session_ref(struct mvxpsec_session *mv_s) 1524 1.1 hsuenaga { 1525 1.1 hsuenaga uint32_t refs; 1526 1.1 hsuenaga 1527 1.1 hsuenaga if (mv_s->sflags & DELETED) { 1528 1.1 hsuenaga log(LOG_ERR, 1529 1.1 hsuenaga "%s: session is already deleted.\n", __func__); 1530 1.1 hsuenaga return -1; 1531 1.1 hsuenaga } 1532 1.1 hsuenaga 1533 1.1 hsuenaga refs = atomic_inc_32_nv(&mv_s->refs); 1534 1.1 hsuenaga if (refs == 1) { 1535 1.12 riastrad /* 1536 1.1 hsuenaga * a session with refs == 0 is 1537 1.1 hsuenaga * already invalidated. revert it. 1538 1.1 hsuenaga * XXX: use CAS ? 1539 1.1 hsuenaga */ 1540 1.1 hsuenaga atomic_dec_32(&mv_s->refs); 1541 1.1 hsuenaga log(LOG_ERR, 1542 1.1 hsuenaga "%s: session is already invalidated.\n", __func__); 1543 1.1 hsuenaga return -1; 1544 1.1 hsuenaga } 1545 1.12 riastrad 1546 1.1 hsuenaga return 0; 1547 1.1 hsuenaga } 1548 1.1 hsuenaga 1549 1.1 hsuenaga STATIC void 1550 1.1 hsuenaga mvxpsec_session_unref(struct mvxpsec_session *mv_s) 1551 1.1 hsuenaga { 1552 1.1 hsuenaga uint32_t refs; 1553 1.1 hsuenaga 1554 1.13 riastrad membar_release(); 1555 1.1 hsuenaga refs = atomic_dec_32_nv(&mv_s->refs); 1556 1.11 riastrad if (refs == 0) { 1557 1.13 riastrad membar_acquire(); 1558 1.1 hsuenaga pool_cache_put(mv_s->sc->sc_session_pool, mv_s); 1559 1.11 riastrad } 1560 1.1 hsuenaga } 1561 1.1 hsuenaga 1562 1.1 hsuenaga /* 1563 1.1 hsuenaga * look for session is exist or not 1564 1.1 hsuenaga */ 1565 1.1 hsuenaga INLINE struct mvxpsec_session * 1566 1.1 hsuenaga mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid) 1567 1.1 hsuenaga { 1568 1.1 hsuenaga struct mvxpsec_session *mv_s; 1569 1.1 hsuenaga int session; 1570 1.1 hsuenaga 1571 1.1 hsuenaga /* must called sc->sc_session_mtx held */ 1572 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_session_mtx)); 1573 1.1 hsuenaga 1574 1.1 hsuenaga session = MVXPSEC_SESSION(sid); 1575 1.1 hsuenaga if (__predict_false(session > MVXPSEC_MAX_SESSIONS)) { 1576 1.1 hsuenaga log(LOG_ERR, "%s: session number too large %d\n", 1577 1.1 hsuenaga __func__, session); 1578 1.1 hsuenaga return NULL; 1579 1.1 hsuenaga } 1580 1.1 hsuenaga if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) { 1581 1.1 hsuenaga log(LOG_ERR, "%s: invalid session %d\n", 1582 1.1 hsuenaga __func__, session); 1583 1.1 hsuenaga return NULL; 1584 1.1 hsuenaga } 1585 1.1 hsuenaga 1586 1.1 hsuenaga KASSERT(mv_s->sid == session); 1587 1.1 hsuenaga 1588 1.1 hsuenaga return mv_s; 1589 1.1 hsuenaga } 1590 1.1 hsuenaga 1591 1.1 hsuenaga /* 1592 1.1 hsuenaga * allocation new packet structure. 1593 1.1 hsuenaga */ 1594 1.1 hsuenaga STATIC struct mvxpsec_packet * 1595 1.1 hsuenaga mvxpsec_packet_alloc(struct mvxpsec_session *mv_s) 1596 1.1 hsuenaga { 1597 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 1598 1.1 hsuenaga struct mvxpsec_packet *mv_p; 1599 1.1 hsuenaga 1600 1.1 hsuenaga /* must be called mv_queue_mtx held. */ 1601 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx)); 1602 1.1 hsuenaga /* must be called mv_session_mtx held. */ 1603 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_session_mtx)); 1604 1.1 hsuenaga 1605 1.1 hsuenaga if (mvxpsec_session_ref(mv_s) < 0) { 1606 1.1 hsuenaga log(LOG_ERR, "%s: invalid session.\n", __func__); 1607 1.1 hsuenaga return NULL; 1608 1.1 hsuenaga } 1609 1.1 hsuenaga 1610 1.1 hsuenaga if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) { 1611 1.1 hsuenaga SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list); 1612 1.1 hsuenaga sc->sc_free_qlen--; 1613 1.1 hsuenaga } 1614 1.1 hsuenaga else { 1615 1.2 christos mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT); 1616 1.1 hsuenaga if (mv_p == NULL) { 1617 1.1 hsuenaga log(LOG_ERR, "%s: cannot allocate memory\n", 1618 1.1 hsuenaga __func__); 1619 1.1 hsuenaga mvxpsec_session_unref(mv_s); 1620 1.1 hsuenaga return NULL; 1621 1.1 hsuenaga } 1622 1.1 hsuenaga } 1623 1.1 hsuenaga mv_p->mv_s = mv_s; 1624 1.1 hsuenaga mv_p->flags = 0; 1625 1.1 hsuenaga mv_p->data_ptr = NULL; 1626 1.1 hsuenaga 1627 1.1 hsuenaga return mv_p; 1628 1.1 hsuenaga } 1629 1.1 hsuenaga 1630 1.1 hsuenaga /* 1631 1.1 hsuenaga * free packet structure. 1632 1.1 hsuenaga */ 1633 1.1 hsuenaga STATIC void 1634 1.1 hsuenaga mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p) 1635 1.1 hsuenaga { 1636 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 1637 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 1638 1.1 hsuenaga 1639 1.1 hsuenaga /* must called with sc->sc_queue_mtx held */ 1640 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx)); 1641 1.1 hsuenaga 1642 1.1 hsuenaga if (mv_p->dma_ring.dma_size != 0) { 1643 1.1 hsuenaga sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size; 1644 1.1 hsuenaga } 1645 1.1 hsuenaga mv_p->dma_ring.dma_head = NULL; 1646 1.1 hsuenaga mv_p->dma_ring.dma_last = NULL; 1647 1.1 hsuenaga mv_p->dma_ring.dma_size = 0; 1648 1.1 hsuenaga 1649 1.1 hsuenaga if (mv_p->data_map) { 1650 1.1 hsuenaga if (mv_p->flags & RDY_DATA) { 1651 1.1 hsuenaga bus_dmamap_unload(sc->sc_dmat, mv_p->data_map); 1652 1.1 hsuenaga mv_p->flags &= ~RDY_DATA; 1653 1.1 hsuenaga } 1654 1.1 hsuenaga } 1655 1.1 hsuenaga 1656 1.1 hsuenaga if (sc->sc_free_qlen > sc->sc_wait_qlimit) 1657 1.1 hsuenaga pool_cache_put(sc->sc_packet_pool, mv_p); 1658 1.1 hsuenaga else { 1659 1.1 hsuenaga SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list); 1660 1.1 hsuenaga sc->sc_free_qlen++; 1661 1.1 hsuenaga } 1662 1.1 hsuenaga mvxpsec_session_unref(mv_s); 1663 1.1 hsuenaga } 1664 1.1 hsuenaga 1665 1.1 hsuenaga INLINE void 1666 1.1 hsuenaga mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p) 1667 1.1 hsuenaga { 1668 1.1 hsuenaga struct mvxpsec_softc *sc = mv_p->mv_s->sc; 1669 1.1 hsuenaga struct mvxpsec_packet *last_packet; 1670 1.1 hsuenaga struct mvxpsec_descriptor_handle *cur_dma, *prev_dma; 1671 1.1 hsuenaga 1672 1.1 hsuenaga /* must called with sc->sc_queue_mtx held */ 1673 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx)); 1674 1.1 hsuenaga 1675 1.1 hsuenaga if (sc->sc_wait_qlen == 0) { 1676 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue); 1677 1.1 hsuenaga sc->sc_wait_qlen++; 1678 1.1 hsuenaga mv_p->flags |= SETUP_DONE; 1679 1.1 hsuenaga return; 1680 1.1 hsuenaga } 1681 1.1 hsuenaga 1682 1.1 hsuenaga last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue); 1683 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue); 1684 1.1 hsuenaga sc->sc_wait_qlen++; 1685 1.1 hsuenaga 1686 1.1 hsuenaga /* chain the DMA */ 1687 1.1 hsuenaga cur_dma = mv_p->dma_ring.dma_head; 1688 1.1 hsuenaga prev_dma = last_packet->dma_ring.dma_last; 1689 1.1 hsuenaga mvxpsec_dma_cat(sc, prev_dma, cur_dma); 1690 1.1 hsuenaga mv_p->flags |= SETUP_DONE; 1691 1.1 hsuenaga } 1692 1.1 hsuenaga 1693 1.1 hsuenaga /* 1694 1.1 hsuenaga * called by interrupt handler 1695 1.1 hsuenaga */ 1696 1.1 hsuenaga STATIC int 1697 1.1 hsuenaga mvxpsec_done_packet(struct mvxpsec_packet *mv_p) 1698 1.1 hsuenaga { 1699 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 1700 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 1701 1.1 hsuenaga 1702 1.1 hsuenaga KASSERT((mv_p->flags & RDY_DATA)); 1703 1.1 hsuenaga KASSERT((mv_p->flags & SETUP_DONE)); 1704 1.1 hsuenaga 1705 1.1 hsuenaga /* unload data */ 1706 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_p->data_map, 1707 1.1 hsuenaga 0, mv_p->data_len, 1708 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1709 1.1 hsuenaga bus_dmamap_unload(sc->sc_dmat, mv_p->data_map); 1710 1.1 hsuenaga mv_p->flags &= ~RDY_DATA; 1711 1.1 hsuenaga 1712 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 1713 1.1 hsuenaga if (mvxpsec_debug != 0) { 1714 1.1 hsuenaga int s; 1715 1.1 hsuenaga 1716 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map, 1717 1.1 hsuenaga 0, sizeof(mv_p->pkt_header), 1718 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1719 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map, 1720 1.1 hsuenaga 0, sizeof(mv_s->session_header), 1721 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1722 1.1 hsuenaga 1723 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) { 1724 1.1 hsuenaga char buf[1500]; 1725 1.1 hsuenaga struct mbuf *m; 1726 1.1 hsuenaga struct uio *uio; 1727 1.1 hsuenaga size_t len; 1728 1.1 hsuenaga 1729 1.1 hsuenaga switch (mv_p->data_type) { 1730 1.1 hsuenaga case MVXPSEC_DATA_MBUF: 1731 1.1 hsuenaga m = mv_p->data_mbuf; 1732 1.1 hsuenaga len = m->m_pkthdr.len; 1733 1.1 hsuenaga if (len > sizeof(buf)) 1734 1.1 hsuenaga len = sizeof(buf); 1735 1.1 hsuenaga m_copydata(m, 0, len, buf); 1736 1.1 hsuenaga break; 1737 1.1 hsuenaga case MVXPSEC_DATA_UIO: 1738 1.1 hsuenaga uio = mv_p->data_uio; 1739 1.1 hsuenaga len = uio->uio_resid; 1740 1.1 hsuenaga if (len > sizeof(buf)) 1741 1.1 hsuenaga len = sizeof(buf); 1742 1.1 hsuenaga cuio_copydata(uio, 0, len, buf); 1743 1.1 hsuenaga break; 1744 1.1 hsuenaga default: 1745 1.1 hsuenaga len = 0; 1746 1.1 hsuenaga } 1747 1.1 hsuenaga if (len > 0) 1748 1.1 hsuenaga mvxpsec_dump_data(__func__, buf, len); 1749 1.1 hsuenaga } 1750 1.1 hsuenaga 1751 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) { 1752 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD, 1753 1.1 hsuenaga "%s: session_descriptor:\n", __func__); 1754 1.1 hsuenaga mvxpsec_dump_packet_desc(__func__, mv_p); 1755 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD, 1756 1.1 hsuenaga "%s: session_data:\n", __func__); 1757 1.1 hsuenaga mvxpsec_dump_packet_data(__func__, mv_p); 1758 1.1 hsuenaga } 1759 1.1 hsuenaga 1760 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) { 1761 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM, 1762 1.1 hsuenaga "%s: SRAM\n", __func__); 1763 1.1 hsuenaga mvxpsec_dump_sram(__func__, sc, 2000); 1764 1.1 hsuenaga } 1765 1.1 hsuenaga 1766 1.1 hsuenaga s = MVXPSEC_READ(sc, MV_ACC_STATUS); 1767 1.1 hsuenaga if (s & MV_ACC_STATUS_MAC_ERR) { 1768 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, 1769 1.1 hsuenaga "%s: Message Authentication Failed.\n", __func__); 1770 1.1 hsuenaga } 1771 1.1 hsuenaga } 1772 1.1 hsuenaga #endif 1773 1.1 hsuenaga 1774 1.1 hsuenaga /* copy back IV */ 1775 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) { 1776 1.1 hsuenaga memcpy(mv_p->ext_iv, 1777 1.1 hsuenaga &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen); 1778 1.1 hsuenaga mv_p->ext_iv = NULL; 1779 1.1 hsuenaga mv_p->ext_ivlen = 0; 1780 1.1 hsuenaga } 1781 1.1 hsuenaga 1782 1.1 hsuenaga /* notify opencrypto */ 1783 1.1 hsuenaga mv_p->crp->crp_etype = 0; 1784 1.1 hsuenaga crypto_done(mv_p->crp); 1785 1.1 hsuenaga mv_p->crp = NULL; 1786 1.1 hsuenaga 1787 1.1 hsuenaga /* unblock driver */ 1788 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p); 1789 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ); 1790 1.1 hsuenaga 1791 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, packet_ok); 1792 1.1 hsuenaga 1793 1.1 hsuenaga return 0; 1794 1.1 hsuenaga } 1795 1.1 hsuenaga 1796 1.1 hsuenaga 1797 1.1 hsuenaga /* 1798 1.1 hsuenaga * Opencrypto API registration 1799 1.1 hsuenaga */ 1800 1.1 hsuenaga int 1801 1.1 hsuenaga mvxpsec_register(struct mvxpsec_softc *sc) 1802 1.1 hsuenaga { 1803 1.1 hsuenaga int oplen = SRAM_PAYLOAD_SIZE; 1804 1.1 hsuenaga int flags = 0; 1805 1.1 hsuenaga int err; 1806 1.1 hsuenaga 1807 1.1 hsuenaga sc->sc_nsessions = 0; 1808 1.1 hsuenaga sc->sc_cid = crypto_get_driverid(0); 1809 1.1 hsuenaga if (sc->sc_cid < 0) { 1810 1.1 hsuenaga log(LOG_ERR, 1811 1.1 hsuenaga "%s: crypto_get_driverid() failed.\n", __func__); 1812 1.1 hsuenaga err = EINVAL; 1813 1.1 hsuenaga goto done; 1814 1.1 hsuenaga } 1815 1.1 hsuenaga 1816 1.1 hsuenaga /* Ciphers */ 1817 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags, 1818 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc); 1819 1.1 hsuenaga if (err) 1820 1.1 hsuenaga goto done; 1821 1.1 hsuenaga 1822 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags, 1823 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc); 1824 1.1 hsuenaga if (err) 1825 1.1 hsuenaga goto done; 1826 1.1 hsuenaga 1827 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags, 1828 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc); 1829 1.1 hsuenaga if (err) 1830 1.1 hsuenaga goto done; 1831 1.1 hsuenaga 1832 1.1 hsuenaga /* MACs */ 1833 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 1834 1.1 hsuenaga oplen, flags, 1835 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc); 1836 1.1 hsuenaga if (err) 1837 1.1 hsuenaga goto done; 1838 1.1 hsuenaga 1839 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 1840 1.1 hsuenaga oplen, flags, 1841 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc); 1842 1.1 hsuenaga if (err) 1843 1.1 hsuenaga goto done; 1844 1.1 hsuenaga 1845 1.1 hsuenaga #ifdef DEBUG 1846 1.1 hsuenaga log(LOG_DEBUG, 1847 1.1 hsuenaga "%s: registered to opencrypto(max data = %d bytes)\n", 1848 1.1 hsuenaga device_xname(sc->sc_dev), oplen); 1849 1.1 hsuenaga #endif 1850 1.1 hsuenaga 1851 1.1 hsuenaga err = 0; 1852 1.1 hsuenaga done: 1853 1.1 hsuenaga return err; 1854 1.1 hsuenaga } 1855 1.1 hsuenaga 1856 1.1 hsuenaga /* 1857 1.1 hsuenaga * Create new opencrypto session 1858 1.1 hsuenaga * 1859 1.1 hsuenaga * - register cipher key, mac key. 1860 1.1 hsuenaga * - initialize mac internal state. 1861 1.1 hsuenaga */ 1862 1.1 hsuenaga int 1863 1.1 hsuenaga mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri) 1864 1.1 hsuenaga { 1865 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 1866 1.1 hsuenaga struct mvxpsec_session *mv_s = NULL; 1867 1.1 hsuenaga struct cryptoini *c; 1868 1.1 hsuenaga static int hint = 0; 1869 1.1 hsuenaga int session = -1; 1870 1.1 hsuenaga int sid; 1871 1.1 hsuenaga int err; 1872 1.1 hsuenaga int i; 1873 1.1 hsuenaga 1874 1.1 hsuenaga /* allocate driver session context */ 1875 1.1 hsuenaga mv_s = mvxpsec_session_alloc(sc); 1876 1.1 hsuenaga if (mv_s == NULL) 1877 1.1 hsuenaga return ENOMEM; 1878 1.1 hsuenaga 1879 1.1 hsuenaga /* 1880 1.1 hsuenaga * lookup opencrypto session table 1881 1.1 hsuenaga * 1882 1.1 hsuenaga * we have sc_session_mtx after here. 1883 1.1 hsuenaga */ 1884 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx); 1885 1.1 hsuenaga if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) { 1886 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 1887 1.1 hsuenaga log(LOG_ERR, "%s: too many IPsec SA(max %d)\n", 1888 1.1 hsuenaga __func__, MVXPSEC_MAX_SESSIONS); 1889 1.1 hsuenaga mvxpsec_session_dealloc(mv_s); 1890 1.1 hsuenaga return ENOMEM; 1891 1.1 hsuenaga } 1892 1.1 hsuenaga for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) { 1893 1.1 hsuenaga if (sc->sc_sessions[i]) 1894 1.1 hsuenaga continue; 1895 1.1 hsuenaga session = i; 1896 1.1 hsuenaga hint = session + 1; 1897 1.1 hsuenaga break; 1898 1.1 hsuenaga } 1899 1.1 hsuenaga if (session < 0) { 1900 1.1 hsuenaga for (i = 0; i < hint; i++) { 1901 1.1 hsuenaga if (sc->sc_sessions[i]) 1902 1.1 hsuenaga continue; 1903 1.1 hsuenaga session = i; 1904 1.1 hsuenaga hint = session + 1; 1905 1.1 hsuenaga break; 1906 1.1 hsuenaga } 1907 1.1 hsuenaga if (session < 0) { 1908 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 1909 1.1 hsuenaga /* session full */ 1910 1.1 hsuenaga log(LOG_ERR, "%s: too many IPsec SA(max %d)\n", 1911 1.1 hsuenaga __func__, MVXPSEC_MAX_SESSIONS); 1912 1.1 hsuenaga mvxpsec_session_dealloc(mv_s); 1913 1.1 hsuenaga hint = 0; 1914 1.1 hsuenaga return ENOMEM; 1915 1.1 hsuenaga } 1916 1.1 hsuenaga } 1917 1.1 hsuenaga if (hint >= MVXPSEC_MAX_SESSIONS) 1918 1.1 hsuenaga hint = 0; 1919 1.1 hsuenaga sc->sc_nsessions++; 1920 1.1 hsuenaga sc->sc_sessions[session] = mv_s; 1921 1.1 hsuenaga #ifdef DEBUG 1922 1.1 hsuenaga log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session); 1923 1.1 hsuenaga #endif 1924 1.1 hsuenaga 1925 1.1 hsuenaga sid = MVXPSEC_SID(device_unit(sc->sc_dev), session); 1926 1.1 hsuenaga mv_s->sid = sid; 1927 1.1 hsuenaga 1928 1.1 hsuenaga /* setup the session key ... */ 1929 1.1 hsuenaga for (c = cri; c; c = c->cri_next) { 1930 1.1 hsuenaga switch (c->cri_alg) { 1931 1.1 hsuenaga case CRYPTO_DES_CBC: 1932 1.1 hsuenaga case CRYPTO_3DES_CBC: 1933 1.1 hsuenaga case CRYPTO_AES_CBC: 1934 1.1 hsuenaga /* key */ 1935 1.1 hsuenaga if (mvxpsec_key_precomp(c->cri_alg, 1936 1.1 hsuenaga c->cri_key, c->cri_klen, 1937 1.1 hsuenaga &mv_s->session_header.crp_key, 1938 1.1 hsuenaga &mv_s->session_header.crp_key_d)) { 1939 1.1 hsuenaga log(LOG_ERR, 1940 1.1 hsuenaga "%s: Invalid HMAC key for %s.\n", 1941 1.1 hsuenaga __func__, s_ctlalg(c->cri_alg)); 1942 1.1 hsuenaga err = EINVAL; 1943 1.1 hsuenaga goto fail; 1944 1.1 hsuenaga } 1945 1.1 hsuenaga if (mv_s->sflags & RDY_CRP_KEY) { 1946 1.1 hsuenaga log(LOG_WARNING, 1947 1.1 hsuenaga "%s: overwrite cipher: %s->%s.\n", 1948 1.1 hsuenaga __func__, 1949 1.1 hsuenaga s_ctlalg(mv_s->cipher_alg), 1950 1.1 hsuenaga s_ctlalg(c->cri_alg)); 1951 1.1 hsuenaga } 1952 1.1 hsuenaga mv_s->sflags |= RDY_CRP_KEY; 1953 1.1 hsuenaga mv_s->enc_klen = c->cri_klen; 1954 1.1 hsuenaga mv_s->cipher_alg = c->cri_alg; 1955 1.1 hsuenaga /* create per session IV (compatible with KAME IPsec) */ 1956 1.1 hsuenaga cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv)); 1957 1.1 hsuenaga mv_s->sflags |= RDY_CRP_IV; 1958 1.1 hsuenaga break; 1959 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 1960 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 1961 1.1 hsuenaga /* key */ 1962 1.1 hsuenaga if (mvxpsec_hmac_precomp(c->cri_alg, 1963 1.1 hsuenaga c->cri_key, c->cri_klen, 1964 1.1 hsuenaga (uint32_t *)&mv_s->session_header.miv_in, 1965 1.1 hsuenaga (uint32_t *)&mv_s->session_header.miv_out)) { 1966 1.1 hsuenaga log(LOG_ERR, 1967 1.1 hsuenaga "%s: Invalid MAC key\n", __func__); 1968 1.1 hsuenaga err = EINVAL; 1969 1.1 hsuenaga goto fail; 1970 1.1 hsuenaga } 1971 1.1 hsuenaga if (mv_s->sflags & RDY_MAC_KEY || 1972 1.1 hsuenaga mv_s->sflags & RDY_MAC_IV) { 1973 1.1 hsuenaga log(LOG_ERR, 1974 1.1 hsuenaga "%s: overwrite HMAC: %s->%s.\n", 1975 1.1 hsuenaga __func__, s_ctlalg(mv_s->hmac_alg), 1976 1.1 hsuenaga s_ctlalg(c->cri_alg)); 1977 1.1 hsuenaga } 1978 1.1 hsuenaga mv_s->sflags |= RDY_MAC_KEY; 1979 1.1 hsuenaga mv_s->sflags |= RDY_MAC_IV; 1980 1.1 hsuenaga 1981 1.1 hsuenaga mv_s->mac_klen = c->cri_klen; 1982 1.1 hsuenaga mv_s->hmac_alg = c->cri_alg; 1983 1.1 hsuenaga break; 1984 1.1 hsuenaga default: 1985 1.1 hsuenaga log(LOG_ERR, "%s: Unknown algorithm %d\n", 1986 1.1 hsuenaga __func__, c->cri_alg); 1987 1.1 hsuenaga err = EINVAL; 1988 1.1 hsuenaga goto fail; 1989 1.1 hsuenaga } 1990 1.1 hsuenaga } 1991 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 1992 1.1 hsuenaga "H/W Crypto session (id:%u) added.\n", session); 1993 1.1 hsuenaga 1994 1.1 hsuenaga *sidp = sid; 1995 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, session_new); 1996 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 1997 1.1 hsuenaga 1998 1.1 hsuenaga /* sync session header(it's never touched after here) */ 1999 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, 2000 1.1 hsuenaga mv_s->session_header_map, 2001 1.1 hsuenaga 0, sizeof(mv_s->session_header), 2002 1.1 hsuenaga BUS_DMASYNC_PREWRITE); 2003 1.1 hsuenaga 2004 1.1 hsuenaga return 0; 2005 1.1 hsuenaga 2006 1.1 hsuenaga fail: 2007 1.1 hsuenaga sc->sc_nsessions--; 2008 1.1 hsuenaga sc->sc_sessions[session] = NULL; 2009 1.1 hsuenaga hint = session; 2010 1.1 hsuenaga if (mv_s) 2011 1.1 hsuenaga mvxpsec_session_dealloc(mv_s); 2012 1.1 hsuenaga log(LOG_WARNING, 2013 1.19 andvar "%s: Failed to add H/W crypto session (id:%u): err=%d\n", 2014 1.1 hsuenaga __func__, session, err); 2015 1.1 hsuenaga 2016 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 2017 1.1 hsuenaga return err; 2018 1.1 hsuenaga } 2019 1.1 hsuenaga 2020 1.1 hsuenaga /* 2021 1.1 hsuenaga * remove opencrypto session 2022 1.1 hsuenaga */ 2023 1.16 riastrad void 2024 1.1 hsuenaga mvxpsec_freesession(void *arg, uint64_t tid) 2025 1.1 hsuenaga { 2026 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 2027 1.1 hsuenaga struct mvxpsec_session *mv_s; 2028 1.1 hsuenaga int session; 2029 1.1 hsuenaga uint32_t sid = ((uint32_t)tid) & 0xffffffff; 2030 1.1 hsuenaga 2031 1.1 hsuenaga session = MVXPSEC_SESSION(sid); 2032 1.15 riastrad KASSERTMSG(session >= 0, "session=%d", session); 2033 1.15 riastrad KASSERTMSG(session < MVXPSEC_MAX_SESSIONS, "session=%d max=%d", 2034 1.15 riastrad session, MVXPSEC_MAX_SESSIONS); 2035 1.1 hsuenaga 2036 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx); 2037 1.15 riastrad mv_s = sc->sc_sessions[session]; 2038 1.15 riastrad KASSERT(mv_s != NULL); 2039 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2040 1.1 hsuenaga "%s: inactivate session %d\n", __func__, session); 2041 1.1 hsuenaga 2042 1.1 hsuenaga /* inactivate mvxpsec session */ 2043 1.1 hsuenaga sc->sc_sessions[session] = NULL; 2044 1.1 hsuenaga sc->sc_nsessions--; 2045 1.1 hsuenaga sc->sc_last_session = NULL; 2046 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 2047 1.1 hsuenaga 2048 1.1 hsuenaga KASSERT(sc->sc_nsessions >= 0); 2049 1.1 hsuenaga KASSERT(mv_s->sid == sid); 2050 1.1 hsuenaga 2051 1.1 hsuenaga mvxpsec_session_dealloc(mv_s); 2052 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2053 1.1 hsuenaga "H/W Crypto session (id: %d) deleted.\n", session); 2054 1.1 hsuenaga 2055 1.1 hsuenaga /* force unblock opencrypto */ 2056 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ); 2057 1.1 hsuenaga 2058 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, session_free); 2059 1.1 hsuenaga } 2060 1.1 hsuenaga 2061 1.1 hsuenaga /* 2062 1.1 hsuenaga * process data with existing session 2063 1.1 hsuenaga */ 2064 1.1 hsuenaga int 2065 1.1 hsuenaga mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint) 2066 1.1 hsuenaga { 2067 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 2068 1.1 hsuenaga struct mvxpsec_session *mv_s; 2069 1.1 hsuenaga struct mvxpsec_packet *mv_p; 2070 1.1 hsuenaga int q_full; 2071 1.1 hsuenaga int running; 2072 1.1 hsuenaga int err; 2073 1.1 hsuenaga 2074 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx); 2075 1.1 hsuenaga 2076 1.1 hsuenaga /* 2077 1.1 hsuenaga * lookup session 2078 1.1 hsuenaga */ 2079 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx); 2080 1.1 hsuenaga mv_s = mvxpsec_session_lookup(sc, crp->crp_sid); 2081 1.1 hsuenaga if (__predict_false(mv_s == NULL)) { 2082 1.1 hsuenaga err = EINVAL; 2083 1.1 hsuenaga mv_p = NULL; 2084 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 2085 1.1 hsuenaga goto fail; 2086 1.1 hsuenaga } 2087 1.1 hsuenaga mv_p = mvxpsec_packet_alloc(mv_s); 2088 1.1 hsuenaga if (__predict_false(mv_p == NULL)) { 2089 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 2090 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 2091 1.1 hsuenaga return ERESTART; /* => queued in opencrypto layer */ 2092 1.1 hsuenaga } 2093 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx); 2094 1.1 hsuenaga 2095 1.1 hsuenaga /* 2096 1.1 hsuenaga * check queue status 2097 1.1 hsuenaga */ 2098 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET 2099 1.1 hsuenaga q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0; 2100 1.1 hsuenaga #else 2101 1.1 hsuenaga q_full = (sc->sc_wait_qlen != 0) ? 1 : 0; 2102 1.1 hsuenaga #endif 2103 1.1 hsuenaga running = (sc->sc_flags & HW_RUNNING) ? 1: 0; 2104 1.1 hsuenaga if (q_full) { 2105 1.1 hsuenaga /* input queue is full. */ 2106 1.1 hsuenaga if (!running && sc->sc_wait_qlen > 0) 2107 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 2108 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, queue_full); 2109 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p); 2110 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 2111 1.1 hsuenaga return ERESTART; /* => queued in opencrypto layer */ 2112 1.1 hsuenaga } 2113 1.1 hsuenaga 2114 1.1 hsuenaga /* 2115 1.1 hsuenaga * Load and setup packet data 2116 1.1 hsuenaga */ 2117 1.1 hsuenaga err = mvxpsec_packet_setcrp(mv_p, crp); 2118 1.1 hsuenaga if (__predict_false(err)) 2119 1.1 hsuenaga goto fail; 2120 1.12 riastrad 2121 1.1 hsuenaga /* 2122 1.1 hsuenaga * Setup DMA descriptor chains 2123 1.1 hsuenaga */ 2124 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx); 2125 1.1 hsuenaga err = mvxpsec_dma_copy_packet(sc, mv_p); 2126 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx); 2127 1.1 hsuenaga if (__predict_false(err)) 2128 1.1 hsuenaga goto fail; 2129 1.1 hsuenaga 2130 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 2131 1.1 hsuenaga mvxpsec_dump_packet(__func__, mv_p); 2132 1.1 hsuenaga #endif 2133 1.1 hsuenaga 2134 1.1 hsuenaga /* 2135 1.1 hsuenaga * Sync/inval the data cache 2136 1.1 hsuenaga */ 2137 1.1 hsuenaga err = mvxpsec_dma_sync_packet(sc, mv_p); 2138 1.1 hsuenaga if (__predict_false(err)) 2139 1.1 hsuenaga goto fail; 2140 1.1 hsuenaga 2141 1.1 hsuenaga /* 2142 1.1 hsuenaga * Enqueue the packet 2143 1.1 hsuenaga */ 2144 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, dispatch_packets); 2145 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET 2146 1.1 hsuenaga mvxpsec_packet_enqueue(mv_p); 2147 1.1 hsuenaga if (!running) 2148 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 2149 1.1 hsuenaga #else 2150 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue); 2151 1.1 hsuenaga sc->sc_wait_qlen++; 2152 1.1 hsuenaga mv_p->flags |= SETUP_DONE; 2153 1.1 hsuenaga if (!running) 2154 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 2155 1.1 hsuenaga #endif 2156 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 2157 1.1 hsuenaga return 0; 2158 1.1 hsuenaga 2159 1.1 hsuenaga fail: 2160 1.1 hsuenaga /* Drop the incoming packet */ 2161 1.1 hsuenaga mvxpsec_drop(sc, crp, mv_p, err); 2162 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 2163 1.1 hsuenaga return 0; 2164 1.1 hsuenaga } 2165 1.1 hsuenaga 2166 1.1 hsuenaga /* 2167 1.1 hsuenaga * back the packet to the IP stack 2168 1.1 hsuenaga */ 2169 1.1 hsuenaga void 2170 1.1 hsuenaga mvxpsec_done(void *arg) 2171 1.1 hsuenaga { 2172 1.1 hsuenaga struct mvxpsec_softc *sc = arg; 2173 1.1 hsuenaga struct mvxpsec_packet *mv_p; 2174 1.1 hsuenaga mvxpsec_queue_t ret_queue; 2175 1.1 hsuenaga int ndone; 2176 1.1 hsuenaga 2177 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx); 2178 1.1 hsuenaga 2179 1.1 hsuenaga /* stop wdog timer */ 2180 1.1 hsuenaga callout_stop(&sc->sc_timeout); 2181 1.1 hsuenaga 2182 1.1 hsuenaga /* refill MVXPSEC */ 2183 1.1 hsuenaga ret_queue = sc->sc_run_queue; 2184 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_run_queue); 2185 1.1 hsuenaga sc->sc_flags &= ~HW_RUNNING; 2186 1.1 hsuenaga if (sc->sc_wait_qlen > 0) 2187 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 2188 1.1 hsuenaga 2189 1.1 hsuenaga ndone = 0; 2190 1.1 hsuenaga while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) { 2191 1.1 hsuenaga SIMPLEQ_REMOVE_HEAD(&ret_queue, queue); 2192 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring); 2193 1.1 hsuenaga mvxpsec_done_packet(mv_p); 2194 1.1 hsuenaga ndone++; 2195 1.1 hsuenaga } 2196 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_done, ndone); 2197 1.1 hsuenaga 2198 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx); 2199 1.1 hsuenaga } 2200 1.1 hsuenaga 2201 1.1 hsuenaga /* 2202 1.1 hsuenaga * drop the packet 2203 1.1 hsuenaga */ 2204 1.1 hsuenaga INLINE void 2205 1.1 hsuenaga mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp, 2206 1.1 hsuenaga struct mvxpsec_packet *mv_p, int err) 2207 1.1 hsuenaga { 2208 1.1 hsuenaga /* must called with sc->sc_queue_mtx held */ 2209 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx)); 2210 1.1 hsuenaga 2211 1.1 hsuenaga if (mv_p) 2212 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p); 2213 1.1 hsuenaga if (err < 0) 2214 1.1 hsuenaga err = EINVAL; 2215 1.1 hsuenaga crp->crp_etype = err; 2216 1.1 hsuenaga crypto_done(crp); 2217 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, packet_err); 2218 1.1 hsuenaga 2219 1.1 hsuenaga /* dispatch other packets in queue */ 2220 1.1 hsuenaga if (sc->sc_wait_qlen > 0 && 2221 1.1 hsuenaga !(sc->sc_flags & HW_RUNNING)) 2222 1.1 hsuenaga mvxpsec_dispatch_queue(sc); 2223 1.1 hsuenaga 2224 1.1 hsuenaga /* unblock driver for dropped packet */ 2225 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ); 2226 1.1 hsuenaga } 2227 1.1 hsuenaga 2228 1.1 hsuenaga /* move wait queue entry to run queue */ 2229 1.1 hsuenaga STATIC int 2230 1.1 hsuenaga mvxpsec_dispatch_queue(struct mvxpsec_softc *sc) 2231 1.1 hsuenaga { 2232 1.1 hsuenaga struct mvxpsec_packet *mv_p; 2233 1.1 hsuenaga paddr_t head; 2234 1.1 hsuenaga int ndispatch = 0; 2235 1.1 hsuenaga 2236 1.1 hsuenaga /* must called with sc->sc_queue_mtx held */ 2237 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx)); 2238 1.1 hsuenaga 2239 1.1 hsuenaga /* check there is any task */ 2240 1.1 hsuenaga if (__predict_false(sc->sc_flags & HW_RUNNING)) { 2241 1.1 hsuenaga log(LOG_WARNING, 2242 1.1 hsuenaga "%s: another packet already exist.\n", __func__); 2243 1.1 hsuenaga return 0; 2244 1.1 hsuenaga } 2245 1.1 hsuenaga if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) { 2246 1.1 hsuenaga log(LOG_WARNING, 2247 1.1 hsuenaga "%s: no waiting packet yet(qlen=%d).\n", 2248 1.1 hsuenaga __func__, sc->sc_wait_qlen); 2249 1.1 hsuenaga return 0; 2250 1.1 hsuenaga } 2251 1.1 hsuenaga 2252 1.1 hsuenaga /* move queue */ 2253 1.1 hsuenaga sc->sc_run_queue = sc->sc_wait_queue; 2254 1.1 hsuenaga sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */ 2255 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_wait_queue); 2256 1.1 hsuenaga ndispatch = sc->sc_wait_qlen; 2257 1.1 hsuenaga sc->sc_wait_qlen = 0; 2258 1.1 hsuenaga 2259 1.1 hsuenaga /* get 1st DMA descriptor */ 2260 1.1 hsuenaga mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue); 2261 1.1 hsuenaga head = mv_p->dma_ring.dma_head->phys_addr; 2262 1.1 hsuenaga 2263 1.1 hsuenaga /* terminate last DMA descriptor */ 2264 1.1 hsuenaga mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue); 2265 1.1 hsuenaga mvxpsec_dma_finalize(sc, &mv_p->dma_ring); 2266 1.1 hsuenaga 2267 1.1 hsuenaga /* configure TDMA */ 2268 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0) { 2269 1.1 hsuenaga log(LOG_ERR, "%s: DMA DEVICE not responding", __func__); 2270 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz); 2271 1.1 hsuenaga return 0; 2272 1.1 hsuenaga } 2273 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, head); 2274 1.1 hsuenaga 2275 1.1 hsuenaga /* trigger ACC */ 2276 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0) { 2277 1.1 hsuenaga log(LOG_ERR, "%s: MVXPSEC not responding", __func__); 2278 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz); 2279 1.1 hsuenaga return 0; 2280 1.1 hsuenaga } 2281 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT); 2282 1.1 hsuenaga 2283 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch); 2284 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, dispatch_queue); 2285 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz); 2286 1.1 hsuenaga return 0; 2287 1.1 hsuenaga } 2288 1.1 hsuenaga 2289 1.1 hsuenaga /* 2290 1.1 hsuenaga * process opencrypto operations(cryptop) for packets. 2291 1.1 hsuenaga */ 2292 1.1 hsuenaga INLINE int 2293 1.1 hsuenaga mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd) 2294 1.1 hsuenaga { 2295 1.1 hsuenaga int ivlen; 2296 1.1 hsuenaga 2297 1.1 hsuenaga KASSERT(mv_p->flags & RDY_DATA); 2298 1.1 hsuenaga 2299 1.1 hsuenaga /* MAC & Ciphers: set data location and operation */ 2300 1.1 hsuenaga switch (crd->crd_alg) { 2301 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 2302 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96; 2303 1.1 hsuenaga /* fall through */ 2304 1.1 hsuenaga case CRYPTO_SHA1_HMAC: 2305 1.1 hsuenaga mv_p->mac_dst = crd->crd_inject; 2306 1.1 hsuenaga mv_p->mac_off = crd->crd_skip; 2307 1.1 hsuenaga mv_p->mac_len = crd->crd_len; 2308 1.1 hsuenaga MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config, 2309 1.1 hsuenaga MV_ACC_CRYPTO_MAC_HMAC_SHA1); 2310 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC); 2311 1.1 hsuenaga /* No more setup for MAC */ 2312 1.1 hsuenaga return 0; 2313 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 2314 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96; 2315 1.1 hsuenaga /* fall through */ 2316 1.1 hsuenaga case CRYPTO_MD5_HMAC: 2317 1.1 hsuenaga mv_p->mac_dst = crd->crd_inject; 2318 1.1 hsuenaga mv_p->mac_off = crd->crd_skip; 2319 1.1 hsuenaga mv_p->mac_len = crd->crd_len; 2320 1.1 hsuenaga MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config, 2321 1.1 hsuenaga MV_ACC_CRYPTO_MAC_HMAC_MD5); 2322 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC); 2323 1.1 hsuenaga /* No more setup for MAC */ 2324 1.1 hsuenaga return 0; 2325 1.1 hsuenaga case CRYPTO_DES_CBC: 2326 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject; 2327 1.1 hsuenaga mv_p->enc_off = crd->crd_skip; 2328 1.1 hsuenaga mv_p->enc_len = crd->crd_len; 2329 1.1 hsuenaga ivlen = 8; 2330 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config, 2331 1.1 hsuenaga MV_ACC_CRYPTO_ENC_DES); 2332 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC; 2333 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC); 2334 1.1 hsuenaga break; 2335 1.1 hsuenaga case CRYPTO_3DES_CBC: 2336 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject; 2337 1.1 hsuenaga mv_p->enc_off = crd->crd_skip; 2338 1.1 hsuenaga mv_p->enc_len = crd->crd_len; 2339 1.1 hsuenaga ivlen = 8; 2340 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config, 2341 1.1 hsuenaga MV_ACC_CRYPTO_ENC_3DES); 2342 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC; 2343 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE; 2344 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC); 2345 1.1 hsuenaga break; 2346 1.1 hsuenaga case CRYPTO_AES_CBC: 2347 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject; 2348 1.1 hsuenaga mv_p->enc_off = crd->crd_skip; 2349 1.1 hsuenaga mv_p->enc_len = crd->crd_len; 2350 1.1 hsuenaga ivlen = 16; 2351 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config, 2352 1.1 hsuenaga MV_ACC_CRYPTO_ENC_AES); 2353 1.1 hsuenaga MV_ACC_CRYPTO_AES_KLEN_SET( 2354 1.1 hsuenaga mv_p->pkt_header.desc.acc_config, 2355 1.1 hsuenaga mvxpsec_aesklen(mv_p->mv_s->enc_klen)); 2356 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC; 2357 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC); 2358 1.1 hsuenaga break; 2359 1.1 hsuenaga default: 2360 1.1 hsuenaga log(LOG_ERR, "%s: Unknown algorithm %d\n", 2361 1.1 hsuenaga __func__, crd->crd_alg); 2362 1.1 hsuenaga return EINVAL; 2363 1.1 hsuenaga } 2364 1.1 hsuenaga 2365 1.1 hsuenaga /* Operations only for Cipher, not MAC */ 2366 1.1 hsuenaga if (crd->crd_flags & CRD_F_ENCRYPT) { 2367 1.1 hsuenaga /* Ciphers: Originate IV for Encryption.*/ 2368 1.1 hsuenaga mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT; 2369 1.1 hsuenaga mv_p->flags |= DIR_ENCRYPT; 2370 1.1 hsuenaga 2371 1.1 hsuenaga if (crd->crd_flags & CRD_F_IV_EXPLICIT) { 2372 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n"); 2373 1.1 hsuenaga mv_p->flags |= CRP_EXT_IV; 2374 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen); 2375 1.1 hsuenaga mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF; 2376 1.1 hsuenaga } 2377 1.1 hsuenaga else if (crd->crd_flags & CRD_F_IV_PRESENT) { 2378 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n"); 2379 1.1 hsuenaga mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen); 2380 1.1 hsuenaga } 2381 1.1 hsuenaga else { 2382 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n"); 2383 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, NULL, ivlen); 2384 1.1 hsuenaga } 2385 1.1 hsuenaga } 2386 1.1 hsuenaga else { 2387 1.1 hsuenaga /* Ciphers: IV is loadded from crd_inject when it's present */ 2388 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT; 2389 1.1 hsuenaga mv_p->flags |= DIR_DECRYPT; 2390 1.1 hsuenaga 2391 1.1 hsuenaga if (crd->crd_flags & CRD_F_IV_EXPLICIT) { 2392 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 2393 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) { 2394 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, 2395 1.1 hsuenaga "EXPLICIT IV(Decrypt)\n"); 2396 1.1 hsuenaga mvxpsec_dump_data(__func__, crd->crd_iv, ivlen); 2397 1.1 hsuenaga } 2398 1.1 hsuenaga #endif 2399 1.1 hsuenaga mv_p->flags |= CRP_EXT_IV; 2400 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen); 2401 1.1 hsuenaga mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF; 2402 1.1 hsuenaga } 2403 1.1 hsuenaga } 2404 1.1 hsuenaga 2405 1.1 hsuenaga KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT))); 2406 1.1 hsuenaga 2407 1.1 hsuenaga return 0; 2408 1.1 hsuenaga } 2409 1.1 hsuenaga 2410 1.1 hsuenaga INLINE int 2411 1.1 hsuenaga mvxpsec_parse_crp(struct mvxpsec_packet *mv_p) 2412 1.1 hsuenaga { 2413 1.1 hsuenaga struct cryptop *crp = mv_p->crp; 2414 1.1 hsuenaga struct cryptodesc *crd; 2415 1.1 hsuenaga int err; 2416 1.1 hsuenaga 2417 1.1 hsuenaga KASSERT(crp); 2418 1.1 hsuenaga 2419 1.1 hsuenaga mvxpsec_packet_reset_op(mv_p); 2420 1.1 hsuenaga 2421 1.1 hsuenaga for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2422 1.1 hsuenaga err = mvxpsec_parse_crd(mv_p, crd); 2423 1.1 hsuenaga if (err) 2424 1.1 hsuenaga return err; 2425 1.1 hsuenaga } 2426 1.1 hsuenaga 2427 1.1 hsuenaga return 0; 2428 1.1 hsuenaga } 2429 1.1 hsuenaga 2430 1.1 hsuenaga INLINE int 2431 1.1 hsuenaga mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp) 2432 1.1 hsuenaga { 2433 1.1 hsuenaga int err = EINVAL; 2434 1.1 hsuenaga 2435 1.20 andvar /* register crp to the MVXPSEC packet */ 2436 1.1 hsuenaga if (crp->crp_flags & CRYPTO_F_IMBUF) { 2437 1.1 hsuenaga err = mvxpsec_packet_setmbuf(mv_p, 2438 1.1 hsuenaga (struct mbuf *)crp->crp_buf); 2439 1.1 hsuenaga mv_p->crp = crp; 2440 1.1 hsuenaga } 2441 1.1 hsuenaga else if (crp->crp_flags & CRYPTO_F_IOV) { 2442 1.1 hsuenaga err = mvxpsec_packet_setuio(mv_p, 2443 1.1 hsuenaga (struct uio *)crp->crp_buf); 2444 1.1 hsuenaga mv_p->crp = crp; 2445 1.1 hsuenaga } 2446 1.1 hsuenaga else { 2447 1.1 hsuenaga err = mvxpsec_packet_setdata(mv_p, 2448 1.1 hsuenaga (struct mbuf *)crp->crp_buf, crp->crp_ilen); 2449 1.1 hsuenaga mv_p->crp = crp; 2450 1.1 hsuenaga } 2451 1.1 hsuenaga if (__predict_false(err)) 2452 1.1 hsuenaga return err; 2453 1.1 hsuenaga 2454 1.1 hsuenaga /* parse crp and setup MVXPSEC registers/descriptors */ 2455 1.1 hsuenaga err = mvxpsec_parse_crp(mv_p); 2456 1.1 hsuenaga if (__predict_false(err)) 2457 1.1 hsuenaga return err; 2458 1.1 hsuenaga 2459 1.1 hsuenaga /* fixup data offset to fit MVXPSEC internal SRAM */ 2460 1.1 hsuenaga err = mvxpsec_header_finalize(mv_p); 2461 1.1 hsuenaga if (__predict_false(err)) 2462 1.1 hsuenaga return err; 2463 1.1 hsuenaga 2464 1.1 hsuenaga return 0; 2465 1.1 hsuenaga } 2466 1.1 hsuenaga 2467 1.1 hsuenaga /* 2468 1.1 hsuenaga * load data for encrypt/decrypt/authentication 2469 1.1 hsuenaga * 2470 1.1 hsuenaga * data is raw kernel memory area. 2471 1.1 hsuenaga */ 2472 1.1 hsuenaga STATIC int 2473 1.1 hsuenaga mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p, 2474 1.1 hsuenaga void *data, uint32_t data_len) 2475 1.1 hsuenaga { 2476 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 2477 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 2478 1.1 hsuenaga 2479 1.1 hsuenaga if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len, 2480 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) { 2481 1.1 hsuenaga log(LOG_ERR, "%s: cannot load data\n", __func__); 2482 1.1 hsuenaga return -1; 2483 1.1 hsuenaga } 2484 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_RAW; 2485 1.1 hsuenaga mv_p->data_raw = data; 2486 1.1 hsuenaga mv_p->data_len = data_len; 2487 1.1 hsuenaga mv_p->flags |= RDY_DATA; 2488 1.1 hsuenaga 2489 1.1 hsuenaga return 0; 2490 1.1 hsuenaga } 2491 1.1 hsuenaga 2492 1.1 hsuenaga /* 2493 1.1 hsuenaga * load data for encrypt/decrypt/authentication 2494 1.1 hsuenaga * 2495 1.1 hsuenaga * data is mbuf based network data. 2496 1.1 hsuenaga */ 2497 1.1 hsuenaga STATIC int 2498 1.1 hsuenaga mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m) 2499 1.1 hsuenaga { 2500 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 2501 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 2502 1.1 hsuenaga size_t pktlen = 0; 2503 1.1 hsuenaga 2504 1.1 hsuenaga if (__predict_true(m->m_flags & M_PKTHDR)) 2505 1.1 hsuenaga pktlen = m->m_pkthdr.len; 2506 1.1 hsuenaga else { 2507 1.1 hsuenaga struct mbuf *mp = m; 2508 1.1 hsuenaga 2509 1.1 hsuenaga while (mp != NULL) { 2510 1.1 hsuenaga pktlen += m->m_len; 2511 1.1 hsuenaga mp = mp->m_next; 2512 1.1 hsuenaga } 2513 1.1 hsuenaga } 2514 1.1 hsuenaga if (pktlen > SRAM_PAYLOAD_SIZE) { 2515 1.7 riastrad #if NIPSEC > 0 2516 1.1 hsuenaga extern percpu_t *espstat_percpu; 2517 1.1 hsuenaga /* XXX: 2518 1.1 hsuenaga * layer violation. opencrypto knows our max packet size 2519 1.1 hsuenaga * from crypto_register(9) API. 2520 1.1 hsuenaga */ 2521 1.1 hsuenaga 2522 1.1 hsuenaga _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG); 2523 1.7 riastrad #endif 2524 1.1 hsuenaga log(LOG_ERR, 2525 1.1 hsuenaga "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n", 2526 1.1 hsuenaga device_xname(sc->sc_dev), 2527 1.1 hsuenaga (size_t)pktlen, SRAM_PAYLOAD_SIZE); 2528 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE; 2529 1.1 hsuenaga mv_p->data_mbuf = NULL; 2530 1.1 hsuenaga return -1; 2531 1.1 hsuenaga } 2532 1.1 hsuenaga 2533 1.1 hsuenaga if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m, 2534 1.1 hsuenaga BUS_DMA_NOWAIT)) { 2535 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE; 2536 1.1 hsuenaga mv_p->data_mbuf = NULL; 2537 1.1 hsuenaga log(LOG_ERR, "%s: cannot load mbuf\n", __func__); 2538 1.1 hsuenaga return -1; 2539 1.1 hsuenaga } 2540 1.1 hsuenaga 2541 1.1 hsuenaga /* set payload buffer */ 2542 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_MBUF; 2543 1.1 hsuenaga mv_p->data_mbuf = m; 2544 1.1 hsuenaga if (m->m_flags & M_PKTHDR) { 2545 1.1 hsuenaga mv_p->data_len = m->m_pkthdr.len; 2546 1.1 hsuenaga } 2547 1.1 hsuenaga else { 2548 1.1 hsuenaga mv_p->data_len = 0; 2549 1.1 hsuenaga while (m) { 2550 1.1 hsuenaga mv_p->data_len += m->m_len; 2551 1.1 hsuenaga m = m->m_next; 2552 1.1 hsuenaga } 2553 1.1 hsuenaga } 2554 1.1 hsuenaga mv_p->flags |= RDY_DATA; 2555 1.1 hsuenaga 2556 1.1 hsuenaga return 0; 2557 1.1 hsuenaga } 2558 1.1 hsuenaga 2559 1.1 hsuenaga STATIC int 2560 1.1 hsuenaga mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio) 2561 1.1 hsuenaga { 2562 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s; 2563 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc; 2564 1.1 hsuenaga 2565 1.1 hsuenaga if (uio->uio_resid > SRAM_PAYLOAD_SIZE) { 2566 1.7 riastrad #if NIPSEC > 0 2567 1.1 hsuenaga extern percpu_t *espstat_percpu; 2568 1.1 hsuenaga /* XXX: 2569 1.1 hsuenaga * layer violation. opencrypto knows our max packet size 2570 1.1 hsuenaga * from crypto_register(9) API. 2571 1.1 hsuenaga */ 2572 1.1 hsuenaga 2573 1.1 hsuenaga _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG); 2574 1.7 riastrad #endif 2575 1.1 hsuenaga log(LOG_ERR, 2576 1.1 hsuenaga "%s: uio request too large: %zu [oct.] > %zu [oct.]\n", 2577 1.1 hsuenaga device_xname(sc->sc_dev), 2578 1.1 hsuenaga uio->uio_resid, SRAM_PAYLOAD_SIZE); 2579 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE; 2580 1.1 hsuenaga mv_p->data_mbuf = NULL; 2581 1.1 hsuenaga return -1; 2582 1.1 hsuenaga } 2583 1.1 hsuenaga 2584 1.1 hsuenaga if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio, 2585 1.1 hsuenaga BUS_DMA_NOWAIT)) { 2586 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE; 2587 1.1 hsuenaga mv_p->data_mbuf = NULL; 2588 1.1 hsuenaga log(LOG_ERR, "%s: cannot load uio buf\n", __func__); 2589 1.1 hsuenaga return -1; 2590 1.1 hsuenaga } 2591 1.1 hsuenaga 2592 1.1 hsuenaga /* set payload buffer */ 2593 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_UIO; 2594 1.1 hsuenaga mv_p->data_uio = uio; 2595 1.1 hsuenaga mv_p->data_len = uio->uio_resid; 2596 1.1 hsuenaga mv_p->flags |= RDY_DATA; 2597 1.1 hsuenaga 2598 1.1 hsuenaga return 0; 2599 1.1 hsuenaga } 2600 1.1 hsuenaga 2601 1.1 hsuenaga STATIC int 2602 1.1 hsuenaga mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p, 2603 1.1 hsuenaga int off, int len, void *cp) 2604 1.1 hsuenaga { 2605 1.1 hsuenaga uint8_t *p; 2606 1.1 hsuenaga 2607 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_RAW) { 2608 1.1 hsuenaga p = (uint8_t *)mv_p->data_raw + off; 2609 1.1 hsuenaga memcpy(cp, p, len); 2610 1.1 hsuenaga } 2611 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_MBUF) { 2612 1.1 hsuenaga m_copydata(mv_p->data_mbuf, off, len, cp); 2613 1.1 hsuenaga } 2614 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) { 2615 1.1 hsuenaga cuio_copydata(mv_p->data_uio, off, len, cp); 2616 1.1 hsuenaga } 2617 1.1 hsuenaga else 2618 1.1 hsuenaga return -1; 2619 1.1 hsuenaga 2620 1.1 hsuenaga return 0; 2621 1.1 hsuenaga } 2622 1.1 hsuenaga 2623 1.1 hsuenaga STATIC int 2624 1.1 hsuenaga mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p, 2625 1.1 hsuenaga int off, int len, void *cp) 2626 1.1 hsuenaga { 2627 1.1 hsuenaga uint8_t *p; 2628 1.1 hsuenaga 2629 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_RAW) { 2630 1.1 hsuenaga p = (uint8_t *)mv_p->data_raw + off; 2631 1.1 hsuenaga memcpy(p, cp, len); 2632 1.1 hsuenaga } 2633 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_MBUF) { 2634 1.1 hsuenaga m_copyback(mv_p->data_mbuf, off, len, cp); 2635 1.1 hsuenaga } 2636 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) { 2637 1.1 hsuenaga cuio_copyback(mv_p->data_uio, off, len, cp); 2638 1.1 hsuenaga } 2639 1.1 hsuenaga else 2640 1.1 hsuenaga return -1; 2641 1.1 hsuenaga 2642 1.1 hsuenaga return 0; 2643 1.1 hsuenaga } 2644 1.1 hsuenaga 2645 1.1 hsuenaga /* 2646 1.1 hsuenaga * Set initial vector of cipher to the session. 2647 1.1 hsuenaga */ 2648 1.1 hsuenaga STATIC int 2649 1.1 hsuenaga mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen) 2650 1.1 hsuenaga { 2651 1.1 hsuenaga uint8_t ivbuf[16]; 2652 1.12 riastrad 2653 1.1 hsuenaga KASSERT(ivlen == 8 || ivlen == 16); 2654 1.1 hsuenaga 2655 1.1 hsuenaga if (iv == NULL) { 2656 1.1 hsuenaga if (mv_p->mv_s->sflags & RDY_CRP_IV) { 2657 1.1 hsuenaga /* use per session IV (compatible with KAME IPsec) */ 2658 1.1 hsuenaga mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv; 2659 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV; 2660 1.1 hsuenaga return 0; 2661 1.1 hsuenaga } 2662 1.1 hsuenaga cprng_fast(ivbuf, ivlen); 2663 1.1 hsuenaga iv = ivbuf; 2664 1.1 hsuenaga } 2665 1.1 hsuenaga memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen); 2666 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) { 2667 1.1 hsuenaga memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen); 2668 1.1 hsuenaga mv_p->ext_iv = iv; 2669 1.1 hsuenaga mv_p->ext_ivlen = ivlen; 2670 1.1 hsuenaga } 2671 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV; 2672 1.1 hsuenaga 2673 1.1 hsuenaga return 0; 2674 1.1 hsuenaga } 2675 1.1 hsuenaga 2676 1.1 hsuenaga STATIC int 2677 1.1 hsuenaga mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen) 2678 1.1 hsuenaga { 2679 1.1 hsuenaga mvxpsec_packet_rdata(mv_p, off, ivlen, 2680 1.1 hsuenaga &mv_p->pkt_header.crp_iv_work); 2681 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV; 2682 1.1 hsuenaga 2683 1.1 hsuenaga return 0; 2684 1.1 hsuenaga } 2685 1.1 hsuenaga 2686 1.1 hsuenaga /* 2687 1.1 hsuenaga * set a encryption or decryption key to the session 2688 1.1 hsuenaga * 2689 1.1 hsuenaga * Input key material is big endian. 2690 1.1 hsuenaga */ 2691 1.1 hsuenaga STATIC int 2692 1.1 hsuenaga mvxpsec_key_precomp(int alg, void *keymat, int kbitlen, 2693 1.1 hsuenaga void *key_encrypt, void *key_decrypt) 2694 1.1 hsuenaga { 2695 1.1 hsuenaga uint32_t *kp = keymat; 2696 1.1 hsuenaga uint32_t *ekp = key_encrypt; 2697 1.1 hsuenaga uint32_t *dkp = key_decrypt; 2698 1.1 hsuenaga int i; 2699 1.1 hsuenaga 2700 1.1 hsuenaga switch (alg) { 2701 1.1 hsuenaga case CRYPTO_DES_CBC: 2702 1.1 hsuenaga if (kbitlen < 64 || (kbitlen % 8) != 0) { 2703 1.1 hsuenaga log(LOG_WARNING, 2704 1.1 hsuenaga "mvxpsec: invalid DES keylen %d\n", kbitlen); 2705 1.1 hsuenaga return EINVAL; 2706 1.1 hsuenaga } 2707 1.1 hsuenaga for (i = 0; i < 2; i++) 2708 1.1 hsuenaga dkp[i] = ekp[i] = kp[i]; 2709 1.1 hsuenaga for (; i < 8; i++) 2710 1.1 hsuenaga dkp[i] = ekp[i] = 0; 2711 1.1 hsuenaga break; 2712 1.1 hsuenaga case CRYPTO_3DES_CBC: 2713 1.1 hsuenaga if (kbitlen < 192 || (kbitlen % 8) != 0) { 2714 1.1 hsuenaga log(LOG_WARNING, 2715 1.1 hsuenaga "mvxpsec: invalid 3DES keylen %d\n", kbitlen); 2716 1.1 hsuenaga return EINVAL; 2717 1.1 hsuenaga } 2718 1.1 hsuenaga for (i = 0; i < 8; i++) 2719 1.1 hsuenaga dkp[i] = ekp[i] = kp[i]; 2720 1.1 hsuenaga break; 2721 1.1 hsuenaga case CRYPTO_AES_CBC: 2722 1.1 hsuenaga if (kbitlen < 128) { 2723 1.1 hsuenaga log(LOG_WARNING, 2724 1.1 hsuenaga "mvxpsec: invalid AES keylen %d\n", kbitlen); 2725 1.1 hsuenaga return EINVAL; 2726 1.1 hsuenaga } 2727 1.1 hsuenaga else if (kbitlen < 192) { 2728 1.1 hsuenaga /* AES-128 */ 2729 1.1 hsuenaga for (i = 0; i < 4; i++) 2730 1.1 hsuenaga ekp[i] = kp[i]; 2731 1.1 hsuenaga for (; i < 8; i++) 2732 1.1 hsuenaga ekp[i] = 0; 2733 1.1 hsuenaga } 2734 1.1 hsuenaga else if (kbitlen < 256) { 2735 1.1 hsuenaga /* AES-192 */ 2736 1.1 hsuenaga for (i = 0; i < 6; i++) 2737 1.1 hsuenaga ekp[i] = kp[i]; 2738 1.1 hsuenaga for (; i < 8; i++) 2739 1.1 hsuenaga ekp[i] = 0; 2740 1.1 hsuenaga } 2741 1.1 hsuenaga else { 2742 1.1 hsuenaga /* AES-256 */ 2743 1.1 hsuenaga for (i = 0; i < 8; i++) 2744 1.1 hsuenaga ekp[i] = kp[i]; 2745 1.1 hsuenaga } 2746 1.1 hsuenaga /* make decryption key */ 2747 1.1 hsuenaga mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen); 2748 1.1 hsuenaga break; 2749 1.1 hsuenaga default: 2750 1.1 hsuenaga for (i = 0; i < 8; i++) 2751 1.1 hsuenaga ekp[0] = dkp[0] = 0; 2752 1.1 hsuenaga break; 2753 1.1 hsuenaga } 2754 1.1 hsuenaga 2755 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 2756 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) { 2757 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2758 1.1 hsuenaga "%s: keyregistered\n", __func__); 2759 1.1 hsuenaga mvxpsec_dump_data(__func__, ekp, 32); 2760 1.1 hsuenaga } 2761 1.1 hsuenaga #endif 2762 1.1 hsuenaga 2763 1.1 hsuenaga return 0; 2764 1.1 hsuenaga } 2765 1.1 hsuenaga 2766 1.1 hsuenaga /* 2767 1.1 hsuenaga * set MAC key to the session 2768 1.1 hsuenaga * 2769 1.1 hsuenaga * MAC engine has no register for key itself, but the engine has 2770 1.1 hsuenaga * inner and outer IV register. software must compute IV before 2771 1.1 hsuenaga * enable the engine. 2772 1.1 hsuenaga * 2773 1.1 hsuenaga * IV is a hash of ipad/opad. these are defined by FIPS-198a 2774 1.1 hsuenaga * standard. 2775 1.1 hsuenaga */ 2776 1.1 hsuenaga STATIC int 2777 1.1 hsuenaga mvxpsec_hmac_precomp(int alg, void *key, int kbitlen, 2778 1.1 hsuenaga void *iv_inner, void *iv_outer) 2779 1.1 hsuenaga { 2780 1.1 hsuenaga SHA1_CTX sha1; 2781 1.1 hsuenaga MD5_CTX md5; 2782 1.1 hsuenaga uint8_t *key8 = key; 2783 1.1 hsuenaga uint8_t kbuf[64]; 2784 1.1 hsuenaga uint8_t ipad[64]; 2785 1.1 hsuenaga uint8_t opad[64]; 2786 1.1 hsuenaga uint32_t *iv_in = iv_inner; 2787 1.1 hsuenaga uint32_t *iv_out = iv_outer; 2788 1.1 hsuenaga int kbytelen; 2789 1.1 hsuenaga int i; 2790 1.1 hsuenaga #define HMAC_IPAD 0x36 2791 1.1 hsuenaga #define HMAC_OPAD 0x5c 2792 1.1 hsuenaga 2793 1.1 hsuenaga kbytelen = kbitlen / 8; 2794 1.1 hsuenaga KASSERT(kbitlen == kbytelen * 8); 2795 1.1 hsuenaga if (kbytelen > 64) { 2796 1.1 hsuenaga SHA1Init(&sha1); 2797 1.1 hsuenaga SHA1Update(&sha1, key, kbytelen); 2798 1.1 hsuenaga SHA1Final(kbuf, &sha1); 2799 1.1 hsuenaga key8 = kbuf; 2800 1.1 hsuenaga kbytelen = 64; 2801 1.1 hsuenaga } 2802 1.1 hsuenaga 2803 1.1 hsuenaga /* make initial 64 oct. string */ 2804 1.1 hsuenaga switch (alg) { 2805 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 2806 1.1 hsuenaga case CRYPTO_SHA1_HMAC: 2807 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 2808 1.1 hsuenaga case CRYPTO_MD5_HMAC: 2809 1.1 hsuenaga for (i = 0; i < kbytelen; i++) { 2810 1.1 hsuenaga ipad[i] = (key8[i] ^ HMAC_IPAD); 2811 1.1 hsuenaga opad[i] = (key8[i] ^ HMAC_OPAD); 2812 1.1 hsuenaga } 2813 1.1 hsuenaga for (; i < 64; i++) { 2814 1.1 hsuenaga ipad[i] = HMAC_IPAD; 2815 1.1 hsuenaga opad[i] = HMAC_OPAD; 2816 1.1 hsuenaga } 2817 1.1 hsuenaga break; 2818 1.1 hsuenaga default: 2819 1.1 hsuenaga break; 2820 1.1 hsuenaga } 2821 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 2822 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) { 2823 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2824 1.1 hsuenaga "%s: HMAC-KEY Pre-comp:\n", __func__); 2825 1.1 hsuenaga mvxpsec_dump_data(__func__, key, 64); 2826 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2827 1.1 hsuenaga "%s: ipad:\n", __func__); 2828 1.1 hsuenaga mvxpsec_dump_data(__func__, ipad, sizeof(ipad)); 2829 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2830 1.1 hsuenaga "%s: opad:\n", __func__); 2831 1.1 hsuenaga mvxpsec_dump_data(__func__, opad, sizeof(opad)); 2832 1.1 hsuenaga } 2833 1.1 hsuenaga #endif 2834 1.1 hsuenaga 2835 1.1 hsuenaga /* make iv from string */ 2836 1.1 hsuenaga switch (alg) { 2837 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 2838 1.1 hsuenaga case CRYPTO_SHA1_HMAC: 2839 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2840 1.1 hsuenaga "%s: Generate iv_in(SHA1)\n", __func__); 2841 1.1 hsuenaga SHA1Init(&sha1); 2842 1.1 hsuenaga SHA1Update(&sha1, ipad, 64); 2843 1.1 hsuenaga /* XXX: private state... (LE) */ 2844 1.1 hsuenaga iv_in[0] = htobe32(sha1.state[0]); 2845 1.1 hsuenaga iv_in[1] = htobe32(sha1.state[1]); 2846 1.1 hsuenaga iv_in[2] = htobe32(sha1.state[2]); 2847 1.1 hsuenaga iv_in[3] = htobe32(sha1.state[3]); 2848 1.1 hsuenaga iv_in[4] = htobe32(sha1.state[4]); 2849 1.1 hsuenaga 2850 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2851 1.1 hsuenaga "%s: Generate iv_out(SHA1)\n", __func__); 2852 1.1 hsuenaga SHA1Init(&sha1); 2853 1.1 hsuenaga SHA1Update(&sha1, opad, 64); 2854 1.1 hsuenaga /* XXX: private state... (LE) */ 2855 1.1 hsuenaga iv_out[0] = htobe32(sha1.state[0]); 2856 1.1 hsuenaga iv_out[1] = htobe32(sha1.state[1]); 2857 1.1 hsuenaga iv_out[2] = htobe32(sha1.state[2]); 2858 1.1 hsuenaga iv_out[3] = htobe32(sha1.state[3]); 2859 1.1 hsuenaga iv_out[4] = htobe32(sha1.state[4]); 2860 1.1 hsuenaga break; 2861 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 2862 1.1 hsuenaga case CRYPTO_MD5_HMAC: 2863 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2864 1.1 hsuenaga "%s: Generate iv_in(MD5)\n", __func__); 2865 1.1 hsuenaga MD5Init(&md5); 2866 1.1 hsuenaga MD5Update(&md5, ipad, sizeof(ipad)); 2867 1.1 hsuenaga /* XXX: private state... (LE) */ 2868 1.1 hsuenaga iv_in[0] = htobe32(md5.state[0]); 2869 1.1 hsuenaga iv_in[1] = htobe32(md5.state[1]); 2870 1.1 hsuenaga iv_in[2] = htobe32(md5.state[2]); 2871 1.1 hsuenaga iv_in[3] = htobe32(md5.state[3]); 2872 1.1 hsuenaga iv_in[4] = 0; 2873 1.1 hsuenaga 2874 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO, 2875 1.1 hsuenaga "%s: Generate iv_out(MD5)\n", __func__); 2876 1.1 hsuenaga MD5Init(&md5); 2877 1.1 hsuenaga MD5Update(&md5, opad, sizeof(opad)); 2878 1.1 hsuenaga /* XXX: private state... (LE) */ 2879 1.1 hsuenaga iv_out[0] = htobe32(md5.state[0]); 2880 1.1 hsuenaga iv_out[1] = htobe32(md5.state[1]); 2881 1.1 hsuenaga iv_out[2] = htobe32(md5.state[2]); 2882 1.1 hsuenaga iv_out[3] = htobe32(md5.state[3]); 2883 1.1 hsuenaga iv_out[4] = 0; 2884 1.1 hsuenaga break; 2885 1.1 hsuenaga default: 2886 1.1 hsuenaga break; 2887 1.1 hsuenaga } 2888 1.1 hsuenaga 2889 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 2890 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) { 2891 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV, 2892 1.1 hsuenaga "%s: HMAC IV-IN\n", __func__); 2893 1.1 hsuenaga mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20); 2894 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV, 2895 1.1 hsuenaga "%s: HMAC IV-OUT\n", __func__); 2896 1.1 hsuenaga mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20); 2897 1.1 hsuenaga } 2898 1.1 hsuenaga #endif 2899 1.1 hsuenaga 2900 1.1 hsuenaga return 0; 2901 1.1 hsuenaga #undef HMAC_IPAD 2902 1.1 hsuenaga #undef HMAC_OPAD 2903 1.1 hsuenaga } 2904 1.1 hsuenaga 2905 1.1 hsuenaga /* 2906 1.1 hsuenaga * AES Support routine 2907 1.1 hsuenaga */ 2908 1.1 hsuenaga static uint8_t AES_SBOX[256] = { 2909 1.1 hsuenaga 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 2910 1.1 hsuenaga 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 2911 1.1 hsuenaga 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 2912 1.1 hsuenaga 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 2913 1.1 hsuenaga 7, 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 2914 1.1 hsuenaga 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 2915 1.1 hsuenaga 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 2916 1.12 riastrad 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 2917 1.1 hsuenaga 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 2918 1.1 hsuenaga 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 2919 1.1 hsuenaga 100, 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 2920 1.1 hsuenaga 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 2921 1.1 hsuenaga 194, 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 2922 1.1 hsuenaga 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 2923 1.1 hsuenaga 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 2924 1.1 hsuenaga 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 2925 1.1 hsuenaga 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 2926 1.1 hsuenaga 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 2927 1.1 hsuenaga 176, 84, 187, 22 2928 1.1 hsuenaga }; 2929 1.1 hsuenaga 2930 1.12 riastrad static uint32_t AES_RCON[30] = { 2931 1.1 hsuenaga 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 2932 1.1 hsuenaga 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 2933 1.1 hsuenaga 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91 2934 1.1 hsuenaga }; 2935 1.1 hsuenaga 2936 1.1 hsuenaga STATIC int 2937 1.1 hsuenaga mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits, 2938 1.12 riastrad uint8_t W[MAXROUNDS+1][4][MAXBC]) 2939 1.1 hsuenaga { 2940 1.1 hsuenaga int KC, BC, ROUNDS; 2941 1.1 hsuenaga int i, j, t, rconpointer = 0; 2942 1.12 riastrad uint8_t tk[4][MAXKC]; 2943 1.1 hsuenaga 2944 1.1 hsuenaga switch (keyBits) { 2945 1.1 hsuenaga case 128: 2946 1.1 hsuenaga ROUNDS = 10; 2947 1.1 hsuenaga KC = 4; 2948 1.1 hsuenaga break; 2949 1.1 hsuenaga case 192: 2950 1.1 hsuenaga ROUNDS = 12; 2951 1.1 hsuenaga KC = 6; 2952 1.1 hsuenaga break; 2953 1.1 hsuenaga case 256: 2954 1.1 hsuenaga ROUNDS = 14; 2955 1.1 hsuenaga KC = 8; 2956 1.1 hsuenaga break; 2957 1.1 hsuenaga default: 2958 1.1 hsuenaga return (-1); 2959 1.1 hsuenaga } 2960 1.1 hsuenaga BC = 4; /* 128 bits */ 2961 1.1 hsuenaga 2962 1.1 hsuenaga for(j = 0; j < KC; j++) 2963 1.1 hsuenaga for(i = 0; i < 4; i++) 2964 1.1 hsuenaga tk[i][j] = k[i][j]; 2965 1.1 hsuenaga t = 0; 2966 1.1 hsuenaga 2967 1.1 hsuenaga /* copy values into round key array */ 2968 1.1 hsuenaga for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++) 2969 1.1 hsuenaga for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j]; 2970 1.12 riastrad 2971 1.1 hsuenaga while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */ 2972 1.1 hsuenaga /* calculate new values */ 2973 1.1 hsuenaga for(i = 0; i < 4; i++) 2974 1.1 hsuenaga tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]]; 2975 1.1 hsuenaga tk[0][0] ^= AES_RCON[rconpointer++]; 2976 1.1 hsuenaga 2977 1.1 hsuenaga if (KC != 8) 2978 1.1 hsuenaga for(j = 1; j < KC; j++) 2979 1.1 hsuenaga for(i = 0; i < 4; i++) 2980 1.1 hsuenaga tk[i][j] ^= tk[i][j-1]; 2981 1.1 hsuenaga else { 2982 1.1 hsuenaga for(j = 1; j < KC/2; j++) 2983 1.1 hsuenaga for(i = 0; i < 4; i++) 2984 1.1 hsuenaga tk[i][j] ^= tk[i][j-1]; 2985 1.1 hsuenaga for(i = 0; i < 4; i++) 2986 1.1 hsuenaga tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]]; 2987 1.1 hsuenaga for(j = KC/2 + 1; j < KC; j++) 2988 1.1 hsuenaga for(i = 0; i < 4; i++) 2989 1.1 hsuenaga tk[i][j] ^= tk[i][j-1]; 2990 1.1 hsuenaga } 2991 1.1 hsuenaga /* copy values into round key array */ 2992 1.1 hsuenaga for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++) 2993 1.1 hsuenaga for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j]; 2994 1.12 riastrad } 2995 1.1 hsuenaga 2996 1.1 hsuenaga return 0; 2997 1.1 hsuenaga } 2998 1.12 riastrad 2999 1.1 hsuenaga STATIC int 3000 1.1 hsuenaga mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen) 3001 1.1 hsuenaga { 3002 1.1 hsuenaga uint8_t W[MAXROUNDS+1][4][MAXBC]; 3003 1.1 hsuenaga uint8_t k[4][MAXKC]; 3004 1.1 hsuenaga uint8_t j; 3005 1.1 hsuenaga int i, rounds, KC; 3006 1.1 hsuenaga 3007 1.1 hsuenaga if (expandedKey == NULL) 3008 1.1 hsuenaga return -1; 3009 1.1 hsuenaga 3010 1.12 riastrad if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256))) 3011 1.1 hsuenaga return -1; 3012 1.1 hsuenaga 3013 1.12 riastrad if (keyMaterial == NULL) 3014 1.1 hsuenaga return -1; 3015 1.1 hsuenaga 3016 1.12 riastrad /* initialize key schedule: */ 3017 1.1 hsuenaga for (i=0; i<keyLen/8; i++) { 3018 1.1 hsuenaga j = keyMaterial[i]; 3019 1.12 riastrad k[i % 4][i / 4] = j; 3020 1.1 hsuenaga } 3021 1.1 hsuenaga 3022 1.1 hsuenaga mv_aes_ksched(k, keyLen, W); 3023 1.1 hsuenaga switch (keyLen) { 3024 1.12 riastrad case 128: 3025 1.1 hsuenaga rounds = 10; 3026 1.12 riastrad KC = 4; 3027 1.1 hsuenaga break; 3028 1.12 riastrad case 192: 3029 1.1 hsuenaga rounds = 12; 3030 1.12 riastrad KC = 6; 3031 1.1 hsuenaga break; 3032 1.12 riastrad case 256: 3033 1.1 hsuenaga rounds = 14; 3034 1.12 riastrad KC = 8; 3035 1.1 hsuenaga break; 3036 1.1 hsuenaga default: 3037 1.1 hsuenaga return -1; 3038 1.1 hsuenaga } 3039 1.1 hsuenaga 3040 1.1 hsuenaga for(i=0; i<MAXBC; i++) 3041 1.1 hsuenaga for(j=0; j<4; j++) 3042 1.1 hsuenaga expandedKey[i*4+j] = W[rounds][j][i]; 3043 1.1 hsuenaga for(; i<KC; i++) 3044 1.1 hsuenaga for(j=0; j<4; j++) 3045 1.1 hsuenaga expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC]; 3046 1.1 hsuenaga 3047 1.1 hsuenaga return 0; 3048 1.1 hsuenaga } 3049 1.1 hsuenaga 3050 1.1 hsuenaga /* 3051 1.1 hsuenaga * Clear cipher/mac operation state 3052 1.1 hsuenaga */ 3053 1.1 hsuenaga INLINE void 3054 1.1 hsuenaga mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p) 3055 1.1 hsuenaga { 3056 1.1 hsuenaga mv_p->pkt_header.desc.acc_config = 0; 3057 1.1 hsuenaga mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0; 3058 1.1 hsuenaga mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0; 3059 1.1 hsuenaga } 3060 1.1 hsuenaga 3061 1.1 hsuenaga /* 3062 1.1 hsuenaga * update MVXPSEC operation order 3063 1.1 hsuenaga */ 3064 1.1 hsuenaga INLINE void 3065 1.1 hsuenaga mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op) 3066 1.1 hsuenaga { 3067 1.1 hsuenaga struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc; 3068 1.1 hsuenaga uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK; 3069 1.1 hsuenaga 3070 1.1 hsuenaga KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC); 3071 1.1 hsuenaga KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op); 3072 1.1 hsuenaga 3073 1.1 hsuenaga if (cur_op == 0) 3074 1.1 hsuenaga acc_desc->acc_config |= op; 3075 1.1 hsuenaga else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) { 3076 1.1 hsuenaga acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK; 3077 1.1 hsuenaga acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC; 3078 1.1 hsuenaga /* MAC then ENC (= decryption) */ 3079 1.1 hsuenaga } 3080 1.1 hsuenaga else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) { 3081 1.1 hsuenaga acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK; 3082 1.1 hsuenaga acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC; 3083 1.1 hsuenaga /* ENC then MAC (= encryption) */ 3084 1.1 hsuenaga } 3085 1.1 hsuenaga else { 3086 1.1 hsuenaga log(LOG_ERR, "%s: multiple %s algorithm is not supported.\n", 3087 1.1 hsuenaga __func__, 3088 1.1 hsuenaga (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication"); 3089 1.1 hsuenaga } 3090 1.1 hsuenaga } 3091 1.1 hsuenaga 3092 1.1 hsuenaga /* 3093 1.1 hsuenaga * Parameter Conversions 3094 1.1 hsuenaga */ 3095 1.1 hsuenaga INLINE uint32_t 3096 1.1 hsuenaga mvxpsec_alg2acc(uint32_t alg) 3097 1.1 hsuenaga { 3098 1.1 hsuenaga uint32_t reg; 3099 1.1 hsuenaga 3100 1.1 hsuenaga switch (alg) { 3101 1.1 hsuenaga case CRYPTO_DES_CBC: 3102 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_DES; 3103 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC; 3104 1.1 hsuenaga break; 3105 1.1 hsuenaga case CRYPTO_3DES_CBC: 3106 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_3DES; 3107 1.1 hsuenaga reg |= MV_ACC_CRYPTO_3DES_EDE; 3108 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC; 3109 1.1 hsuenaga break; 3110 1.1 hsuenaga case CRYPTO_AES_CBC: 3111 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_AES; 3112 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC; 3113 1.1 hsuenaga break; 3114 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 3115 1.1 hsuenaga reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1; 3116 1.1 hsuenaga reg |= MV_ACC_CRYPTO_MAC_96; 3117 1.1 hsuenaga break; 3118 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 3119 1.1 hsuenaga reg = MV_ACC_CRYPTO_MAC_HMAC_MD5; 3120 1.1 hsuenaga reg |= MV_ACC_CRYPTO_MAC_96; 3121 1.1 hsuenaga break; 3122 1.1 hsuenaga default: 3123 1.1 hsuenaga reg = 0; 3124 1.1 hsuenaga break; 3125 1.1 hsuenaga } 3126 1.1 hsuenaga 3127 1.1 hsuenaga return reg; 3128 1.1 hsuenaga } 3129 1.1 hsuenaga 3130 1.1 hsuenaga INLINE uint32_t 3131 1.1 hsuenaga mvxpsec_aesklen(int klen) 3132 1.1 hsuenaga { 3133 1.1 hsuenaga if (klen < 128) 3134 1.1 hsuenaga return 0; 3135 1.1 hsuenaga else if (klen < 192) 3136 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_128; 3137 1.1 hsuenaga else if (klen < 256) 3138 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_192; 3139 1.1 hsuenaga else 3140 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_256; 3141 1.1 hsuenaga 3142 1.1 hsuenaga return 0; 3143 1.1 hsuenaga } 3144 1.1 hsuenaga 3145 1.1 hsuenaga /* 3146 1.1 hsuenaga * String Conversions 3147 1.1 hsuenaga */ 3148 1.1 hsuenaga STATIC const char * 3149 1.1 hsuenaga s_errreg(uint32_t v) 3150 1.1 hsuenaga { 3151 1.1 hsuenaga static char buf[80]; 3152 1.1 hsuenaga 3153 1.1 hsuenaga snprintf(buf, sizeof(buf), 3154 1.1 hsuenaga "%sMiss %sDoubleHit %sBothHit %sDataError", 3155 1.1 hsuenaga (v & MV_TDMA_ERRC_MISS) ? "+" : "-", 3156 1.1 hsuenaga (v & MV_TDMA_ERRC_DHIT) ? "+" : "-", 3157 1.1 hsuenaga (v & MV_TDMA_ERRC_BHIT) ? "+" : "-", 3158 1.1 hsuenaga (v & MV_TDMA_ERRC_DERR) ? "+" : "-"); 3159 1.1 hsuenaga 3160 1.1 hsuenaga return (const char *)buf; 3161 1.1 hsuenaga } 3162 1.1 hsuenaga 3163 1.1 hsuenaga STATIC const char * 3164 1.1 hsuenaga s_winreg(uint32_t v) 3165 1.1 hsuenaga { 3166 1.1 hsuenaga static char buf[80]; 3167 1.12 riastrad 3168 1.1 hsuenaga snprintf(buf, sizeof(buf), 3169 1.1 hsuenaga "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]", 3170 1.1 hsuenaga (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS", 3171 1.1 hsuenaga MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v), 3172 1.1 hsuenaga MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v)); 3173 1.1 hsuenaga 3174 1.1 hsuenaga return (const char *)buf; 3175 1.1 hsuenaga } 3176 1.1 hsuenaga 3177 1.1 hsuenaga STATIC const char * 3178 1.1 hsuenaga s_ctrlreg(uint32_t reg) 3179 1.1 hsuenaga { 3180 1.1 hsuenaga static char buf[80]; 3181 1.12 riastrad 3182 1.1 hsuenaga snprintf(buf, sizeof(buf), 3183 1.1 hsuenaga "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT", 3184 1.1 hsuenaga (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE", 3185 1.1 hsuenaga (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-", 3186 1.1 hsuenaga MV_TDMA_CONTROL_GET_DST_BURST(reg), 3187 1.1 hsuenaga MV_TDMA_CONTROL_GET_SRC_BURST(reg), 3188 1.1 hsuenaga (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-", 3189 1.1 hsuenaga (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+", 3190 1.1 hsuenaga (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+", 3191 1.1 hsuenaga (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-"); 3192 1.1 hsuenaga 3193 1.1 hsuenaga return (const char *)buf; 3194 1.1 hsuenaga } 3195 1.1 hsuenaga 3196 1.1 hsuenaga _STATIC const char * 3197 1.1 hsuenaga s_xpsecintr(uint32_t v) 3198 1.1 hsuenaga { 3199 1.1 hsuenaga static char buf[160]; 3200 1.1 hsuenaga 3201 1.1 hsuenaga snprintf(buf, sizeof(buf), 3202 1.1 hsuenaga "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA " 3203 1.1 hsuenaga "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont", 3204 1.1 hsuenaga (v & MVXPSEC_INT_AUTH) ? "+" : "-", 3205 1.1 hsuenaga (v & MVXPSEC_INT_DES) ? "+" : "-", 3206 1.1 hsuenaga (v & MVXPSEC_INT_AES_ENC) ? "+" : "-", 3207 1.1 hsuenaga (v & MVXPSEC_INT_AES_DEC) ? "+" : "-", 3208 1.1 hsuenaga (v & MVXPSEC_INT_ENC) ? "+" : "-", 3209 1.1 hsuenaga (v & MVXPSEC_INT_SA) ? "+" : "-", 3210 1.1 hsuenaga (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-", 3211 1.1 hsuenaga (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-", 3212 1.1 hsuenaga (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-", 3213 1.1 hsuenaga (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-"); 3214 1.1 hsuenaga 3215 1.1 hsuenaga return (const char *)buf; 3216 1.1 hsuenaga } 3217 1.1 hsuenaga 3218 1.1 hsuenaga STATIC const char * 3219 1.1 hsuenaga s_ctlalg(uint32_t alg) 3220 1.1 hsuenaga { 3221 1.1 hsuenaga switch (alg) { 3222 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96: 3223 1.1 hsuenaga return "HMAC-SHA1-96"; 3224 1.1 hsuenaga case CRYPTO_SHA1_HMAC: 3225 1.1 hsuenaga return "HMAC-SHA1"; 3226 1.1 hsuenaga case CRYPTO_SHA1: 3227 1.1 hsuenaga return "SHA1"; 3228 1.1 hsuenaga case CRYPTO_MD5_HMAC_96: 3229 1.1 hsuenaga return "HMAC-MD5-96"; 3230 1.1 hsuenaga case CRYPTO_MD5_HMAC: 3231 1.1 hsuenaga return "HMAC-MD5"; 3232 1.1 hsuenaga case CRYPTO_MD5: 3233 1.1 hsuenaga return "MD5"; 3234 1.1 hsuenaga case CRYPTO_DES_CBC: 3235 1.1 hsuenaga return "DES-CBC"; 3236 1.1 hsuenaga case CRYPTO_3DES_CBC: 3237 1.1 hsuenaga return "3DES-CBC"; 3238 1.1 hsuenaga case CRYPTO_AES_CBC: 3239 1.1 hsuenaga return "AES-CBC"; 3240 1.1 hsuenaga default: 3241 1.1 hsuenaga break; 3242 1.1 hsuenaga } 3243 1.1 hsuenaga 3244 1.1 hsuenaga return "Unknown"; 3245 1.1 hsuenaga } 3246 1.1 hsuenaga 3247 1.1 hsuenaga STATIC const char * 3248 1.1 hsuenaga s_xpsec_op(uint32_t reg) 3249 1.1 hsuenaga { 3250 1.1 hsuenaga reg &= MV_ACC_CRYPTO_OP_MASK; 3251 1.1 hsuenaga switch (reg) { 3252 1.1 hsuenaga case MV_ACC_CRYPTO_OP_ENC: 3253 1.1 hsuenaga return "ENC"; 3254 1.1 hsuenaga case MV_ACC_CRYPTO_OP_MAC: 3255 1.1 hsuenaga return "MAC"; 3256 1.1 hsuenaga case MV_ACC_CRYPTO_OP_ENCMAC: 3257 1.1 hsuenaga return "ENC-MAC"; 3258 1.1 hsuenaga case MV_ACC_CRYPTO_OP_MACENC: 3259 1.1 hsuenaga return "MAC-ENC"; 3260 1.1 hsuenaga default: 3261 1.1 hsuenaga break; 3262 1.1 hsuenaga } 3263 1.12 riastrad 3264 1.1 hsuenaga return "Unknown"; 3265 1.1 hsuenaga } 3266 1.1 hsuenaga 3267 1.1 hsuenaga STATIC const char * 3268 1.1 hsuenaga s_xpsec_enc(uint32_t alg) 3269 1.1 hsuenaga { 3270 1.1 hsuenaga alg <<= MV_ACC_CRYPTO_ENC_SHIFT; 3271 1.1 hsuenaga switch (alg) { 3272 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_DES: 3273 1.1 hsuenaga return "DES"; 3274 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_3DES: 3275 1.1 hsuenaga return "3DES"; 3276 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_AES: 3277 1.1 hsuenaga return "AES"; 3278 1.1 hsuenaga default: 3279 1.1 hsuenaga break; 3280 1.1 hsuenaga } 3281 1.1 hsuenaga 3282 1.1 hsuenaga return "Unknown"; 3283 1.1 hsuenaga } 3284 1.1 hsuenaga 3285 1.1 hsuenaga STATIC const char * 3286 1.1 hsuenaga s_xpsec_mac(uint32_t alg) 3287 1.1 hsuenaga { 3288 1.1 hsuenaga alg <<= MV_ACC_CRYPTO_MAC_SHIFT; 3289 1.1 hsuenaga switch (alg) { 3290 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_NONE: 3291 1.1 hsuenaga return "Disabled"; 3292 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_MD5: 3293 1.1 hsuenaga return "MD5"; 3294 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_SHA1: 3295 1.1 hsuenaga return "SHA1"; 3296 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_HMAC_MD5: 3297 1.1 hsuenaga return "HMAC-MD5"; 3298 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_HMAC_SHA1: 3299 1.1 hsuenaga return "HMAC-SHA1"; 3300 1.1 hsuenaga default: 3301 1.1 hsuenaga break; 3302 1.1 hsuenaga } 3303 1.1 hsuenaga 3304 1.1 hsuenaga return "Unknown"; 3305 1.1 hsuenaga } 3306 1.1 hsuenaga 3307 1.1 hsuenaga STATIC const char * 3308 1.1 hsuenaga s_xpsec_frag(uint32_t frag) 3309 1.1 hsuenaga { 3310 1.1 hsuenaga frag <<= MV_ACC_CRYPTO_FRAG_SHIFT; 3311 1.1 hsuenaga switch (frag) { 3312 1.1 hsuenaga case MV_ACC_CRYPTO_NOFRAG: 3313 1.1 hsuenaga return "NoFragment"; 3314 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_FIRST: 3315 1.1 hsuenaga return "FirstFragment"; 3316 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_MID: 3317 1.1 hsuenaga return "MiddleFragment"; 3318 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_LAST: 3319 1.1 hsuenaga return "LastFragment"; 3320 1.1 hsuenaga default: 3321 1.1 hsuenaga break; 3322 1.1 hsuenaga } 3323 1.1 hsuenaga 3324 1.1 hsuenaga return "Unknown"; 3325 1.1 hsuenaga } 3326 1.1 hsuenaga 3327 1.1 hsuenaga #ifdef MVXPSEC_DEBUG 3328 1.1 hsuenaga void 3329 1.1 hsuenaga mvxpsec_dump_reg(struct mvxpsec_softc *sc) 3330 1.1 hsuenaga { 3331 1.1 hsuenaga uint32_t reg; 3332 1.1 hsuenaga int i; 3333 1.1 hsuenaga 3334 1.1 hsuenaga if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0) 3335 1.1 hsuenaga return; 3336 1.1 hsuenaga 3337 1.1 hsuenaga printf("--- Interrupt Registers ---\n"); 3338 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE); 3339 1.1 hsuenaga printf("MVXPSEC INT CAUSE: 0x%08x\n", reg); 3340 1.1 hsuenaga printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg)); 3341 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK); 3342 1.1 hsuenaga printf("MVXPSEC INT MASK: 0x%08x\n", reg); 3343 1.1 hsuenaga printf("MVXPSEC INT MASKE: %s\n", s_xpsecintr(reg)); 3344 1.1 hsuenaga 3345 1.1 hsuenaga printf("--- DMA Configuration Registers ---\n"); 3346 1.1 hsuenaga for (i = 0; i < MV_TDMA_NWINDOW; i++) { 3347 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i)); 3348 1.1 hsuenaga printf("TDMA BAR%d: 0x%08x\n", i, reg); 3349 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i)); 3350 1.1 hsuenaga printf("TDMA ATTR%d: 0x%08x\n", i, reg); 3351 1.1 hsuenaga printf(" -> %s\n", s_winreg(reg)); 3352 1.1 hsuenaga } 3353 1.1 hsuenaga 3354 1.1 hsuenaga printf("--- DMA Control Registers ---\n"); 3355 1.1 hsuenaga 3356 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL); 3357 1.1 hsuenaga printf("TDMA CONTROL: 0x%08x\n", reg); 3358 1.1 hsuenaga printf(" -> %s\n", s_ctrlreg(reg)); 3359 1.1 hsuenaga 3360 1.1 hsuenaga printf("--- DMA Current Command Descriptors ---\n"); 3361 1.1 hsuenaga 3362 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE); 3363 1.1 hsuenaga printf("TDMA ERR CAUSE: 0x%08x\n", reg); 3364 1.1 hsuenaga 3365 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK); 3366 1.1 hsuenaga printf("TDMA ERR MASK: 0x%08x\n", reg); 3367 1.1 hsuenaga 3368 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CNT); 3369 1.1 hsuenaga printf("TDMA DATA OWNER: %s\n", 3370 1.1 hsuenaga (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU"); 3371 1.1 hsuenaga printf("TDMA DATA COUNT: %d(0x%x)\n", 3372 1.1 hsuenaga (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN)); 3373 1.1 hsuenaga 3374 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_SRC); 3375 1.1 hsuenaga printf("TDMA DATA SRC: 0x%08x\n", reg); 3376 1.1 hsuenaga 3377 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_DST); 3378 1.1 hsuenaga printf("TDMA DATA DST: 0x%08x\n", reg); 3379 1.1 hsuenaga 3380 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_NXT); 3381 1.1 hsuenaga printf("TDMA DATA NXT: 0x%08x\n", reg); 3382 1.1 hsuenaga 3383 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CUR); 3384 1.1 hsuenaga printf("TDMA DATA CUR: 0x%08x\n", reg); 3385 1.1 hsuenaga 3386 1.1 hsuenaga printf("--- ACC Command Register ---\n"); 3387 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_COMMAND); 3388 1.1 hsuenaga printf("ACC COMMAND: 0x%08x\n", reg); 3389 1.1 hsuenaga printf("ACC: %sACT %sSTOP\n", 3390 1.1 hsuenaga (reg & MV_ACC_COMMAND_ACT) ? "+" : "-", 3391 1.1 hsuenaga (reg & MV_ACC_COMMAND_STOP) ? "+" : "-"); 3392 1.1 hsuenaga 3393 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_CONFIG); 3394 1.1 hsuenaga printf("ACC CONFIG: 0x%08x\n", reg); 3395 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_DESC); 3396 1.1 hsuenaga printf("ACC DESC: 0x%08x\n", reg); 3397 1.1 hsuenaga 3398 1.1 hsuenaga printf("--- DES Key Register ---\n"); 3399 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L); 3400 1.1 hsuenaga printf("DES KEY0 Low: 0x%08x\n", reg); 3401 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H); 3402 1.1 hsuenaga printf("DES KEY0 High: 0x%08x\n", reg); 3403 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L); 3404 1.1 hsuenaga printf("DES KEY1 Low: 0x%08x\n", reg); 3405 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H); 3406 1.1 hsuenaga printf("DES KEY1 High: 0x%08x\n", reg); 3407 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L); 3408 1.1 hsuenaga printf("DES KEY2 Low: 0x%08x\n", reg); 3409 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H); 3410 1.1 hsuenaga printf("DES KEY2 High: 0x%08x\n", reg); 3411 1.1 hsuenaga 3412 1.1 hsuenaga printf("--- AES Key Register ---\n"); 3413 1.1 hsuenaga for (i = 0; i < 8; i++) { 3414 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i)); 3415 1.1 hsuenaga printf("AES ENC KEY COL%d: %08x\n", i, reg); 3416 1.1 hsuenaga } 3417 1.1 hsuenaga for (i = 0; i < 8; i++) { 3418 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i)); 3419 1.1 hsuenaga printf("AES DEC KEY COL%d: %08x\n", i, reg); 3420 1.1 hsuenaga } 3421 1.1 hsuenaga 3422 1.1 hsuenaga return; 3423 1.1 hsuenaga } 3424 1.1 hsuenaga 3425 1.1 hsuenaga STATIC void 3426 1.1 hsuenaga mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len) 3427 1.1 hsuenaga { 3428 1.1 hsuenaga uint32_t reg; 3429 1.1 hsuenaga 3430 1.1 hsuenaga if (sc->sc_sram_va == NULL) 3431 1.1 hsuenaga return; 3432 1.1 hsuenaga 3433 1.1 hsuenaga if (len == 0) { 3434 1.1 hsuenaga printf("\n%s NO DATA(len=0)\n", name); 3435 1.1 hsuenaga return; 3436 1.1 hsuenaga } 3437 1.1 hsuenaga else if (len > MV_ACC_SRAM_SIZE) 3438 1.1 hsuenaga len = MV_ACC_SRAM_SIZE; 3439 1.1 hsuenaga 3440 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx); 3441 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL); 3442 1.1 hsuenaga if (reg & MV_TDMA_CONTROL_ACT) { 3443 1.1 hsuenaga printf("TDMA is active, cannot access SRAM\n"); 3444 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx); 3445 1.1 hsuenaga return; 3446 1.1 hsuenaga } 3447 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_COMMAND); 3448 1.1 hsuenaga if (reg & MV_ACC_COMMAND_ACT) { 3449 1.1 hsuenaga printf("SA is active, cannot access SRAM\n"); 3450 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx); 3451 1.1 hsuenaga return; 3452 1.1 hsuenaga } 3453 1.1 hsuenaga 3454 1.1 hsuenaga printf("%s: dump SRAM, %zu bytes\n", name, len); 3455 1.1 hsuenaga mvxpsec_dump_data(name, sc->sc_sram_va, len); 3456 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx); 3457 1.1 hsuenaga return; 3458 1.1 hsuenaga } 3459 1.1 hsuenaga 3460 1.1 hsuenaga 3461 1.1 hsuenaga _STATIC void 3462 1.1 hsuenaga mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh) 3463 1.1 hsuenaga { 3464 1.1 hsuenaga struct mvxpsec_descriptor *d = 3465 1.1 hsuenaga (struct mvxpsec_descriptor *)dh->_desc; 3466 1.1 hsuenaga 3467 1.1 hsuenaga printf("--- DMA Command Descriptor ---\n"); 3468 1.1 hsuenaga printf("DESC: VA=%p PA=0x%08x\n", 3469 1.1 hsuenaga d, (uint32_t)dh->phys_addr); 3470 1.1 hsuenaga printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0); 3471 1.1 hsuenaga printf("DESC: SRC = 0x%08x\n", d->tdma_src); 3472 1.1 hsuenaga printf("DESC: DST = 0x%08x\n", d->tdma_dst); 3473 1.1 hsuenaga printf("DESC: NXT = 0x%08x\n", d->tdma_nxt); 3474 1.1 hsuenaga 3475 1.1 hsuenaga return; 3476 1.1 hsuenaga } 3477 1.1 hsuenaga 3478 1.1 hsuenaga STATIC void 3479 1.1 hsuenaga mvxpsec_dump_data(const char *name, void *p, size_t len) 3480 1.1 hsuenaga { 3481 1.1 hsuenaga uint8_t *data = p; 3482 1.1 hsuenaga off_t off; 3483 1.1 hsuenaga 3484 1.1 hsuenaga printf("%s: dump %p, %zu bytes", name, p, len); 3485 1.1 hsuenaga if (p == NULL || len == 0) { 3486 1.1 hsuenaga printf("\n%s: NO DATA\n", name); 3487 1.1 hsuenaga return; 3488 1.1 hsuenaga } 3489 1.1 hsuenaga for (off = 0; off < len; off++) { 3490 1.1 hsuenaga if ((off % 16) == 0) { 3491 1.1 hsuenaga printf("\n%s: 0x%08x:", name, (uint32_t)off); 3492 1.1 hsuenaga } 3493 1.1 hsuenaga if ((off % 4) == 0) { 3494 1.1 hsuenaga printf(" "); 3495 1.1 hsuenaga } 3496 1.1 hsuenaga printf("%02x", data[off]); 3497 1.1 hsuenaga } 3498 1.1 hsuenaga printf("\n"); 3499 1.1 hsuenaga 3500 1.1 hsuenaga return; 3501 1.1 hsuenaga } 3502 1.1 hsuenaga 3503 1.1 hsuenaga _STATIC void 3504 1.1 hsuenaga mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p) 3505 1.1 hsuenaga { 3506 1.1 hsuenaga struct mvxpsec_softc *sc = mv_p->mv_s->sc; 3507 1.1 hsuenaga 3508 1.1 hsuenaga printf("%s: packet_data:\n", name); 3509 1.1 hsuenaga mvxpsec_dump_packet_data(name, mv_p); 3510 1.1 hsuenaga 3511 1.1 hsuenaga printf("%s: SRAM:\n", name); 3512 1.1 hsuenaga mvxpsec_dump_sram(name, sc, 2000); 3513 1.1 hsuenaga 3514 1.1 hsuenaga printf("%s: packet_descriptor:\n", name); 3515 1.1 hsuenaga mvxpsec_dump_packet_desc(name, mv_p); 3516 1.1 hsuenaga } 3517 1.1 hsuenaga 3518 1.1 hsuenaga _STATIC void 3519 1.1 hsuenaga mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p) 3520 1.1 hsuenaga { 3521 1.1 hsuenaga static char buf[1500]; 3522 1.1 hsuenaga int len; 3523 1.1 hsuenaga 3524 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_MBUF) { 3525 1.1 hsuenaga struct mbuf *m; 3526 1.1 hsuenaga 3527 1.1 hsuenaga m = mv_p->data.mbuf; 3528 1.1 hsuenaga len = m->m_pkthdr.len; 3529 1.1 hsuenaga if (len > sizeof(buf)) 3530 1.1 hsuenaga len = sizeof(buf); 3531 1.1 hsuenaga m_copydata(m, 0, len, buf); 3532 1.1 hsuenaga } 3533 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) { 3534 1.1 hsuenaga struct uio *uio; 3535 1.1 hsuenaga 3536 1.1 hsuenaga uio = mv_p->data.uio; 3537 1.1 hsuenaga len = uio->uio_resid; 3538 1.1 hsuenaga if (len > sizeof(buf)) 3539 1.1 hsuenaga len = sizeof(buf); 3540 1.1 hsuenaga cuio_copydata(uio, 0, len, buf); 3541 1.1 hsuenaga } 3542 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_RAW) { 3543 1.1 hsuenaga len = mv_p->data_len; 3544 1.1 hsuenaga if (len > sizeof(buf)) 3545 1.1 hsuenaga len = sizeof(buf); 3546 1.1 hsuenaga memcpy(buf, mv_p->data.raw, len); 3547 1.1 hsuenaga } 3548 1.1 hsuenaga else 3549 1.1 hsuenaga return; 3550 1.1 hsuenaga mvxpsec_dump_data(name, buf, len); 3551 1.1 hsuenaga 3552 1.1 hsuenaga return; 3553 1.1 hsuenaga } 3554 1.1 hsuenaga 3555 1.1 hsuenaga _STATIC void 3556 1.1 hsuenaga mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p) 3557 1.1 hsuenaga { 3558 1.1 hsuenaga uint32_t *words; 3559 1.1 hsuenaga 3560 1.1 hsuenaga if (mv_p == NULL) 3561 1.1 hsuenaga return; 3562 1.1 hsuenaga 3563 1.1 hsuenaga words = &mv_p->pkt_header.desc.acc_desc_dword0; 3564 1.1 hsuenaga mvxpsec_dump_acc_config(name, words[0]); 3565 1.1 hsuenaga mvxpsec_dump_acc_encdata(name, words[1], words[2]); 3566 1.1 hsuenaga mvxpsec_dump_acc_enclen(name, words[2]); 3567 1.1 hsuenaga mvxpsec_dump_acc_enckey(name, words[3]); 3568 1.1 hsuenaga mvxpsec_dump_acc_enciv(name, words[4]); 3569 1.1 hsuenaga mvxpsec_dump_acc_macsrc(name, words[5]); 3570 1.1 hsuenaga mvxpsec_dump_acc_macdst(name, words[6]); 3571 1.1 hsuenaga mvxpsec_dump_acc_maciv(name, words[7]); 3572 1.1 hsuenaga 3573 1.1 hsuenaga return; 3574 1.1 hsuenaga } 3575 1.1 hsuenaga 3576 1.1 hsuenaga _STATIC void 3577 1.1 hsuenaga mvxpsec_dump_acc_config(const char *name, uint32_t w) 3578 1.1 hsuenaga { 3579 1.1 hsuenaga /* SA: Dword 0 */ 3580 1.1 hsuenaga printf("%s: Dword0=0x%08x\n", name, w); 3581 1.1 hsuenaga printf("%s: OP = %s\n", name, 3582 1.1 hsuenaga s_xpsec_op(MV_ACC_CRYPTO_OP(w))); 3583 1.1 hsuenaga printf("%s: MAC = %s\n", name, 3584 1.1 hsuenaga s_xpsec_mac(MV_ACC_CRYPTO_MAC(w))); 3585 1.1 hsuenaga printf("%s: MAC_LEN = %s\n", name, 3586 1.1 hsuenaga w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit"); 3587 1.1 hsuenaga printf("%s: ENC = %s\n", name, 3588 1.1 hsuenaga s_xpsec_enc(MV_ACC_CRYPTO_ENC(w))); 3589 1.1 hsuenaga printf("%s: DIR = %s\n", name, 3590 1.1 hsuenaga w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption"); 3591 1.1 hsuenaga printf("%s: CHAIN = %s\n", name, 3592 1.1 hsuenaga w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB"); 3593 1.1 hsuenaga printf("%s: 3DES = %s\n", name, 3594 1.1 hsuenaga w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE"); 3595 1.1 hsuenaga printf("%s: FRAGMENT = %s\n", name, 3596 1.1 hsuenaga s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w))); 3597 1.1 hsuenaga return; 3598 1.1 hsuenaga } 3599 1.1 hsuenaga 3600 1.1 hsuenaga STATIC void 3601 1.1 hsuenaga mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2) 3602 1.1 hsuenaga { 3603 1.1 hsuenaga /* SA: Dword 1 */ 3604 1.1 hsuenaga printf("%s: Dword1=0x%08x\n", name, w); 3605 1.1 hsuenaga printf("%s: ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w)); 3606 1.1 hsuenaga printf("%s: ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w)); 3607 1.1 hsuenaga printf("%s: ENC RANGE = 0x%x - 0x%x\n", name, 3608 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w), 3609 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1); 3610 1.1 hsuenaga return; 3611 1.1 hsuenaga } 3612 1.1 hsuenaga 3613 1.1 hsuenaga STATIC void 3614 1.1 hsuenaga mvxpsec_dump_acc_enclen(const char *name, uint32_t w) 3615 1.1 hsuenaga { 3616 1.1 hsuenaga /* SA: Dword 2 */ 3617 1.1 hsuenaga printf("%s: Dword2=0x%08x\n", name, w); 3618 1.1 hsuenaga printf("%s: ENC LEN = %d\n", name, 3619 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w)); 3620 1.1 hsuenaga return; 3621 1.1 hsuenaga } 3622 1.1 hsuenaga 3623 1.1 hsuenaga STATIC void 3624 1.1 hsuenaga mvxpsec_dump_acc_enckey(const char *name, uint32_t w) 3625 1.1 hsuenaga { 3626 1.1 hsuenaga /* SA: Dword 3 */ 3627 1.1 hsuenaga printf("%s: Dword3=0x%08x\n", name, w); 3628 1.1 hsuenaga printf("%s: EKEY = 0x%x\n", name, 3629 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w)); 3630 1.1 hsuenaga return; 3631 1.1 hsuenaga } 3632 1.1 hsuenaga 3633 1.1 hsuenaga STATIC void 3634 1.1 hsuenaga mvxpsec_dump_acc_enciv(const char *name, uint32_t w) 3635 1.1 hsuenaga { 3636 1.1 hsuenaga /* SA: Dword 4 */ 3637 1.1 hsuenaga printf("%s: Dword4=0x%08x\n", name, w); 3638 1.1 hsuenaga printf("%s: EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w)); 3639 1.1 hsuenaga printf("%s: EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w)); 3640 1.1 hsuenaga return; 3641 1.1 hsuenaga } 3642 1.1 hsuenaga 3643 1.1 hsuenaga STATIC void 3644 1.1 hsuenaga mvxpsec_dump_acc_macsrc(const char *name, uint32_t w) 3645 1.1 hsuenaga { 3646 1.1 hsuenaga /* SA: Dword 5 */ 3647 1.1 hsuenaga printf("%s: Dword5=0x%08x\n", name, w); 3648 1.1 hsuenaga printf("%s: MAC_SRC = 0x%x\n", name, 3649 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w)); 3650 1.1 hsuenaga printf("%s: MAC_TOTAL_LEN = %d\n", name, 3651 1.1 hsuenaga MV_ACC_DESC_GET_VAL_3(w)); 3652 1.1 hsuenaga printf("%s: MAC_RANGE = 0x%0x - 0x%0x\n", name, 3653 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w), 3654 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1); 3655 1.1 hsuenaga return; 3656 1.1 hsuenaga } 3657 1.1 hsuenaga 3658 1.1 hsuenaga STATIC void 3659 1.1 hsuenaga mvxpsec_dump_acc_macdst(const char *name, uint32_t w) 3660 1.1 hsuenaga { 3661 1.1 hsuenaga /* SA: Dword 6 */ 3662 1.1 hsuenaga printf("%s: Dword6=0x%08x\n", name, w); 3663 1.1 hsuenaga printf("%s: MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w)); 3664 1.1 hsuenaga printf("%s: MAC_BLOCK_LEN = %d\n", name, 3665 1.1 hsuenaga MV_ACC_DESC_GET_VAL_2(w)); 3666 1.1 hsuenaga return; 3667 1.1 hsuenaga } 3668 1.1 hsuenaga 3669 1.1 hsuenaga STATIC void 3670 1.1 hsuenaga mvxpsec_dump_acc_maciv(const char *name, uint32_t w) 3671 1.1 hsuenaga { 3672 1.1 hsuenaga /* SA: Dword 7 */ 3673 1.1 hsuenaga printf("%s: Dword7=0x%08x\n", name, w); 3674 1.1 hsuenaga printf("%s: MAC_INNER_IV = 0x%x\n", name, 3675 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w)); 3676 1.1 hsuenaga printf("%s: MAC_OUTER_IV = 0x%x\n", name, 3677 1.1 hsuenaga MV_ACC_DESC_GET_VAL_2(w)); 3678 1.1 hsuenaga return; 3679 1.1 hsuenaga } 3680 1.1 hsuenaga #endif 3681