/*	$NetBSD: mvxpsec.c,v 1.2 2017/11/09 22:22:58 christos Exp $	*/
2 1.1 hsuenaga /*
3 1.1 hsuenaga * Copyright (c) 2015 Internet Initiative Japan Inc.
4 1.1 hsuenaga * All rights reserved.
5 1.1 hsuenaga *
6 1.1 hsuenaga * Redistribution and use in source and binary forms, with or without
7 1.1 hsuenaga * modification, are permitted provided that the following conditions
8 1.1 hsuenaga * are met:
9 1.1 hsuenaga * 1. Redistributions of source code must retain the above copyright
10 1.1 hsuenaga * notice, this list of conditions and the following disclaimer.
11 1.1 hsuenaga * 2. Redistributions in binary form must reproduce the above copyright
12 1.1 hsuenaga * notice, this list of conditions and the following disclaimer in the
13 1.1 hsuenaga * documentation and/or other materials provided with the distribution.
14 1.1 hsuenaga *
15 1.1 hsuenaga * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 1.1 hsuenaga * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 1.1 hsuenaga * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 1.1 hsuenaga * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 1.1 hsuenaga * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 1.1 hsuenaga * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 1.1 hsuenaga * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 1.1 hsuenaga * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 1.1 hsuenaga * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 1.1 hsuenaga * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 1.1 hsuenaga * POSSIBILITY OF SUCH DAMAGE.
26 1.1 hsuenaga */
27 1.1 hsuenaga /*
 * Cryptographic Engine and Security Accelerator (MVXPSEC)
29 1.1 hsuenaga */
30 1.1 hsuenaga #include <sys/cdefs.h>
31 1.1 hsuenaga #include <sys/param.h>
32 1.1 hsuenaga #include <sys/types.h>
33 1.1 hsuenaga #include <sys/kernel.h>
34 1.1 hsuenaga #include <sys/queue.h>
35 1.1 hsuenaga #include <sys/conf.h>
36 1.1 hsuenaga #include <sys/proc.h>
37 1.1 hsuenaga #include <sys/bus.h>
38 1.1 hsuenaga #include <sys/evcnt.h>
39 1.1 hsuenaga #include <sys/device.h>
40 1.1 hsuenaga #include <sys/endian.h>
41 1.1 hsuenaga #include <sys/errno.h>
42 1.1 hsuenaga #include <sys/kmem.h>
43 1.1 hsuenaga #include <sys/mbuf.h>
44 1.1 hsuenaga #include <sys/callout.h>
45 1.1 hsuenaga #include <sys/pool.h>
46 1.1 hsuenaga #include <sys/cprng.h>
47 1.1 hsuenaga #include <sys/syslog.h>
48 1.1 hsuenaga #include <sys/mutex.h>
49 1.1 hsuenaga #include <sys/kthread.h>
50 1.1 hsuenaga #include <sys/atomic.h>
51 1.1 hsuenaga #include <sys/sha1.h>
52 1.1 hsuenaga #include <sys/md5.h>
53 1.1 hsuenaga
54 1.1 hsuenaga #include <uvm/uvm_extern.h>
55 1.1 hsuenaga
56 1.1 hsuenaga #include <crypto/rijndael/rijndael.h>
57 1.1 hsuenaga
58 1.1 hsuenaga #include <opencrypto/cryptodev.h>
59 1.1 hsuenaga #include <opencrypto/xform.h>
60 1.1 hsuenaga
61 1.1 hsuenaga #include <net/net_stats.h>
62 1.1 hsuenaga
63 1.1 hsuenaga #include <netinet/in_systm.h>
64 1.1 hsuenaga #include <netinet/in.h>
65 1.1 hsuenaga #include <netinet/ip.h>
66 1.1 hsuenaga #include <netinet/ip6.h>
67 1.1 hsuenaga
68 1.1 hsuenaga #include <netipsec/esp_var.h>
69 1.1 hsuenaga
70 1.1 hsuenaga #include <arm/cpufunc.h>
71 1.1 hsuenaga #include <arm/marvell/mvsocvar.h>
72 1.1 hsuenaga #include <arm/marvell/armadaxpreg.h>
73 1.1 hsuenaga #include <dev/marvell/marvellreg.h>
74 1.1 hsuenaga #include <dev/marvell/marvellvar.h>
75 1.1 hsuenaga #include <dev/marvell/mvxpsecreg.h>
76 1.1 hsuenaga #include <dev/marvell/mvxpsecvar.h>
77 1.1 hsuenaga
78 1.1 hsuenaga #ifdef DEBUG
79 1.1 hsuenaga #define STATIC __attribute__ ((noinline)) extern
80 1.1 hsuenaga #define _STATIC __attribute__ ((noinline)) extern
81 1.1 hsuenaga #define INLINE __attribute__ ((noinline)) extern
82 1.1 hsuenaga #define _INLINE __attribute__ ((noinline)) extern
83 1.1 hsuenaga #else
84 1.1 hsuenaga #define STATIC static
85 1.1 hsuenaga #define _STATIC __attribute__ ((unused)) static
86 1.1 hsuenaga #define INLINE static inline
87 1.1 hsuenaga #define _INLINE __attribute__ ((unused)) static inline
88 1.1 hsuenaga #endif
89 1.1 hsuenaga
90 1.1 hsuenaga /*
 * IRQ and SRAM spaces for each unit
92 1.1 hsuenaga * XXX: move to attach_args
93 1.1 hsuenaga */
94 1.1 hsuenaga struct {
95 1.1 hsuenaga int err_int;
96 1.1 hsuenaga } mvxpsec_config[] = {
97 1.1 hsuenaga { .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
98 1.1 hsuenaga { .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
99 1.1 hsuenaga };
100 1.1 hsuenaga #define MVXPSEC_ERR_INT(sc) \
101 1.1 hsuenaga mvxpsec_config[device_unit((sc)->sc_dev)].err_int
102 1.1 hsuenaga
103 1.1 hsuenaga /*
104 1.1 hsuenaga * AES
105 1.1 hsuenaga */
106 1.1 hsuenaga #define MAXBC (128/32)
107 1.1 hsuenaga #define MAXKC (256/32)
108 1.1 hsuenaga #define MAXROUNDS 14
109 1.1 hsuenaga STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
110 1.1 hsuenaga uint8_t[MAXROUNDS+1][4][MAXBC]);
111 1.1 hsuenaga STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);
112 1.1 hsuenaga
113 1.1 hsuenaga /*
114 1.1 hsuenaga * device driver autoconf interface
115 1.1 hsuenaga */
116 1.1 hsuenaga STATIC int mvxpsec_match(device_t, cfdata_t, void *);
117 1.1 hsuenaga STATIC void mvxpsec_attach(device_t, device_t, void *);
118 1.1 hsuenaga STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);
119 1.1 hsuenaga
120 1.1 hsuenaga /*
121 1.1 hsuenaga * register setup
122 1.1 hsuenaga */
123 1.1 hsuenaga STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);
124 1.1 hsuenaga
125 1.1 hsuenaga /*
126 1.1 hsuenaga * timer(callout) interface
127 1.1 hsuenaga *
128 1.1 hsuenaga * XXX: callout is not MP safe...
129 1.1 hsuenaga */
130 1.1 hsuenaga STATIC void mvxpsec_timer(void *);
131 1.1 hsuenaga
132 1.1 hsuenaga /*
133 1.1 hsuenaga * interrupt interface
134 1.1 hsuenaga */
135 1.1 hsuenaga STATIC int mvxpsec_intr(void *);
136 1.1 hsuenaga INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
137 1.1 hsuenaga STATIC int mvxpsec_eintr(void *);
138 1.1 hsuenaga STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
139 1.1 hsuenaga STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
140 1.1 hsuenaga INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);
141 1.1 hsuenaga
142 1.1 hsuenaga /*
143 1.1 hsuenaga * memory allocators and VM management
144 1.1 hsuenaga */
145 1.1 hsuenaga STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
146 1.1 hsuenaga paddr_t, int);
147 1.1 hsuenaga STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);
148 1.1 hsuenaga
149 1.1 hsuenaga /*
150 1.1 hsuenaga * Low-level DMA interface
151 1.1 hsuenaga */
152 1.1 hsuenaga STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
153 1.1 hsuenaga struct marvell_attach_args *);
154 1.1 hsuenaga INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
155 1.1 hsuenaga INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
156 1.1 hsuenaga INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
157 1.1 hsuenaga _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
158 1.1 hsuenaga INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
159 1.1 hsuenaga uint32_t, uint32_t, uint32_t);
160 1.1 hsuenaga INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
161 1.1 hsuenaga struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);
162 1.1 hsuenaga
163 1.1 hsuenaga /*
164 1.1 hsuenaga * High-level DMA interface
165 1.1 hsuenaga */
166 1.1 hsuenaga INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
167 1.1 hsuenaga mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
168 1.1 hsuenaga INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
169 1.1 hsuenaga mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
170 1.1 hsuenaga INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
171 1.1 hsuenaga mvxpsec_dma_ring *);
172 1.1 hsuenaga INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
173 1.1 hsuenaga mvxpsec_dma_ring *);
174 1.1 hsuenaga INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
175 1.1 hsuenaga mvxpsec_dma_ring *);
176 1.1 hsuenaga INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
177 1.1 hsuenaga INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
178 1.1 hsuenaga
179 1.1 hsuenaga /*
180 1.1 hsuenaga * Session management interface (OpenCrypto)
181 1.1 hsuenaga */
182 1.1 hsuenaga #define MVXPSEC_SESSION(sid) ((sid) & 0x0fffffff)
183 1.1 hsuenaga #define MVXPSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
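/*
 * Illustration (hypothetical values, not taken from a real request): a
 * session id built as MVXPSEC_SID(2, 5) is (2 << 28) | 5 = 0x20000005,
 * and MVXPSEC_SESSION(0x20000005) recovers the session number 5.
 * The upper 4 bits carry the id passed as 'crd' (presumably the
 * opencrypto driver id), the lower 28 bits the per-driver session number.
 */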
184 1.1 hsuenaga /* pool management */
185 1.1 hsuenaga STATIC int mvxpsec_session_ctor(void *, void *, int);
186 1.1 hsuenaga STATIC void mvxpsec_session_dtor(void *, void *);
187 1.1 hsuenaga STATIC int mvxpsec_packet_ctor(void *, void *, int);
188 1.1 hsuenaga STATIC void mvxpsec_packet_dtor(void *, void *);
189 1.1 hsuenaga
190 1.1 hsuenaga /* session management */
191 1.1 hsuenaga STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
192 1.1 hsuenaga STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
193 1.1 hsuenaga INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
194 1.1 hsuenaga INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
195 1.1 hsuenaga INLINE void mvxpsec_session_unref(struct mvxpsec_session *);
196 1.1 hsuenaga
197 1.1 hsuenaga /* packet management */
198 1.1 hsuenaga STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
199 1.1 hsuenaga INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
200 1.1 hsuenaga STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
201 1.1 hsuenaga STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);
202 1.1 hsuenaga
/* session header management */
204 1.1 hsuenaga STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);
205 1.1 hsuenaga
206 1.1 hsuenaga /* packet queue management */
207 1.1 hsuenaga INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
208 1.1 hsuenaga STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);
209 1.1 hsuenaga
/* opencrypto operation */
211 1.1 hsuenaga INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
212 1.1 hsuenaga INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);
213 1.1 hsuenaga
214 1.1 hsuenaga /* payload data management */
215 1.1 hsuenaga INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
216 1.1 hsuenaga STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
217 1.1 hsuenaga STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
218 1.1 hsuenaga STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
219 1.1 hsuenaga STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
220 1.1 hsuenaga _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
221 1.1 hsuenaga STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
222 1.1 hsuenaga STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);
223 1.1 hsuenaga
224 1.1 hsuenaga /* key pre-computation */
225 1.1 hsuenaga STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
226 1.1 hsuenaga STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);
227 1.1 hsuenaga
228 1.1 hsuenaga /* crypto operation management */
229 1.1 hsuenaga INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
230 1.1 hsuenaga INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);
231 1.1 hsuenaga
232 1.1 hsuenaga /*
233 1.1 hsuenaga * parameter converters
234 1.1 hsuenaga */
235 1.1 hsuenaga INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
236 1.1 hsuenaga INLINE uint32_t mvxpsec_aesklen(int klen);
237 1.1 hsuenaga
238 1.1 hsuenaga /*
239 1.1 hsuenaga * string formatters
240 1.1 hsuenaga */
241 1.1 hsuenaga _STATIC const char *s_ctrlreg(uint32_t);
242 1.1 hsuenaga _STATIC const char *s_winreg(uint32_t);
243 1.1 hsuenaga _STATIC const char *s_errreg(uint32_t);
244 1.1 hsuenaga _STATIC const char *s_xpsecintr(uint32_t);
245 1.1 hsuenaga _STATIC const char *s_ctlalg(uint32_t);
246 1.1 hsuenaga _STATIC const char *s_xpsec_op(uint32_t);
247 1.1 hsuenaga _STATIC const char *s_xpsec_enc(uint32_t);
248 1.1 hsuenaga _STATIC const char *s_xpsec_mac(uint32_t);
249 1.1 hsuenaga _STATIC const char *s_xpsec_frag(uint32_t);
250 1.1 hsuenaga
251 1.1 hsuenaga /*
252 1.1 hsuenaga * debugging supports
253 1.1 hsuenaga */
254 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
255 1.1 hsuenaga _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
256 1.1 hsuenaga _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
257 1.1 hsuenaga _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
258 1.1 hsuenaga _STATIC void mvxpsec_dump_data(const char *, void *, size_t);
259 1.1 hsuenaga
260 1.1 hsuenaga _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
261 1.1 hsuenaga _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
262 1.1 hsuenaga _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);
263 1.1 hsuenaga
264 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
265 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
266 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
267 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
268 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
269 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
270 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
271 1.1 hsuenaga _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
272 1.1 hsuenaga #endif
273 1.1 hsuenaga
274 1.1 hsuenaga /*
275 1.1 hsuenaga * global configurations, params, work spaces, ...
276 1.1 hsuenaga *
277 1.1 hsuenaga * XXX: use sysctl for global configurations
278 1.1 hsuenaga */
279 1.1 hsuenaga /* waiting for device */
280 1.1 hsuenaga static int mvxpsec_wait_interval = 10; /* usec */
281 1.1 hsuenaga static int mvxpsec_wait_retry = 100; /* times = wait for 1 [msec] */
282 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
283 1.1 hsuenaga static uint32_t mvxpsec_debug = MVXPSEC_DEBUG; /* debug level */
284 1.1 hsuenaga #endif
285 1.1 hsuenaga
286 1.1 hsuenaga /*
287 1.1 hsuenaga * Register accessors
288 1.1 hsuenaga */
289 1.1 hsuenaga #define MVXPSEC_WRITE(sc, off, val) \
290 1.1 hsuenaga bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
291 1.1 hsuenaga #define MVXPSEC_READ(sc, off) \
292 1.1 hsuenaga bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))
293 1.1 hsuenaga
294 1.1 hsuenaga /*
295 1.1 hsuenaga * device driver autoconf interface
296 1.1 hsuenaga */
297 1.1 hsuenaga CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
298 1.1 hsuenaga mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);
299 1.1 hsuenaga
300 1.1 hsuenaga STATIC int
301 1.1 hsuenaga mvxpsec_match(device_t dev, cfdata_t match, void *aux)
302 1.1 hsuenaga {
303 1.1 hsuenaga struct marvell_attach_args *mva = aux;
304 1.1 hsuenaga uint32_t tag;
305 1.1 hsuenaga int window;
306 1.1 hsuenaga
307 1.1 hsuenaga if (strcmp(mva->mva_name, match->cf_name) != 0)
308 1.1 hsuenaga return 0;
309 1.1 hsuenaga if (mva->mva_offset == MVA_OFFSET_DEFAULT)
310 1.1 hsuenaga return 0;
311 1.1 hsuenaga
312 1.1 hsuenaga switch (mva->mva_unit) {
313 1.1 hsuenaga case 0:
314 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT0;
315 1.1 hsuenaga break;
316 1.1 hsuenaga case 1:
317 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT1;
318 1.1 hsuenaga break;
319 1.1 hsuenaga default:
320 1.1 hsuenaga aprint_error_dev(dev,
321 1.1 hsuenaga "unit %d is not supported\n", mva->mva_unit);
322 1.1 hsuenaga return 0;
323 1.1 hsuenaga }
324 1.1 hsuenaga
325 1.1 hsuenaga window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
326 1.1 hsuenaga if (window >= nwindow) {
327 1.1 hsuenaga aprint_error_dev(dev,
328 1.1 hsuenaga "Security Accelerator SRAM is not configured.\n");
329 1.1 hsuenaga return 0;
330 1.1 hsuenaga }
331 1.1 hsuenaga
332 1.1 hsuenaga return 1;
333 1.1 hsuenaga }
334 1.1 hsuenaga
335 1.1 hsuenaga STATIC void
336 1.1 hsuenaga mvxpsec_attach(device_t parent, device_t self, void *aux)
337 1.1 hsuenaga {
338 1.1 hsuenaga struct marvell_attach_args *mva = aux;
339 1.1 hsuenaga struct mvxpsec_softc *sc = device_private(self);
340 1.1 hsuenaga int v;
341 1.1 hsuenaga int i;
342 1.1 hsuenaga
343 1.1 hsuenaga sc->sc_dev = self;
344 1.1 hsuenaga
345 1.1 hsuenaga aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
346 1.1 hsuenaga aprint_naive("\n");
347 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET
348 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
349 1.1 hsuenaga #else
350 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
351 1.1 hsuenaga #endif
352 1.1 hsuenaga aprint_normal_dev(sc->sc_dev,
353 1.1 hsuenaga "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);
354 1.1 hsuenaga
355 1.1 hsuenaga /* mutex */
356 1.1 hsuenaga mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
357 1.1 hsuenaga mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
358 1.1 hsuenaga mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);
359 1.1 hsuenaga
360 1.1 hsuenaga /* Packet queue */
361 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_wait_queue);
362 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_run_queue);
363 1.1 hsuenaga SLIST_INIT(&sc->sc_free_list);
364 1.1 hsuenaga sc->sc_wait_qlen = 0;
365 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET
366 1.1 hsuenaga sc->sc_wait_qlimit = 16;
367 1.1 hsuenaga #else
368 1.1 hsuenaga sc->sc_wait_qlimit = 0;
369 1.1 hsuenaga #endif
370 1.1 hsuenaga sc->sc_free_qlen = 0;
371 1.1 hsuenaga
372 1.1 hsuenaga /* Timer */
373 1.1 hsuenaga callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
374 1.1 hsuenaga callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);
375 1.1 hsuenaga
376 1.1 hsuenaga /* I/O */
377 1.1 hsuenaga sc->sc_iot = mva->mva_iot;
378 1.1 hsuenaga if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
379 1.1 hsuenaga mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
380 1.1 hsuenaga aprint_error_dev(self, "Cannot map registers\n");
381 1.1 hsuenaga return;
382 1.1 hsuenaga }
383 1.1 hsuenaga
384 1.1 hsuenaga /* DMA */
385 1.1 hsuenaga sc->sc_dmat = mva->mva_dmat;
386 1.1 hsuenaga if (mvxpsec_init_dma(sc, mva) < 0)
387 1.1 hsuenaga return;
388 1.1 hsuenaga
389 1.1 hsuenaga /* SRAM */
390 1.1 hsuenaga if (mvxpsec_init_sram(sc) < 0)
391 1.1 hsuenaga return;
392 1.1 hsuenaga
393 1.1 hsuenaga /* Registers */
394 1.1 hsuenaga mvxpsec_wininit(sc, mva->mva_tags);
395 1.1 hsuenaga
396 1.1 hsuenaga /* INTR */
397 1.1 hsuenaga MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
398 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
399 1.1 hsuenaga sc->sc_done_ih =
400 1.1 hsuenaga marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
	/* XXX: should pass error IRQ using mva */
402 1.1 hsuenaga sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
403 1.1 hsuenaga IPL_NET, mvxpsec_eintr, sc);
404 1.1 hsuenaga aprint_normal_dev(self,
405 1.1 hsuenaga "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));
406 1.1 hsuenaga
407 1.1 hsuenaga /* Initialize TDMA (It's enabled here, but waiting for SA) */
408 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0)
409 1.1 hsuenaga panic("%s: DMA DEVICE not responding\n", __func__);
410 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
411 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
412 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
413 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
414 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
415 1.1 hsuenaga v = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
416 1.1 hsuenaga v |= MV_TDMA_CONTROL_ENABLE;
417 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
418 1.1 hsuenaga
419 1.1 hsuenaga /* Initialize SA */
420 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0)
421 1.1 hsuenaga panic("%s: MVXPSEC not responding\n", __func__);
422 1.1 hsuenaga v = MVXPSEC_READ(sc, MV_ACC_CONFIG);
423 1.1 hsuenaga v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
424 1.1 hsuenaga v |= MV_ACC_CONFIG_MULT_PKT;
425 1.1 hsuenaga v |= MV_ACC_CONFIG_WAIT_TDMA;
426 1.1 hsuenaga v |= MV_ACC_CONFIG_ACT_TDMA;
427 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
428 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
429 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
430 1.1 hsuenaga
431 1.1 hsuenaga /* Session */
432 1.1 hsuenaga sc->sc_session_pool =
433 1.1 hsuenaga pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
434 1.1 hsuenaga "mvxpsecpl", NULL, IPL_NET,
435 1.1 hsuenaga mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
436 1.1 hsuenaga pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
437 1.1 hsuenaga pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
438 1.1 hsuenaga sc->sc_last_session = NULL;
439 1.1 hsuenaga
	/* Packet */
	sc->sc_packet_pool =
	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
443 1.1 hsuenaga "mvxpsec_pktpl", NULL, IPL_NET,
444 1.1 hsuenaga mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
445 1.1 hsuenaga pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
446 1.1 hsuenaga pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);
447 1.1 hsuenaga
448 1.1 hsuenaga /* Register to EVCNT framework */
449 1.1 hsuenaga mvxpsec_evcnt_attach(sc);
450 1.1 hsuenaga
451 1.1 hsuenaga /* Register to Opencrypto */
452 1.1 hsuenaga for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
453 1.1 hsuenaga sc->sc_sessions[i] = NULL;
454 1.1 hsuenaga }
455 1.1 hsuenaga if (mvxpsec_register(sc))
456 1.1 hsuenaga panic("cannot initialize OpenCrypto module.\n");
457 1.1 hsuenaga
458 1.1 hsuenaga return;
459 1.1 hsuenaga }
460 1.1 hsuenaga
461 1.1 hsuenaga STATIC void
462 1.1 hsuenaga mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
463 1.1 hsuenaga {
464 1.1 hsuenaga struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;
465 1.1 hsuenaga
466 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
467 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Main Intr.");
468 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
469 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Auth Intr.");
470 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
471 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "DES Intr.");
472 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
473 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
474 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
475 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
476 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
477 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Crypto Intr.");
478 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
479 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "SA Intr.");
480 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
481 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
482 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
483 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
484 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
485 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
486 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
487 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");
488 1.1 hsuenaga
489 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
490 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "New-Session");
491 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
492 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Free-Session");
493 1.1 hsuenaga
494 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
495 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-OK");
496 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
497 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-ERR");
498 1.1 hsuenaga
499 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
500 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
501 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
502 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
503 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
504 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Queue-Full");
505 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
506 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Max-Dispatch");
507 1.1 hsuenaga evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
508 1.1 hsuenaga NULL, device_xname(sc->sc_dev), "Max-Done");
509 1.1 hsuenaga }
510 1.1 hsuenaga
511 1.1 hsuenaga /*
512 1.1 hsuenaga * Register setup
513 1.1 hsuenaga */
514 1.1 hsuenaga STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
515 1.1 hsuenaga {
516 1.1 hsuenaga device_t pdev = device_parent(sc->sc_dev);
517 1.1 hsuenaga uint64_t base;
518 1.1 hsuenaga uint32_t size, reg;
519 1.1 hsuenaga int window, target, attr, rv, i;
520 1.1 hsuenaga
	/* disable all windows */
522 1.1 hsuenaga for (window = 0; window < MV_TDMA_NWINDOW; window++)
523 1.1 hsuenaga {
524 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
525 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
526 1.1 hsuenaga }
527 1.1 hsuenaga
528 1.1 hsuenaga for (window = 0, i = 0;
529 1.1 hsuenaga tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
530 1.1 hsuenaga rv = marvell_winparams_by_tag(pdev, tags[i],
531 1.1 hsuenaga &target, &attr, &base, &size);
532 1.1 hsuenaga if (rv != 0 || size == 0)
533 1.1 hsuenaga continue;
534 1.1 hsuenaga
535 1.1 hsuenaga if (base > 0xffffffffULL) {
536 1.1 hsuenaga aprint_error_dev(sc->sc_dev,
537 1.1 hsuenaga "can't remap window %d\n", window);
538 1.1 hsuenaga continue;
539 1.1 hsuenaga }
540 1.1 hsuenaga
541 1.1 hsuenaga reg = MV_TDMA_BAR_BASE(base);
542 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);
543 1.1 hsuenaga
544 1.1 hsuenaga reg = MV_TDMA_ATTR_TARGET(target);
545 1.1 hsuenaga reg |= MV_TDMA_ATTR_ATTR(attr);
546 1.1 hsuenaga reg |= MV_TDMA_ATTR_SIZE(size);
547 1.1 hsuenaga reg |= MV_TDMA_ATTR_ENABLE;
548 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);
549 1.1 hsuenaga
550 1.1 hsuenaga window++;
551 1.1 hsuenaga }
552 1.1 hsuenaga
553 1.1 hsuenaga return 0;
554 1.1 hsuenaga }
555 1.1 hsuenaga
556 1.1 hsuenaga /*
557 1.1 hsuenaga * Timer handling
558 1.1 hsuenaga */
559 1.1 hsuenaga STATIC void
560 1.1 hsuenaga mvxpsec_timer(void *aux)
561 1.1 hsuenaga {
562 1.1 hsuenaga struct mvxpsec_softc *sc = aux;
563 1.1 hsuenaga struct mvxpsec_packet *mv_p;
564 1.1 hsuenaga uint32_t reg;
565 1.1 hsuenaga int ndone;
566 1.1 hsuenaga int refill;
567 1.1 hsuenaga int s;
568 1.1 hsuenaga
569 1.1 hsuenaga /* IPL_SOFTCLOCK */
570 1.1 hsuenaga
571 1.1 hsuenaga log(LOG_ERR, "%s: device timeout.\n", __func__);
572 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
573 1.1 hsuenaga mvxpsec_dump_reg(sc);
574 1.1 hsuenaga #endif
575 1.1 hsuenaga
576 1.1 hsuenaga s = splnet();
577 1.1 hsuenaga /* stop security accelerator */
578 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
579 1.1 hsuenaga
580 1.1 hsuenaga /* stop TDMA */
581 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);
582 1.1 hsuenaga
583 1.1 hsuenaga /* cleanup packet queue */
584 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx);
585 1.1 hsuenaga ndone = 0;
586 1.1 hsuenaga while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
587 1.1 hsuenaga SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);
588 1.1 hsuenaga
589 1.1 hsuenaga mv_p->crp->crp_etype = EINVAL;
590 1.1 hsuenaga mvxpsec_done_packet(mv_p);
591 1.1 hsuenaga ndone++;
592 1.1 hsuenaga }
593 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
594 1.1 hsuenaga sc->sc_flags &= ~HW_RUNNING;
595 1.1 hsuenaga refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
596 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
597 1.1 hsuenaga
598 1.1 hsuenaga /* reenable TDMA */
599 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0)
600 1.1 hsuenaga panic("%s: failed to reset DMA DEVICE. give up.", __func__);
601 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
602 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
603 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
604 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
605 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
606 1.1 hsuenaga reg = MV_TDMA_DEFAULT_CONTROL;
607 1.1 hsuenaga reg |= MV_TDMA_CONTROL_ENABLE;
608 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);
609 1.1 hsuenaga
610 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0)
611 1.1 hsuenaga panic("%s: failed to reset MVXPSEC. give up.", __func__);
612 1.1 hsuenaga reg = MV_ACC_CONFIG_MULT_PKT;
613 1.1 hsuenaga reg |= MV_ACC_CONFIG_WAIT_TDMA;
614 1.1 hsuenaga reg |= MV_ACC_CONFIG_ACT_TDMA;
615 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
616 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
617 1.1 hsuenaga
618 1.1 hsuenaga if (refill) {
619 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx);
620 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
621 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
622 1.1 hsuenaga }
623 1.1 hsuenaga
624 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
625 1.1 hsuenaga splx(s);
626 1.1 hsuenaga }
627 1.1 hsuenaga
628 1.1 hsuenaga /*
629 1.1 hsuenaga * DMA handling
630 1.1 hsuenaga */
631 1.1 hsuenaga
632 1.1 hsuenaga /*
633 1.1 hsuenaga * Allocate kernel devmem and DMA safe memory with bus_dma API
634 1.1 hsuenaga * used for DMA descriptors.
635 1.1 hsuenaga *
 * If phys != 0, assume phys is DMA-safe memory and bypass the
 * allocator.
638 1.1 hsuenaga */
639 1.1 hsuenaga STATIC struct mvxpsec_devmem *
640 1.1 hsuenaga mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
641 1.1 hsuenaga {
642 1.1 hsuenaga struct mvxpsec_devmem *devmem;
643 1.1 hsuenaga bus_dma_segment_t seg;
644 1.1 hsuenaga int rseg;
645 1.1 hsuenaga int err;
646 1.1 hsuenaga
647 1.1 hsuenaga if (sc == NULL)
648 1.1 hsuenaga return NULL;
649 1.1 hsuenaga
650 1.1 hsuenaga devmem = kmem_alloc(sizeof(*devmem), KM_NOSLEEP);
651 1.1 hsuenaga if (devmem == NULL) {
652 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't alloc kmem\n");
653 1.1 hsuenaga return NULL;
654 1.1 hsuenaga }
655 1.1 hsuenaga
656 1.1 hsuenaga devmem->size = size;
657 1.1 hsuenaga
658 1.1 hsuenaga if (phys) {
659 1.1 hsuenaga seg.ds_addr = phys;
660 1.1 hsuenaga seg.ds_len = devmem->size;
661 1.1 hsuenaga rseg = 1;
662 1.1 hsuenaga err = 0;
663 1.1 hsuenaga }
664 1.1 hsuenaga else {
665 1.1 hsuenaga err = bus_dmamem_alloc(sc->sc_dmat,
666 1.1 hsuenaga devmem->size, PAGE_SIZE, 0,
667 1.1 hsuenaga &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
668 1.1 hsuenaga }
669 1.1 hsuenaga if (err) {
670 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
671 1.1 hsuenaga goto fail_kmem_free;
672 1.1 hsuenaga }
673 1.1 hsuenaga
674 1.1 hsuenaga err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
675 1.1 hsuenaga devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
676 1.1 hsuenaga if (err) {
677 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
678 1.1 hsuenaga goto fail_dmamem_free;
679 1.1 hsuenaga }
680 1.1 hsuenaga
681 1.1 hsuenaga err = bus_dmamap_create(sc->sc_dmat,
682 1.1 hsuenaga size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
683 1.1 hsuenaga if (err) {
684 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
685 1.1 hsuenaga goto fail_unmap;
686 1.1 hsuenaga }
687 1.1 hsuenaga
688 1.1 hsuenaga err = bus_dmamap_load(sc->sc_dmat,
689 1.1 hsuenaga devmem->map, devmem->kva, devmem->size, NULL,
690 1.1 hsuenaga BUS_DMA_NOWAIT);
691 1.1 hsuenaga if (err) {
692 1.1 hsuenaga aprint_error_dev(sc->sc_dev,
693 1.1 hsuenaga "can't load DMA buffer VA:%p PA:0x%08x\n",
694 1.1 hsuenaga devmem->kva, (int)seg.ds_addr);
695 1.1 hsuenaga goto fail_destroy;
696 1.1 hsuenaga }
697 1.1 hsuenaga
698 1.1 hsuenaga return devmem;
699 1.1 hsuenaga
700 1.1 hsuenaga fail_destroy:
701 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, devmem->map);
702 1.1 hsuenaga fail_unmap:
703 1.1 hsuenaga bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
704 1.1 hsuenaga fail_dmamem_free:
705 1.1 hsuenaga bus_dmamem_free(sc->sc_dmat, &seg, rseg);
706 1.1 hsuenaga fail_kmem_free:
707 1.1 hsuenaga kmem_free(devmem, sizeof(*devmem));
708 1.1 hsuenaga
709 1.1 hsuenaga return NULL;
710 1.1 hsuenaga }
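/*
 * Usage sketch (illustrative only; 'sram_pa' and 'len' are hypothetical
 * names, and the first call mirrors the one in mvxpsec_init_dma() below):
 *
 *	struct mvxpsec_devmem *dm;
 *
 *	// phys == 0: allocate fresh DMA-safe memory via bus_dmamem_alloc()
 *	dm = mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
 *
 *	// phys != 0: wrap an already DMA-safe region, skipping allocation
 *	dm = mvxpsec_alloc_devmem(sc, sram_pa, len);
 *
 * In both cases the returned devmem comes back mapped (devmem->kva) and
 * loaded into devmem->map, ready to hold DMA descriptors.
 */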
711 1.1 hsuenaga
712 1.1 hsuenaga /*
713 1.1 hsuenaga * Get DMA Descriptor from (DMA safe) descriptor pool.
714 1.1 hsuenaga */
715 1.1 hsuenaga INLINE struct mvxpsec_descriptor_handle *
716 1.1 hsuenaga mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
717 1.1 hsuenaga {
718 1.1 hsuenaga struct mvxpsec_descriptor_handle *entry;
719 1.1 hsuenaga
	/* must be called with sc->sc_dma_mtx held */
721 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx));
722 1.1 hsuenaga
723 1.1 hsuenaga if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
724 1.1 hsuenaga return NULL;
725 1.1 hsuenaga
726 1.1 hsuenaga entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
727 1.1 hsuenaga sc->sc_desc_ring_prod++;
728 1.1 hsuenaga if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
729 1.1 hsuenaga sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;
730 1.1 hsuenaga
731 1.1 hsuenaga return entry;
732 1.1 hsuenaga }
733 1.1 hsuenaga
734 1.1 hsuenaga /*
735 1.1 hsuenaga * Put DMA Descriptor to descriptor pool.
736 1.1 hsuenaga */
737 1.1 hsuenaga _INLINE void
738 1.1 hsuenaga mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
739 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh)
740 1.1 hsuenaga {
	/* must be called with sc->sc_dma_mtx held */
742 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx));
743 1.1 hsuenaga
744 1.1 hsuenaga sc->sc_desc_ring_cons++;
745 1.1 hsuenaga if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
746 1.1 hsuenaga sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
747 1.1 hsuenaga
748 1.1 hsuenaga return;
749 1.1 hsuenaga }
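/*
 * Note on the descriptor ring discipline (see also mvxpsec_dma_free() and
 * mvxpsec_init_dma()): descriptors are handed out and returned strictly in
 * FIFO order, so only the producer/consumer indices are tracked and
 * mvxpsec_dma_putdesc() never needs to look at 'dh' itself.  The ring is
 * exhausted when sc_desc_ring_prod catches up with sc_desc_ring_cons, at
 * which point mvxpsec_dma_getdesc() returns NULL.
 */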
750 1.1 hsuenaga
751 1.1 hsuenaga /*
752 1.1 hsuenaga * Setup DMA Descriptor
 * Copy 'size' bytes from 'src' to 'dst'.
 * Either 'src' or 'dst' must be an SRAM address.
755 1.1 hsuenaga */
756 1.1 hsuenaga INLINE void
757 1.1 hsuenaga mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
758 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size)
759 1.1 hsuenaga {
760 1.1 hsuenaga struct mvxpsec_descriptor *desc;
761 1.1 hsuenaga
762 1.1 hsuenaga desc = (struct mvxpsec_descriptor *)dh->_desc;
763 1.1 hsuenaga
764 1.1 hsuenaga desc->tdma_dst = dst;
765 1.1 hsuenaga desc->tdma_src = src;
766 1.1 hsuenaga desc->tdma_word0 = size;
767 1.1 hsuenaga if (size != 0)
768 1.1 hsuenaga desc->tdma_word0 |= MV_TDMA_CNT_OWN;
	/* a descriptor with size == 0 is owned by the ACC, not the TDMA */
770 1.1 hsuenaga
771 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
772 1.1 hsuenaga mvxpsec_dump_dmaq(dh);
773 1.1 hsuenaga #endif
774 1.1 hsuenaga
775 1.1 hsuenaga }
776 1.1 hsuenaga
777 1.1 hsuenaga /*
 * Concatenate two DMA descriptors
779 1.1 hsuenaga */
780 1.1 hsuenaga INLINE void
781 1.1 hsuenaga mvxpsec_dma_cat(struct mvxpsec_softc *sc,
782 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh1,
783 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh2)
784 1.1 hsuenaga {
785 1.1 hsuenaga ((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
786 1.1 hsuenaga MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
787 1.1 hsuenaga }
788 1.1 hsuenaga
789 1.1 hsuenaga /*
790 1.1 hsuenaga * Schedule DMA Copy
791 1.1 hsuenaga */
792 1.1 hsuenaga INLINE int
793 1.1 hsuenaga mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
794 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size)
795 1.1 hsuenaga {
796 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh;
797 1.1 hsuenaga
798 1.1 hsuenaga dh = mvxpsec_dma_getdesc(sc);
799 1.1 hsuenaga if (dh == NULL) {
800 1.1 hsuenaga log(LOG_ERR, "%s: descriptor full\n", __func__);
801 1.1 hsuenaga return -1;
802 1.1 hsuenaga }
803 1.1 hsuenaga
804 1.1 hsuenaga mvxpsec_dma_setup(dh, dst, src, size);
805 1.1 hsuenaga if (r->dma_head == NULL) {
806 1.1 hsuenaga r->dma_head = dh;
807 1.1 hsuenaga r->dma_last = dh;
808 1.1 hsuenaga r->dma_size = 1;
809 1.1 hsuenaga }
810 1.1 hsuenaga else {
811 1.1 hsuenaga mvxpsec_dma_cat(sc, r->dma_last, dh);
812 1.1 hsuenaga r->dma_last = dh;
813 1.1 hsuenaga r->dma_size++;
814 1.1 hsuenaga }
815 1.1 hsuenaga
816 1.1 hsuenaga return 0;
817 1.1 hsuenaga }
818 1.1 hsuenaga
819 1.1 hsuenaga INLINE int
820 1.1 hsuenaga mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
821 1.1 hsuenaga uint32_t dst, uint32_t src, uint32_t size)
822 1.1 hsuenaga {
	if (size == 0) /* size 0 marks the special ACC-activation descriptor */
824 1.1 hsuenaga return 0;
825 1.1 hsuenaga
826 1.1 hsuenaga return mvxpsec_dma_copy0(sc, r, dst, src, size);
827 1.1 hsuenaga }
828 1.1 hsuenaga
829 1.1 hsuenaga /*
830 1.1 hsuenaga * Schedule ACC Activate
831 1.1 hsuenaga */
832 1.1 hsuenaga INLINE int
833 1.1 hsuenaga mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
834 1.1 hsuenaga {
835 1.1 hsuenaga return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
836 1.1 hsuenaga }
837 1.1 hsuenaga
838 1.1 hsuenaga /*
839 1.1 hsuenaga * Finalize DMA setup
840 1.1 hsuenaga */
841 1.1 hsuenaga INLINE void
842 1.1 hsuenaga mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
843 1.1 hsuenaga {
844 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh;
845 1.1 hsuenaga
846 1.1 hsuenaga dh = r->dma_last;
847 1.1 hsuenaga ((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
848 1.1 hsuenaga MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
849 1.1 hsuenaga }
850 1.1 hsuenaga
851 1.1 hsuenaga /*
852 1.1 hsuenaga * Free entire DMA ring
853 1.1 hsuenaga */
854 1.1 hsuenaga INLINE void
855 1.1 hsuenaga mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
856 1.1 hsuenaga {
857 1.1 hsuenaga sc->sc_desc_ring_cons += r->dma_size;
858 1.1 hsuenaga if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
859 1.1 hsuenaga sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
860 1.1 hsuenaga r->dma_head = NULL;
861 1.1 hsuenaga r->dma_last = NULL;
862 1.1 hsuenaga r->dma_size = 0;
863 1.1 hsuenaga }
864 1.1 hsuenaga
865 1.1 hsuenaga /*
866 1.1 hsuenaga * create DMA descriptor chain for the packet
867 1.1 hsuenaga */
868 1.1 hsuenaga INLINE int
869 1.1 hsuenaga mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
870 1.1 hsuenaga {
871 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
872 1.1 hsuenaga uint32_t src, dst, len;
873 1.1 hsuenaga uint32_t pkt_off, pkt_off_r;
874 1.1 hsuenaga int err;
875 1.1 hsuenaga int i;
876 1.1 hsuenaga
	/* must be called with sc->sc_dma_mtx held */
878 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx));
879 1.1 hsuenaga
880 1.1 hsuenaga /*
881 1.1 hsuenaga * set offset for mem->device copy
882 1.1 hsuenaga *
883 1.1 hsuenaga * typical packet image:
884 1.1 hsuenaga *
885 1.1 hsuenaga * enc_ivoff
886 1.1 hsuenaga * mac_off
887 1.1 hsuenaga * |
888 1.1 hsuenaga * | enc_off
889 1.1 hsuenaga * | |
890 1.1 hsuenaga * v v
891 1.1 hsuenaga * +----+--------...
892 1.1 hsuenaga * |IV |DATA
893 1.1 hsuenaga * +----+--------...
894 1.1 hsuenaga */
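	/*
	 * Illustrative example (hypothetical offsets): with mac_off = 8,
	 * enc_ivoff = 8 and enc_off = 16 (8 bytes of headers that need
	 * neither encryption nor authentication, then an 8-byte IV, then
	 * the payload), the checks below pick pkt_off = 8, so the first
	 * 8 bytes of the data buffer are skipped when copying to SRAM and
	 * skipped again (via pkt_off_r) when copying the result back.
	 */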
895 1.1 hsuenaga pkt_off = 0;
896 1.1 hsuenaga if (mv_p->mac_off > 0)
897 1.1 hsuenaga pkt_off = mv_p->mac_off;
898 1.1 hsuenaga if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
899 1.1 hsuenaga pkt_off = mv_p->enc_ivoff;
900 1.1 hsuenaga if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
901 1.1 hsuenaga pkt_off = mv_p->enc_off;
902 1.1 hsuenaga pkt_off_r = pkt_off;
903 1.1 hsuenaga
904 1.1 hsuenaga /* make DMA descriptors to copy packet header: DRAM -> SRAM */
905 1.1 hsuenaga dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
906 1.1 hsuenaga src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
907 1.1 hsuenaga len = sizeof(mv_p->pkt_header);
908 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
909 1.1 hsuenaga if (__predict_false(err))
910 1.1 hsuenaga return err;
911 1.1 hsuenaga
912 1.1 hsuenaga /*
913 1.1 hsuenaga * make DMA descriptors to copy session header: DRAM -> SRAM
	 * the session header already in SRAM can be reused if the session is unchanged.
915 1.1 hsuenaga */
916 1.1 hsuenaga if (sc->sc_last_session != mv_s) {
917 1.1 hsuenaga dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
918 1.1 hsuenaga src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
919 1.1 hsuenaga len = sizeof(mv_s->session_header);
920 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
921 1.1 hsuenaga if (__predict_false(err))
922 1.1 hsuenaga return err;
923 1.1 hsuenaga sc->sc_last_session = mv_s;
924 1.1 hsuenaga }
925 1.1 hsuenaga
926 1.1 hsuenaga /* make DMA descriptor to copy payload data: DRAM -> SRAM */
927 1.1 hsuenaga dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
928 1.1 hsuenaga for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
929 1.1 hsuenaga src = mv_p->data_map->dm_segs[i].ds_addr;
930 1.1 hsuenaga len = mv_p->data_map->dm_segs[i].ds_len;
931 1.1 hsuenaga if (pkt_off) {
932 1.1 hsuenaga if (len <= pkt_off) {
933 1.1 hsuenaga /* ignore the segment */
934 1.1 hsuenaga dst += len;
935 1.1 hsuenaga pkt_off -= len;
936 1.1 hsuenaga continue;
937 1.1 hsuenaga }
938 1.1 hsuenaga /* copy from the middle of the segment */
939 1.1 hsuenaga dst += pkt_off;
940 1.1 hsuenaga src += pkt_off;
941 1.1 hsuenaga len -= pkt_off;
942 1.1 hsuenaga pkt_off = 0;
943 1.1 hsuenaga }
944 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
945 1.1 hsuenaga if (__predict_false(err))
946 1.1 hsuenaga return err;
947 1.1 hsuenaga dst += len;
948 1.1 hsuenaga }
949 1.1 hsuenaga
950 1.1 hsuenaga /* make special descriptor to activate security accelerator */
951 1.1 hsuenaga err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
952 1.1 hsuenaga if (__predict_false(err))
953 1.1 hsuenaga return err;
954 1.1 hsuenaga
955 1.1 hsuenaga /* make DMA descriptors to copy payload: SRAM -> DRAM */
956 1.1 hsuenaga src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
957 1.1 hsuenaga for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
958 1.1 hsuenaga dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
959 1.1 hsuenaga len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
960 1.1 hsuenaga if (pkt_off_r) {
961 1.1 hsuenaga if (len <= pkt_off_r) {
962 1.1 hsuenaga /* ignore the segment */
963 1.1 hsuenaga src += len;
964 1.1 hsuenaga pkt_off_r -= len;
965 1.1 hsuenaga continue;
966 1.1 hsuenaga }
967 1.1 hsuenaga /* copy from the middle of the segment */
968 1.1 hsuenaga src += pkt_off_r;
969 1.1 hsuenaga dst += pkt_off_r;
970 1.1 hsuenaga len -= pkt_off_r;
971 1.1 hsuenaga pkt_off_r = 0;
972 1.1 hsuenaga }
973 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
974 1.1 hsuenaga if (__predict_false(err))
975 1.1 hsuenaga return err;
976 1.1 hsuenaga src += len;
977 1.1 hsuenaga }
978 1.1 hsuenaga KASSERT(pkt_off == 0);
979 1.1 hsuenaga KASSERT(pkt_off_r == 0);
980 1.1 hsuenaga
981 1.1 hsuenaga /*
982 1.1 hsuenaga * make DMA descriptors to copy packet header: SRAM->DRAM
983 1.1 hsuenaga * if IV is present in the payload, no need to copy.
984 1.1 hsuenaga */
985 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) {
986 1.1 hsuenaga dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
987 1.1 hsuenaga src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
988 1.1 hsuenaga len = sizeof(mv_p->pkt_header);
989 1.1 hsuenaga err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
990 1.1 hsuenaga if (__predict_false(err))
991 1.1 hsuenaga return err;
992 1.1 hsuenaga }
993 1.1 hsuenaga
994 1.1 hsuenaga return 0;
995 1.1 hsuenaga }
996 1.1 hsuenaga
997 1.1 hsuenaga INLINE int
998 1.1 hsuenaga mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
999 1.1 hsuenaga {
1000 1.1 hsuenaga /* sync packet header */
1001 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat,
1002 1.1 hsuenaga mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
1003 1.1 hsuenaga BUS_DMASYNC_PREWRITE);
1004 1.1 hsuenaga
1005 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
1006 1.1 hsuenaga /* sync session header */
1007 1.1 hsuenaga if (mvxpsec_debug != 0) {
1008 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
1009 1.1 hsuenaga
		/* only debug code touches the session header after newsession */
1011 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat,
1012 1.1 hsuenaga mv_s->session_header_map,
1013 1.1 hsuenaga 0, sizeof(mv_s->session_header),
1014 1.1 hsuenaga BUS_DMASYNC_PREWRITE);
1015 1.1 hsuenaga }
1016 1.1 hsuenaga #endif
1017 1.1 hsuenaga
1018 1.1 hsuenaga /* sync packet buffer */
1019 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat,
1020 1.1 hsuenaga mv_p->data_map, 0, mv_p->data_len,
1021 1.1 hsuenaga BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1022 1.1 hsuenaga
1023 1.1 hsuenaga return 0;
1024 1.1 hsuenaga }
1025 1.1 hsuenaga
1026 1.1 hsuenaga /*
1027 1.1 hsuenaga * Initialize MVXPSEC Internal SRAM
1028 1.1 hsuenaga *
 * - must be called after DMA initialization.
1030 1.1 hsuenaga * - make VM mapping for SRAM area on MBus.
1031 1.1 hsuenaga */
1032 1.1 hsuenaga STATIC int
1033 1.1 hsuenaga mvxpsec_init_sram(struct mvxpsec_softc *sc)
1034 1.1 hsuenaga {
1035 1.1 hsuenaga uint32_t tag, target, attr, base, size;
1036 1.1 hsuenaga vaddr_t va;
1037 1.1 hsuenaga int window;
1038 1.1 hsuenaga
1039 1.1 hsuenaga switch (sc->sc_dev->dv_unit) {
1040 1.1 hsuenaga case 0:
1041 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT0;
1042 1.1 hsuenaga break;
1043 1.1 hsuenaga case 1:
1044 1.1 hsuenaga tag = ARMADAXP_TAG_CRYPT1;
1045 1.1 hsuenaga break;
1046 1.1 hsuenaga default:
1047 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1048 1.1 hsuenaga return -1;
1049 1.1 hsuenaga }
1050 1.1 hsuenaga
1051 1.1 hsuenaga window = mvsoc_target(tag, &target, &attr, &base, &size);
1052 1.1 hsuenaga if (window >= nwindow) {
1053 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1054 1.1 hsuenaga return -1;
1055 1.1 hsuenaga }
1056 1.1 hsuenaga
1057 1.1 hsuenaga if (sizeof(struct mvxpsec_crypt_sram) > size) {
1058 1.1 hsuenaga aprint_error_dev(sc->sc_dev,
		    "SRAM Data Structure Exceeds SRAM window size.\n");
1060 1.1 hsuenaga return -1;
1061 1.1 hsuenaga }
1062 1.1 hsuenaga
1063 1.1 hsuenaga aprint_normal_dev(sc->sc_dev,
1064 1.1 hsuenaga "internal SRAM window at 0x%08x-0x%08x",
1065 1.1 hsuenaga base, base + size - 1);
1066 1.1 hsuenaga sc->sc_sram_pa = base;
1067 1.1 hsuenaga
1068 1.1 hsuenaga /* get vmspace to read/write device internal SRAM */
1069 1.1 hsuenaga va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
1070 1.1 hsuenaga UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
1071 1.1 hsuenaga if (va == 0) {
1072 1.1 hsuenaga aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
1073 1.1 hsuenaga sc->sc_sram_va = NULL;
1074 1.1 hsuenaga aprint_normal("\n");
1075 1.1 hsuenaga return 0;
1076 1.1 hsuenaga }
	/* XXX: not working; PMAP_NOCACHE seems to have no effect? */
1078 1.1 hsuenaga pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
1079 1.1 hsuenaga pmap_update(pmap_kernel());
1080 1.1 hsuenaga sc->sc_sram_va = (void *)va;
1081 1.1 hsuenaga aprint_normal(" va %p\n", sc->sc_sram_va);
1082 1.1 hsuenaga memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);
1083 1.1 hsuenaga
1084 1.1 hsuenaga return 0;
1085 1.1 hsuenaga }
1086 1.1 hsuenaga
1087 1.1 hsuenaga /*
1088 1.1 hsuenaga * Initialize TDMA engine.
1089 1.1 hsuenaga */
1090 1.1 hsuenaga STATIC int
1091 1.1 hsuenaga mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
1092 1.1 hsuenaga {
1093 1.1 hsuenaga struct mvxpsec_descriptor_handle *dh;
1094 1.1 hsuenaga uint8_t *va;
1095 1.1 hsuenaga paddr_t pa;
1096 1.1 hsuenaga off_t va_off, pa_off;
1097 1.1 hsuenaga int i, n, seg, ndh;
1098 1.1 hsuenaga
	/* Init device's control parameters (still disabled here) */
1100 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);
1101 1.1 hsuenaga
1102 1.1 hsuenaga /* Init Software DMA Handlers */
1103 1.1 hsuenaga sc->sc_devmem_desc =
1104 1.1 hsuenaga mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
1105 1.1 hsuenaga if (sc->sc_devmem_desc == NULL)
1106 1.1 hsuenaga panic("Cannot allocate memory\n");
1107 1.1 hsuenaga ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
1108 1.1 hsuenaga * MVXPSEC_DMA_DESC_PAGES;
1109 1.1 hsuenaga sc->sc_desc_ring =
1110 1.1 hsuenaga kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
1111 1.1 hsuenaga KM_NOSLEEP);
1112 1.1 hsuenaga if (sc->sc_desc_ring == NULL)
1113 1.1 hsuenaga panic("Cannot allocate memory\n");
1114 1.1 hsuenaga aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
1115 1.1 hsuenaga ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);
1116 1.1 hsuenaga
1117 1.1 hsuenaga ndh = 0;
1118 1.1 hsuenaga for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
1119 1.1 hsuenaga va = devmem_va(sc->sc_devmem_desc);
1120 1.1 hsuenaga pa = devmem_pa(sc->sc_devmem_desc, seg);
1121 1.1 hsuenaga n = devmem_palen(sc->sc_devmem_desc, seg) /
1122 1.1 hsuenaga sizeof(struct mvxpsec_descriptor);
1123 1.1 hsuenaga va_off = (PAGE_SIZE * seg);
1124 1.1 hsuenaga pa_off = 0;
1125 1.1 hsuenaga for (i = 0; i < n; i++) {
1126 1.1 hsuenaga dh = &sc->sc_desc_ring[ndh];
1127 1.1 hsuenaga dh->map = devmem_map(sc->sc_devmem_desc);
1128 1.1 hsuenaga dh->off = va_off + pa_off;
1129 1.1 hsuenaga dh->_desc = (void *)(va + va_off + pa_off);
1130 1.1 hsuenaga dh->phys_addr = pa + pa_off;
1131 1.1 hsuenaga pa_off += sizeof(struct mvxpsec_descriptor);
1132 1.1 hsuenaga ndh++;
1133 1.1 hsuenaga }
1134 1.1 hsuenaga }
1135 1.1 hsuenaga sc->sc_desc_ring_size = ndh;
1136 1.1 hsuenaga sc->sc_desc_ring_prod = 0;
1137 1.1 hsuenaga sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;
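	/*
	 * The consumer index starts one slot behind the producer so that
	 * mvxpsec_dma_getdesc() can hand out up to sc_desc_ring_size - 1
	 * descriptors before prod == cons signals that the ring is
	 * exhausted; one slot is intentionally kept unused to distinguish
	 * a full ring from an empty one.
	 */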
1138 1.1 hsuenaga
1139 1.1 hsuenaga return 0;
1140 1.1 hsuenaga }
1141 1.1 hsuenaga
1142 1.1 hsuenaga /*
 * Wait for the TDMA controller to become idle
1144 1.1 hsuenaga */
1145 1.1 hsuenaga INLINE int
1146 1.1 hsuenaga mvxpsec_dma_wait(struct mvxpsec_softc *sc)
1147 1.1 hsuenaga {
1148 1.1 hsuenaga int retry = 0;
1149 1.1 hsuenaga
1150 1.1 hsuenaga while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
1151 1.1 hsuenaga delay(mvxpsec_wait_interval);
1152 1.1 hsuenaga if (retry++ >= mvxpsec_wait_retry)
1153 1.1 hsuenaga return -1;
1154 1.1 hsuenaga }
1155 1.1 hsuenaga return 0;
1156 1.1 hsuenaga }
1157 1.1 hsuenaga
1158 1.1 hsuenaga /*
 * Wait for the Security Accelerator to become idle
1160 1.1 hsuenaga */
1161 1.1 hsuenaga INLINE int
1162 1.1 hsuenaga mvxpsec_acc_wait(struct mvxpsec_softc *sc)
1163 1.1 hsuenaga {
1164 1.1 hsuenaga int retry = 0;
1165 1.1 hsuenaga
1166 1.1 hsuenaga while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
1167 1.1 hsuenaga delay(mvxpsec_wait_interval);
1168 1.1 hsuenaga if (++retry >= mvxpsec_wait_retry)
1169 1.1 hsuenaga return -1;
1170 1.1 hsuenaga }
1171 1.1 hsuenaga return 0;
1172 1.1 hsuenaga }
1173 1.1 hsuenaga
1174 1.1 hsuenaga /*
1175 1.1 hsuenaga * Entry of interrupt handler
1176 1.1 hsuenaga *
 * registered with the kernel via marvell_intr_establish()
1178 1.1 hsuenaga */
1179 1.1 hsuenaga int
1180 1.1 hsuenaga mvxpsec_intr(void *arg)
1181 1.1 hsuenaga {
1182 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1183 1.1 hsuenaga uint32_t v;
1184 1.1 hsuenaga
1185 1.1 hsuenaga /* IPL_NET */
1186 1.1 hsuenaga while ((v = mvxpsec_intr_ack(sc)) != 0) {
1187 1.1 hsuenaga mvxpsec_intr_cnt(sc, v);
1188 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
1189 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
1190 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
1191 1.1 hsuenaga mvxpsec_dump_reg(sc);
1192 1.1 hsuenaga #endif
1193 1.1 hsuenaga
1194 1.1 hsuenaga /* call high-level handlers */
1195 1.1 hsuenaga if (v & MVXPSEC_INT_ACCTDMA)
1196 1.1 hsuenaga mvxpsec_done(sc);
1197 1.1 hsuenaga }
1198 1.1 hsuenaga
1199 1.1 hsuenaga return 0;
1200 1.1 hsuenaga }
1201 1.1 hsuenaga
1202 1.1 hsuenaga INLINE void
1203 1.1 hsuenaga mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
1204 1.1 hsuenaga {
1205 1.1 hsuenaga struct mvxpsec_packet *mv_p;
1206 1.1 hsuenaga
	/* must be called with sc->sc_dma_mtx held */
1208 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_dma_mtx));
1209 1.1 hsuenaga
1210 1.1 hsuenaga /*
	 * there is only one interrupt context draining the run queue,
	 * so nothing else touches sc_run_queue here.
1213 1.1 hsuenaga */
1214 1.1 hsuenaga SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
1215 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring);
1216 1.1 hsuenaga }
1217 1.1 hsuenaga
1218 1.1 hsuenaga /*
 * Acknowledge the interrupt:
 *
 * read the cause bits, clear them, and return them.
1222 1.1 hsuenaga * NOTE: multiple cause bits may be returned at once.
1223 1.1 hsuenaga */
1224 1.1 hsuenaga STATIC uint32_t
1225 1.1 hsuenaga mvxpsec_intr_ack(struct mvxpsec_softc *sc)
1226 1.1 hsuenaga {
1227 1.1 hsuenaga uint32_t reg;
1228 1.1 hsuenaga
1229 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
1230 1.1 hsuenaga reg &= MVXPSEC_DEFAULT_INT;
1231 1.1 hsuenaga MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
1232 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1233 1.1 hsuenaga
1234 1.1 hsuenaga return reg;
1235 1.1 hsuenaga }
1236 1.1 hsuenaga
1237 1.1 hsuenaga /*
1238 1.1 hsuenaga * Entry of TDMA error interrupt handler
1239 1.1 hsuenaga *
 * registered with the kernel via marvell_intr_establish()
1241 1.1 hsuenaga */
1242 1.1 hsuenaga int
1243 1.1 hsuenaga mvxpsec_eintr(void *arg)
1244 1.1 hsuenaga {
1245 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1246 1.1 hsuenaga uint32_t err;
1247 1.1 hsuenaga
1248 1.1 hsuenaga /* IPL_NET */
1249 1.1 hsuenaga again:
1250 1.1 hsuenaga err = mvxpsec_eintr_ack(sc);
1251 1.1 hsuenaga if (err == 0)
1252 1.1 hsuenaga goto done;
1253 1.1 hsuenaga
1254 1.1 hsuenaga log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
1255 1.1 hsuenaga s_errreg(err));
1256 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
1257 1.1 hsuenaga mvxpsec_dump_reg(sc);
1258 1.1 hsuenaga #endif
1259 1.1 hsuenaga
1260 1.1 hsuenaga goto again;
1261 1.1 hsuenaga done:
1262 1.1 hsuenaga return 0;
1263 1.1 hsuenaga }
1264 1.1 hsuenaga
1265 1.1 hsuenaga /*
 * Acknowledge the TDMA error interrupt:
 *
 * read the cause bits, clear them, and return them.
1269 1.1 hsuenaga * NOTE: multiple cause bits may be returned at once.
1270 1.1 hsuenaga */
1271 1.1 hsuenaga STATIC uint32_t
1272 1.1 hsuenaga mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
1273 1.1 hsuenaga {
1274 1.1 hsuenaga uint32_t reg;
1275 1.1 hsuenaga
1276 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
1277 1.1 hsuenaga reg &= MVXPSEC_DEFAULT_ERR;
1278 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
1279 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1280 1.1 hsuenaga
1281 1.1 hsuenaga return reg;
1282 1.1 hsuenaga }
1283 1.1 hsuenaga
1284 1.1 hsuenaga /*
1285 1.1 hsuenaga * Interrupt statistics
1286 1.1 hsuenaga *
 * these are NOT statistics of how many times the events occurred;
 * they only count how many times the events were handled.
1289 1.1 hsuenaga */
1290 1.1 hsuenaga INLINE void
1291 1.1 hsuenaga mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
1292 1.1 hsuenaga {
1293 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_all);
1294 1.1 hsuenaga if (cause & MVXPSEC_INT_AUTH)
1295 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_auth);
1296 1.1 hsuenaga if (cause & MVXPSEC_INT_DES)
1297 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_des);
1298 1.1 hsuenaga if (cause & MVXPSEC_INT_AES_ENC)
1299 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
1300 1.1 hsuenaga if (cause & MVXPSEC_INT_AES_DEC)
1301 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
1302 1.1 hsuenaga if (cause & MVXPSEC_INT_ENC)
1303 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_enc);
1304 1.1 hsuenaga if (cause & MVXPSEC_INT_SA)
1305 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_sa);
1306 1.1 hsuenaga if (cause & MVXPSEC_INT_ACCTDMA)
1307 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
1308 1.1 hsuenaga if (cause & MVXPSEC_INT_TDMA_COMP)
1309 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_comp);
1310 1.1 hsuenaga if (cause & MVXPSEC_INT_TDMA_OWN)
1311 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_own);
1312 1.1 hsuenaga if (cause & MVXPSEC_INT_ACCTDMA_CONT)
1313 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
1314 1.1 hsuenaga }
1315 1.1 hsuenaga
1316 1.1 hsuenaga /*
1317 1.1 hsuenaga * Setup MVXPSEC header structure.
1318 1.1 hsuenaga *
1319 1.1 hsuenaga  * the header contains the descriptor for the security accelerator,
1320 1.1 hsuenaga  * the key material of the ciphers, the IVs of the ciphers and MACs, ...
1321 1.1 hsuenaga  *
1322 1.1 hsuenaga  * the header is transferred to the MVXPSEC internal SRAM by TDMA,
1323 1.1 hsuenaga  * and parsed by the MVXPSEC H/W.
1324 1.1 hsuenaga */
1325 1.1 hsuenaga STATIC int
1326 1.1 hsuenaga mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
1327 1.1 hsuenaga {
1328 1.1 hsuenaga struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
1329 1.1 hsuenaga int enc_start, enc_len, iv_offset;
1330 1.1 hsuenaga int mac_start, mac_len, mac_offset;
1331 1.1 hsuenaga
1332 1.1 hsuenaga /* offset -> device address */
1333 1.1 hsuenaga enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
1334 1.1 hsuenaga enc_len = mv_p->enc_len;
1335 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV)
1336 1.1 hsuenaga iv_offset = mv_p->enc_ivoff;
1337 1.1 hsuenaga else
1338 1.1 hsuenaga iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
1339 1.1 hsuenaga mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
1340 1.1 hsuenaga mac_len = mv_p->mac_len;
1341 1.1 hsuenaga mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);
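	/*
	 * The offsets above are request-relative; MVXPSEC_SRAM_PAYLOAD_DA()
	 * maps them to device addresses inside the internal SRAM where TDMA
	 * places the payload.  An external IV (CRP_EXT_IV) already refers to
	 * the dedicated SRAM IV slot, so it is used without translation.
	 */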
1342 1.1 hsuenaga
1343 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1344 1.1 hsuenaga "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
1345 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1346 1.1 hsuenaga "ENC from 0x%08x\n", enc_start);
1347 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1348 1.1 hsuenaga "MAC from 0x%08x\n", mac_start);
1349 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1350 1.1 hsuenaga "MAC to 0x%08x\n", mac_offset);
1351 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1352 1.1 hsuenaga "ENC IV at 0x%08x\n", iv_offset);
1353 1.1 hsuenaga
1354 1.1 hsuenaga /* setup device addresses in Security Accelerator Descriptors */
1355 1.1 hsuenaga desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
1356 1.1 hsuenaga desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
1357 1.1 hsuenaga if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
1358 1.1 hsuenaga desc->acc_enckey =
1359 1.1 hsuenaga MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
1360 1.1 hsuenaga else
1361 1.1 hsuenaga desc->acc_enckey =
1362 1.1 hsuenaga MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
1363 1.1 hsuenaga desc->acc_enciv =
1364 1.1 hsuenaga MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);
1365 1.1 hsuenaga
1366 1.1 hsuenaga desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
1367 1.1 hsuenaga desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
1368 1.1 hsuenaga desc->acc_maciv =
1369 1.1 hsuenaga MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
1370 1.1 hsuenaga MVXPSEC_SRAM_MIV_OUT_DA);
1371 1.1 hsuenaga
1372 1.1 hsuenaga return 0;
1373 1.1 hsuenaga }
1374 1.1 hsuenaga
1375 1.1 hsuenaga /*
1376 1.1 hsuenaga  * constructor of session structure.
1377 1.1 hsuenaga  *
1378 1.1 hsuenaga  * this constructor will be called by the pool_cache framework.
1379 1.1 hsuenaga */
1380 1.1 hsuenaga STATIC int
1381 1.1 hsuenaga mvxpsec_session_ctor(void *arg, void *obj, int flags)
1382 1.1 hsuenaga {
1383 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1384 1.1 hsuenaga struct mvxpsec_session *mv_s = obj;
1385 1.1 hsuenaga
1386 1.1 hsuenaga /* pool is owned by softc */
1387 1.1 hsuenaga mv_s->sc = sc;
1388 1.1 hsuenaga
1389 1.1 hsuenaga /* Create and load DMA map for session header */
1390 1.1 hsuenaga mv_s->session_header_map = 0;
1391 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat,
1392 1.1 hsuenaga sizeof(mv_s->session_header), 1,
1393 1.1 hsuenaga sizeof(mv_s->session_header), 0,
1394 1.1 hsuenaga BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1395 1.1 hsuenaga &mv_s->session_header_map)) {
1396 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1397 1.1 hsuenaga goto fail;
1398 1.1 hsuenaga }
1399 1.1 hsuenaga if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
1400 1.1 hsuenaga &mv_s->session_header, sizeof(mv_s->session_header),
1401 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) {
1402 1.1 hsuenaga log(LOG_ERR, "%s: cannot load header\n", __func__);
1403 1.1 hsuenaga goto fail;
1404 1.1 hsuenaga }
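	/*
	 * The session header map stays loaded for the lifetime of this pool
	 * object; mvxpsec_newsession() only needs to bus_dmamap_sync() it
	 * after the key material is filled in, and the destructor tears the
	 * map down.
	 */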
1405 1.1 hsuenaga
1406 1.1 hsuenaga return 0;
1407 1.1 hsuenaga fail:
1408 1.1 hsuenaga if (mv_s->session_header_map)
1409 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1410 1.1 hsuenaga return ENOMEM;
1411 1.1 hsuenaga }
1412 1.1 hsuenaga
1413 1.1 hsuenaga /*
1414 1.1 hsuenaga  * destructor of session structure.
1415 1.1 hsuenaga  *
1416 1.1 hsuenaga  * this destructor will be called by the pool_cache framework.
1417 1.1 hsuenaga */
1418 1.1 hsuenaga STATIC void
1419 1.1 hsuenaga mvxpsec_session_dtor(void *arg, void *obj)
1420 1.1 hsuenaga {
1421 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1422 1.1 hsuenaga struct mvxpsec_session *mv_s = obj;
1423 1.1 hsuenaga
1424 1.1 hsuenaga if (mv_s->sc != sc)
1425 1.1 hsuenaga panic("inconsistent context\n");
1426 1.1 hsuenaga
1427 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1428 1.1 hsuenaga }
1429 1.1 hsuenaga
1430 1.1 hsuenaga /*
1431 1.1 hsuenaga * constructor of packet structure.
1432 1.1 hsuenaga */
1433 1.1 hsuenaga STATIC int
1434 1.1 hsuenaga mvxpsec_packet_ctor(void *arg, void *obj, int flags)
1435 1.1 hsuenaga {
1436 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1437 1.1 hsuenaga struct mvxpsec_packet *mv_p = obj;
1438 1.1 hsuenaga
1439 1.1 hsuenaga mv_p->dma_ring.dma_head = NULL;
1440 1.1 hsuenaga mv_p->dma_ring.dma_last = NULL;
1441 1.1 hsuenaga mv_p->dma_ring.dma_size = 0;
1442 1.1 hsuenaga
1443 1.1 hsuenaga /* Create and load DMA map for packet header */
1444 1.1 hsuenaga mv_p->pkt_header_map = 0;
1445 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat,
1446 1.1 hsuenaga sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
1447 1.1 hsuenaga BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1448 1.1 hsuenaga &mv_p->pkt_header_map)) {
1449 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1450 1.1 hsuenaga goto fail;
1451 1.1 hsuenaga }
1452 1.1 hsuenaga if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
1453 1.1 hsuenaga &mv_p->pkt_header, sizeof(mv_p->pkt_header),
1454 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) {
1455 1.1 hsuenaga log(LOG_ERR, "%s: cannot load header\n", __func__);
1456 1.1 hsuenaga goto fail;
1457 1.1 hsuenaga }
1458 1.1 hsuenaga
1459 1.1 hsuenaga /* Create DMA map for session data. */
1460 1.1 hsuenaga mv_p->data_map = 0;
1461 1.1 hsuenaga if (bus_dmamap_create(sc->sc_dmat,
1462 1.1 hsuenaga MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
1463 1.1 hsuenaga 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
1464 1.1 hsuenaga log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1465 1.1 hsuenaga goto fail;
1466 1.1 hsuenaga }
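	/*
	 * Unlike the header map, data_map is only created here: it is sized
	 * for the largest request (MVXPSEC_DMA_MAX_SIZE / _SEGS) and is
	 * loaded per request by setdata/setmbuf/setuio, then unloaded when
	 * the packet is completed or deallocated.
	 */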
1467 1.1 hsuenaga
1468 1.1 hsuenaga return 0;
1469 1.1 hsuenaga fail:
1470 1.1 hsuenaga if (mv_p->pkt_header_map)
1471 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1472 1.1 hsuenaga if (mv_p->data_map)
1473 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1474 1.1 hsuenaga return ENOMEM;
1475 1.1 hsuenaga }
1476 1.1 hsuenaga
1477 1.1 hsuenaga /*
1478 1.1 hsuenaga  * destructor of packet structure.
1479 1.1 hsuenaga */
1480 1.1 hsuenaga STATIC void
1481 1.1 hsuenaga mvxpsec_packet_dtor(void *arg, void *obj)
1482 1.1 hsuenaga {
1483 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1484 1.1 hsuenaga struct mvxpsec_packet *mv_p = obj;
1485 1.1 hsuenaga
1486 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx);
1487 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring);
1488 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx);
1489 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1490 1.1 hsuenaga bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1491 1.1 hsuenaga }
1492 1.1 hsuenaga
1493 1.1 hsuenaga /*
1494 1.1 hsuenaga  * allocate a new session structure.
1495 1.1 hsuenaga */
1496 1.1 hsuenaga STATIC struct mvxpsec_session *
1497 1.1 hsuenaga mvxpsec_session_alloc(struct mvxpsec_softc *sc)
1498 1.1 hsuenaga {
1499 1.1 hsuenaga struct mvxpsec_session *mv_s;
1500 1.1 hsuenaga
1501 1.2 christos mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
1502 1.1 hsuenaga if (mv_s == NULL) {
1503 1.1 hsuenaga log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
1504 1.1 hsuenaga return NULL;
1505 1.1 hsuenaga }
1506 1.1 hsuenaga mv_s->refs = 1; /* 0 means the session is already invalid */
1507 1.1 hsuenaga mv_s->sflags = 0;
1508 1.1 hsuenaga
1509 1.1 hsuenaga return mv_s;
1510 1.1 hsuenaga }
1511 1.1 hsuenaga
1512 1.1 hsuenaga /*
1513 1.1 hsuenaga * deallocate session structure.
1514 1.1 hsuenaga */
1515 1.1 hsuenaga STATIC void
1516 1.1 hsuenaga mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
1517 1.1 hsuenaga {
1518 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
1519 1.1 hsuenaga
1520 1.1 hsuenaga mv_s->sflags |= DELETED;
1521 1.1 hsuenaga mvxpsec_session_unref(mv_s);
1522 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1523 1.1 hsuenaga
1524 1.1 hsuenaga return;
1525 1.1 hsuenaga }
1526 1.1 hsuenaga
1527 1.1 hsuenaga STATIC int
1528 1.1 hsuenaga mvxpsec_session_ref(struct mvxpsec_session *mv_s)
1529 1.1 hsuenaga {
1530 1.1 hsuenaga uint32_t refs;
1531 1.1 hsuenaga
1532 1.1 hsuenaga if (mv_s->sflags & DELETED) {
1533 1.1 hsuenaga log(LOG_ERR,
1534 1.1 hsuenaga "%s: session is already deleted.\n", __func__);
1535 1.1 hsuenaga return -1;
1536 1.1 hsuenaga }
1537 1.1 hsuenaga
1538 1.1 hsuenaga refs = atomic_inc_32_nv(&mv_s->refs);
1539 1.1 hsuenaga if (refs == 1) {
1540 1.1 hsuenaga /*
1541 1.1 hsuenaga * a session with refs == 0 is
1542 1.1 hsuenaga * already invalidated. revert it.
1543 1.1 hsuenaga * XXX: use CAS ?
1544 1.1 hsuenaga */
1545 1.1 hsuenaga atomic_dec_32(&mv_s->refs);
1546 1.1 hsuenaga log(LOG_ERR,
1547 1.1 hsuenaga "%s: session is already invalidated.\n", __func__);
1548 1.1 hsuenaga return -1;
1549 1.1 hsuenaga }
1550 1.1 hsuenaga
1551 1.1 hsuenaga return 0;
1552 1.1 hsuenaga }
1553 1.1 hsuenaga
1554 1.1 hsuenaga STATIC void
1555 1.1 hsuenaga mvxpsec_session_unref(struct mvxpsec_session *mv_s)
1556 1.1 hsuenaga {
1557 1.1 hsuenaga uint32_t refs;
1558 1.1 hsuenaga
1559 1.1 hsuenaga refs = atomic_dec_32_nv(&mv_s->refs);
1560 1.1 hsuenaga if (refs == 0)
1561 1.1 hsuenaga pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
1562 1.1 hsuenaga }
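/*
 * Reference-count lifecycle, as implemented above: a session starts with
 * refs == 1 from mvxpsec_session_alloc(); each in-flight packet takes an
 * extra reference via mvxpsec_session_ref().  mvxpsec_session_dealloc()
 * marks the session DELETED and drops that initial reference, so the
 * object goes back to the pool_cache only after the last packet finishes.
 */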
1563 1.1 hsuenaga
1564 1.1 hsuenaga /*
1565 1.1 hsuenaga  * look up whether the session exists
1566 1.1 hsuenaga */
1567 1.1 hsuenaga INLINE struct mvxpsec_session *
1568 1.1 hsuenaga mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
1569 1.1 hsuenaga {
1570 1.1 hsuenaga struct mvxpsec_session *mv_s;
1571 1.1 hsuenaga int session;
1572 1.1 hsuenaga
1573 1.1 hsuenaga /* must be called with sc->sc_session_mtx held */
1574 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_session_mtx));
1575 1.1 hsuenaga
1576 1.1 hsuenaga session = MVXPSEC_SESSION(sid);
1577 1.1 hsuenaga if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
1578 1.1 hsuenaga log(LOG_ERR, "%s: session number too large %d\n",
1579 1.1 hsuenaga __func__, session);
1580 1.1 hsuenaga return NULL;
1581 1.1 hsuenaga }
1582 1.1 hsuenaga if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
1583 1.1 hsuenaga log(LOG_ERR, "%s: invalid session %d\n",
1584 1.1 hsuenaga __func__, session);
1585 1.1 hsuenaga return NULL;
1586 1.1 hsuenaga }
1587 1.1 hsuenaga
1588 1.1 hsuenaga KASSERT(mv_s->sid == session);
1589 1.1 hsuenaga
1590 1.1 hsuenaga return mv_s;
1591 1.1 hsuenaga }
1592 1.1 hsuenaga
1593 1.1 hsuenaga /*
1594 1.1 hsuenaga  * allocate a new packet structure.
1595 1.1 hsuenaga */
1596 1.1 hsuenaga STATIC struct mvxpsec_packet *
1597 1.1 hsuenaga mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
1598 1.1 hsuenaga {
1599 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
1600 1.1 hsuenaga struct mvxpsec_packet *mv_p;
1601 1.1 hsuenaga
1602 1.1 hsuenaga /* must be called with sc->sc_queue_mtx held. */
1603 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx));
1604 1.1 hsuenaga /* must be called with sc->sc_session_mtx held. */
1605 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_session_mtx));
1606 1.1 hsuenaga
1607 1.1 hsuenaga if (mvxpsec_session_ref(mv_s) < 0) {
1608 1.1 hsuenaga log(LOG_ERR, "%s: invalid session.\n", __func__);
1609 1.1 hsuenaga return NULL;
1610 1.1 hsuenaga }
1611 1.1 hsuenaga
1612 1.1 hsuenaga if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
1613 1.1 hsuenaga SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
1614 1.1 hsuenaga sc->sc_free_qlen--;
1615 1.1 hsuenaga }
1616 1.1 hsuenaga else {
1617 1.2 christos mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
1618 1.1 hsuenaga if (mv_p == NULL) {
1619 1.1 hsuenaga log(LOG_ERR, "%s: cannot allocate memory\n",
1620 1.1 hsuenaga __func__);
1621 1.1 hsuenaga mvxpsec_session_unref(mv_s);
1622 1.1 hsuenaga return NULL;
1623 1.1 hsuenaga }
1624 1.1 hsuenaga }
1625 1.1 hsuenaga mv_p->mv_s = mv_s;
1626 1.1 hsuenaga mv_p->flags = 0;
1627 1.1 hsuenaga mv_p->data_ptr = NULL;
1628 1.1 hsuenaga
1629 1.1 hsuenaga return mv_p;
1630 1.1 hsuenaga }
1631 1.1 hsuenaga
1632 1.1 hsuenaga /*
1633 1.1 hsuenaga * free packet structure.
1634 1.1 hsuenaga */
1635 1.1 hsuenaga STATIC void
1636 1.1 hsuenaga mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
1637 1.1 hsuenaga {
1638 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
1639 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
1640 1.1 hsuenaga
1641 1.1 hsuenaga /* must be called with sc->sc_queue_mtx held */
1642 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx));
1643 1.1 hsuenaga
1644 1.1 hsuenaga if (mv_p->dma_ring.dma_size != 0) {
1645 1.1 hsuenaga sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
1646 1.1 hsuenaga }
1647 1.1 hsuenaga mv_p->dma_ring.dma_head = NULL;
1648 1.1 hsuenaga mv_p->dma_ring.dma_last = NULL;
1649 1.1 hsuenaga mv_p->dma_ring.dma_size = 0;
1650 1.1 hsuenaga
1651 1.1 hsuenaga if (mv_p->data_map) {
1652 1.1 hsuenaga if (mv_p->flags & RDY_DATA) {
1653 1.1 hsuenaga bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1654 1.1 hsuenaga mv_p->flags &= ~RDY_DATA;
1655 1.1 hsuenaga }
1656 1.1 hsuenaga }
1657 1.1 hsuenaga
1658 1.1 hsuenaga if (sc->sc_free_qlen > sc->sc_wait_qlimit)
1659 1.1 hsuenaga pool_cache_put(sc->sc_packet_pool, mv_p);
1660 1.1 hsuenaga else {
1661 1.1 hsuenaga SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
1662 1.1 hsuenaga sc->sc_free_qlen++;
1663 1.1 hsuenaga }
1664 1.1 hsuenaga mvxpsec_session_unref(mv_s);
1665 1.1 hsuenaga }
1666 1.1 hsuenaga
1667 1.1 hsuenaga INLINE void
1668 1.1 hsuenaga mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
1669 1.1 hsuenaga {
1670 1.1 hsuenaga struct mvxpsec_softc *sc = mv_p->mv_s->sc;
1671 1.1 hsuenaga struct mvxpsec_packet *last_packet;
1672 1.1 hsuenaga struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;
1673 1.1 hsuenaga
1674 1.1 hsuenaga /* must be called with sc->sc_queue_mtx held */
1675 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx));
1676 1.1 hsuenaga
1677 1.1 hsuenaga if (sc->sc_wait_qlen == 0) {
1678 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1679 1.1 hsuenaga sc->sc_wait_qlen++;
1680 1.1 hsuenaga mv_p->flags |= SETUP_DONE;
1681 1.1 hsuenaga return;
1682 1.1 hsuenaga }
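	/*
	 * Not the first packet in the wait queue: append it and splice its
	 * DMA descriptors onto the previous packet's chain, so the whole
	 * wait queue can later be handed to TDMA as a single descriptor
	 * list by mvxpsec_dispatch_queue().
	 */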
1683 1.1 hsuenaga
1684 1.1 hsuenaga last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
1685 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1686 1.1 hsuenaga sc->sc_wait_qlen++;
1687 1.1 hsuenaga
1688 1.1 hsuenaga /* chain the DMA */
1689 1.1 hsuenaga cur_dma = mv_p->dma_ring.dma_head;
1690 1.1 hsuenaga prev_dma = last_packet->dma_ring.dma_last;
1691 1.1 hsuenaga mvxpsec_dma_cat(sc, prev_dma, cur_dma);
1692 1.1 hsuenaga mv_p->flags |= SETUP_DONE;
1693 1.1 hsuenaga }
1694 1.1 hsuenaga
1695 1.1 hsuenaga /*
1696 1.1 hsuenaga * called by interrupt handler
1697 1.1 hsuenaga */
1698 1.1 hsuenaga STATIC int
1699 1.1 hsuenaga mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
1700 1.1 hsuenaga {
1701 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
1702 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
1703 1.1 hsuenaga
1704 1.1 hsuenaga KASSERT((mv_p->flags & RDY_DATA));
1705 1.1 hsuenaga KASSERT((mv_p->flags & SETUP_DONE));
1706 1.1 hsuenaga
1707 1.1 hsuenaga /* unload data */
1708 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
1709 1.1 hsuenaga 0, mv_p->data_len,
1710 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1711 1.1 hsuenaga bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1712 1.1 hsuenaga mv_p->flags &= ~RDY_DATA;
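	/*
	 * The POSTREAD|POSTWRITE sync above makes the engine's output in
	 * the data buffer visible to the CPU before the map is unloaded,
	 * whichever direction (encrypt, decrypt, or MAC-only) this request
	 * ran in.
	 */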
1713 1.1 hsuenaga
1714 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
1715 1.1 hsuenaga if (mvxpsec_debug != 0) {
1716 1.1 hsuenaga int s;
1717 1.1 hsuenaga
1718 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
1719 1.1 hsuenaga 0, sizeof(mv_p->pkt_header),
1720 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1721 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
1722 1.1 hsuenaga 0, sizeof(mv_s->session_header),
1723 1.1 hsuenaga BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1724 1.1 hsuenaga
1725 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
1726 1.1 hsuenaga char buf[1500];
1727 1.1 hsuenaga struct mbuf *m;
1728 1.1 hsuenaga struct uio *uio;
1729 1.1 hsuenaga size_t len;
1730 1.1 hsuenaga
1731 1.1 hsuenaga switch (mv_p->data_type) {
1732 1.1 hsuenaga case MVXPSEC_DATA_MBUF:
1733 1.1 hsuenaga m = mv_p->data_mbuf;
1734 1.1 hsuenaga len = m->m_pkthdr.len;
1735 1.1 hsuenaga if (len > sizeof(buf))
1736 1.1 hsuenaga len = sizeof(buf);
1737 1.1 hsuenaga m_copydata(m, 0, len, buf);
1738 1.1 hsuenaga break;
1739 1.1 hsuenaga case MVXPSEC_DATA_UIO:
1740 1.1 hsuenaga uio = mv_p->data_uio;
1741 1.1 hsuenaga len = uio->uio_resid;
1742 1.1 hsuenaga if (len > sizeof(buf))
1743 1.1 hsuenaga len = sizeof(buf);
1744 1.1 hsuenaga cuio_copydata(uio, 0, len, buf);
1745 1.1 hsuenaga break;
1746 1.1 hsuenaga default:
1747 1.1 hsuenaga len = 0;
1748 1.1 hsuenaga }
1749 1.1 hsuenaga if (len > 0)
1750 1.1 hsuenaga mvxpsec_dump_data(__func__, buf, len);
1751 1.1 hsuenaga }
1752 1.1 hsuenaga
1753 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
1754 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1755 1.1 hsuenaga "%s: session_descriptor:\n", __func__);
1756 1.1 hsuenaga mvxpsec_dump_packet_desc(__func__, mv_p);
1757 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1758 1.1 hsuenaga "%s: session_data:\n", __func__);
1759 1.1 hsuenaga mvxpsec_dump_packet_data(__func__, mv_p);
1760 1.1 hsuenaga }
1761 1.1 hsuenaga
1762 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
1763 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
1764 1.1 hsuenaga "%s: SRAM\n", __func__);
1765 1.1 hsuenaga mvxpsec_dump_sram(__func__, sc, 2000);
1766 1.1 hsuenaga }
1767 1.1 hsuenaga
1768 1.1 hsuenaga s = MVXPSEC_READ(sc, MV_ACC_STATUS);
1769 1.1 hsuenaga if (s & MV_ACC_STATUS_MAC_ERR) {
1770 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
1771 1.1 hsuenaga "%s: Message Authentication Failed.\n", __func__);
1772 1.1 hsuenaga }
1773 1.1 hsuenaga }
1774 1.1 hsuenaga #endif
1775 1.1 hsuenaga
1776 1.1 hsuenaga /* copy back IV */
1777 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) {
1778 1.1 hsuenaga memcpy(mv_p->ext_iv,
1779 1.1 hsuenaga &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
1780 1.1 hsuenaga mv_p->ext_iv = NULL;
1781 1.1 hsuenaga mv_p->ext_ivlen = 0;
1782 1.1 hsuenaga }
1783 1.1 hsuenaga
1784 1.1 hsuenaga /* notify opencrypto */
1785 1.1 hsuenaga mv_p->crp->crp_etype = 0;
1786 1.1 hsuenaga crypto_done(mv_p->crp);
1787 1.1 hsuenaga mv_p->crp = NULL;
1788 1.1 hsuenaga
1789 1.1 hsuenaga /* unblock driver */
1790 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p);
1791 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1792 1.1 hsuenaga
1793 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, packet_ok);
1794 1.1 hsuenaga
1795 1.1 hsuenaga return 0;
1796 1.1 hsuenaga }
1797 1.1 hsuenaga
1798 1.1 hsuenaga
1799 1.1 hsuenaga /*
1800 1.1 hsuenaga * Opencrypto API registration
1801 1.1 hsuenaga */
1802 1.1 hsuenaga int
1803 1.1 hsuenaga mvxpsec_register(struct mvxpsec_softc *sc)
1804 1.1 hsuenaga {
1805 1.1 hsuenaga int oplen = SRAM_PAYLOAD_SIZE;
1806 1.1 hsuenaga int flags = 0;
1807 1.1 hsuenaga int err;
1808 1.1 hsuenaga
1809 1.1 hsuenaga sc->sc_nsessions = 0;
1810 1.1 hsuenaga sc->sc_cid = crypto_get_driverid(0);
1811 1.1 hsuenaga if (sc->sc_cid < 0) {
1812 1.1 hsuenaga log(LOG_ERR,
1813 1.1 hsuenaga "%s: crypto_get_driverid() failed.\n", __func__);
1814 1.1 hsuenaga err = EINVAL;
1815 1.1 hsuenaga goto done;
1816 1.1 hsuenaga }
1817 1.1 hsuenaga
1818 1.1 hsuenaga /* Ciphers */
1819 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1820 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1821 1.1 hsuenaga if (err)
1822 1.1 hsuenaga goto done;
1823 1.1 hsuenaga
1824 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1825 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1826 1.1 hsuenaga if (err)
1827 1.1 hsuenaga goto done;
1828 1.1 hsuenaga
1829 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1830 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1831 1.1 hsuenaga if (err)
1832 1.1 hsuenaga goto done;
1833 1.1 hsuenaga
1834 1.1 hsuenaga /* MACs */
1835 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1836 1.1 hsuenaga oplen, flags,
1837 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1838 1.1 hsuenaga if (err)
1839 1.1 hsuenaga goto done;
1840 1.1 hsuenaga
1841 1.1 hsuenaga err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1842 1.1 hsuenaga oplen, flags,
1843 1.1 hsuenaga mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1844 1.1 hsuenaga if (err)
1845 1.1 hsuenaga goto done;
1846 1.1 hsuenaga
1847 1.1 hsuenaga #ifdef DEBUG
1848 1.1 hsuenaga log(LOG_DEBUG,
1849 1.1 hsuenaga "%s: registered to opencrypto(max data = %d bytes)\n",
1850 1.1 hsuenaga device_xname(sc->sc_dev), oplen);
1851 1.1 hsuenaga #endif
1852 1.1 hsuenaga
1853 1.1 hsuenaga err = 0;
1854 1.1 hsuenaga done:
1855 1.1 hsuenaga return err;
1856 1.1 hsuenaga }
1857 1.1 hsuenaga
1858 1.1 hsuenaga /*
1859 1.1 hsuenaga * Create new opencrypto session
1860 1.1 hsuenaga *
1861 1.1 hsuenaga * - register cipher key, mac key.
1862 1.1 hsuenaga * - initialize mac internal state.
1863 1.1 hsuenaga */
1864 1.1 hsuenaga int
1865 1.1 hsuenaga mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1866 1.1 hsuenaga {
1867 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
1868 1.1 hsuenaga struct mvxpsec_session *mv_s = NULL;
1869 1.1 hsuenaga struct cryptoini *c;
1870 1.1 hsuenaga static int hint = 0;
1871 1.1 hsuenaga int session = -1;
1872 1.1 hsuenaga int sid;
1873 1.1 hsuenaga int err;
1874 1.1 hsuenaga int i;
1875 1.1 hsuenaga
1876 1.1 hsuenaga /* allocate driver session context */
1877 1.1 hsuenaga mv_s = mvxpsec_session_alloc(sc);
1878 1.1 hsuenaga if (mv_s == NULL)
1879 1.1 hsuenaga return ENOMEM;
1880 1.1 hsuenaga
1881 1.1 hsuenaga /*
1882 1.1 hsuenaga * lookup opencrypto session table
1883 1.1 hsuenaga *
1884 1.1 hsuenaga  * we hold sc_session_mtx from here on.
1885 1.1 hsuenaga */
1886 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx);
1887 1.1 hsuenaga if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1888 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
1889 1.1 hsuenaga log(LOG_ERR, "%s: too many IPsec SA(max %d)\n",
1890 1.1 hsuenaga __func__, MVXPSEC_MAX_SESSIONS);
1891 1.1 hsuenaga mvxpsec_session_dealloc(mv_s);
1892 1.1 hsuenaga return ENOMEM;
1893 1.1 hsuenaga }
1894 1.1 hsuenaga for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1895 1.1 hsuenaga if (sc->sc_sessions[i])
1896 1.1 hsuenaga continue;
1897 1.1 hsuenaga session = i;
1898 1.1 hsuenaga hint = session + 1;
1899 1.1 hsuenaga break;
1900 1.1 hsuenaga }
1901 1.1 hsuenaga if (session < 0) {
1902 1.1 hsuenaga for (i = 0; i < hint; i++) {
1903 1.1 hsuenaga if (sc->sc_sessions[i])
1904 1.1 hsuenaga continue;
1905 1.1 hsuenaga session = i;
1906 1.1 hsuenaga hint = session + 1;
1907 1.1 hsuenaga break;
1908 1.1 hsuenaga }
1909 1.1 hsuenaga if (session < 0) {
1910 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
1911 1.1 hsuenaga /* session full */
1912 1.1 hsuenaga log(LOG_ERR, "%s: too many IPsec SA(max %d)\n",
1913 1.1 hsuenaga __func__, MVXPSEC_MAX_SESSIONS);
1914 1.1 hsuenaga mvxpsec_session_dealloc(mv_s);
1915 1.1 hsuenaga hint = 0;
1916 1.1 hsuenaga return ENOMEM;
1917 1.1 hsuenaga }
1918 1.1 hsuenaga }
1919 1.1 hsuenaga if (hint >= MVXPSEC_MAX_SESSIONS)
1920 1.1 hsuenaga hint = 0;
1921 1.1 hsuenaga sc->sc_nsessions++;
1922 1.1 hsuenaga sc->sc_sessions[session] = mv_s;
1923 1.1 hsuenaga #ifdef DEBUG
1924 1.1 hsuenaga log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1925 1.1 hsuenaga #endif
1926 1.1 hsuenaga
1927 1.1 hsuenaga sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1928 1.1 hsuenaga mv_s->sid = sid;
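	/*
	 * The sid handed back to opencrypto encodes the device unit and the
	 * session index; mvxpsec_session_lookup() and mvxpsec_freesession()
	 * later recover the index with MVXPSEC_SESSION().
	 */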
1929 1.1 hsuenaga
1930 1.1 hsuenaga /* setup the session key ... */
1931 1.1 hsuenaga for (c = cri; c; c = c->cri_next) {
1932 1.1 hsuenaga switch (c->cri_alg) {
1933 1.1 hsuenaga case CRYPTO_DES_CBC:
1934 1.1 hsuenaga case CRYPTO_3DES_CBC:
1935 1.1 hsuenaga case CRYPTO_AES_CBC:
1936 1.1 hsuenaga /* key */
1937 1.1 hsuenaga if (mvxpsec_key_precomp(c->cri_alg,
1938 1.1 hsuenaga c->cri_key, c->cri_klen,
1939 1.1 hsuenaga &mv_s->session_header.crp_key,
1940 1.1 hsuenaga &mv_s->session_header.crp_key_d)) {
1941 1.1 hsuenaga log(LOG_ERR,
1942 1.1 hsuenaga "%s: Invalid cipher key for %s.\n",
1943 1.1 hsuenaga __func__, s_ctlalg(c->cri_alg));
1944 1.1 hsuenaga err = EINVAL;
1945 1.1 hsuenaga goto fail;
1946 1.1 hsuenaga }
1947 1.1 hsuenaga if (mv_s->sflags & RDY_CRP_KEY) {
1948 1.1 hsuenaga log(LOG_WARNING,
1949 1.1 hsuenaga "%s: overwrite cipher: %s->%s.\n",
1950 1.1 hsuenaga __func__,
1951 1.1 hsuenaga s_ctlalg(mv_s->cipher_alg),
1952 1.1 hsuenaga s_ctlalg(c->cri_alg));
1953 1.1 hsuenaga }
1954 1.1 hsuenaga mv_s->sflags |= RDY_CRP_KEY;
1955 1.1 hsuenaga mv_s->enc_klen = c->cri_klen;
1956 1.1 hsuenaga mv_s->cipher_alg = c->cri_alg;
1957 1.1 hsuenaga /* create per session IV (compatible with KAME IPsec) */
1958 1.1 hsuenaga cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1959 1.1 hsuenaga mv_s->sflags |= RDY_CRP_IV;
1960 1.1 hsuenaga break;
1961 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
1962 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
1963 1.1 hsuenaga /* key */
1964 1.1 hsuenaga if (mvxpsec_hmac_precomp(c->cri_alg,
1965 1.1 hsuenaga c->cri_key, c->cri_klen,
1966 1.1 hsuenaga (uint32_t *)&mv_s->session_header.miv_in,
1967 1.1 hsuenaga (uint32_t *)&mv_s->session_header.miv_out)) {
1968 1.1 hsuenaga log(LOG_ERR,
1969 1.1 hsuenaga "%s: Invalid MAC key\n", __func__);
1970 1.1 hsuenaga err = EINVAL;
1971 1.1 hsuenaga goto fail;
1972 1.1 hsuenaga }
1973 1.1 hsuenaga if (mv_s->sflags & RDY_MAC_KEY ||
1974 1.1 hsuenaga mv_s->sflags & RDY_MAC_IV) {
1975 1.1 hsuenaga log(LOG_ERR,
1976 1.1 hsuenaga "%s: overwrite HMAC: %s->%s.\n",
1977 1.1 hsuenaga __func__, s_ctlalg(mv_s->hmac_alg),
1978 1.1 hsuenaga s_ctlalg(c->cri_alg));
1979 1.1 hsuenaga }
1980 1.1 hsuenaga mv_s->sflags |= RDY_MAC_KEY;
1981 1.1 hsuenaga mv_s->sflags |= RDY_MAC_IV;
1982 1.1 hsuenaga
1983 1.1 hsuenaga mv_s->mac_klen = c->cri_klen;
1984 1.1 hsuenaga mv_s->hmac_alg = c->cri_alg;
1985 1.1 hsuenaga break;
1986 1.1 hsuenaga default:
1987 1.1 hsuenaga log(LOG_ERR, "%s: Unknown algorithm %d\n",
1988 1.1 hsuenaga __func__, c->cri_alg);
1989 1.1 hsuenaga err = EINVAL;
1990 1.1 hsuenaga goto fail;
1991 1.1 hsuenaga }
1992 1.1 hsuenaga }
1993 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1994 1.1 hsuenaga "H/W Crypto session (id:%u) added.\n", session);
1995 1.1 hsuenaga
1996 1.1 hsuenaga *sidp = sid;
1997 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, session_new);
1998 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
1999 1.1 hsuenaga
2000 1.1 hsuenaga /* sync the session header (it's never touched after here) */
2001 1.1 hsuenaga bus_dmamap_sync(sc->sc_dmat,
2002 1.1 hsuenaga mv_s->session_header_map,
2003 1.1 hsuenaga 0, sizeof(mv_s->session_header),
2004 1.1 hsuenaga BUS_DMASYNC_PREWRITE);
2005 1.1 hsuenaga
2006 1.1 hsuenaga return 0;
2007 1.1 hsuenaga
2008 1.1 hsuenaga fail:
2009 1.1 hsuenaga sc->sc_nsessions--;
2010 1.1 hsuenaga sc->sc_sessions[session] = NULL;
2011 1.1 hsuenaga hint = session;
2012 1.1 hsuenaga if (mv_s)
2013 1.1 hsuenaga mvxpsec_session_dealloc(mv_s);
2014 1.1 hsuenaga log(LOG_WARNING,
2015 1.1 hsuenaga "%s: Failed to add H/W crypto session (id:%u): err=%d\n",
2016 1.1 hsuenaga __func__, session, err);
2017 1.1 hsuenaga
2018 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2019 1.1 hsuenaga return err;
2020 1.1 hsuenaga }
2021 1.1 hsuenaga
2022 1.1 hsuenaga /*
2023 1.1 hsuenaga * remove opencrypto session
2024 1.1 hsuenaga */
2025 1.1 hsuenaga int
2026 1.1 hsuenaga mvxpsec_freesession(void *arg, uint64_t tid)
2027 1.1 hsuenaga {
2028 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
2029 1.1 hsuenaga struct mvxpsec_session *mv_s;
2030 1.1 hsuenaga int session;
2031 1.1 hsuenaga uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2032 1.1 hsuenaga
2033 1.1 hsuenaga session = MVXPSEC_SESSION(sid);
2034 1.1 hsuenaga if (session < 0 || session >= MVXPSEC_MAX_SESSIONS) {
2035 1.1 hsuenaga log(LOG_ERR, "%s: invalid session (id:%u)\n",
2036 1.1 hsuenaga __func__, session);
2037 1.1 hsuenaga return EINVAL;
2038 1.1 hsuenaga }
2039 1.1 hsuenaga
2040 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx);
2041 1.1 hsuenaga if ( (mv_s = sc->sc_sessions[session]) == NULL) {
2042 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2043 1.1 hsuenaga #ifdef DEBUG
2044 1.1 hsuenaga log(LOG_DEBUG, "%s: session %d already inactivated\n",
2045 1.1 hsuenaga __func__, session);
2046 1.1 hsuenaga #endif
2047 1.1 hsuenaga return ENOENT;
2048 1.1 hsuenaga }
2049 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2050 1.1 hsuenaga "%s: inactivate session %d\n", __func__, session);
2051 1.1 hsuenaga
2052 1.1 hsuenaga /* inactivate mvxpsec session */
2053 1.1 hsuenaga sc->sc_sessions[session] = NULL;
2054 1.1 hsuenaga sc->sc_nsessions--;
2055 1.1 hsuenaga sc->sc_last_session = NULL;
2056 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2057 1.1 hsuenaga
2058 1.1 hsuenaga KASSERT(sc->sc_nsessions >= 0);
2059 1.1 hsuenaga KASSERT(mv_s->sid == sid);
2060 1.1 hsuenaga
2061 1.1 hsuenaga mvxpsec_session_dealloc(mv_s);
2062 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2063 1.1 hsuenaga "H/W Crypto session (id: %d) deleted.\n", session);
2064 1.1 hsuenaga
2065 1.1 hsuenaga /* force unblock opencrypto */
2066 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2067 1.1 hsuenaga
2068 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, session_free);
2069 1.1 hsuenaga
2070 1.1 hsuenaga return 0;
2071 1.1 hsuenaga }
2072 1.1 hsuenaga
2073 1.1 hsuenaga /*
2074 1.1 hsuenaga * process data with existing session
2075 1.1 hsuenaga */
2076 1.1 hsuenaga int
2077 1.1 hsuenaga mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2078 1.1 hsuenaga {
2079 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
2080 1.1 hsuenaga struct mvxpsec_session *mv_s;
2081 1.1 hsuenaga struct mvxpsec_packet *mv_p;
2082 1.1 hsuenaga int q_full;
2083 1.1 hsuenaga int running;
2084 1.1 hsuenaga int err;
2085 1.1 hsuenaga
2086 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx);
2087 1.1 hsuenaga
2088 1.1 hsuenaga /*
2089 1.1 hsuenaga * lookup session
2090 1.1 hsuenaga */
2091 1.1 hsuenaga mutex_enter(&sc->sc_session_mtx);
2092 1.1 hsuenaga mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2093 1.1 hsuenaga if (__predict_false(mv_s == NULL)) {
2094 1.1 hsuenaga err = EINVAL;
2095 1.1 hsuenaga mv_p = NULL;
2096 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2097 1.1 hsuenaga goto fail;
2098 1.1 hsuenaga }
2099 1.1 hsuenaga mv_p = mvxpsec_packet_alloc(mv_s);
2100 1.1 hsuenaga if (__predict_false(mv_p == NULL)) {
2101 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2102 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
2103 1.1 hsuenaga return ERESTART; /* => queued in opencrypto layer */
2104 1.1 hsuenaga }
2105 1.1 hsuenaga mutex_exit(&sc->sc_session_mtx);
2106 1.1 hsuenaga
2107 1.1 hsuenaga /*
2108 1.1 hsuenaga * check queue status
2109 1.1 hsuenaga */
2110 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET
2111 1.1 hsuenaga q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2112 1.1 hsuenaga #else
2113 1.1 hsuenaga q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2114 1.1 hsuenaga #endif
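	/*
	 * With MVXPSEC_MULTI_PACKET the wait queue may hold up to
	 * sc_wait_qlimit requests that are later kicked as one DMA chain;
	 * without it the engine runs one packet at a time, so any queued
	 * packet already counts as "full".
	 */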
2115 1.1 hsuenaga running = (sc->sc_flags & HW_RUNNING) ? 1: 0;
2116 1.1 hsuenaga if (q_full) {
2117 1.1 hsuenaga /* input queue is full. */
2118 1.1 hsuenaga if (!running && sc->sc_wait_qlen > 0)
2119 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
2120 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, queue_full);
2121 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p);
2122 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
2123 1.1 hsuenaga return ERESTART; /* => queued in opencrypto layer */
2124 1.1 hsuenaga }
2125 1.1 hsuenaga
2126 1.1 hsuenaga /*
2127 1.1 hsuenaga * Load and setup packet data
2128 1.1 hsuenaga */
2129 1.1 hsuenaga err = mvxpsec_packet_setcrp(mv_p, crp);
2130 1.1 hsuenaga if (__predict_false(err))
2131 1.1 hsuenaga goto fail;
2132 1.1 hsuenaga
2133 1.1 hsuenaga /*
2134 1.1 hsuenaga * Setup DMA descriptor chains
2135 1.1 hsuenaga */
2136 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx);
2137 1.1 hsuenaga err = mvxpsec_dma_copy_packet(sc, mv_p);
2138 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx);
2139 1.1 hsuenaga if (__predict_false(err))
2140 1.1 hsuenaga goto fail;
2141 1.1 hsuenaga
2142 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
2143 1.1 hsuenaga mvxpsec_dump_packet(__func__, mv_p);
2144 1.1 hsuenaga #endif
2145 1.1 hsuenaga
2146 1.1 hsuenaga /*
2147 1.1 hsuenaga * Sync/inval the data cache
2148 1.1 hsuenaga */
2149 1.1 hsuenaga err = mvxpsec_dma_sync_packet(sc, mv_p);
2150 1.1 hsuenaga if (__predict_false(err))
2151 1.1 hsuenaga goto fail;
2152 1.1 hsuenaga
2153 1.1 hsuenaga /*
2154 1.1 hsuenaga * Enqueue the packet
2155 1.1 hsuenaga */
2156 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2157 1.1 hsuenaga #ifdef MVXPSEC_MULTI_PACKET
2158 1.1 hsuenaga mvxpsec_packet_enqueue(mv_p);
2159 1.1 hsuenaga if (!running)
2160 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
2161 1.1 hsuenaga #else
2162 1.1 hsuenaga SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2163 1.1 hsuenaga sc->sc_wait_qlen++;
2164 1.1 hsuenaga mv_p->flags |= SETUP_DONE;
2165 1.1 hsuenaga if (!running)
2166 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
2167 1.1 hsuenaga #endif
2168 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
2169 1.1 hsuenaga return 0;
2170 1.1 hsuenaga
2171 1.1 hsuenaga fail:
2172 1.1 hsuenaga /* Drop the incoming packet */
2173 1.1 hsuenaga mvxpsec_drop(sc, crp, mv_p, err);
2174 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
2175 1.1 hsuenaga return 0;
2176 1.1 hsuenaga }
2177 1.1 hsuenaga
2178 1.1 hsuenaga /*
2179 1.1 hsuenaga  * hand the processed packets back to the IP stack
2180 1.1 hsuenaga */
2181 1.1 hsuenaga void
2182 1.1 hsuenaga mvxpsec_done(void *arg)
2183 1.1 hsuenaga {
2184 1.1 hsuenaga struct mvxpsec_softc *sc = arg;
2185 1.1 hsuenaga struct mvxpsec_packet *mv_p;
2186 1.1 hsuenaga mvxpsec_queue_t ret_queue;
2187 1.1 hsuenaga int ndone;
2188 1.1 hsuenaga
2189 1.1 hsuenaga mutex_enter(&sc->sc_queue_mtx);
2190 1.1 hsuenaga
2191 1.1 hsuenaga /* stop wdog timer */
2192 1.1 hsuenaga callout_stop(&sc->sc_timeout);
2193 1.1 hsuenaga
2194 1.1 hsuenaga /* refill MVXPSEC */
2195 1.1 hsuenaga ret_queue = sc->sc_run_queue;
2196 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_run_queue);
2197 1.1 hsuenaga sc->sc_flags &= ~HW_RUNNING;
2198 1.1 hsuenaga if (sc->sc_wait_qlen > 0)
2199 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
2200 1.1 hsuenaga
2201 1.1 hsuenaga ndone = 0;
2202 1.1 hsuenaga while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2203 1.1 hsuenaga SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2204 1.1 hsuenaga mvxpsec_dma_free(sc, &mv_p->dma_ring);
2205 1.1 hsuenaga mvxpsec_done_packet(mv_p);
2206 1.1 hsuenaga ndone++;
2207 1.1 hsuenaga }
2208 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2209 1.1 hsuenaga
2210 1.1 hsuenaga mutex_exit(&sc->sc_queue_mtx);
2211 1.1 hsuenaga }
2212 1.1 hsuenaga
2213 1.1 hsuenaga /*
2214 1.1 hsuenaga * drop the packet
2215 1.1 hsuenaga */
2216 1.1 hsuenaga INLINE void
2217 1.1 hsuenaga mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2218 1.1 hsuenaga struct mvxpsec_packet *mv_p, int err)
2219 1.1 hsuenaga {
2220 1.1 hsuenaga /* must be called with sc->sc_queue_mtx held */
2221 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx));
2222 1.1 hsuenaga
2223 1.1 hsuenaga if (mv_p)
2224 1.1 hsuenaga mvxpsec_packet_dealloc(mv_p);
2225 1.1 hsuenaga if (err < 0)
2226 1.1 hsuenaga err = EINVAL;
2227 1.1 hsuenaga crp->crp_etype = err;
2228 1.1 hsuenaga crypto_done(crp);
2229 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, packet_err);
2230 1.1 hsuenaga
2231 1.1 hsuenaga /* dispatch other packets in queue */
2232 1.1 hsuenaga if (sc->sc_wait_qlen > 0 &&
2233 1.1 hsuenaga !(sc->sc_flags & HW_RUNNING))
2234 1.1 hsuenaga mvxpsec_dispatch_queue(sc);
2235 1.1 hsuenaga
2236 1.1 hsuenaga /* unblock driver for dropped packet */
2237 1.1 hsuenaga crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2238 1.1 hsuenaga }
2239 1.1 hsuenaga
2240 1.1 hsuenaga /* move wait queue entry to run queue */
2241 1.1 hsuenaga STATIC int
2242 1.1 hsuenaga mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2243 1.1 hsuenaga {
2244 1.1 hsuenaga struct mvxpsec_packet *mv_p;
2245 1.1 hsuenaga paddr_t head;
2246 1.1 hsuenaga int ndispatch = 0;
2247 1.1 hsuenaga
2248 1.1 hsuenaga /* must be called with sc->sc_queue_mtx held */
2249 1.1 hsuenaga KASSERT(mutex_owned(&sc->sc_queue_mtx));
2250 1.1 hsuenaga
2251 1.1 hsuenaga /* check there is any task */
2252 1.1 hsuenaga if (__predict_false(sc->sc_flags & HW_RUNNING)) {
2253 1.1 hsuenaga log(LOG_WARNING,
2254 1.1 hsuenaga "%s: another packet already exists.\n", __func__);
2255 1.1 hsuenaga return 0;
2256 1.1 hsuenaga }
2257 1.1 hsuenaga if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
2258 1.1 hsuenaga log(LOG_WARNING,
2259 1.1 hsuenaga "%s: no waiting packet yet (qlen=%d).\n",
2260 1.1 hsuenaga __func__, sc->sc_wait_qlen);
2261 1.1 hsuenaga return 0;
2262 1.1 hsuenaga }
2263 1.1 hsuenaga
2264 1.1 hsuenaga /* move queue */
2265 1.1 hsuenaga sc->sc_run_queue = sc->sc_wait_queue;
2266 1.1 hsuenaga sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2267 1.1 hsuenaga SIMPLEQ_INIT(&sc->sc_wait_queue);
2268 1.1 hsuenaga ndispatch = sc->sc_wait_qlen;
2269 1.1 hsuenaga sc->sc_wait_qlen = 0;
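	/*
	 * Kick sequence: take the head descriptor of the first queued
	 * packet, terminate the chain at the last packet, then program
	 * MV_TDMA_NXT and activate the accelerator once both engines are
	 * idle.  The callout armed below acts as a watchdog in case the
	 * completion interrupt never arrives.
	 */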
2270 1.1 hsuenaga
2271 1.1 hsuenaga /* get 1st DMA descriptor */
2272 1.1 hsuenaga mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2273 1.1 hsuenaga head = mv_p->dma_ring.dma_head->phys_addr;
2274 1.1 hsuenaga
2275 1.1 hsuenaga /* terminate last DMA descriptor */
2276 1.1 hsuenaga mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2277 1.1 hsuenaga mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2278 1.1 hsuenaga
2279 1.1 hsuenaga /* configure TDMA */
2280 1.1 hsuenaga if (mvxpsec_dma_wait(sc) < 0) {
2281 1.1 hsuenaga log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2282 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz);
2283 1.1 hsuenaga return 0;
2284 1.1 hsuenaga }
2285 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2286 1.1 hsuenaga
2287 1.1 hsuenaga /* trigger ACC */
2288 1.1 hsuenaga if (mvxpsec_acc_wait(sc) < 0) {
2289 1.1 hsuenaga log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2290 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz);
2291 1.1 hsuenaga return 0;
2292 1.1 hsuenaga }
2293 1.1 hsuenaga MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2294 1.1 hsuenaga
2295 1.1 hsuenaga MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2296 1.1 hsuenaga MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2297 1.1 hsuenaga callout_schedule(&sc->sc_timeout, hz);
2298 1.1 hsuenaga return 0;
2299 1.1 hsuenaga }
2300 1.1 hsuenaga
2301 1.1 hsuenaga /*
2302 1.1 hsuenaga * process opencrypto operations(cryptop) for packets.
2303 1.1 hsuenaga  * process opencrypto operations (cryptop) for packets.
2304 1.1 hsuenaga INLINE int
2305 1.1 hsuenaga mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2306 1.1 hsuenaga {
2307 1.1 hsuenaga int ivlen;
2308 1.1 hsuenaga
2309 1.1 hsuenaga KASSERT(mv_p->flags & RDY_DATA);
2310 1.1 hsuenaga
2311 1.1 hsuenaga /* MAC & Ciphers: set data location and operation */
2312 1.1 hsuenaga switch (crd->crd_alg) {
2313 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
2314 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2315 1.1 hsuenaga /* fall through */
2316 1.1 hsuenaga case CRYPTO_SHA1_HMAC:
2317 1.1 hsuenaga mv_p->mac_dst = crd->crd_inject;
2318 1.1 hsuenaga mv_p->mac_off = crd->crd_skip;
2319 1.1 hsuenaga mv_p->mac_len = crd->crd_len;
2320 1.1 hsuenaga MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2321 1.1 hsuenaga MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2322 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2323 1.1 hsuenaga /* No more setup for MAC */
2324 1.1 hsuenaga return 0;
2325 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
2326 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2327 1.1 hsuenaga /* fall through */
2328 1.1 hsuenaga case CRYPTO_MD5_HMAC:
2329 1.1 hsuenaga mv_p->mac_dst = crd->crd_inject;
2330 1.1 hsuenaga mv_p->mac_off = crd->crd_skip;
2331 1.1 hsuenaga mv_p->mac_len = crd->crd_len;
2332 1.1 hsuenaga MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2333 1.1 hsuenaga MV_ACC_CRYPTO_MAC_HMAC_MD5);
2334 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2335 1.1 hsuenaga /* No more setup for MAC */
2336 1.1 hsuenaga return 0;
2337 1.1 hsuenaga case CRYPTO_DES_CBC:
2338 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject;
2339 1.1 hsuenaga mv_p->enc_off = crd->crd_skip;
2340 1.1 hsuenaga mv_p->enc_len = crd->crd_len;
2341 1.1 hsuenaga ivlen = 8;
2342 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2343 1.1 hsuenaga MV_ACC_CRYPTO_ENC_DES);
2344 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2345 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2346 1.1 hsuenaga break;
2347 1.1 hsuenaga case CRYPTO_3DES_CBC:
2348 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject;
2349 1.1 hsuenaga mv_p->enc_off = crd->crd_skip;
2350 1.1 hsuenaga mv_p->enc_len = crd->crd_len;
2351 1.1 hsuenaga ivlen = 8;
2352 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2353 1.1 hsuenaga MV_ACC_CRYPTO_ENC_3DES);
2354 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2355 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2356 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2357 1.1 hsuenaga break;
2358 1.1 hsuenaga case CRYPTO_AES_CBC:
2359 1.1 hsuenaga mv_p->enc_ivoff = crd->crd_inject;
2360 1.1 hsuenaga mv_p->enc_off = crd->crd_skip;
2361 1.1 hsuenaga mv_p->enc_len = crd->crd_len;
2362 1.1 hsuenaga ivlen = 16;
2363 1.1 hsuenaga MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2364 1.1 hsuenaga MV_ACC_CRYPTO_ENC_AES);
2365 1.1 hsuenaga MV_ACC_CRYPTO_AES_KLEN_SET(
2366 1.1 hsuenaga mv_p->pkt_header.desc.acc_config,
2367 1.1 hsuenaga mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2368 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2369 1.1 hsuenaga mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2370 1.1 hsuenaga break;
2371 1.1 hsuenaga default:
2372 1.1 hsuenaga log(LOG_ERR, "%s: Unknown algorithm %d\n",
2373 1.1 hsuenaga __func__, crd->crd_alg);
2374 1.1 hsuenaga return EINVAL;
2375 1.1 hsuenaga }
2376 1.1 hsuenaga
2377 1.1 hsuenaga /* Operations only for Cipher, not MAC */
2378 1.1 hsuenaga if (crd->crd_flags & CRD_F_ENCRYPT) {
2379 1.1 hsuenaga /* Ciphers: Originate IV for Encryption. */
2380 1.1 hsuenaga mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2381 1.1 hsuenaga mv_p->flags |= DIR_ENCRYPT;
2382 1.1 hsuenaga
2383 1.1 hsuenaga if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2384 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2385 1.1 hsuenaga mv_p->flags |= CRP_EXT_IV;
2386 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2387 1.1 hsuenaga mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2388 1.1 hsuenaga }
2389 1.1 hsuenaga else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2390 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2391 1.1 hsuenaga mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2392 1.1 hsuenaga }
2393 1.1 hsuenaga else {
2394 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2395 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2396 1.1 hsuenaga }
2397 1.1 hsuenaga }
2398 1.1 hsuenaga else {
2399 1.1 hsuenaga /* Ciphers: IV is loaded from crd_inject when it's present */
2400 1.1 hsuenaga mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2401 1.1 hsuenaga mv_p->flags |= DIR_DECRYPT;
2402 1.1 hsuenaga
2403 1.1 hsuenaga if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2404 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
2405 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2406 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2407 1.1 hsuenaga "EXPLICIT IV(Decrypt)\n");
2408 1.1 hsuenaga mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2409 1.1 hsuenaga }
2410 1.1 hsuenaga #endif
2411 1.1 hsuenaga mv_p->flags |= CRP_EXT_IV;
2412 1.1 hsuenaga mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2413 1.1 hsuenaga mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2414 1.1 hsuenaga }
2415 1.1 hsuenaga }
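	/*
	 * IV handling summary: for encryption the IV is either supplied
	 * explicitly (staged in the SRAM external-IV slot), copied from the
	 * payload at crd_inject, or freshly generated; for decryption only
	 * an explicit IV needs staging, otherwise the engine picks it up
	 * from the payload at crd_inject.
	 */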
2416 1.1 hsuenaga
2417 1.1 hsuenaga KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2418 1.1 hsuenaga
2419 1.1 hsuenaga return 0;
2420 1.1 hsuenaga }
2421 1.1 hsuenaga
2422 1.1 hsuenaga INLINE int
2423 1.1 hsuenaga mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2424 1.1 hsuenaga {
2425 1.1 hsuenaga struct cryptop *crp = mv_p->crp;
2426 1.1 hsuenaga struct cryptodesc *crd;
2427 1.1 hsuenaga int err;
2428 1.1 hsuenaga
2429 1.1 hsuenaga KASSERT(crp);
2430 1.1 hsuenaga
2431 1.1 hsuenaga mvxpsec_packet_reset_op(mv_p);
2432 1.1 hsuenaga
2433 1.1 hsuenaga for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2434 1.1 hsuenaga err = mvxpsec_parse_crd(mv_p, crd);
2435 1.1 hsuenaga if (err)
2436 1.1 hsuenaga return err;
2437 1.1 hsuenaga }
2438 1.1 hsuenaga
2439 1.1 hsuenaga return 0;
2440 1.1 hsuenaga }
2441 1.1 hsuenaga
2442 1.1 hsuenaga INLINE int
2443 1.1 hsuenaga mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2444 1.1 hsuenaga {
2445 1.1 hsuenaga int err = EINVAL;
2446 1.1 hsuenaga
2447 1.1 hsuenaga /* register crp to the MVXPSEC packet */
2448 1.1 hsuenaga if (crp->crp_flags & CRYPTO_F_IMBUF) {
2449 1.1 hsuenaga err = mvxpsec_packet_setmbuf(mv_p,
2450 1.1 hsuenaga (struct mbuf *)crp->crp_buf);
2451 1.1 hsuenaga mv_p->crp = crp;
2452 1.1 hsuenaga }
2453 1.1 hsuenaga else if (crp->crp_flags & CRYPTO_F_IOV) {
2454 1.1 hsuenaga err = mvxpsec_packet_setuio(mv_p,
2455 1.1 hsuenaga (struct uio *)crp->crp_buf);
2456 1.1 hsuenaga mv_p->crp = crp;
2457 1.1 hsuenaga }
2458 1.1 hsuenaga else {
2459 1.1 hsuenaga err = mvxpsec_packet_setdata(mv_p,
2460 1.1 hsuenaga (struct mbuf *)crp->crp_buf, crp->crp_ilen);
2461 1.1 hsuenaga mv_p->crp = crp;
2462 1.1 hsuenaga }
2463 1.1 hsuenaga if (__predict_false(err))
2464 1.1 hsuenaga return err;
2465 1.1 hsuenaga
2466 1.1 hsuenaga /* parse crp and setup MVXPSEC registers/descriptors */
2467 1.1 hsuenaga err = mvxpsec_parse_crp(mv_p);
2468 1.1 hsuenaga if (__predict_false(err))
2469 1.1 hsuenaga return err;
2470 1.1 hsuenaga
2471 1.1 hsuenaga /* fixup data offset to fit MVXPSEC internal SRAM */
2472 1.1 hsuenaga err = mvxpsec_header_finalize(mv_p);
2473 1.1 hsuenaga if (__predict_false(err))
2474 1.1 hsuenaga return err;
2475 1.1 hsuenaga
2476 1.1 hsuenaga return 0;
2477 1.1 hsuenaga }
2478 1.1 hsuenaga
2479 1.1 hsuenaga /*
2480 1.1 hsuenaga * load data for encrypt/decrypt/authentication
2481 1.1 hsuenaga *
2482 1.1 hsuenaga * data is raw kernel memory area.
2483 1.1 hsuenaga */
2484 1.1 hsuenaga STATIC int
2485 1.1 hsuenaga mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2486 1.1 hsuenaga void *data, uint32_t data_len)
2487 1.1 hsuenaga {
2488 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
2489 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
2490 1.1 hsuenaga
2491 1.1 hsuenaga if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2492 1.1 hsuenaga NULL, BUS_DMA_NOWAIT)) {
2493 1.1 hsuenaga log(LOG_ERR, "%s: cannot load data\n", __func__);
2494 1.1 hsuenaga return -1;
2495 1.1 hsuenaga }
2496 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_RAW;
2497 1.1 hsuenaga mv_p->data_raw = data;
2498 1.1 hsuenaga mv_p->data_len = data_len;
2499 1.1 hsuenaga mv_p->flags |= RDY_DATA;
2500 1.1 hsuenaga
2501 1.1 hsuenaga return 0;
2502 1.1 hsuenaga }
2503 1.1 hsuenaga
2504 1.1 hsuenaga /*
2505 1.1 hsuenaga * load data for encrypt/decrypt/authentication
2506 1.1 hsuenaga *
2507 1.1 hsuenaga * data is mbuf based network data.
2508 1.1 hsuenaga */
2509 1.1 hsuenaga STATIC int
2510 1.1 hsuenaga mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2511 1.1 hsuenaga {
2512 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
2513 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
2514 1.1 hsuenaga size_t pktlen = 0;
2515 1.1 hsuenaga
2516 1.1 hsuenaga if (__predict_true(m->m_flags & M_PKTHDR))
2517 1.1 hsuenaga pktlen = m->m_pkthdr.len;
2518 1.1 hsuenaga else {
2519 1.1 hsuenaga struct mbuf *mp = m;
2520 1.1 hsuenaga
2521 1.1 hsuenaga while (mp != NULL) {
2522 1.1 hsuenaga pktlen += mp->m_len;
2523 1.1 hsuenaga mp = mp->m_next;
2524 1.1 hsuenaga }
2525 1.1 hsuenaga }
2526 1.1 hsuenaga if (pktlen > SRAM_PAYLOAD_SIZE) {
2527 1.1 hsuenaga extern percpu_t *espstat_percpu;
2528 1.1 hsuenaga /* XXX:
2529 1.1 hsuenaga * layer violation. opencrypto knows our max packet size
2530 1.1 hsuenaga * from crypto_register(9) API.
2531 1.1 hsuenaga */
2532 1.1 hsuenaga
2533 1.1 hsuenaga _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2534 1.1 hsuenaga log(LOG_ERR,
2535 1.1 hsuenaga "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2536 1.1 hsuenaga device_xname(sc->sc_dev),
2537 1.1 hsuenaga (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2538 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE;
2539 1.1 hsuenaga mv_p->data_mbuf = NULL;
2540 1.1 hsuenaga return -1;
2541 1.1 hsuenaga }
2542 1.1 hsuenaga
2543 1.1 hsuenaga if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2544 1.1 hsuenaga BUS_DMA_NOWAIT)) {
2545 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE;
2546 1.1 hsuenaga mv_p->data_mbuf = NULL;
2547 1.1 hsuenaga log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2548 1.1 hsuenaga return -1;
2549 1.1 hsuenaga }
2550 1.1 hsuenaga
2551 1.1 hsuenaga /* set payload buffer */
2552 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_MBUF;
2553 1.1 hsuenaga mv_p->data_mbuf = m;
2554 1.1 hsuenaga if (m->m_flags & M_PKTHDR) {
2555 1.1 hsuenaga mv_p->data_len = m->m_pkthdr.len;
2556 1.1 hsuenaga }
2557 1.1 hsuenaga else {
2558 1.1 hsuenaga mv_p->data_len = 0;
2559 1.1 hsuenaga while (m) {
2560 1.1 hsuenaga mv_p->data_len += m->m_len;
2561 1.1 hsuenaga m = m->m_next;
2562 1.1 hsuenaga }
2563 1.1 hsuenaga }
2564 1.1 hsuenaga mv_p->flags |= RDY_DATA;
2565 1.1 hsuenaga
2566 1.1 hsuenaga return 0;
2567 1.1 hsuenaga }
2568 1.1 hsuenaga
2569 1.1 hsuenaga STATIC int
2570 1.1 hsuenaga mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2571 1.1 hsuenaga {
2572 1.1 hsuenaga struct mvxpsec_session *mv_s = mv_p->mv_s;
2573 1.1 hsuenaga struct mvxpsec_softc *sc = mv_s->sc;
2574 1.1 hsuenaga
2575 1.1 hsuenaga if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2576 1.1 hsuenaga extern percpu_t *espstat_percpu;
2577 1.1 hsuenaga /* XXX:
2578 1.1 hsuenaga * layer violation. opencrypto knows our max packet size
2579 1.1 hsuenaga * from crypto_register(9) API.
2580 1.1 hsuenaga */
2581 1.1 hsuenaga
2582 1.1 hsuenaga _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2583 1.1 hsuenaga log(LOG_ERR,
2584 1.1 hsuenaga "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2585 1.1 hsuenaga device_xname(sc->sc_dev),
2586 1.1 hsuenaga uio->uio_resid, SRAM_PAYLOAD_SIZE);
2587 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE;
2588 1.1 hsuenaga mv_p->data_mbuf = NULL;
2589 1.1 hsuenaga return -1;
2590 1.1 hsuenaga }
2591 1.1 hsuenaga
2592 1.1 hsuenaga if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2593 1.1 hsuenaga BUS_DMA_NOWAIT)) {
2594 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_NONE;
2595 1.1 hsuenaga mv_p->data_mbuf = NULL;
2596 1.1 hsuenaga log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2597 1.1 hsuenaga return -1;
2598 1.1 hsuenaga }
2599 1.1 hsuenaga
2600 1.1 hsuenaga /* set payload buffer */
2601 1.1 hsuenaga mv_p->data_type = MVXPSEC_DATA_UIO;
2602 1.1 hsuenaga mv_p->data_uio = uio;
2603 1.1 hsuenaga mv_p->data_len = uio->uio_resid;
2604 1.1 hsuenaga mv_p->flags |= RDY_DATA;
2605 1.1 hsuenaga
2606 1.1 hsuenaga return 0;
2607 1.1 hsuenaga }
2608 1.1 hsuenaga
2609 1.1 hsuenaga STATIC int
2610 1.1 hsuenaga mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2611 1.1 hsuenaga int off, int len, void *cp)
2612 1.1 hsuenaga {
2613 1.1 hsuenaga uint8_t *p;
2614 1.1 hsuenaga
2615 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2616 1.1 hsuenaga p = (uint8_t *)mv_p->data_raw + off;
2617 1.1 hsuenaga memcpy(cp, p, len);
2618 1.1 hsuenaga }
2619 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2620 1.1 hsuenaga m_copydata(mv_p->data_mbuf, off, len, cp);
2621 1.1 hsuenaga }
2622 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2623 1.1 hsuenaga cuio_copydata(mv_p->data_uio, off, len, cp);
2624 1.1 hsuenaga }
2625 1.1 hsuenaga else
2626 1.1 hsuenaga return -1;
2627 1.1 hsuenaga
2628 1.1 hsuenaga return 0;
2629 1.1 hsuenaga }
2630 1.1 hsuenaga
2631 1.1 hsuenaga STATIC int
2632 1.1 hsuenaga mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2633 1.1 hsuenaga int off, int len, void *cp)
2634 1.1 hsuenaga {
2635 1.1 hsuenaga uint8_t *p;
2636 1.1 hsuenaga
2637 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2638 1.1 hsuenaga p = (uint8_t *)mv_p->data_raw + off;
2639 1.1 hsuenaga memcpy(p, cp, len);
2640 1.1 hsuenaga }
2641 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2642 1.1 hsuenaga m_copyback(mv_p->data_mbuf, off, len, cp);
2643 1.1 hsuenaga }
2644 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2645 1.1 hsuenaga cuio_copyback(mv_p->data_uio, off, len, cp);
2646 1.1 hsuenaga }
2647 1.1 hsuenaga else
2648 1.1 hsuenaga return -1;
2649 1.1 hsuenaga
2650 1.1 hsuenaga return 0;
2651 1.1 hsuenaga }
2652 1.1 hsuenaga
2653 1.1 hsuenaga /*
2654 1.1 hsuenaga  * Set the initial vector of the cipher for the packet.
2655 1.1 hsuenaga */
2656 1.1 hsuenaga STATIC int
2657 1.1 hsuenaga mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2658 1.1 hsuenaga {
2659 1.1 hsuenaga uint8_t ivbuf[16];
2660 1.1 hsuenaga
2661 1.1 hsuenaga KASSERT(ivlen == 8 || ivlen == 16);
2662 1.1 hsuenaga
2663 1.1 hsuenaga if (iv == NULL) {
2664 1.1 hsuenaga if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2665 1.1 hsuenaga /* use per session IV (compatible with KAME IPsec) */
2666 1.1 hsuenaga mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2667 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV;
2668 1.1 hsuenaga return 0;
2669 1.1 hsuenaga }
2670 1.1 hsuenaga cprng_fast(ivbuf, ivlen);
2671 1.1 hsuenaga iv = ivbuf;
2672 1.1 hsuenaga }
2673 1.1 hsuenaga memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2674 1.1 hsuenaga if (mv_p->flags & CRP_EXT_IV) {
2675 1.1 hsuenaga memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2676 1.1 hsuenaga mv_p->ext_iv = iv;
2677 1.1 hsuenaga mv_p->ext_ivlen = ivlen;
2678 1.1 hsuenaga }
2679 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV;
2680 1.1 hsuenaga
2681 1.1 hsuenaga return 0;
2682 1.1 hsuenaga }
2683 1.1 hsuenaga
2684 1.1 hsuenaga STATIC int
2685 1.1 hsuenaga mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2686 1.1 hsuenaga {
2687 1.1 hsuenaga mvxpsec_packet_rdata(mv_p, off, ivlen,
2688 1.1 hsuenaga &mv_p->pkt_header.crp_iv_work);
2689 1.1 hsuenaga mv_p->flags |= RDY_CRP_IV;
2690 1.1 hsuenaga
2691 1.1 hsuenaga return 0;
2692 1.1 hsuenaga }
2693 1.1 hsuenaga
2694 1.1 hsuenaga /*
2695 1.1 hsuenaga  * set an encryption or decryption key for the session
2696 1.1 hsuenaga *
2697 1.1 hsuenaga * Input key material is big endian.
2698 1.1 hsuenaga */
2699 1.1 hsuenaga STATIC int
2700 1.1 hsuenaga mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2701 1.1 hsuenaga void *key_encrypt, void *key_decrypt)
2702 1.1 hsuenaga {
2703 1.1 hsuenaga uint32_t *kp = keymat;
2704 1.1 hsuenaga uint32_t *ekp = key_encrypt;
2705 1.1 hsuenaga uint32_t *dkp = key_decrypt;
2706 1.1 hsuenaga int i;
2707 1.1 hsuenaga
2708 1.1 hsuenaga switch (alg) {
2709 1.1 hsuenaga case CRYPTO_DES_CBC:
2710 1.1 hsuenaga if (kbitlen < 64 || (kbitlen % 8) != 0) {
2711 1.1 hsuenaga log(LOG_WARNING,
2712 1.1 hsuenaga "mvxpsec: invalid DES keylen %d\n", kbitlen);
2713 1.1 hsuenaga return EINVAL;
2714 1.1 hsuenaga }
2715 1.1 hsuenaga for (i = 0; i < 2; i++)
2716 1.1 hsuenaga dkp[i] = ekp[i] = kp[i];
2717 1.1 hsuenaga for (; i < 8; i++)
2718 1.1 hsuenaga dkp[i] = ekp[i] = 0;
2719 1.1 hsuenaga break;
2720 1.1 hsuenaga case CRYPTO_3DES_CBC:
2721 1.1 hsuenaga if (kbitlen < 192 || (kbitlen % 8) != 0) {
2722 1.1 hsuenaga log(LOG_WARNING,
2723 1.1 hsuenaga "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2724 1.1 hsuenaga return EINVAL;
2725 1.1 hsuenaga }
2726 1.1 hsuenaga for (i = 0; i < 8; i++)
2727 1.1 hsuenaga dkp[i] = ekp[i] = kp[i];
2728 1.1 hsuenaga break;
2729 1.1 hsuenaga case CRYPTO_AES_CBC:
2730 1.1 hsuenaga if (kbitlen < 128) {
2731 1.1 hsuenaga log(LOG_WARNING,
2732 1.1 hsuenaga "mvxpsec: invalid AES keylen %d\n", kbitlen);
2733 1.1 hsuenaga return EINVAL;
2734 1.1 hsuenaga }
2735 1.1 hsuenaga else if (kbitlen < 192) {
2736 1.1 hsuenaga /* AES-128 */
2737 1.1 hsuenaga for (i = 0; i < 4; i++)
2738 1.1 hsuenaga ekp[i] = kp[i];
2739 1.1 hsuenaga for (; i < 8; i++)
2740 1.1 hsuenaga ekp[i] = 0;
2741 1.1 hsuenaga }
2742 1.1 hsuenaga else if (kbitlen < 256) {
2743 1.1 hsuenaga /* AES-192 */
2744 1.1 hsuenaga for (i = 0; i < 6; i++)
2745 1.1 hsuenaga ekp[i] = kp[i];
2746 1.1 hsuenaga for (; i < 8; i++)
2747 1.1 hsuenaga ekp[i] = 0;
2748 1.1 hsuenaga }
2749 1.1 hsuenaga else {
2750 1.1 hsuenaga /* AES-256 */
2751 1.1 hsuenaga for (i = 0; i < 8; i++)
2752 1.1 hsuenaga ekp[i] = kp[i];
2753 1.1 hsuenaga }
2754 1.1 hsuenaga /* make decryption key */
2755 1.1 hsuenaga mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2756 1.1 hsuenaga break;
2757 1.1 hsuenaga default:
2758 1.1 hsuenaga for (i = 0; i < 8; i++)
2759 1.1 hsuenaga 			ekp[i] = dkp[i] = 0;
2760 1.1 hsuenaga break;
2761 1.1 hsuenaga }
2762 1.1 hsuenaga
2763 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
2764 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2765 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2766 1.1 hsuenaga 		    "%s: key registered\n", __func__);
2767 1.1 hsuenaga mvxpsec_dump_data(__func__, ekp, 32);
2768 1.1 hsuenaga }
2769 1.1 hsuenaga #endif
2770 1.1 hsuenaga
2771 1.1 hsuenaga return 0;
2772 1.1 hsuenaga }
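
/*
 * Illustrative usage sketch (editorial addition, not driver code): a caller
 * is expected to pack the opencrypto key material into the 8-word encrypt/
 * decrypt register images before programming the engine; the resulting
 * words are presumably what later lands in the MV_CE_AES_EKEY()/
 * MV_CE_AES_DKEY() registers (cf. mvxpsec_dump_reg()).  The variable
 * "keymat" below is a placeholder for the session key material.
 *
 *	uint32_t ekey[8], dkey[8];
 *
 *	if (mvxpsec_key_precomp(CRYPTO_AES_CBC, keymat, 128,
 *	    ekey, dkey) != 0)
 *		return EINVAL;
 *	// ekey[]/dkey[] now hold the zero-padded, big-endian key words.
 */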
2773 1.1 hsuenaga
2774 1.1 hsuenaga /*
2775 1.1 hsuenaga  * Set the MAC key for the session.
2776 1.1 hsuenaga  *
2777 1.1 hsuenaga  * The MAC engine has no register for the key itself, but it has
2778 1.1 hsuenaga  * inner and outer IV registers.  Software must compute these IVs
2779 1.1 hsuenaga  * before enabling the engine.
2780 1.1 hsuenaga  *
2781 1.1 hsuenaga  * Each IV is the hash state of the ipad/opad block, as defined by
2782 1.1 hsuenaga  * the FIPS-198a standard.
2783 1.1 hsuenaga */
2784 1.1 hsuenaga STATIC int
2785 1.1 hsuenaga mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2786 1.1 hsuenaga void *iv_inner, void *iv_outer)
2787 1.1 hsuenaga {
2788 1.1 hsuenaga SHA1_CTX sha1;
2789 1.1 hsuenaga MD5_CTX md5;
2790 1.1 hsuenaga uint8_t *key8 = key;
2791 1.1 hsuenaga uint8_t kbuf[64];
2792 1.1 hsuenaga uint8_t ipad[64];
2793 1.1 hsuenaga uint8_t opad[64];
2794 1.1 hsuenaga uint32_t *iv_in = iv_inner;
2795 1.1 hsuenaga uint32_t *iv_out = iv_outer;
2796 1.1 hsuenaga int kbytelen;
2797 1.1 hsuenaga int i;
2798 1.1 hsuenaga #define HMAC_IPAD 0x36
2799 1.1 hsuenaga #define HMAC_OPAD 0x5c
2800 1.1 hsuenaga
2801 1.1 hsuenaga kbytelen = kbitlen / 8;
2802 1.1 hsuenaga KASSERT(kbitlen == kbytelen * 8);
2803 1.1 hsuenaga if (kbytelen > 64) {
2804 1.1 hsuenaga SHA1Init(&sha1);
2805 1.1 hsuenaga SHA1Update(&sha1, key, kbytelen);
2806 1.1 hsuenaga SHA1Final(kbuf, &sha1);
2807 1.1 hsuenaga key8 = kbuf;
2808 1.1 hsuenaga kbytelen = 64;
2809 1.1 hsuenaga }
2810 1.1 hsuenaga
2811 1.1 hsuenaga 	/* make the initial 64-octet ipad/opad strings */
2812 1.1 hsuenaga switch (alg) {
2813 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
2814 1.1 hsuenaga case CRYPTO_SHA1_HMAC:
2815 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
2816 1.1 hsuenaga case CRYPTO_MD5_HMAC:
2817 1.1 hsuenaga for (i = 0; i < kbytelen; i++) {
2818 1.1 hsuenaga ipad[i] = (key8[i] ^ HMAC_IPAD);
2819 1.1 hsuenaga opad[i] = (key8[i] ^ HMAC_OPAD);
2820 1.1 hsuenaga }
2821 1.1 hsuenaga for (; i < 64; i++) {
2822 1.1 hsuenaga ipad[i] = HMAC_IPAD;
2823 1.1 hsuenaga opad[i] = HMAC_OPAD;
2824 1.1 hsuenaga }
2825 1.1 hsuenaga break;
2826 1.1 hsuenaga default:
2827 1.1 hsuenaga break;
2828 1.1 hsuenaga }
2829 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
2830 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2831 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2832 1.1 hsuenaga "%s: HMAC-KEY Pre-comp:\n", __func__);
2833 1.1 hsuenaga mvxpsec_dump_data(__func__, key, 64);
2834 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2835 1.1 hsuenaga "%s: ipad:\n", __func__);
2836 1.1 hsuenaga mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2837 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2838 1.1 hsuenaga "%s: opad:\n", __func__);
2839 1.1 hsuenaga mvxpsec_dump_data(__func__, opad, sizeof(opad));
2840 1.1 hsuenaga }
2841 1.1 hsuenaga #endif
2842 1.1 hsuenaga
2843 1.1 hsuenaga 	/* compute the IVs from the padded ipad/opad strings */
2844 1.1 hsuenaga switch (alg) {
2845 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
2846 1.1 hsuenaga case CRYPTO_SHA1_HMAC:
2847 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2848 1.1 hsuenaga "%s: Generate iv_in(SHA1)\n", __func__);
2849 1.1 hsuenaga SHA1Init(&sha1);
2850 1.1 hsuenaga SHA1Update(&sha1, ipad, 64);
2851 1.1 hsuenaga /* XXX: private state... (LE) */
2852 1.1 hsuenaga iv_in[0] = htobe32(sha1.state[0]);
2853 1.1 hsuenaga iv_in[1] = htobe32(sha1.state[1]);
2854 1.1 hsuenaga iv_in[2] = htobe32(sha1.state[2]);
2855 1.1 hsuenaga iv_in[3] = htobe32(sha1.state[3]);
2856 1.1 hsuenaga iv_in[4] = htobe32(sha1.state[4]);
2857 1.1 hsuenaga
2858 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2859 1.1 hsuenaga "%s: Generate iv_out(SHA1)\n", __func__);
2860 1.1 hsuenaga SHA1Init(&sha1);
2861 1.1 hsuenaga SHA1Update(&sha1, opad, 64);
2862 1.1 hsuenaga /* XXX: private state... (LE) */
2863 1.1 hsuenaga iv_out[0] = htobe32(sha1.state[0]);
2864 1.1 hsuenaga iv_out[1] = htobe32(sha1.state[1]);
2865 1.1 hsuenaga iv_out[2] = htobe32(sha1.state[2]);
2866 1.1 hsuenaga iv_out[3] = htobe32(sha1.state[3]);
2867 1.1 hsuenaga iv_out[4] = htobe32(sha1.state[4]);
2868 1.1 hsuenaga break;
2869 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
2870 1.1 hsuenaga case CRYPTO_MD5_HMAC:
2871 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2872 1.1 hsuenaga "%s: Generate iv_in(MD5)\n", __func__);
2873 1.1 hsuenaga MD5Init(&md5);
2874 1.1 hsuenaga MD5Update(&md5, ipad, sizeof(ipad));
2875 1.1 hsuenaga /* XXX: private state... (LE) */
2876 1.1 hsuenaga iv_in[0] = htobe32(md5.state[0]);
2877 1.1 hsuenaga iv_in[1] = htobe32(md5.state[1]);
2878 1.1 hsuenaga iv_in[2] = htobe32(md5.state[2]);
2879 1.1 hsuenaga iv_in[3] = htobe32(md5.state[3]);
2880 1.1 hsuenaga iv_in[4] = 0;
2881 1.1 hsuenaga
2882 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2883 1.1 hsuenaga "%s: Generate iv_out(MD5)\n", __func__);
2884 1.1 hsuenaga MD5Init(&md5);
2885 1.1 hsuenaga MD5Update(&md5, opad, sizeof(opad));
2886 1.1 hsuenaga /* XXX: private state... (LE) */
2887 1.1 hsuenaga iv_out[0] = htobe32(md5.state[0]);
2888 1.1 hsuenaga iv_out[1] = htobe32(md5.state[1]);
2889 1.1 hsuenaga iv_out[2] = htobe32(md5.state[2]);
2890 1.1 hsuenaga iv_out[3] = htobe32(md5.state[3]);
2891 1.1 hsuenaga iv_out[4] = 0;
2892 1.1 hsuenaga break;
2893 1.1 hsuenaga default:
2894 1.1 hsuenaga break;
2895 1.1 hsuenaga }
2896 1.1 hsuenaga
2897 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
2898 1.1 hsuenaga if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2899 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2900 1.1 hsuenaga "%s: HMAC IV-IN\n", __func__);
2901 1.1 hsuenaga mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2902 1.1 hsuenaga MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2903 1.1 hsuenaga "%s: HMAC IV-OUT\n", __func__);
2904 1.1 hsuenaga mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2905 1.1 hsuenaga }
2906 1.1 hsuenaga #endif
2907 1.1 hsuenaga
2908 1.1 hsuenaga return 0;
2909 1.1 hsuenaga #undef HMAC_IPAD
2910 1.1 hsuenaga #undef HMAC_OPAD
2911 1.1 hsuenaga }
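
/*
 * Reference sketch (editorial addition, not driver code): the values stored
 * in iv_inner/iv_outer above are the hash chaining states after absorbing a
 * single 64-byte block of (key ^ ipad) and (key ^ opad), so the engine can
 * later finish HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)) without
 * ever seeing the key.  With ipad prepared as above, the same inner state
 * can be reproduced in software using <sys/sha1.h>:
 *
 *	SHA1_CTX ctx;
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, ipad, 64);
 *	// ctx.state[0..4] now matches iv_inner (before the htobe32() swap).
 */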
2912 1.1 hsuenaga
2913 1.1 hsuenaga /*
2914 1.1 hsuenaga * AES Support routine
2915 1.1 hsuenaga */
2916 1.1 hsuenaga static uint8_t AES_SBOX[256] = {
2917 1.1 hsuenaga 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
2918 1.1 hsuenaga 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
2919 1.1 hsuenaga 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
2920 1.1 hsuenaga 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154,
2921 1.1 hsuenaga 7, 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110,
2922 1.1 hsuenaga 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237,
2923 1.1 hsuenaga 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239,
2924 1.1 hsuenaga 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
2925 1.1 hsuenaga 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255,
2926 1.1 hsuenaga 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61,
2927 1.1 hsuenaga 100, 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238,
2928 1.1 hsuenaga 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92,
2929 1.1 hsuenaga 194, 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213,
2930 1.1 hsuenaga 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46,
2931 1.1 hsuenaga 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62,
2932 1.1 hsuenaga 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
2933 1.1 hsuenaga 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85,
2934 1.1 hsuenaga 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15,
2935 1.1 hsuenaga 176, 84, 187, 22
2936 1.1 hsuenaga };
2937 1.1 hsuenaga
2938 1.1 hsuenaga static uint32_t AES_RCON[30] = {
2939 1.1 hsuenaga 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2940 1.1 hsuenaga 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2941 1.1 hsuenaga 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2942 1.1 hsuenaga };
2943 1.1 hsuenaga
2944 1.1 hsuenaga STATIC int
2945 1.1 hsuenaga mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2946 1.1 hsuenaga uint8_t W[MAXROUNDS+1][4][MAXBC])
2947 1.1 hsuenaga {
2948 1.1 hsuenaga int KC, BC, ROUNDS;
2949 1.1 hsuenaga int i, j, t, rconpointer = 0;
2950 1.1 hsuenaga uint8_t tk[4][MAXKC];
2951 1.1 hsuenaga
2952 1.1 hsuenaga switch (keyBits) {
2953 1.1 hsuenaga case 128:
2954 1.1 hsuenaga ROUNDS = 10;
2955 1.1 hsuenaga KC = 4;
2956 1.1 hsuenaga break;
2957 1.1 hsuenaga case 192:
2958 1.1 hsuenaga ROUNDS = 12;
2959 1.1 hsuenaga KC = 6;
2960 1.1 hsuenaga break;
2961 1.1 hsuenaga case 256:
2962 1.1 hsuenaga ROUNDS = 14;
2963 1.1 hsuenaga KC = 8;
2964 1.1 hsuenaga break;
2965 1.1 hsuenaga default:
2966 1.1 hsuenaga return (-1);
2967 1.1 hsuenaga }
2968 1.1 hsuenaga BC = 4; /* 128 bits */
2969 1.1 hsuenaga
2970 1.1 hsuenaga for(j = 0; j < KC; j++)
2971 1.1 hsuenaga for(i = 0; i < 4; i++)
2972 1.1 hsuenaga tk[i][j] = k[i][j];
2973 1.1 hsuenaga t = 0;
2974 1.1 hsuenaga
2975 1.1 hsuenaga /* copy values into round key array */
2976 1.1 hsuenaga for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2977 1.1 hsuenaga for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2978 1.1 hsuenaga
2979 1.1 hsuenaga while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2980 1.1 hsuenaga /* calculate new values */
2981 1.1 hsuenaga for(i = 0; i < 4; i++)
2982 1.1 hsuenaga tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2983 1.1 hsuenaga tk[0][0] ^= AES_RCON[rconpointer++];
2984 1.1 hsuenaga
2985 1.1 hsuenaga if (KC != 8)
2986 1.1 hsuenaga for(j = 1; j < KC; j++)
2987 1.1 hsuenaga for(i = 0; i < 4; i++)
2988 1.1 hsuenaga tk[i][j] ^= tk[i][j-1];
2989 1.1 hsuenaga else {
2990 1.1 hsuenaga for(j = 1; j < KC/2; j++)
2991 1.1 hsuenaga for(i = 0; i < 4; i++)
2992 1.1 hsuenaga tk[i][j] ^= tk[i][j-1];
2993 1.1 hsuenaga for(i = 0; i < 4; i++)
2994 1.1 hsuenaga tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
2995 1.1 hsuenaga for(j = KC/2 + 1; j < KC; j++)
2996 1.1 hsuenaga for(i = 0; i < 4; i++)
2997 1.1 hsuenaga tk[i][j] ^= tk[i][j-1];
2998 1.1 hsuenaga }
2999 1.1 hsuenaga /* copy values into round key array */
3000 1.1 hsuenaga for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
3001 1.1 hsuenaga for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
3002 1.1 hsuenaga }
3003 1.1 hsuenaga
3004 1.1 hsuenaga return 0;
3005 1.1 hsuenaga }
3006 1.1 hsuenaga
3007 1.1 hsuenaga STATIC int
3008 1.1 hsuenaga mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3009 1.1 hsuenaga {
3010 1.1 hsuenaga uint8_t W[MAXROUNDS+1][4][MAXBC];
3011 1.1 hsuenaga uint8_t k[4][MAXKC];
3012 1.1 hsuenaga uint8_t j;
3013 1.1 hsuenaga int i, rounds, KC;
3014 1.1 hsuenaga
3015 1.1 hsuenaga if (expandedKey == NULL)
3016 1.1 hsuenaga return -1;
3017 1.1 hsuenaga
3018 1.1 hsuenaga if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3019 1.1 hsuenaga return -1;
3020 1.1 hsuenaga
3021 1.1 hsuenaga if (keyMaterial == NULL)
3022 1.1 hsuenaga return -1;
3023 1.1 hsuenaga
3024 1.1 hsuenaga /* initialize key schedule: */
3025 1.1 hsuenaga for (i=0; i<keyLen/8; i++) {
3026 1.1 hsuenaga j = keyMaterial[i];
3027 1.1 hsuenaga k[i % 4][i / 4] = j;
3028 1.1 hsuenaga }
3029 1.1 hsuenaga
3030 1.1 hsuenaga mv_aes_ksched(k, keyLen, W);
3031 1.1 hsuenaga switch (keyLen) {
3032 1.1 hsuenaga case 128:
3033 1.1 hsuenaga rounds = 10;
3034 1.1 hsuenaga KC = 4;
3035 1.1 hsuenaga break;
3036 1.1 hsuenaga case 192:
3037 1.1 hsuenaga rounds = 12;
3038 1.1 hsuenaga KC = 6;
3039 1.1 hsuenaga break;
3040 1.1 hsuenaga case 256:
3041 1.1 hsuenaga rounds = 14;
3042 1.1 hsuenaga KC = 8;
3043 1.1 hsuenaga break;
3044 1.1 hsuenaga default:
3045 1.1 hsuenaga return -1;
3046 1.1 hsuenaga }
3047 1.1 hsuenaga
3048 1.1 hsuenaga for(i=0; i<MAXBC; i++)
3049 1.1 hsuenaga for(j=0; j<4; j++)
3050 1.1 hsuenaga expandedKey[i*4+j] = W[rounds][j][i];
3051 1.1 hsuenaga for(; i<KC; i++)
3052 1.1 hsuenaga for(j=0; j<4; j++)
3053 1.1 hsuenaga expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3054 1.1 hsuenaga
3055 1.1 hsuenaga return 0;
3056 1.1 hsuenaga }
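
/*
 * Usage sketch (editorial addition, not driver code): the "decryption key"
 * produced here is the final round key of the expanded schedule, extended
 * with the trailing columns of the preceding round key for AES-192/256,
 * which is what mvxpsec_key_precomp() stores as the per-session dkp[].
 * "keymat" below is a placeholder for at least keyLen/8 bytes of key.
 *
 *	uint8_t dkey[32];
 *
 *	if (mv_aes_deckey(dkey, keymat, 256) < 0)
 *		return EINVAL;
 */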
3057 1.1 hsuenaga
3058 1.1 hsuenaga /*
3059 1.1 hsuenaga * Clear cipher/mac operation state
3060 1.1 hsuenaga */
3061 1.1 hsuenaga INLINE void
3062 1.1 hsuenaga mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3063 1.1 hsuenaga {
3064 1.1 hsuenaga mv_p->pkt_header.desc.acc_config = 0;
3065 1.1 hsuenaga mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3066 1.1 hsuenaga mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3067 1.1 hsuenaga }
3068 1.1 hsuenaga
3069 1.1 hsuenaga /*
3070 1.1 hsuenaga * update MVXPSEC operation order
3071 1.1 hsuenaga */
3072 1.1 hsuenaga INLINE void
3073 1.1 hsuenaga mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3074 1.1 hsuenaga {
3075 1.1 hsuenaga struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3076 1.1 hsuenaga uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3077 1.1 hsuenaga
3078 1.1 hsuenaga KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3079 1.1 hsuenaga KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3080 1.1 hsuenaga
3081 1.1 hsuenaga if (cur_op == 0)
3082 1.1 hsuenaga acc_desc->acc_config |= op;
3083 1.1 hsuenaga else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3084 1.1 hsuenaga acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3085 1.1 hsuenaga acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3086 1.1 hsuenaga /* MAC then ENC (= decryption) */
3087 1.1 hsuenaga }
3088 1.1 hsuenaga else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3089 1.1 hsuenaga acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3090 1.1 hsuenaga acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3091 1.1 hsuenaga /* ENC then MAC (= encryption) */
3092 1.1 hsuenaga }
3093 1.1 hsuenaga else {
3094 1.1 hsuenaga 		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
3095 1.1 hsuenaga __func__,
3096 1.1 hsuenaga (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3097 1.1 hsuenaga }
3098 1.1 hsuenaga }
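
/*
 * Usage sketch (editorial addition, not driver code): registering the
 * cipher first and the MAC second yields ENC-then-MAC (the encryption
 * direction), while the reverse order yields MAC-then-ENC (decryption).
 *
 *	mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
 *	mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
 *	// acc_config now carries MV_ACC_CRYPTO_OP_ENCMAC.
 */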
3099 1.1 hsuenaga
3100 1.1 hsuenaga /*
3101 1.1 hsuenaga * Parameter Conversions
3102 1.1 hsuenaga */
3103 1.1 hsuenaga INLINE uint32_t
3104 1.1 hsuenaga mvxpsec_alg2acc(uint32_t alg)
3105 1.1 hsuenaga {
3106 1.1 hsuenaga uint32_t reg;
3107 1.1 hsuenaga
3108 1.1 hsuenaga switch (alg) {
3109 1.1 hsuenaga case CRYPTO_DES_CBC:
3110 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_DES;
3111 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC;
3112 1.1 hsuenaga break;
3113 1.1 hsuenaga case CRYPTO_3DES_CBC:
3114 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_3DES;
3115 1.1 hsuenaga reg |= MV_ACC_CRYPTO_3DES_EDE;
3116 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC;
3117 1.1 hsuenaga break;
3118 1.1 hsuenaga case CRYPTO_AES_CBC:
3119 1.1 hsuenaga reg = MV_ACC_CRYPTO_ENC_AES;
3120 1.1 hsuenaga reg |= MV_ACC_CRYPTO_CBC;
3121 1.1 hsuenaga break;
3122 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
3123 1.1 hsuenaga reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3124 1.1 hsuenaga reg |= MV_ACC_CRYPTO_MAC_96;
3125 1.1 hsuenaga break;
3126 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
3127 1.1 hsuenaga reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3128 1.1 hsuenaga reg |= MV_ACC_CRYPTO_MAC_96;
3129 1.1 hsuenaga break;
3130 1.1 hsuenaga default:
3131 1.1 hsuenaga reg = 0;
3132 1.1 hsuenaga break;
3133 1.1 hsuenaga }
3134 1.1 hsuenaga
3135 1.1 hsuenaga return reg;
3136 1.1 hsuenaga }
3137 1.1 hsuenaga
3138 1.1 hsuenaga INLINE uint32_t
3139 1.1 hsuenaga mvxpsec_aesklen(int klen)
3140 1.1 hsuenaga {
3141 1.1 hsuenaga if (klen < 128)
3142 1.1 hsuenaga return 0;
3143 1.1 hsuenaga else if (klen < 192)
3144 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_128;
3145 1.1 hsuenaga else if (klen < 256)
3146 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_192;
3147 1.1 hsuenaga else
3148 1.1 hsuenaga return MV_ACC_CRYPTO_AES_KLEN_256;
3151 1.1 hsuenaga }
3152 1.1 hsuenaga
3153 1.1 hsuenaga /*
3154 1.1 hsuenaga * String Conversions
3155 1.1 hsuenaga */
3156 1.1 hsuenaga STATIC const char *
3157 1.1 hsuenaga s_errreg(uint32_t v)
3158 1.1 hsuenaga {
3159 1.1 hsuenaga static char buf[80];
3160 1.1 hsuenaga
3161 1.1 hsuenaga snprintf(buf, sizeof(buf),
3162 1.1 hsuenaga "%sMiss %sDoubleHit %sBothHit %sDataError",
3163 1.1 hsuenaga (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3164 1.1 hsuenaga (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3165 1.1 hsuenaga (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3166 1.1 hsuenaga (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3167 1.1 hsuenaga
3168 1.1 hsuenaga return (const char *)buf;
3169 1.1 hsuenaga }
3170 1.1 hsuenaga
3171 1.1 hsuenaga STATIC const char *
3172 1.1 hsuenaga s_winreg(uint32_t v)
3173 1.1 hsuenaga {
3174 1.1 hsuenaga static char buf[80];
3175 1.1 hsuenaga
3176 1.1 hsuenaga snprintf(buf, sizeof(buf),
3177 1.1 hsuenaga "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3178 1.1 hsuenaga (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3179 1.1 hsuenaga MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3180 1.1 hsuenaga MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3181 1.1 hsuenaga
3182 1.1 hsuenaga return (const char *)buf;
3183 1.1 hsuenaga }
3184 1.1 hsuenaga
3185 1.1 hsuenaga STATIC const char *
3186 1.1 hsuenaga s_ctrlreg(uint32_t reg)
3187 1.1 hsuenaga {
3188 1.1 hsuenaga static char buf[80];
3189 1.1 hsuenaga
3190 1.1 hsuenaga snprintf(buf, sizeof(buf),
3191 1.1 hsuenaga "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3192 1.1 hsuenaga (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3193 1.1 hsuenaga (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3194 1.1 hsuenaga MV_TDMA_CONTROL_GET_DST_BURST(reg),
3195 1.1 hsuenaga MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3196 1.1 hsuenaga (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3197 1.1 hsuenaga (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3198 1.1 hsuenaga (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3199 1.1 hsuenaga (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3200 1.1 hsuenaga
3201 1.1 hsuenaga return (const char *)buf;
3202 1.1 hsuenaga }
3203 1.1 hsuenaga
3204 1.1 hsuenaga _STATIC const char *
3205 1.1 hsuenaga s_xpsecintr(uint32_t v)
3206 1.1 hsuenaga {
3207 1.1 hsuenaga static char buf[160];
3208 1.1 hsuenaga
3209 1.1 hsuenaga snprintf(buf, sizeof(buf),
3210 1.1 hsuenaga "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3211 1.1 hsuenaga "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3212 1.1 hsuenaga (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3213 1.1 hsuenaga (v & MVXPSEC_INT_DES) ? "+" : "-",
3214 1.1 hsuenaga (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3215 1.1 hsuenaga (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3216 1.1 hsuenaga (v & MVXPSEC_INT_ENC) ? "+" : "-",
3217 1.1 hsuenaga (v & MVXPSEC_INT_SA) ? "+" : "-",
3218 1.1 hsuenaga (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3219 1.1 hsuenaga (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3220 1.1 hsuenaga (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3221 1.1 hsuenaga (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3222 1.1 hsuenaga
3223 1.1 hsuenaga return (const char *)buf;
3224 1.1 hsuenaga }
3225 1.1 hsuenaga
3226 1.1 hsuenaga STATIC const char *
3227 1.1 hsuenaga s_ctlalg(uint32_t alg)
3228 1.1 hsuenaga {
3229 1.1 hsuenaga switch (alg) {
3230 1.1 hsuenaga case CRYPTO_SHA1_HMAC_96:
3231 1.1 hsuenaga return "HMAC-SHA1-96";
3232 1.1 hsuenaga case CRYPTO_SHA1_HMAC:
3233 1.1 hsuenaga return "HMAC-SHA1";
3234 1.1 hsuenaga case CRYPTO_SHA1:
3235 1.1 hsuenaga return "SHA1";
3236 1.1 hsuenaga case CRYPTO_MD5_HMAC_96:
3237 1.1 hsuenaga return "HMAC-MD5-96";
3238 1.1 hsuenaga case CRYPTO_MD5_HMAC:
3239 1.1 hsuenaga return "HMAC-MD5";
3240 1.1 hsuenaga case CRYPTO_MD5:
3241 1.1 hsuenaga return "MD5";
3242 1.1 hsuenaga case CRYPTO_DES_CBC:
3243 1.1 hsuenaga return "DES-CBC";
3244 1.1 hsuenaga case CRYPTO_3DES_CBC:
3245 1.1 hsuenaga return "3DES-CBC";
3246 1.1 hsuenaga case CRYPTO_AES_CBC:
3247 1.1 hsuenaga return "AES-CBC";
3248 1.1 hsuenaga default:
3249 1.1 hsuenaga break;
3250 1.1 hsuenaga }
3251 1.1 hsuenaga
3252 1.1 hsuenaga return "Unknown";
3253 1.1 hsuenaga }
3254 1.1 hsuenaga
3255 1.1 hsuenaga STATIC const char *
3256 1.1 hsuenaga s_xpsec_op(uint32_t reg)
3257 1.1 hsuenaga {
3258 1.1 hsuenaga reg &= MV_ACC_CRYPTO_OP_MASK;
3259 1.1 hsuenaga switch (reg) {
3260 1.1 hsuenaga case MV_ACC_CRYPTO_OP_ENC:
3261 1.1 hsuenaga return "ENC";
3262 1.1 hsuenaga case MV_ACC_CRYPTO_OP_MAC:
3263 1.1 hsuenaga return "MAC";
3264 1.1 hsuenaga case MV_ACC_CRYPTO_OP_ENCMAC:
3265 1.1 hsuenaga return "ENC-MAC";
3266 1.1 hsuenaga case MV_ACC_CRYPTO_OP_MACENC:
3267 1.1 hsuenaga return "MAC-ENC";
3268 1.1 hsuenaga default:
3269 1.1 hsuenaga break;
3270 1.1 hsuenaga }
3271 1.1 hsuenaga
3272 1.1 hsuenaga return "Unknown";
3273 1.1 hsuenaga
3274 1.1 hsuenaga }
3275 1.1 hsuenaga
3276 1.1 hsuenaga STATIC const char *
3277 1.1 hsuenaga s_xpsec_enc(uint32_t alg)
3278 1.1 hsuenaga {
3279 1.1 hsuenaga alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3280 1.1 hsuenaga switch (alg) {
3281 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_DES:
3282 1.1 hsuenaga return "DES";
3283 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_3DES:
3284 1.1 hsuenaga return "3DES";
3285 1.1 hsuenaga case MV_ACC_CRYPTO_ENC_AES:
3286 1.1 hsuenaga return "AES";
3287 1.1 hsuenaga default:
3288 1.1 hsuenaga break;
3289 1.1 hsuenaga }
3290 1.1 hsuenaga
3291 1.1 hsuenaga return "Unknown";
3292 1.1 hsuenaga }
3293 1.1 hsuenaga
3294 1.1 hsuenaga STATIC const char *
3295 1.1 hsuenaga s_xpsec_mac(uint32_t alg)
3296 1.1 hsuenaga {
3297 1.1 hsuenaga alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3298 1.1 hsuenaga switch (alg) {
3299 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_NONE:
3300 1.1 hsuenaga return "Disabled";
3301 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_MD5:
3302 1.1 hsuenaga return "MD5";
3303 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_SHA1:
3304 1.1 hsuenaga return "SHA1";
3305 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3306 1.1 hsuenaga return "HMAC-MD5";
3307 1.1 hsuenaga case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3308 1.1 hsuenaga return "HMAC-SHA1";
3309 1.1 hsuenaga default:
3310 1.1 hsuenaga break;
3311 1.1 hsuenaga }
3312 1.1 hsuenaga
3313 1.1 hsuenaga return "Unknown";
3314 1.1 hsuenaga }
3315 1.1 hsuenaga
3316 1.1 hsuenaga STATIC const char *
3317 1.1 hsuenaga s_xpsec_frag(uint32_t frag)
3318 1.1 hsuenaga {
3319 1.1 hsuenaga frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3320 1.1 hsuenaga switch (frag) {
3321 1.1 hsuenaga case MV_ACC_CRYPTO_NOFRAG:
3322 1.1 hsuenaga return "NoFragment";
3323 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_FIRST:
3324 1.1 hsuenaga return "FirstFragment";
3325 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_MID:
3326 1.1 hsuenaga return "MiddleFragment";
3327 1.1 hsuenaga case MV_ACC_CRYPTO_FRAG_LAST:
3328 1.1 hsuenaga return "LastFragment";
3329 1.1 hsuenaga default:
3330 1.1 hsuenaga break;
3331 1.1 hsuenaga }
3332 1.1 hsuenaga
3333 1.1 hsuenaga return "Unknown";
3334 1.1 hsuenaga }
3335 1.1 hsuenaga
3336 1.1 hsuenaga #ifdef MVXPSEC_DEBUG
3337 1.1 hsuenaga void
3338 1.1 hsuenaga mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3339 1.1 hsuenaga {
3340 1.1 hsuenaga uint32_t reg;
3341 1.1 hsuenaga int i;
3342 1.1 hsuenaga
3343 1.1 hsuenaga if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3344 1.1 hsuenaga return;
3345 1.1 hsuenaga
3346 1.1 hsuenaga printf("--- Interrupt Registers ---\n");
3347 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3348 1.1 hsuenaga printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3349 1.1 hsuenaga printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3350 1.1 hsuenaga reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3351 1.1 hsuenaga printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3352 1.1 hsuenaga 	printf("MVXPSEC INT MASK: %s\n", s_xpsecintr(reg));
3353 1.1 hsuenaga
3354 1.1 hsuenaga printf("--- DMA Configuration Registers ---\n");
3355 1.1 hsuenaga for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3356 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3357 1.1 hsuenaga printf("TDMA BAR%d: 0x%08x\n", i, reg);
3358 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3359 1.1 hsuenaga printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3360 1.1 hsuenaga printf(" -> %s\n", s_winreg(reg));
3361 1.1 hsuenaga }
3362 1.1 hsuenaga
3363 1.1 hsuenaga printf("--- DMA Control Registers ---\n");
3364 1.1 hsuenaga
3365 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3366 1.1 hsuenaga printf("TDMA CONTROL: 0x%08x\n", reg);
3367 1.1 hsuenaga printf(" -> %s\n", s_ctrlreg(reg));
3368 1.1 hsuenaga
3369 1.1 hsuenaga printf("--- DMA Current Command Descriptors ---\n");
3370 1.1 hsuenaga
3371 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3372 1.1 hsuenaga printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3373 1.1 hsuenaga
3374 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3375 1.1 hsuenaga printf("TDMA ERR MASK: 0x%08x\n", reg);
3376 1.1 hsuenaga
3377 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3378 1.1 hsuenaga printf("TDMA DATA OWNER: %s\n",
3379 1.1 hsuenaga (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3380 1.1 hsuenaga printf("TDMA DATA COUNT: %d(0x%x)\n",
3381 1.1 hsuenaga (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3382 1.1 hsuenaga
3383 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3384 1.1 hsuenaga printf("TDMA DATA SRC: 0x%08x\n", reg);
3385 1.1 hsuenaga
3386 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3387 1.1 hsuenaga printf("TDMA DATA DST: 0x%08x\n", reg);
3388 1.1 hsuenaga
3389 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3390 1.1 hsuenaga printf("TDMA DATA NXT: 0x%08x\n", reg);
3391 1.1 hsuenaga
3392 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3393 1.1 hsuenaga printf("TDMA DATA CUR: 0x%08x\n", reg);
3394 1.1 hsuenaga
3395 1.1 hsuenaga printf("--- ACC Command Register ---\n");
3396 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3397 1.1 hsuenaga printf("ACC COMMAND: 0x%08x\n", reg);
3398 1.1 hsuenaga printf("ACC: %sACT %sSTOP\n",
3399 1.1 hsuenaga (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3400 1.1 hsuenaga (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3401 1.1 hsuenaga
3402 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3403 1.1 hsuenaga printf("ACC CONFIG: 0x%08x\n", reg);
3404 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3405 1.1 hsuenaga printf("ACC DESC: 0x%08x\n", reg);
3406 1.1 hsuenaga
3407 1.1 hsuenaga printf("--- DES Key Register ---\n");
3408 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3409 1.1 hsuenaga printf("DES KEY0 Low: 0x%08x\n", reg);
3410 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3411 1.1 hsuenaga printf("DES KEY0 High: 0x%08x\n", reg);
3412 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3413 1.1 hsuenaga printf("DES KEY1 Low: 0x%08x\n", reg);
3414 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3415 1.1 hsuenaga printf("DES KEY1 High: 0x%08x\n", reg);
3416 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3417 1.1 hsuenaga printf("DES KEY2 Low: 0x%08x\n", reg);
3418 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3419 1.1 hsuenaga printf("DES KEY2 High: 0x%08x\n", reg);
3420 1.1 hsuenaga
3421 1.1 hsuenaga printf("--- AES Key Register ---\n");
3422 1.1 hsuenaga for (i = 0; i < 8; i++) {
3423 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3424 1.1 hsuenaga printf("AES ENC KEY COL%d: %08x\n", i, reg);
3425 1.1 hsuenaga }
3426 1.1 hsuenaga for (i = 0; i < 8; i++) {
3427 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3428 1.1 hsuenaga printf("AES DEC KEY COL%d: %08x\n", i, reg);
3429 1.1 hsuenaga }
3430 1.1 hsuenaga
3431 1.1 hsuenaga return;
3432 1.1 hsuenaga }
3433 1.1 hsuenaga
3434 1.1 hsuenaga STATIC void
3435 1.1 hsuenaga mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3436 1.1 hsuenaga {
3437 1.1 hsuenaga uint32_t reg;
3438 1.1 hsuenaga
3439 1.1 hsuenaga if (sc->sc_sram_va == NULL)
3440 1.1 hsuenaga return;
3441 1.1 hsuenaga
3442 1.1 hsuenaga if (len == 0) {
3443 1.1 hsuenaga printf("\n%s NO DATA(len=0)\n", name);
3444 1.1 hsuenaga return;
3445 1.1 hsuenaga }
3446 1.1 hsuenaga else if (len > MV_ACC_SRAM_SIZE)
3447 1.1 hsuenaga len = MV_ACC_SRAM_SIZE;
3448 1.1 hsuenaga
3449 1.1 hsuenaga mutex_enter(&sc->sc_dma_mtx);
3450 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3451 1.1 hsuenaga if (reg & MV_TDMA_CONTROL_ACT) {
3452 1.1 hsuenaga printf("TDMA is active, cannot access SRAM\n");
3453 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx);
3454 1.1 hsuenaga return;
3455 1.1 hsuenaga }
3456 1.1 hsuenaga reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3457 1.1 hsuenaga if (reg & MV_ACC_COMMAND_ACT) {
3458 1.1 hsuenaga printf("SA is active, cannot access SRAM\n");
3459 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx);
3460 1.1 hsuenaga return;
3461 1.1 hsuenaga }
3462 1.1 hsuenaga
3463 1.1 hsuenaga printf("%s: dump SRAM, %zu bytes\n", name, len);
3464 1.1 hsuenaga mvxpsec_dump_data(name, sc->sc_sram_va, len);
3465 1.1 hsuenaga mutex_exit(&sc->sc_dma_mtx);
3466 1.1 hsuenaga return;
3467 1.1 hsuenaga }
3468 1.1 hsuenaga
3469 1.1 hsuenaga
3470 1.1 hsuenaga _STATIC void
3471 1.1 hsuenaga mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3472 1.1 hsuenaga {
3473 1.1 hsuenaga struct mvxpsec_descriptor *d =
3474 1.1 hsuenaga (struct mvxpsec_descriptor *)dh->_desc;
3475 1.1 hsuenaga
3476 1.1 hsuenaga printf("--- DMA Command Descriptor ---\n");
3477 1.1 hsuenaga printf("DESC: VA=%p PA=0x%08x\n",
3478 1.1 hsuenaga d, (uint32_t)dh->phys_addr);
3479 1.1 hsuenaga printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3480 1.1 hsuenaga printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3481 1.1 hsuenaga printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3482 1.1 hsuenaga printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3483 1.1 hsuenaga
3484 1.1 hsuenaga return;
3485 1.1 hsuenaga }
3486 1.1 hsuenaga
3487 1.1 hsuenaga STATIC void
3488 1.1 hsuenaga mvxpsec_dump_data(const char *name, void *p, size_t len)
3489 1.1 hsuenaga {
3490 1.1 hsuenaga uint8_t *data = p;
3491 1.1 hsuenaga off_t off;
3492 1.1 hsuenaga
3493 1.1 hsuenaga printf("%s: dump %p, %zu bytes", name, p, len);
3494 1.1 hsuenaga if (p == NULL || len == 0) {
3495 1.1 hsuenaga printf("\n%s: NO DATA\n", name);
3496 1.1 hsuenaga return;
3497 1.1 hsuenaga }
3498 1.1 hsuenaga for (off = 0; off < len; off++) {
3499 1.1 hsuenaga if ((off % 16) == 0) {
3500 1.1 hsuenaga printf("\n%s: 0x%08x:", name, (uint32_t)off);
3501 1.1 hsuenaga }
3502 1.1 hsuenaga if ((off % 4) == 0) {
3503 1.1 hsuenaga printf(" ");
3504 1.1 hsuenaga }
3505 1.1 hsuenaga printf("%02x", data[off]);
3506 1.1 hsuenaga }
3507 1.1 hsuenaga printf("\n");
3508 1.1 hsuenaga
3509 1.1 hsuenaga return;
3510 1.1 hsuenaga }
3511 1.1 hsuenaga
3512 1.1 hsuenaga _STATIC void
3513 1.1 hsuenaga mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3514 1.1 hsuenaga {
3515 1.1 hsuenaga struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3516 1.1 hsuenaga
3517 1.1 hsuenaga printf("%s: packet_data:\n", name);
3518 1.1 hsuenaga mvxpsec_dump_packet_data(name, mv_p);
3519 1.1 hsuenaga
3520 1.1 hsuenaga printf("%s: SRAM:\n", name);
3521 1.1 hsuenaga mvxpsec_dump_sram(name, sc, 2000);
3522 1.1 hsuenaga
3523 1.1 hsuenaga printf("%s: packet_descriptor:\n", name);
3524 1.1 hsuenaga mvxpsec_dump_packet_desc(name, mv_p);
3525 1.1 hsuenaga }
3526 1.1 hsuenaga
3527 1.1 hsuenaga _STATIC void
3528 1.1 hsuenaga mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3529 1.1 hsuenaga {
3530 1.1 hsuenaga static char buf[1500];
3531 1.1 hsuenaga int len;
3532 1.1 hsuenaga
3533 1.1 hsuenaga if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3534 1.1 hsuenaga struct mbuf *m;
3535 1.1 hsuenaga
3536 1.1 hsuenaga m = mv_p->data.mbuf;
3537 1.1 hsuenaga len = m->m_pkthdr.len;
3538 1.1 hsuenaga if (len > sizeof(buf))
3539 1.1 hsuenaga len = sizeof(buf);
3540 1.1 hsuenaga m_copydata(m, 0, len, buf);
3541 1.1 hsuenaga }
3542 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3543 1.1 hsuenaga struct uio *uio;
3544 1.1 hsuenaga
3545 1.1 hsuenaga uio = mv_p->data.uio;
3546 1.1 hsuenaga len = uio->uio_resid;
3547 1.1 hsuenaga if (len > sizeof(buf))
3548 1.1 hsuenaga len = sizeof(buf);
3549 1.1 hsuenaga cuio_copydata(uio, 0, len, buf);
3550 1.1 hsuenaga }
3551 1.1 hsuenaga else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3552 1.1 hsuenaga len = mv_p->data_len;
3553 1.1 hsuenaga if (len > sizeof(buf))
3554 1.1 hsuenaga len = sizeof(buf);
3555 1.1 hsuenaga memcpy(buf, mv_p->data.raw, len);
3556 1.1 hsuenaga }
3557 1.1 hsuenaga else
3558 1.1 hsuenaga return;
3559 1.1 hsuenaga mvxpsec_dump_data(name, buf, len);
3560 1.1 hsuenaga
3561 1.1 hsuenaga return;
3562 1.1 hsuenaga }
3563 1.1 hsuenaga
3564 1.1 hsuenaga _STATIC void
3565 1.1 hsuenaga mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3566 1.1 hsuenaga {
3567 1.1 hsuenaga uint32_t *words;
3568 1.1 hsuenaga
3569 1.1 hsuenaga if (mv_p == NULL)
3570 1.1 hsuenaga return;
3571 1.1 hsuenaga
3572 1.1 hsuenaga words = &mv_p->pkt_header.desc.acc_desc_dword0;
3573 1.1 hsuenaga mvxpsec_dump_acc_config(name, words[0]);
3574 1.1 hsuenaga mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3575 1.1 hsuenaga mvxpsec_dump_acc_enclen(name, words[2]);
3576 1.1 hsuenaga mvxpsec_dump_acc_enckey(name, words[3]);
3577 1.1 hsuenaga mvxpsec_dump_acc_enciv(name, words[4]);
3578 1.1 hsuenaga mvxpsec_dump_acc_macsrc(name, words[5]);
3579 1.1 hsuenaga mvxpsec_dump_acc_macdst(name, words[6]);
3580 1.1 hsuenaga mvxpsec_dump_acc_maciv(name, words[7]);
3581 1.1 hsuenaga
3582 1.1 hsuenaga return;
3583 1.1 hsuenaga }
3584 1.1 hsuenaga
3585 1.1 hsuenaga _STATIC void
3586 1.1 hsuenaga mvxpsec_dump_acc_config(const char *name, uint32_t w)
3587 1.1 hsuenaga {
3588 1.1 hsuenaga /* SA: Dword 0 */
3589 1.1 hsuenaga printf("%s: Dword0=0x%08x\n", name, w);
3590 1.1 hsuenaga printf("%s: OP = %s\n", name,
3591 1.1 hsuenaga s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3592 1.1 hsuenaga printf("%s: MAC = %s\n", name,
3593 1.1 hsuenaga s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3594 1.1 hsuenaga printf("%s: MAC_LEN = %s\n", name,
3595 1.1 hsuenaga w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3596 1.1 hsuenaga printf("%s: ENC = %s\n", name,
3597 1.1 hsuenaga s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3598 1.1 hsuenaga printf("%s: DIR = %s\n", name,
3599 1.1 hsuenaga w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3600 1.1 hsuenaga printf("%s: CHAIN = %s\n", name,
3601 1.1 hsuenaga w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3602 1.1 hsuenaga printf("%s: 3DES = %s\n", name,
3603 1.1 hsuenaga w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3604 1.1 hsuenaga printf("%s: FRAGMENT = %s\n", name,
3605 1.1 hsuenaga s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3606 1.1 hsuenaga return;
3607 1.1 hsuenaga }
3608 1.1 hsuenaga
3609 1.1 hsuenaga STATIC void
3610 1.1 hsuenaga mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3611 1.1 hsuenaga {
3612 1.1 hsuenaga /* SA: Dword 1 */
3613 1.1 hsuenaga printf("%s: Dword1=0x%08x\n", name, w);
3614 1.1 hsuenaga printf("%s: ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3615 1.1 hsuenaga printf("%s: ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3616 1.1 hsuenaga printf("%s: ENC RANGE = 0x%x - 0x%x\n", name,
3617 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w),
3618 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3619 1.1 hsuenaga return;
3620 1.1 hsuenaga }
3621 1.1 hsuenaga
3622 1.1 hsuenaga STATIC void
3623 1.1 hsuenaga mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3624 1.1 hsuenaga {
3625 1.1 hsuenaga /* SA: Dword 2 */
3626 1.1 hsuenaga printf("%s: Dword2=0x%08x\n", name, w);
3627 1.1 hsuenaga printf("%s: ENC LEN = %d\n", name,
3628 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w));
3629 1.1 hsuenaga return;
3630 1.1 hsuenaga }
3631 1.1 hsuenaga
3632 1.1 hsuenaga STATIC void
3633 1.1 hsuenaga mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3634 1.1 hsuenaga {
3635 1.1 hsuenaga /* SA: Dword 3 */
3636 1.1 hsuenaga printf("%s: Dword3=0x%08x\n", name, w);
3637 1.1 hsuenaga printf("%s: EKEY = 0x%x\n", name,
3638 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w));
3639 1.1 hsuenaga return;
3640 1.1 hsuenaga }
3641 1.1 hsuenaga
3642 1.1 hsuenaga STATIC void
3643 1.1 hsuenaga mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3644 1.1 hsuenaga {
3645 1.1 hsuenaga /* SA: Dword 4 */
3646 1.1 hsuenaga printf("%s: Dword4=0x%08x\n", name, w);
3647 1.1 hsuenaga printf("%s: EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3648 1.1 hsuenaga printf("%s: EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3649 1.1 hsuenaga return;
3650 1.1 hsuenaga }
3651 1.1 hsuenaga
3652 1.1 hsuenaga STATIC void
3653 1.1 hsuenaga mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3654 1.1 hsuenaga {
3655 1.1 hsuenaga /* SA: Dword 5 */
3656 1.1 hsuenaga printf("%s: Dword5=0x%08x\n", name, w);
3657 1.1 hsuenaga printf("%s: MAC_SRC = 0x%x\n", name,
3658 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w));
3659 1.1 hsuenaga printf("%s: MAC_TOTAL_LEN = %d\n", name,
3660 1.1 hsuenaga MV_ACC_DESC_GET_VAL_3(w));
3661 1.1 hsuenaga printf("%s: MAC_RANGE = 0x%0x - 0x%0x\n", name,
3662 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w),
3663 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3664 1.1 hsuenaga return;
3665 1.1 hsuenaga }
3666 1.1 hsuenaga
3667 1.1 hsuenaga STATIC void
3668 1.1 hsuenaga mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3669 1.1 hsuenaga {
3670 1.1 hsuenaga /* SA: Dword 6 */
3671 1.1 hsuenaga printf("%s: Dword6=0x%08x\n", name, w);
3672 1.1 hsuenaga printf("%s: MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3673 1.1 hsuenaga printf("%s: MAC_BLOCK_LEN = %d\n", name,
3674 1.1 hsuenaga MV_ACC_DESC_GET_VAL_2(w));
3675 1.1 hsuenaga return;
3676 1.1 hsuenaga }
3677 1.1 hsuenaga
3678 1.1 hsuenaga STATIC void
3679 1.1 hsuenaga mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3680 1.1 hsuenaga {
3681 1.1 hsuenaga /* SA: Dword 7 */
3682 1.1 hsuenaga printf("%s: Dword7=0x%08x\n", name, w);
3683 1.1 hsuenaga printf("%s: MAC_INNER_IV = 0x%x\n", name,
3684 1.1 hsuenaga MV_ACC_DESC_GET_VAL_1(w));
3685 1.1 hsuenaga printf("%s: MAC_OUTER_IV = 0x%x\n", name,
3686 1.1 hsuenaga MV_ACC_DESC_GET_VAL_2(w));
3687 1.1 hsuenaga return;
3688 1.1 hsuenaga }
3689 1.1 hsuenaga #endif
3690 1.1 hsuenaga
3691