/*	$NetBSD: mvxpsec.c,v 1.15 2022/05/22 11:38:51 riastradh Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_ipsec.h"
#endif

/*
 * Cryptographic Engine and Security Accelerator (MVXPSEC)
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/evcnt.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/callout.h>
#include <sys/pool.h>
#include <sys/cprng.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/atomic.h>
#include <sys/sha1.h>
#include <sys/md5.h>

#include <uvm/uvm_extern.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <net/net_stats.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#if NIPSEC > 0
#include <netipsec/esp_var.h>
#endif

#include <arm/cpufunc.h>
#include <arm/marvell/mvsocvar.h>
#include <arm/marvell/armadaxpreg.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpsecreg.h>
#include <dev/marvell/mvxpsecvar.h>

#ifdef DEBUG
#define STATIC __attribute__ ((noinline)) extern
#define _STATIC __attribute__ ((noinline)) extern
#define INLINE __attribute__ ((noinline)) extern
#define _INLINE __attribute__ ((noinline)) extern
#endif
#ifndef DEBUG
#define STATIC static
#define _STATIC __attribute__ ((unused)) static
#define INLINE static inline
#define _INLINE __attribute__ ((unused)) static inline
#endif
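
/*
 * Under DEBUG, the "__attribute__ ((noinline)) extern" expansions keep
 * every helper out-of-line and externally visible, so each one shows up
 * as its own symbol in backtraces and can be reached from ddb; the
 * non-DEBUG build collapses them back to plain static/static inline.
 */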

/*
 * IRQ and SRAM spaces for each unit
 * XXX: move to attach_args
 */
struct {
	int err_int;
} mvxpsec_config[] = {
	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
};
#define MVXPSEC_ERR_INT(sc) \
	mvxpsec_config[device_unit((sc)->sc_dev)].err_int

/*
 * AES
 */
#define MAXBC (128/32)
#define MAXKC (256/32)
#define MAXROUNDS 14
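/*
 * Note: the AES constants above are counted in 32-bit words: MAXBC is
 * the 128-bit block size (4 words), MAXKC the largest key size of
 * 256 bits (8 words), and MAXROUNDS the 14 rounds used by AES-256.
 */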
STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
    uint8_t[MAXROUNDS+1][4][MAXBC]);
STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);

/*
 * device driver autoconf interface
 */
STATIC int mvxpsec_match(device_t, cfdata_t, void *);
STATIC void mvxpsec_attach(device_t, device_t, void *);
STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);

/*
 * register setup
 */
STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);

/*
 * timer(callout) interface
 *
 * XXX: callout is not MP safe...
 */
STATIC void mvxpsec_timer(void *);

/*
 * interrupt interface
 */
STATIC int mvxpsec_intr(void *);
INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
STATIC int mvxpsec_eintr(void *);
STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);

/*
 * memory allocators and VM management
 */
STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
    paddr_t, int);
STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);

/*
 * Low-level DMA interface
 */
STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
    struct marvell_attach_args *);
INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
_INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
    uint32_t, uint32_t, uint32_t);
INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
    struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);

/*
 * High-level DMA interface
 */
INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
    mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
    mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);

/*
 * Session management interface (OpenCrypto)
 */
#define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
#define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
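/*
 * e.g. MVXPSEC_SID(1, 5) yields 0x10000005: the device unit lives in
 * the top 4 bits and the per-device session number in the low 28 bits,
 * and MVXPSEC_SESSION() recovers the session number (5) again.
 */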
/* pool management */
STATIC int mvxpsec_session_ctor(void *, void *, int);
STATIC void mvxpsec_session_dtor(void *, void *);
STATIC int mvxpsec_packet_ctor(void *, void *, int);
STATIC void mvxpsec_packet_dtor(void *, void *);

/* session management */
STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
INLINE void mvxpsec_session_unref(struct mvxpsec_session *);

/* packet management */
STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);

/* session header management */
STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);

/* packet queue management */
INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);

/* opencrypto operation */
INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);

/* payload data management */
INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
_STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);

/* key pre-computation */
STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);

/* crypto operation management */
INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);

/*
 * parameter converters
 */
INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
INLINE uint32_t mvxpsec_aesklen(int klen);

/*
 * string formatters
 */
_STATIC const char *s_ctrlreg(uint32_t);
_STATIC const char *s_winreg(uint32_t);
_STATIC const char *s_errreg(uint32_t);
_STATIC const char *s_xpsecintr(uint32_t);
_STATIC const char *s_ctlalg(uint32_t);
_STATIC const char *s_xpsec_op(uint32_t);
_STATIC const char *s_xpsec_enc(uint32_t);
_STATIC const char *s_xpsec_mac(uint32_t);
_STATIC const char *s_xpsec_frag(uint32_t);

/*
 * debugging support
 */
#ifdef MVXPSEC_DEBUG
_STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
_STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
_STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
_STATIC void mvxpsec_dump_data(const char *, void *, size_t);

_STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
_STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
_STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);

_STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
_STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
#endif

/*
 * global configurations, params, work spaces, ...
 *
 * XXX: use sysctl for global configurations
 */
/* waiting for device */
static int mvxpsec_wait_interval = 10;		/* usec */
static int mvxpsec_wait_retry = 100;		/* times = wait for 1 [msec] */
#ifdef MVXPSEC_DEBUG
static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
#endif

/*
 * Register accessors
 */
#define MVXPSEC_WRITE(sc, off, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
#define MVXPSEC_READ(sc, off) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))

/*
 * device driver autoconf interface
 */
CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
    mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);

STATIC int
mvxpsec_match(device_t dev, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t tag;
	int window;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	switch (mva->mva_unit) {
	case 0:
		tag = ARMADAXP_TAG_CRYPT0;
		break;
	case 1:
		tag = ARMADAXP_TAG_CRYPT1;
		break;
	default:
		aprint_error_dev(dev,
		    "unit %d is not supported\n", mva->mva_unit);
		return 0;
	}

	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
	if (window >= nwindow) {
		aprint_error_dev(dev,
		    "Security Accelerator SRAM is not configured.\n");
		return 0;
	}

	return 1;
}

STATIC void
mvxpsec_attach(device_t parent, device_t self, void *aux)
{
	struct marvell_attach_args *mva = aux;
	struct mvxpsec_softc *sc = device_private(self);
	int v;
	int i;

	sc->sc_dev = self;

	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
	aprint_naive("\n");
#ifdef MVXPSEC_MULTI_PACKET
	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
#else
	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
#endif
	aprint_normal_dev(sc->sc_dev,
	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);

	/* mutex */
	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Packet queue */
	SIMPLEQ_INIT(&sc->sc_wait_queue);
	SIMPLEQ_INIT(&sc->sc_run_queue);
	SLIST_INIT(&sc->sc_free_list);
	sc->sc_wait_qlen = 0;
#ifdef MVXPSEC_MULTI_PACKET
	sc->sc_wait_qlimit = 16;
#else
	sc->sc_wait_qlimit = 0;
#endif
	sc->sc_free_qlen = 0;

	/* Timer */
	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);

	/* I/O */
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}

	/* DMA */
	sc->sc_dmat = mva->mva_dmat;
	if (mvxpsec_init_dma(sc, mva) < 0)
		return;

	/* SRAM */
	if (mvxpsec_init_sram(sc) < 0)
		return;

	/* Registers */
	mvxpsec_wininit(sc, mva->mva_tags);

	/* INTR */
	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
	sc->sc_done_ih =
	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
	/* XXX: should pass error IRQ using mva */
	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
	    IPL_NET, mvxpsec_eintr, sc);
	aprint_normal_dev(self,
	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));

	/* Initialize TDMA (It's enabled here, but waiting for SA) */
	if (mvxpsec_dma_wait(sc) < 0)
		panic("%s: DMA DEVICE not responding\n", __func__);
	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
	v = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
	v |= MV_TDMA_CONTROL_ENABLE;
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);

	/* Initialize SA */
	if (mvxpsec_acc_wait(sc) < 0)
		panic("%s: MVXPSEC not responding\n", __func__);
	v = MVXPSEC_READ(sc, MV_ACC_CONFIG);
	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
	v |= MV_ACC_CONFIG_MULT_PKT;
	v |= MV_ACC_CONFIG_WAIT_TDMA;
	v |= MV_ACC_CONFIG_ACT_TDMA;
	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);

	/* Session */
	sc->sc_session_pool =
	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
	    "mvxpsecpl", NULL, IPL_NET,
	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
	sc->sc_last_session = NULL;
	/* Packet */
	sc->sc_packet_pool =
	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
	    "mvxpsec_pktpl", NULL, IPL_NET,
	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);

	/* Register to EVCNT framework */
	mvxpsec_evcnt_attach(sc);

	/* Register to Opencrypto */
	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
		sc->sc_sessions[i] = NULL;
	}
	if (mvxpsec_register(sc))
		panic("cannot initialize OpenCrypto module.\n");

	return;
}

STATIC void
mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
{
	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;

	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Main Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "DES Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "SA Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");

	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "New-Session");
	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Free-Session");

	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-OK");
	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-ERR");

	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Queue-Full");
	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Max-Done");
}

/*
 * Register setup
 */
STATIC int
mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t size, reg;
	int window, target, attr, rv, i;

	/* disable all windows */
	for (window = 0; window < MV_TDMA_NWINDOW; window++) {
		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
	}

	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			aprint_error_dev(sc->sc_dev,
			    "can't remap window %d\n", window);
			continue;
		}

		reg = MV_TDMA_BAR_BASE(base);
		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);

		reg = MV_TDMA_ATTR_TARGET(target);
		reg |= MV_TDMA_ATTR_ATTR(attr);
		reg |= MV_TDMA_ATTR_SIZE(size);
		reg |= MV_TDMA_ATTR_ENABLE;
		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);

		window++;
	}

	return 0;
}
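
/*
 * Each TDMA window above pairs a base address (BAR) with an attribute
 * register naming the MBus target that decodes it, so DMA addresses
 * that fall inside an enabled window are routed to that target
 * (typically a DRAM chip select); hence one window is programmed per
 * tag with a nonzero size.
 */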

/*
 * Timer handling
 */
STATIC void
mvxpsec_timer(void *aux)
{
	struct mvxpsec_softc *sc = aux;
	struct mvxpsec_packet *mv_p;
	uint32_t reg;
	int ndone;
	int refill;
	int s;

	/* IPL_SOFTCLOCK */

	log(LOG_ERR, "%s: device timeout.\n", __func__);
#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_reg(sc);
#endif

	s = splnet();
	/* stop security accelerator */
	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);

	/* stop TDMA */
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);

	/* cleanup packet queue */
	mutex_enter(&sc->sc_queue_mtx);
	ndone = 0;
	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);

		mv_p->crp->crp_etype = EINVAL;
		mvxpsec_done_packet(mv_p);
		ndone++;
	}
	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
	sc->sc_flags &= ~HW_RUNNING;
	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
	mutex_exit(&sc->sc_queue_mtx);

	/* reenable TDMA */
	if (mvxpsec_dma_wait(sc) < 0)
		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
	reg = MV_TDMA_DEFAULT_CONTROL;
	reg |= MV_TDMA_CONTROL_ENABLE;
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);

	if (mvxpsec_acc_wait(sc) < 0)
		panic("%s: failed to reset MVXPSEC. give up.", __func__);
	reg = MV_ACC_CONFIG_MULT_PKT;
	reg |= MV_ACC_CONFIG_WAIT_TDMA;
	reg |= MV_ACC_CONFIG_ACT_TDMA;
	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);

	if (refill) {
		mutex_enter(&sc->sc_queue_mtx);
		mvxpsec_dispatch_queue(sc);
		mutex_exit(&sc->sc_queue_mtx);
	}

	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
	splx(s);
}

/*
 * DMA handling
 */

/*
 * Allocate kernel devmem and DMA safe memory with the bus_dma API;
 * used for DMA descriptors.
 *
 * if phys != 0, assume phys is DMA safe memory and bypass the
 * allocator.
 */
STATIC struct mvxpsec_devmem *
mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
{
	struct mvxpsec_devmem *devmem;
	bus_dma_segment_t seg;
	int rseg;
	int err;

	if (sc == NULL)
		return NULL;

	devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP);
	devmem->size = size;

	if (phys) {
		seg.ds_addr = phys;
		seg.ds_len = devmem->size;
		rseg = 1;
		err = 0;
	} else {
		err = bus_dmamem_alloc(sc->sc_dmat,
		    devmem->size, PAGE_SIZE, 0,
		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
	}
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
		goto fail_kmem_free;
	}

	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
		goto fail_dmamem_free;
	}

	err = bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		goto fail_unmap;
	}

	err = bus_dmamap_load(sc->sc_dmat,
	    devmem->map, devmem->kva, devmem->size, NULL,
	    BUS_DMA_NOWAIT);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "can't load DMA buffer VA:%p PA:0x%08x\n",
		    devmem->kva, (int)seg.ds_addr);
		goto fail_destroy;
	}

	return devmem;

fail_destroy:
	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
fail_unmap:
	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
fail_dmamem_free:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_kmem_free:
	kmem_free(devmem, sizeof(*devmem));

	return NULL;
}
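
/*
 * Typical use, roughly as in mvxpsec_init_dma() below: allocate a run
 * of pages once at attach time and carve the DMA descriptors out of it.
 *
 *	struct mvxpsec_devmem *dm;
 *
 *	dm = mvxpsec_alloc_devmem(sc, 0,
 *	    PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
 *	if (dm == NULL)
 *		... fail the attach ...
 *
 * Passing a nonzero 'phys' instead wraps an already DMA-safe region
 * (skipping bus_dmamem_alloc), but it is mapped and loaded the same way.
 */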

/*
 * Get DMA Descriptor from (DMA safe) descriptor pool.
 */
INLINE struct mvxpsec_descriptor_handle *
mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
{
	struct mvxpsec_descriptor_handle *entry;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
		return NULL;

	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
	sc->sc_desc_ring_prod++;
	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;

	return entry;
}

/*
 * Put DMA Descriptor back to the descriptor pool.
 */
_INLINE void
mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
    struct mvxpsec_descriptor_handle *dh)
{
	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	sc->sc_desc_ring_cons++;
	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;

	return;
}
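
/*
 * The two routines above implement a producer/consumer ring over
 * sc_desc_ring: getdesc advances the producer index, putdesc (and
 * mvxpsec_dma_free() below) the consumer index, both modulo the ring
 * size.  "prod == cons" means full, so mvxpsec_init_dma() starts with
 * cons = size - 1 and the ring holds at most size - 1 descriptors.
 */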

/*
 * Setup DMA Descriptor:
 * copy 'size' bytes from 'src' to 'dst'.
 * 'src' or 'dst' must be an SRAM address.
 */
INLINE void
mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
    uint32_t dst, uint32_t src, uint32_t size)
{
	struct mvxpsec_descriptor *desc;

	desc = (struct mvxpsec_descriptor *)dh->_desc;

	desc->tdma_dst = dst;
	desc->tdma_src = src;
	desc->tdma_word0 = size;
	if (size != 0)
		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
	/* size == 0 is owned by ACC, not TDMA */

#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_dmaq(dh);
#endif
}

/*
 * Concatenate two DMA descriptor chains
 */
INLINE void
mvxpsec_dma_cat(struct mvxpsec_softc *sc,
    struct mvxpsec_descriptor_handle *dh1,
    struct mvxpsec_descriptor_handle *dh2)
{
	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
}

/*
 * Schedule DMA Copy
 */
INLINE int
mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
    uint32_t dst, uint32_t src, uint32_t size)
{
	struct mvxpsec_descriptor_handle *dh;

	dh = mvxpsec_dma_getdesc(sc);
	if (dh == NULL) {
		log(LOG_ERR, "%s: descriptor full\n", __func__);
		return -1;
	}

	mvxpsec_dma_setup(dh, dst, src, size);
	if (r->dma_head == NULL) {
		r->dma_head = dh;
		r->dma_last = dh;
		r->dma_size = 1;
	} else {
		mvxpsec_dma_cat(sc, r->dma_last, dh);
		r->dma_last = dh;
		r->dma_size++;
	}

	return 0;
}

INLINE int
mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
    uint32_t dst, uint32_t src, uint32_t size)
{
	if (size == 0) /* size 0 is a special descriptor; see below */
		return 0;

	return mvxpsec_dma_copy0(sc, r, dst, src, size);
}

/*
 * Schedule ACC Activate
 */
INLINE int
mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
}

/*
 * Finalize DMA setup
 */
INLINE void
mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	struct mvxpsec_descriptor_handle *dh;

	dh = r->dma_last;
	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
}

/*
 * Free entire DMA ring
 */
INLINE void
mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	sc->sc_desc_ring_cons += r->dma_size;
	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
	r->dma_head = NULL;
	r->dma_last = NULL;
	r->dma_size = 0;
}

/*
 * create a DMA descriptor chain for the packet
 */
INLINE int
mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	uint32_t src, dst, len;
	uint32_t pkt_off, pkt_off_r;
	int err;
	int i;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	/*
	 * set offset for mem->device copy
	 *
	 * typical packet image:
	 *
	 *   enc_ivoff
	 *   mac_off
	 *   |
	 *   |    enc_off
	 *   |    |
	 *   v    v
	 *   +----+--------...
	 *   |IV  |DATA
	 *   +----+--------...
	 */
	pkt_off = 0;
	if (mv_p->mac_off > 0)
		pkt_off = mv_p->mac_off;
	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
		pkt_off = mv_p->enc_ivoff;
	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
		pkt_off = mv_p->enc_off;
	pkt_off_r = pkt_off;

	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
	len = sizeof(mv_p->pkt_header);
	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
	if (__predict_false(err))
		return err;

	/*
	 * make DMA descriptors to copy session header: DRAM -> SRAM
	 * we can reuse the session header on SRAM if the session has
	 * not changed.
	 */
	if (sc->sc_last_session != mv_s) {
		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
		len = sizeof(mv_s->session_header);
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		sc->sc_last_session = mv_s;
	}

	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
		src = mv_p->data_map->dm_segs[i].ds_addr;
		len = mv_p->data_map->dm_segs[i].ds_len;
		if (pkt_off) {
			if (len <= pkt_off) {
				/* ignore the segment */
				dst += len;
				pkt_off -= len;
				continue;
			}
			/* copy from the middle of the segment */
			dst += pkt_off;
			src += pkt_off;
			len -= pkt_off;
			pkt_off = 0;
		}
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		dst += len;
	}

	/* make a special descriptor to activate the security accelerator */
	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
	if (__predict_false(err))
		return err;

	/* make DMA descriptors to copy payload: SRAM -> DRAM */
	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
		if (pkt_off_r) {
			if (len <= pkt_off_r) {
				/* ignore the segment */
				src += len;
				pkt_off_r -= len;
				continue;
			}
			/* copy from the middle of the segment */
			src += pkt_off_r;
			dst += pkt_off_r;
			len -= pkt_off_r;
			pkt_off_r = 0;
		}
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		src += len;
	}
	KASSERT(pkt_off == 0);
	KASSERT(pkt_off_r == 0);

	/*
	 * make DMA descriptors to copy packet header: SRAM -> DRAM
	 * if the IV is present in the payload, no need to copy.
	 */
	if (mv_p->flags & CRP_EXT_IV) {
		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
		len = sizeof(mv_p->pkt_header);
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
	}

	return 0;
}
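
/*
 * For a single packet the chain built above thus looks like this (the
 * session-header copy is skipped whenever the session is unchanged):
 *
 *	[packet header:  DRAM -> SRAM]
 *	[session header: DRAM -> SRAM]	(only on session switch)
 *	[payload seg 0..n: DRAM -> SRAM]
 *	[0-byte descriptor: hand over to the accelerator]
 *	[payload seg 0..n: SRAM -> DRAM]
 *	[packet header:  SRAM -> DRAM]	(only with CRP_EXT_IV)
 */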

INLINE int
mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
{
	/* sync packet header */
	bus_dmamap_sync(sc->sc_dmat,
	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
	    BUS_DMASYNC_PREWRITE);

#ifdef MVXPSEC_DEBUG
	/* sync session header */
	if (mvxpsec_debug != 0) {
		struct mvxpsec_session *mv_s = mv_p->mv_s;

		/* only debug code touches the session header after newsession */
		bus_dmamap_sync(sc->sc_dmat,
		    mv_s->session_header_map,
		    0, sizeof(mv_s->session_header),
		    BUS_DMASYNC_PREWRITE);
	}
#endif

	/* sync packet buffer */
	bus_dmamap_sync(sc->sc_dmat,
	    mv_p->data_map, 0, mv_p->data_len,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize MVXPSEC Internal SRAM
 *
 * - must be called after DMA initialization.
 * - make a VM mapping for the SRAM area on MBus.
 */
STATIC int
mvxpsec_init_sram(struct mvxpsec_softc *sc)
{
	uint32_t tag, target, attr, base, size;
	vaddr_t va;
	int window;

	switch (device_unit(sc->sc_dev)) {
	case 0:
		tag = ARMADAXP_TAG_CRYPT0;
		break;
	case 1:
		tag = ARMADAXP_TAG_CRYPT1;
		break;
	default:
		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
		return -1;
	}

	window = mvsoc_target(tag, &target, &attr, &base, &size);
	if (window >= nwindow) {
		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
		return -1;
	}

	if (sizeof(struct mvxpsec_crypt_sram) > size) {
		aprint_error_dev(sc->sc_dev,
		    "SRAM data structure exceeds SRAM window size.\n");
		return -1;
	}

	aprint_normal_dev(sc->sc_dev,
	    "internal SRAM window at 0x%08x-0x%08x",
	    base, base + size - 1);
	sc->sc_sram_pa = base;

	/* get vmspace to read/write device internal SRAM */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
		sc->sc_sram_va = NULL;
		aprint_normal("\n");
		return 0;
	}
	/* XXX: not working. PMAP_NOCACHE seems to have no effect? */
	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
	pmap_update(pmap_kernel());
	sc->sc_sram_va = (void *)va;
	aprint_normal(" va %p\n", sc->sc_sram_va);
	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);

	return 0;
}
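
/*
 * As implied by the MVXPSEC_SRAM_* accessors used in this file (the
 * authoritative offsets live in mvxpsecreg.h), the SRAM window holds a
 * packet-header area, a session-header area with the cipher keys and
 * MAC IVs, and a payload area that the accelerator transforms in place.
 */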

/*
 * Initialize TDMA engine.
 */
STATIC int
mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
{
	struct mvxpsec_descriptor_handle *dh;
	uint8_t *va;
	paddr_t pa;
	off_t va_off, pa_off;
	int i, n, seg, ndh;

	/* Init the device's control parameters (still disabled) */
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);

	/* Init Software DMA Handlers */
	sc->sc_devmem_desc =
	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
	    * MVXPSEC_DMA_DESC_PAGES;
	sc->sc_desc_ring =
	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
	    KM_SLEEP);
	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);

	ndh = 0;
	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
		va = devmem_va(sc->sc_devmem_desc);
		pa = devmem_pa(sc->sc_devmem_desc, seg);
		n = devmem_palen(sc->sc_devmem_desc, seg) /
		    sizeof(struct mvxpsec_descriptor);
		va_off = (PAGE_SIZE * seg);
		pa_off = 0;
		for (i = 0; i < n; i++) {
			dh = &sc->sc_desc_ring[ndh];
			dh->map = devmem_map(sc->sc_devmem_desc);
			dh->off = va_off + pa_off;
			dh->_desc = (void *)(va + va_off + pa_off);
			dh->phys_addr = pa + pa_off;
			pa_off += sizeof(struct mvxpsec_descriptor);
			ndh++;
		}
	}
	sc->sc_desc_ring_size = ndh;
	sc->sc_desc_ring_prod = 0;
	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;

	return 0;
}

/*
 * Wait for the TDMA controller to become idle
 */
INLINE int
mvxpsec_dma_wait(struct mvxpsec_softc *sc)
{
	int retry = 0;

	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
		delay(mvxpsec_wait_interval);
		if (retry++ >= mvxpsec_wait_retry)
			return -1;
	}
	return 0;
}

/*
 * Wait for the Security Accelerator to become idle
 */
INLINE int
mvxpsec_acc_wait(struct mvxpsec_softc *sc)
{
	int retry = 0;

	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
		delay(mvxpsec_wait_interval);
		if (++retry >= mvxpsec_wait_retry)
			return -1;
	}
	return 0;
}

/*
 * Entry of the interrupt handler
 *
 * registered with the kernel via marvell_intr_establish()
 */
int
mvxpsec_intr(void *arg)
{
	struct mvxpsec_softc *sc = arg;
	uint32_t v;

	/* IPL_NET */
	while ((v = mvxpsec_intr_ack(sc)) != 0) {
		mvxpsec_intr_cnt(sc, v);
		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
#ifdef MVXPSEC_DEBUG
		mvxpsec_dump_reg(sc);
#endif

		/* call high-level handlers */
		if (v & MVXPSEC_INT_ACCTDMA)
			mvxpsec_done(sc);
	}

	return 0;
}

INLINE void
mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
{
	struct mvxpsec_packet *mv_p;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	/*
	 * there is only one interrupt for the run_queue;
	 * no one else touches sc_run_queue.
	 */
	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
		mvxpsec_dma_free(sc, &mv_p->dma_ring);
}

/*
 * Acknowledge the interrupt
 *
 * read the cause bits, clear them, and return them.
 * NOTE: multiple cause bits may be returned at once.
 */
STATIC uint32_t
mvxpsec_intr_ack(struct mvxpsec_softc *sc)
{
	uint32_t reg;

	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
	reg &= MVXPSEC_DEFAULT_INT;
	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));

	return reg;
}

/*
 * Entry of the TDMA error interrupt handler
 *
 * registered with the kernel via marvell_intr_establish()
 */
int
mvxpsec_eintr(void *arg)
{
	struct mvxpsec_softc *sc = arg;
	uint32_t err;

	/* IPL_NET */
again:
	err = mvxpsec_eintr_ack(sc);
	if (err == 0)
		goto done;

	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
	    s_errreg(err));
#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_reg(sc);
#endif

	goto again;
done:
	return 0;
}

/*
 * Acknowledge the TDMA error interrupt
 *
 * read the cause bits, clear them, and return them.
 * NOTE: multiple cause bits may be returned at once.
 */
STATIC uint32_t
mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
{
	uint32_t reg;

	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
	reg &= MVXPSEC_DEFAULT_ERR;
	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));

	return reg;
}

/*
 * Interrupt statistics
 *
 * these are NOT statistics of how many times the events 'occurred';
 * they ONLY count how many times the events were 'handled'.
 */
INLINE void
mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
{
	MVXPSEC_EVCNT_INCR(sc, intr_all);
	if (cause & MVXPSEC_INT_AUTH)
		MVXPSEC_EVCNT_INCR(sc, intr_auth);
	if (cause & MVXPSEC_INT_DES)
		MVXPSEC_EVCNT_INCR(sc, intr_des);
	if (cause & MVXPSEC_INT_AES_ENC)
		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
	if (cause & MVXPSEC_INT_AES_DEC)
		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
	if (cause & MVXPSEC_INT_ENC)
		MVXPSEC_EVCNT_INCR(sc, intr_enc);
	if (cause & MVXPSEC_INT_SA)
		MVXPSEC_EVCNT_INCR(sc, intr_sa);
	if (cause & MVXPSEC_INT_ACCTDMA)
		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
	if (cause & MVXPSEC_INT_TDMA_COMP)
		MVXPSEC_EVCNT_INCR(sc, intr_comp);
	if (cause & MVXPSEC_INT_TDMA_OWN)
		MVXPSEC_EVCNT_INCR(sc, intr_own);
	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
}

/*
 * Setup the MVXPSEC header structure.
 *
 * the header contains the security accelerator descriptor, the key
 * material of the ciphers, the IVs of the ciphers and MACs, ...
 *
 * the header is transferred to MVXPSEC Internal SRAM by TDMA,
 * and parsed by the MVXPSEC H/W.
 */
STATIC int
mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
	int enc_start, enc_len, iv_offset;
	int mac_start, mac_len, mac_offset;

	/* offset -> device address */
	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
	enc_len = mv_p->enc_len;
	if (mv_p->flags & CRP_EXT_IV)
		iv_offset = mv_p->enc_ivoff;
	else
		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
	mac_len = mv_p->mac_len;
	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);

	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "ENC from 0x%08x\n", enc_start);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "MAC from 0x%08x\n", mac_start);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "MAC to 0x%08x\n", mac_offset);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "ENC IV at 0x%08x\n", iv_offset);

	/* setup device addresses in Security Accelerator Descriptors */
	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
		desc->acc_enckey =
		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
	else
		desc->acc_enckey =
		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
	desc->acc_enciv =
	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);

	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
	desc->acc_maciv =
	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
	    MVXPSEC_SRAM_MIV_OUT_DA);

	return 0;
}
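
/*
 * The MVXPSEC_SRAM_PAYLOAD_DA() conversions above turn payload-relative
 * offsets into accelerator device addresses; e.g. enc_off = 16 tells
 * the engine to start ciphering 16 bytes into the SRAM payload area.
 * Only an external IV (CRP_EXT_IV) keeps its address unconverted.
 */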

/*
 * constructor of the session structure.
 *
 * this constructor will be called by the pool_cache framework.
 */
STATIC int
mvxpsec_session_ctor(void *arg, void *obj, int flags)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_session *mv_s = obj;

	/* pool is owned by softc */
	mv_s->sc = sc;

	/* Create and load DMA map for session header */
	mv_s->session_header_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(mv_s->session_header), 1,
	    sizeof(mv_s->session_header), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &mv_s->session_header_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
	    &mv_s->session_header, sizeof(mv_s->session_header),
	    NULL, BUS_DMA_NOWAIT)) {
		log(LOG_ERR, "%s: cannot load header\n", __func__);
		goto fail;
	}

	return 0;
fail:
	if (mv_s->session_header_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
	return ENOMEM;
}

/*
 * destructor of the session structure.
 *
 * this destructor will be called by the pool_cache framework.
 */
STATIC void
mvxpsec_session_dtor(void *arg, void *obj)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_session *mv_s = obj;

	if (mv_s->sc != sc)
		panic("inconsistent context\n");

	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
}

/*
 * constructor of packet structure.
 */
STATIC int
mvxpsec_packet_ctor(void *arg, void *obj, int flags)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_packet *mv_p = obj;

	mv_p->dma_ring.dma_head = NULL;
	mv_p->dma_ring.dma_last = NULL;
	mv_p->dma_ring.dma_size = 0;

	/* Create and load DMA map for packet header */
	mv_p->pkt_header_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &mv_p->pkt_header_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
	    NULL, BUS_DMA_NOWAIT)) {
		log(LOG_ERR, "%s: cannot load header\n", __func__);
		goto fail;
	}

	/* Create DMA map for session data. */
	mv_p->data_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}

	return 0;
fail:
	if (mv_p->pkt_header_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
	if (mv_p->data_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
	return ENOMEM;
}

/*
 * destructor of packet structure.
 */
STATIC void
mvxpsec_packet_dtor(void *arg, void *obj)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_packet *mv_p = obj;

	mutex_enter(&sc->sc_dma_mtx);
	mvxpsec_dma_free(sc, &mv_p->dma_ring);
	mutex_exit(&sc->sc_dma_mtx);
	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
}

/*
 * allocate new session structure.
 */
STATIC struct mvxpsec_session *
mvxpsec_session_alloc(struct mvxpsec_softc *sc)
{
	struct mvxpsec_session *mv_s;

	mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
	if (mv_s == NULL) {
		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
		return NULL;
	}
	mv_s->refs = 1; /* 0 means the session is already invalid */
	mv_s->sflags = 0;

	return mv_s;
}

/*
 * deallocate session structure.
 */
STATIC void
mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
{
	struct mvxpsec_softc *sc = mv_s->sc;

	mv_s->sflags |= DELETED;
	mvxpsec_session_unref(mv_s);
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);

	return;
}

STATIC int
mvxpsec_session_ref(struct mvxpsec_session *mv_s)
{
	uint32_t refs;

	if (mv_s->sflags & DELETED) {
		log(LOG_ERR,
		    "%s: session is already deleted.\n", __func__);
		return -1;
	}

	refs = atomic_inc_32_nv(&mv_s->refs);
	if (refs == 1) {
		/*
		 * a session with refs == 0 is
		 * already invalidated. revert it.
		 * XXX: use CAS ?
		 */
		atomic_dec_32(&mv_s->refs);
		log(LOG_ERR,
		    "%s: session is already invalidated.\n", __func__);
		return -1;
	}

	return 0;
}
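
/*
 * The XXX above could be resolved with a CAS loop that never lets the
 * count leave zero; a sketch (not what the driver currently does):
 *
 *	uint32_t old;
 *
 *	do {
 *		old = mv_s->refs;
 *		if (old == 0)
 *			return -1;
 *	} while (atomic_cas_32(&mv_s->refs, old, old + 1) != old);
 *
 * This avoids the transient 0 -> 1 -> 0 bounce that other CPUs can
 * observe with the inc-then-revert scheme.
 */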

STATIC void
mvxpsec_session_unref(struct mvxpsec_session *mv_s)
{
	uint32_t refs;

	membar_release();
	refs = atomic_dec_32_nv(&mv_s->refs);
	if (refs == 0) {
		membar_acquire();
		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
	}
}
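
/*
 * The membar_release()/membar_acquire() pair above is the usual
 * discipline for refcounted teardown: release orders this CPU's prior
 * stores to the session before the count drops, and acquire keeps the
 * CPU that sees the count hit zero from reading stale session contents
 * while freeing it.
 */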

/*
 * look up a session by sid
 */
INLINE struct mvxpsec_session *
mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
{
	struct mvxpsec_session *mv_s;
	int session;

	/* must be called with sc->sc_session_mtx held */
	KASSERT(mutex_owned(&sc->sc_session_mtx));

	session = MVXPSEC_SESSION(sid);
	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
		log(LOG_ERR, "%s: session number too large %d\n",
		    __func__, session);
		return NULL;
	}
	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
		log(LOG_ERR, "%s: invalid session %d\n",
		    __func__, session);
		return NULL;
	}

	KASSERT(mv_s->sid == session);

	return mv_s;
}

/*
 * allocate a new packet structure.
 */
STATIC struct mvxpsec_packet *
mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
{
	struct mvxpsec_softc *sc = mv_s->sc;
	struct mvxpsec_packet *mv_p;

	/* must be called with sc->sc_queue_mtx held. */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));
	/* must be called with sc->sc_session_mtx held. */
	KASSERT(mutex_owned(&sc->sc_session_mtx));

	if (mvxpsec_session_ref(mv_s) < 0) {
		log(LOG_ERR, "%s: invalid session.\n", __func__);
		return NULL;
	}

	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
		sc->sc_free_qlen--;
	} else {
		mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
		if (mv_p == NULL) {
			log(LOG_ERR, "%s: cannot allocate memory\n",
			    __func__);
			mvxpsec_session_unref(mv_s);
			return NULL;
		}
	}
	mv_p->mv_s = mv_s;
	mv_p->flags = 0;
	mv_p->data_ptr = NULL;

	return mv_p;
}

/*
 * free packet structure.
 */
STATIC void
mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	struct mvxpsec_softc *sc = mv_s->sc;

	/* must be called with sc->sc_queue_mtx held */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));

	if (mv_p->dma_ring.dma_size != 0) {
		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
	}
	mv_p->dma_ring.dma_head = NULL;
	mv_p->dma_ring.dma_last = NULL;
	mv_p->dma_ring.dma_size = 0;

	if (mv_p->data_map) {
		if (mv_p->flags & RDY_DATA) {
			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
			mv_p->flags &= ~RDY_DATA;
		}
	}

	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
		pool_cache_put(sc->sc_packet_pool, mv_p);
	else {
		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
		sc->sc_free_qlen++;
	}
	mvxpsec_session_unref(mv_s);
}

INLINE void
mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
	struct mvxpsec_packet *last_packet;
	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;

	/* must be called with sc->sc_queue_mtx held */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));

	if (sc->sc_wait_qlen == 0) {
		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
		sc->sc_wait_qlen++;
		mv_p->flags |= SETUP_DONE;
		return;
	}

	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
	sc->sc_wait_qlen++;

	/* chain the DMA */
	cur_dma = mv_p->dma_ring.dma_head;
	prev_dma = last_packet->dma_ring.dma_last;
	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
	mv_p->flags |= SETUP_DONE;
}
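
/*
 * Note the effect of the chaining above: while packets wait on
 * sc_wait_queue, the tail descriptor of each packet points at the head
 * of the next, so one TDMA kick later walks all queued packets
 * back-to-back with no further register writes per packet.
 */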

/*
 * called by interrupt handler
 */
STATIC int
mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	struct mvxpsec_softc *sc = mv_s->sc;

	KASSERT((mv_p->flags & RDY_DATA));
	KASSERT((mv_p->flags & SETUP_DONE));

	/* unload data */
	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
	    0, mv_p->data_len,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
	mv_p->flags &= ~RDY_DATA;

#ifdef MVXPSEC_DEBUG
	if (mvxpsec_debug != 0) {
		int s;

		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
		    0, sizeof(mv_p->pkt_header),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
		    0, sizeof(mv_s->session_header),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
			char buf[1500];
			struct mbuf *m;
			struct uio *uio;
			size_t len;

			switch (mv_p->data_type) {
			case MVXPSEC_DATA_MBUF:
				m = mv_p->data_mbuf;
				len = m->m_pkthdr.len;
				if (len > sizeof(buf))
					len = sizeof(buf);
				m_copydata(m, 0, len, buf);
				break;
			case MVXPSEC_DATA_UIO:
				uio = mv_p->data_uio;
				len = uio->uio_resid;
				if (len > sizeof(buf))
					len = sizeof(buf);
				cuio_copydata(uio, 0, len, buf);
				break;
			default:
				len = 0;
			}
			if (len > 0)
				mvxpsec_dump_data(__func__, buf, len);
		}

		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
			    "%s: session_descriptor:\n", __func__);
			mvxpsec_dump_packet_desc(__func__, mv_p);
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
			    "%s: session_data:\n", __func__);
			mvxpsec_dump_packet_data(__func__, mv_p);
		}

		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
			    "%s: SRAM\n", __func__);
			mvxpsec_dump_sram(__func__, sc, 2000);
		}

		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
		if (s & MV_ACC_STATUS_MAC_ERR) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
			    "%s: Message Authentication Failed.\n", __func__);
		}
	}
#endif

	/* copy back IV */
	if (mv_p->flags & CRP_EXT_IV) {
		memcpy(mv_p->ext_iv,
		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
		mv_p->ext_iv = NULL;
		mv_p->ext_ivlen = 0;
	}

	/* notify opencrypto */
	mv_p->crp->crp_etype = 0;
	crypto_done(mv_p->crp);
	mv_p->crp = NULL;

	/* unblock driver */
	mvxpsec_packet_dealloc(mv_p);
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);

	MVXPSEC_EVCNT_INCR(sc, packet_ok);

	return 0;
}


/*
 * Opencrypto API registration
 */
int
mvxpsec_register(struct mvxpsec_softc *sc)
{
	int oplen = SRAM_PAYLOAD_SIZE;
	int flags = 0;
	int err;

	sc->sc_nsessions = 0;
	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		log(LOG_ERR,
		    "%s: crypto_get_driverid() failed.\n", __func__);
		err = EINVAL;
		goto done;
	}

	/* Ciphers */
	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
	if (err)
		goto done;

	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
	if (err)
		goto done;

	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
	if (err)
		goto done;

	/* MACs */
	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
	    oplen, flags,
	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
	if (err)
		goto done;

	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
	    oplen, flags,
	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
	if (err)
		goto done;

#ifdef DEBUG
	log(LOG_DEBUG,
	    "%s: registered to opencrypto (max data = %d bytes)\n",
	    device_xname(sc->sc_dev), oplen);
#endif

	err = 0;
done:
	return err;
}
1855
1856 /*
1857 * Create new opencrypto session
1858 *
1859 * - register cipher key, mac key.
1860 * - initialize mac internal state.
1861 */
1862 int
1863 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1864 {
1865 struct mvxpsec_softc *sc = arg;
1866 struct mvxpsec_session *mv_s = NULL;
1867 struct cryptoini *c;
1868 static int hint = 0;
1869 int session = -1;
1870 int sid;
1871 int err;
1872 int i;
1873
1874 /* allocate driver session context */
1875 mv_s = mvxpsec_session_alloc(sc);
1876 if (mv_s == NULL)
1877 return ENOMEM;
1878
1879 /*
1880 * lookup opencrypto session table
1881 *
1882 * we have sc_session_mtx after here.
1883 */
1884 mutex_enter(&sc->sc_session_mtx);
1885 if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1886 mutex_exit(&sc->sc_session_mtx);
1887 log(LOG_ERR, "%s: too many IPsec SA(max %d)\n",
1888 __func__, MVXPSEC_MAX_SESSIONS);
1889 mvxpsec_session_dealloc(mv_s);
1890 return ENOMEM;
1891 }
1892 for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1893 if (sc->sc_sessions[i])
1894 continue;
1895 session = i;
1896 hint = session + 1;
1897 break;
1898 }
1899 if (session < 0) {
1900 for (i = 0; i < hint; i++) {
1901 if (sc->sc_sessions[i])
1902 continue;
1903 session = i;
1904 hint = session + 1;
1905 break;
1906 }
1907 if (session < 0) {
1908 mutex_exit(&sc->sc_session_mtx);
1909 /* session full */
1910 log(LOG_ERR, "%s: too many IPsec SA(max %d)\n",
1911 __func__, MVXPSEC_MAX_SESSIONS);
1912 mvxpsec_session_dealloc(mv_s);
1913 hint = 0;
1914 return ENOMEM;
1915 }
1916 }
1917 if (hint >= MVXPSEC_MAX_SESSIONS)
1918 hint = 0;
1919 sc->sc_nsessions++;
1920 sc->sc_sessions[session] = mv_s;
1921 #ifdef DEBUG
1922 log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1923 #endif
1924
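	/*
	 * The sid handed back to opencrypto encodes both the device
	 * unit and the per-unit session index, so mvxpsec_freesession()
	 * can recover the table slot from the sid alone.
	 */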
1925 sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1926 mv_s->sid = sid;
1927
1928 /* setup the session key ... */
1929 for (c = cri; c; c = c->cri_next) {
1930 switch (c->cri_alg) {
1931 case CRYPTO_DES_CBC:
1932 case CRYPTO_3DES_CBC:
1933 case CRYPTO_AES_CBC:
1934 /* key */
1935 if (mvxpsec_key_precomp(c->cri_alg,
1936 c->cri_key, c->cri_klen,
1937 &mv_s->session_header.crp_key,
1938 &mv_s->session_header.crp_key_d)) {
				log(LOG_ERR,
				    "%s: Invalid cipher key for %s.\n",
				    __func__, s_ctlalg(c->cri_alg));
1942 err = EINVAL;
1943 goto fail;
1944 }
1945 if (mv_s->sflags & RDY_CRP_KEY) {
1946 log(LOG_WARNING,
1947 "%s: overwrite cipher: %s->%s.\n",
1948 __func__,
1949 s_ctlalg(mv_s->cipher_alg),
1950 s_ctlalg(c->cri_alg));
1951 }
1952 mv_s->sflags |= RDY_CRP_KEY;
1953 mv_s->enc_klen = c->cri_klen;
1954 mv_s->cipher_alg = c->cri_alg;
1955 /* create per session IV (compatible with KAME IPsec) */
1956 cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1957 mv_s->sflags |= RDY_CRP_IV;
1958 break;
1959 case CRYPTO_SHA1_HMAC_96:
1960 case CRYPTO_MD5_HMAC_96:
1961 /* key */
1962 if (mvxpsec_hmac_precomp(c->cri_alg,
1963 c->cri_key, c->cri_klen,
1964 (uint32_t *)&mv_s->session_header.miv_in,
1965 (uint32_t *)&mv_s->session_header.miv_out)) {
1966 log(LOG_ERR,
1967 "%s: Invalid MAC key\n", __func__);
1968 err = EINVAL;
1969 goto fail;
1970 }
1971 if (mv_s->sflags & RDY_MAC_KEY ||
1972 mv_s->sflags & RDY_MAC_IV) {
1973 log(LOG_ERR,
1974 "%s: overwrite HMAC: %s->%s.\n",
1975 __func__, s_ctlalg(mv_s->hmac_alg),
1976 s_ctlalg(c->cri_alg));
1977 }
1978 mv_s->sflags |= RDY_MAC_KEY;
1979 mv_s->sflags |= RDY_MAC_IV;
1980
1981 mv_s->mac_klen = c->cri_klen;
1982 mv_s->hmac_alg = c->cri_alg;
1983 break;
1984 default:
1985 log(LOG_ERR, "%s: Unknown algorithm %d\n",
1986 __func__, c->cri_alg);
1987 err = EINVAL;
1988 goto fail;
1989 }
1990 }
1991 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1992 "H/W Crypto session (id:%u) added.\n", session);
1993
1994 *sidp = sid;
1995 MVXPSEC_EVCNT_INCR(sc, session_new);
1996 mutex_exit(&sc->sc_session_mtx);
1997
	/* sync session header (it is never touched after this point) */
1999 bus_dmamap_sync(sc->sc_dmat,
2000 mv_s->session_header_map,
2001 0, sizeof(mv_s->session_header),
2002 BUS_DMASYNC_PREWRITE);
2003
2004 return 0;
2005
2006 fail:
2007 sc->sc_nsessions--;
2008 sc->sc_sessions[session] = NULL;
2009 hint = session;
2010 if (mv_s)
2011 mvxpsec_session_dealloc(mv_s);
	log(LOG_WARNING,
	    "%s: Failed to add H/W crypto session (id:%d): err=%d\n",
	    __func__, session, err);
2015
2016 mutex_exit(&sc->sc_session_mtx);
2017 return err;
2018 }
2019
2020 /*
2021 * remove opencrypto session
2022 */
2023 int
2024 mvxpsec_freesession(void *arg, uint64_t tid)
2025 {
2026 struct mvxpsec_softc *sc = arg;
2027 struct mvxpsec_session *mv_s;
2028 int session;
	uint32_t sid = (uint32_t)tid;
2030
2031 session = MVXPSEC_SESSION(sid);
2032 KASSERTMSG(session >= 0, "session=%d", session);
2033 KASSERTMSG(session < MVXPSEC_MAX_SESSIONS, "session=%d max=%d",
2034 session, MVXPSEC_MAX_SESSIONS);
2035
2036 mutex_enter(&sc->sc_session_mtx);
2037 mv_s = sc->sc_sessions[session];
2038 KASSERT(mv_s != NULL);
2039 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2040 "%s: inactivate session %d\n", __func__, session);
2041
2042 /* inactivate mvxpsec session */
2043 sc->sc_sessions[session] = NULL;
2044 sc->sc_nsessions--;
2045 sc->sc_last_session = NULL;
2046 mutex_exit(&sc->sc_session_mtx);
2047
2048 KASSERT(sc->sc_nsessions >= 0);
2049 KASSERT(mv_s->sid == sid);
2050
2051 mvxpsec_session_dealloc(mv_s);
2052 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2053 "H/W Crypto session (id: %d) deleted.\n", session);
2054
2055 /* force unblock opencrypto */
2056 crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2057
2058 MVXPSEC_EVCNT_INCR(sc, session_free);
2059
2060 return 0;
2061 }
2062
2063 /*
2064 * process data with existing session
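 *
 * Flow: look up the session, allocate a packet context, bind the
 * request data, build the TDMA descriptor chain, sync caches, and
 * enqueue.  If the engine is idle the wait queue is dispatched
 * immediately; otherwise the completion path picks it up.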
2065 */
2066 int
2067 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2068 {
2069 struct mvxpsec_softc *sc = arg;
2070 struct mvxpsec_session *mv_s;
2071 struct mvxpsec_packet *mv_p;
2072 int q_full;
2073 int running;
2074 int err;
2075
2076 mutex_enter(&sc->sc_queue_mtx);
2077
2078 /*
2079 * lookup session
2080 */
2081 mutex_enter(&sc->sc_session_mtx);
2082 mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2083 if (__predict_false(mv_s == NULL)) {
2084 err = EINVAL;
2085 mv_p = NULL;
2086 mutex_exit(&sc->sc_session_mtx);
2087 goto fail;
2088 }
2089 mv_p = mvxpsec_packet_alloc(mv_s);
2090 if (__predict_false(mv_p == NULL)) {
2091 mutex_exit(&sc->sc_session_mtx);
2092 mutex_exit(&sc->sc_queue_mtx);
2093 return ERESTART; /* => queued in opencrypto layer */
2094 }
2095 mutex_exit(&sc->sc_session_mtx);
2096
2097 /*
2098 * check queue status
2099 */
2100 #ifdef MVXPSEC_MULTI_PACKET
2101 q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2102 #else
2103 q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2104 #endif
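	/*
	 * With MVXPSEC_MULTI_PACKET the wait queue batches up to
	 * sc_wait_qlimit requests per hardware run; otherwise only a
	 * single request may be outstanding at a time.
	 */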
2105 running = (sc->sc_flags & HW_RUNNING) ? 1: 0;
2106 if (q_full) {
2107 /* input queue is full. */
2108 if (!running && sc->sc_wait_qlen > 0)
2109 mvxpsec_dispatch_queue(sc);
2110 MVXPSEC_EVCNT_INCR(sc, queue_full);
2111 mvxpsec_packet_dealloc(mv_p);
2112 mutex_exit(&sc->sc_queue_mtx);
2113 return ERESTART; /* => queued in opencrypto layer */
2114 }
2115
2116 /*
2117 * Load and setup packet data
2118 */
2119 err = mvxpsec_packet_setcrp(mv_p, crp);
2120 if (__predict_false(err))
2121 goto fail;
2122
2123 /*
2124 * Setup DMA descriptor chains
2125 */
2126 mutex_enter(&sc->sc_dma_mtx);
2127 err = mvxpsec_dma_copy_packet(sc, mv_p);
2128 mutex_exit(&sc->sc_dma_mtx);
2129 if (__predict_false(err))
2130 goto fail;
2131
2132 #ifdef MVXPSEC_DEBUG
2133 mvxpsec_dump_packet(__func__, mv_p);
2134 #endif
2135
2136 /*
2137 * Sync/inval the data cache
2138 */
2139 err = mvxpsec_dma_sync_packet(sc, mv_p);
2140 if (__predict_false(err))
2141 goto fail;
2142
2143 /*
2144 * Enqueue the packet
2145 */
2146 MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2147 #ifdef MVXPSEC_MULTI_PACKET
2148 mvxpsec_packet_enqueue(mv_p);
2149 if (!running)
2150 mvxpsec_dispatch_queue(sc);
2151 #else
2152 SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2153 sc->sc_wait_qlen++;
2154 mv_p->flags |= SETUP_DONE;
2155 if (!running)
2156 mvxpsec_dispatch_queue(sc);
2157 #endif
2158 mutex_exit(&sc->sc_queue_mtx);
2159 return 0;
2160
2161 fail:
2162 /* Drop the incoming packet */
2163 mvxpsec_drop(sc, crp, mv_p, err);
2164 mutex_exit(&sc->sc_queue_mtx);
2165 return 0;
2166 }
2167
/*
 * hand completed requests back to the opencrypto layer (IP stack)
 */
2171 void
2172 mvxpsec_done(void *arg)
2173 {
2174 struct mvxpsec_softc *sc = arg;
2175 struct mvxpsec_packet *mv_p;
2176 mvxpsec_queue_t ret_queue;
2177 int ndone;
2178
2179 mutex_enter(&sc->sc_queue_mtx);
2180
2181 /* stop wdog timer */
2182 callout_stop(&sc->sc_timeout);
2183
2184 /* refill MVXPSEC */
2185 ret_queue = sc->sc_run_queue;
2186 SIMPLEQ_INIT(&sc->sc_run_queue);
2187 sc->sc_flags &= ~HW_RUNNING;
2188 if (sc->sc_wait_qlen > 0)
2189 mvxpsec_dispatch_queue(sc);
2190
2191 ndone = 0;
2192 while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2193 SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2194 mvxpsec_dma_free(sc, &mv_p->dma_ring);
2195 mvxpsec_done_packet(mv_p);
2196 ndone++;
2197 }
2198 MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2199
2200 mutex_exit(&sc->sc_queue_mtx);
2201 }
2202
2203 /*
2204 * drop the packet
2205 */
2206 INLINE void
2207 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2208 struct mvxpsec_packet *mv_p, int err)
2209 {
	/* must be called with sc->sc_queue_mtx held */
2211 KASSERT(mutex_owned(&sc->sc_queue_mtx));
2212
2213 if (mv_p)
2214 mvxpsec_packet_dealloc(mv_p);
2215 if (err < 0)
2216 err = EINVAL;
2217 crp->crp_etype = err;
2218 crypto_done(crp);
2219 MVXPSEC_EVCNT_INCR(sc, packet_err);
2220
2221 /* dispatch other packets in queue */
2222 if (sc->sc_wait_qlen > 0 &&
2223 !(sc->sc_flags & HW_RUNNING))
2224 mvxpsec_dispatch_queue(sc);
2225
2226 /* unblock driver for dropped packet */
2227 crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2228 }
2229
2230 /* move wait queue entry to run queue */
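/*
 * All waiting packets are handed to the hardware at once: the last
 * packet's descriptor chain is terminated, the first packet's
 * descriptor address is written to MV_TDMA_NXT, and
 * MV_ACC_COMMAND_ACT starts the accelerator.  A watchdog callout is
 * armed in case the engine never signals completion.
 */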
2231 STATIC int
2232 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2233 {
2234 struct mvxpsec_packet *mv_p;
2235 paddr_t head;
2236 int ndispatch = 0;
2237
	/* must be called with sc->sc_queue_mtx held */
2239 KASSERT(mutex_owned(&sc->sc_queue_mtx));
2240
	/* check whether there is anything to do */
	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
		log(LOG_WARNING,
		    "%s: hardware is already running.\n", __func__);
		return 0;
	}
	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
		log(LOG_WARNING,
		    "%s: no waiting packet yet (qlen=%d).\n",
		    __func__, sc->sc_wait_qlen);
		return 0;
	}
2253
2254 /* move queue */
2255 sc->sc_run_queue = sc->sc_wait_queue;
2256 sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2257 SIMPLEQ_INIT(&sc->sc_wait_queue);
2258 ndispatch = sc->sc_wait_qlen;
2259 sc->sc_wait_qlen = 0;
2260
2261 /* get 1st DMA descriptor */
2262 mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2263 head = mv_p->dma_ring.dma_head->phys_addr;
2264
2265 /* terminate last DMA descriptor */
2266 mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2267 mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2268
2269 /* configure TDMA */
2270 if (mvxpsec_dma_wait(sc) < 0) {
2271 log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2272 callout_schedule(&sc->sc_timeout, hz);
2273 return 0;
2274 }
2275 MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2276
2277 /* trigger ACC */
2278 if (mvxpsec_acc_wait(sc) < 0) {
2279 log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2280 callout_schedule(&sc->sc_timeout, hz);
2281 return 0;
2282 }
2283 MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2284
2285 MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2286 MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2287 callout_schedule(&sc->sc_timeout, hz);
2288 return 0;
2289 }
2290
/*
 * process opencrypto operations (cryptop) for packets.
 */
2294 INLINE int
2295 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2296 {
2297 int ivlen;
2298
2299 KASSERT(mv_p->flags & RDY_DATA);
2300
2301 /* MAC & Ciphers: set data location and operation */
2302 switch (crd->crd_alg) {
2303 case CRYPTO_SHA1_HMAC_96:
2304 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2305 /* fall through */
2306 case CRYPTO_SHA1_HMAC:
2307 mv_p->mac_dst = crd->crd_inject;
2308 mv_p->mac_off = crd->crd_skip;
2309 mv_p->mac_len = crd->crd_len;
2310 MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2311 MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2312 mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2313 /* No more setup for MAC */
2314 return 0;
2315 case CRYPTO_MD5_HMAC_96:
2316 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2317 /* fall through */
2318 case CRYPTO_MD5_HMAC:
2319 mv_p->mac_dst = crd->crd_inject;
2320 mv_p->mac_off = crd->crd_skip;
2321 mv_p->mac_len = crd->crd_len;
2322 MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2323 MV_ACC_CRYPTO_MAC_HMAC_MD5);
2324 mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2325 /* No more setup for MAC */
2326 return 0;
2327 case CRYPTO_DES_CBC:
2328 mv_p->enc_ivoff = crd->crd_inject;
2329 mv_p->enc_off = crd->crd_skip;
2330 mv_p->enc_len = crd->crd_len;
2331 ivlen = 8;
2332 MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2333 MV_ACC_CRYPTO_ENC_DES);
2334 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2335 mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2336 break;
2337 case CRYPTO_3DES_CBC:
2338 mv_p->enc_ivoff = crd->crd_inject;
2339 mv_p->enc_off = crd->crd_skip;
2340 mv_p->enc_len = crd->crd_len;
2341 ivlen = 8;
2342 MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2343 MV_ACC_CRYPTO_ENC_3DES);
2344 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2345 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2346 mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2347 break;
2348 case CRYPTO_AES_CBC:
2349 mv_p->enc_ivoff = crd->crd_inject;
2350 mv_p->enc_off = crd->crd_skip;
2351 mv_p->enc_len = crd->crd_len;
2352 ivlen = 16;
2353 MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2354 MV_ACC_CRYPTO_ENC_AES);
2355 MV_ACC_CRYPTO_AES_KLEN_SET(
2356 mv_p->pkt_header.desc.acc_config,
2357 mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2358 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2359 mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2360 break;
2361 default:
2362 log(LOG_ERR, "%s: Unknown algorithm %d\n",
2363 __func__, crd->crd_alg);
2364 return EINVAL;
2365 }
2366
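	/*
	 * IV handling (opencrypto(9) semantics): CRD_F_IV_EXPLICIT
	 * passes the IV in crd_iv; CRD_F_IV_PRESENT means it already
	 * sits in the data buffer at crd_inject; otherwise a fresh or
	 * per-session IV is generated for encryption.
	 */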
2367 /* Operations only for Cipher, not MAC */
2368 if (crd->crd_flags & CRD_F_ENCRYPT) {
2369 /* Ciphers: Originate IV for Encryption.*/
2370 mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2371 mv_p->flags |= DIR_ENCRYPT;
2372
2373 if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2374 MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2375 mv_p->flags |= CRP_EXT_IV;
2376 mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2377 mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2378 }
2379 else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2380 MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2381 mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2382 }
2383 else {
2384 MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2385 mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2386 }
2387 }
2388 else {
		/* Ciphers: IV is loaded from crd_inject when it's present */
2390 mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2391 mv_p->flags |= DIR_DECRYPT;
2392
2393 if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2394 #ifdef MVXPSEC_DEBUG
2395 if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2396 MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2397 "EXPLICIT IV(Decrypt)\n");
2398 mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2399 }
2400 #endif
2401 mv_p->flags |= CRP_EXT_IV;
2402 mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2403 mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2404 }
2405 }
2406
2407 KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2408
2409 return 0;
2410 }
2411
2412 INLINE int
2413 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2414 {
2415 struct cryptop *crp = mv_p->crp;
2416 struct cryptodesc *crd;
2417 int err;
2418
2419 KASSERT(crp);
2420
2421 mvxpsec_packet_reset_op(mv_p);
2422
2423 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2424 err = mvxpsec_parse_crd(mv_p, crd);
2425 if (err)
2426 return err;
2427 }
2428
2429 return 0;
2430 }
2431
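/*
 * Bind an opencrypto request (cryptop) to a packet context: map the
 * data buffer (mbuf, uio or raw memory), parse the crypto
 * descriptors, then fix up offsets for the engine's internal SRAM
 * layout.
 */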
2432 INLINE int
2433 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2434 {
2435 int err = EINVAL;
2436
	/* register crp to the MVXPSEC packet */
2438 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2439 err = mvxpsec_packet_setmbuf(mv_p,
2440 (struct mbuf *)crp->crp_buf);
2441 mv_p->crp = crp;
2442 }
2443 else if (crp->crp_flags & CRYPTO_F_IOV) {
2444 err = mvxpsec_packet_setuio(mv_p,
2445 (struct uio *)crp->crp_buf);
2446 mv_p->crp = crp;
2447 }
2448 else {
		err = mvxpsec_packet_setdata(mv_p,
		    crp->crp_buf, crp->crp_ilen);
2451 mv_p->crp = crp;
2452 }
2453 if (__predict_false(err))
2454 return err;
2455
2456 /* parse crp and setup MVXPSEC registers/descriptors */
2457 err = mvxpsec_parse_crp(mv_p);
2458 if (__predict_false(err))
2459 return err;
2460
2461 /* fixup data offset to fit MVXPSEC internal SRAM */
2462 err = mvxpsec_header_finalize(mv_p);
2463 if (__predict_false(err))
2464 return err;
2465
2466 return 0;
2467 }
2468
2469 /*
2470 * load data for encrypt/decrypt/authentication
2471 *
2472 * data is raw kernel memory area.
2473 */
2474 STATIC int
2475 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2476 void *data, uint32_t data_len)
2477 {
2478 struct mvxpsec_session *mv_s = mv_p->mv_s;
2479 struct mvxpsec_softc *sc = mv_s->sc;
2480
2481 if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2482 NULL, BUS_DMA_NOWAIT)) {
2483 log(LOG_ERR, "%s: cannot load data\n", __func__);
2484 return -1;
2485 }
2486 mv_p->data_type = MVXPSEC_DATA_RAW;
2487 mv_p->data_raw = data;
2488 mv_p->data_len = data_len;
2489 mv_p->flags |= RDY_DATA;
2490
2491 return 0;
2492 }
2493
2494 /*
2495 * load data for encrypt/decrypt/authentication
2496 *
2497 * data is mbuf based network data.
2498 */
2499 STATIC int
2500 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2501 {
2502 struct mvxpsec_session *mv_s = mv_p->mv_s;
2503 struct mvxpsec_softc *sc = mv_s->sc;
2504 size_t pktlen = 0;
2505
2506 if (__predict_true(m->m_flags & M_PKTHDR))
2507 pktlen = m->m_pkthdr.len;
2508 else {
2509 struct mbuf *mp = m;
2510
2511 while (mp != NULL) {
			pktlen += mp->m_len;
2513 mp = mp->m_next;
2514 }
2515 }
2516 if (pktlen > SRAM_PAYLOAD_SIZE) {
2517 #if NIPSEC > 0
2518 extern percpu_t *espstat_percpu;
2519 /* XXX:
2520 * layer violation. opencrypto knows our max packet size
2521 * from crypto_register(9) API.
2522 */
2523
2524 _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2525 #endif
2526 log(LOG_ERR,
2527 "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2528 device_xname(sc->sc_dev),
2529 (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2530 mv_p->data_type = MVXPSEC_DATA_NONE;
2531 mv_p->data_mbuf = NULL;
2532 return -1;
2533 }
2534
2535 if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2536 BUS_DMA_NOWAIT)) {
2537 mv_p->data_type = MVXPSEC_DATA_NONE;
2538 mv_p->data_mbuf = NULL;
2539 log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2540 return -1;
2541 }
2542
2543 /* set payload buffer */
2544 mv_p->data_type = MVXPSEC_DATA_MBUF;
2545 mv_p->data_mbuf = m;
2546 if (m->m_flags & M_PKTHDR) {
2547 mv_p->data_len = m->m_pkthdr.len;
2548 }
2549 else {
2550 mv_p->data_len = 0;
2551 while (m) {
2552 mv_p->data_len += m->m_len;
2553 m = m->m_next;
2554 }
2555 }
2556 mv_p->flags |= RDY_DATA;
2557
2558 return 0;
2559 }
2560
2561 STATIC int
2562 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2563 {
2564 struct mvxpsec_session *mv_s = mv_p->mv_s;
2565 struct mvxpsec_softc *sc = mv_s->sc;
2566
2567 if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2568 #if NIPSEC > 0
2569 extern percpu_t *espstat_percpu;
2570 /* XXX:
2571 * layer violation. opencrypto knows our max packet size
2572 * from crypto_register(9) API.
2573 */
2574
2575 _NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2576 #endif
2577 log(LOG_ERR,
2578 "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2579 device_xname(sc->sc_dev),
2580 uio->uio_resid, SRAM_PAYLOAD_SIZE);
2581 mv_p->data_type = MVXPSEC_DATA_NONE;
2582 mv_p->data_mbuf = NULL;
2583 return -1;
2584 }
2585
2586 if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2587 BUS_DMA_NOWAIT)) {
2588 mv_p->data_type = MVXPSEC_DATA_NONE;
2589 mv_p->data_mbuf = NULL;
2590 log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2591 return -1;
2592 }
2593
2594 /* set payload buffer */
2595 mv_p->data_type = MVXPSEC_DATA_UIO;
2596 mv_p->data_uio = uio;
2597 mv_p->data_len = uio->uio_resid;
2598 mv_p->flags |= RDY_DATA;
2599
2600 return 0;
2601 }
2602
2603 STATIC int
2604 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2605 int off, int len, void *cp)
2606 {
2607 uint8_t *p;
2608
2609 if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2610 p = (uint8_t *)mv_p->data_raw + off;
2611 memcpy(cp, p, len);
2612 }
2613 else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2614 m_copydata(mv_p->data_mbuf, off, len, cp);
2615 }
2616 else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2617 cuio_copydata(mv_p->data_uio, off, len, cp);
2618 }
2619 else
2620 return -1;
2621
2622 return 0;
2623 }
2624
2625 STATIC int
2626 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2627 int off, int len, void *cp)
2628 {
2629 uint8_t *p;
2630
2631 if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2632 p = (uint8_t *)mv_p->data_raw + off;
2633 memcpy(p, cp, len);
2634 }
2635 else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2636 m_copyback(mv_p->data_mbuf, off, len, cp);
2637 }
2638 else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2639 cuio_copyback(mv_p->data_uio, off, len, cp);
2640 }
2641 else
2642 return -1;
2643
2644 return 0;
2645 }
2646
/*
 * Set the cipher initialization vector on the packet.
 */
2650 STATIC int
2651 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2652 {
2653 uint8_t ivbuf[16];
2654
2655 KASSERT(ivlen == 8 || ivlen == 16);
2656
2657 if (iv == NULL) {
2658 if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2659 /* use per session IV (compatible with KAME IPsec) */
2660 mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2661 mv_p->flags |= RDY_CRP_IV;
2662 return 0;
2663 }
2664 cprng_fast(ivbuf, ivlen);
2665 iv = ivbuf;
2666 }
2667 memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2668 if (mv_p->flags & CRP_EXT_IV) {
2669 memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2670 mv_p->ext_iv = iv;
2671 mv_p->ext_ivlen = ivlen;
2672 }
2673 mv_p->flags |= RDY_CRP_IV;
2674
2675 return 0;
2676 }
2677
2678 STATIC int
2679 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2680 {
2681 mvxpsec_packet_rdata(mv_p, off, ivlen,
2682 &mv_p->pkt_header.crp_iv_work);
2683 mv_p->flags |= RDY_CRP_IV;
2684
2685 return 0;
2686 }
2687
2688 /*
 * set an encryption or decryption key for the session
2690 *
2691 * Input key material is big endian.
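 *
 * The engine's key registers hold eight 32-bit words; shorter keys
 * are copied in and the remainder zero-padded.  For AES the
 * decryption key is derived with mv_aes_deckey(); for DES/3DES the
 * same key words serve both directions.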
2692 */
2693 STATIC int
2694 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2695 void *key_encrypt, void *key_decrypt)
2696 {
2697 uint32_t *kp = keymat;
2698 uint32_t *ekp = key_encrypt;
2699 uint32_t *dkp = key_decrypt;
2700 int i;
2701
2702 switch (alg) {
2703 case CRYPTO_DES_CBC:
2704 if (kbitlen < 64 || (kbitlen % 8) != 0) {
2705 log(LOG_WARNING,
2706 "mvxpsec: invalid DES keylen %d\n", kbitlen);
2707 return EINVAL;
2708 }
2709 for (i = 0; i < 2; i++)
2710 dkp[i] = ekp[i] = kp[i];
2711 for (; i < 8; i++)
2712 dkp[i] = ekp[i] = 0;
2713 break;
2714 case CRYPTO_3DES_CBC:
2715 if (kbitlen < 192 || (kbitlen % 8) != 0) {
2716 log(LOG_WARNING,
2717 "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2718 return EINVAL;
2719 }
2720 for (i = 0; i < 8; i++)
2721 dkp[i] = ekp[i] = kp[i];
2722 break;
2723 case CRYPTO_AES_CBC:
2724 if (kbitlen < 128) {
2725 log(LOG_WARNING,
2726 "mvxpsec: invalid AES keylen %d\n", kbitlen);
2727 return EINVAL;
2728 }
2729 else if (kbitlen < 192) {
2730 /* AES-128 */
2731 for (i = 0; i < 4; i++)
2732 ekp[i] = kp[i];
2733 for (; i < 8; i++)
2734 ekp[i] = 0;
2735 }
2736 else if (kbitlen < 256) {
2737 /* AES-192 */
2738 for (i = 0; i < 6; i++)
2739 ekp[i] = kp[i];
2740 for (; i < 8; i++)
2741 ekp[i] = 0;
2742 }
2743 else {
2744 /* AES-256 */
2745 for (i = 0; i < 8; i++)
2746 ekp[i] = kp[i];
2747 }
2748 /* make decryption key */
2749 mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2750 break;
2751 default:
2752 for (i = 0; i < 8; i++)
			ekp[i] = dkp[i] = 0;
2754 break;
2755 }
2756
2757 #ifdef MVXPSEC_DEBUG
2758 if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2759 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2760 "%s: keyregistered\n", __func__);
2761 mvxpsec_dump_data(__func__, ekp, 32);
2762 }
2763 #endif
2764
2765 return 0;
2766 }
2767
2768 /*
2769 * set MAC key to the session
2770 *
 * The MAC engine has no register for the key itself, but it does
 * have inner and outer IV registers.  Software must compute these
 * IVs before enabling the engine.
 *
 * The IVs are intermediate hash states derived from ipad/opad as
 * defined by the FIPS-198a (HMAC) standard.
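 *
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * so the engine only needs the two intermediate states obtained by
 * compressing the padded 64-octet strings (K ^ ipad) and (K ^ opad);
 * mvxpsec_hmac_precomp() computes exactly those states.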
2777 */
2778 STATIC int
2779 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2780 void *iv_inner, void *iv_outer)
2781 {
2782 SHA1_CTX sha1;
2783 MD5_CTX md5;
2784 uint8_t *key8 = key;
2785 uint8_t kbuf[64];
2786 uint8_t ipad[64];
2787 uint8_t opad[64];
2788 uint32_t *iv_in = iv_inner;
2789 uint32_t *iv_out = iv_outer;
2790 int kbytelen;
2791 int i;
2792 #define HMAC_IPAD 0x36
2793 #define HMAC_OPAD 0x5c
2794
2795 kbytelen = kbitlen / 8;
2796 KASSERT(kbitlen == kbytelen * 8);
	if (kbytelen > 64) {
		/*
		 * RFC 2104: a key longer than one block is replaced by
		 * its digest, and the effective key length becomes the
		 * digest length, not the block length.
		 */
		if (alg == CRYPTO_MD5_HMAC_96 || alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5);
			MD5Update(&md5, key, kbytelen);
			MD5Final(kbuf, &md5);
			kbytelen = 16;	/* MD5 digest length */
		} else {
			SHA1Init(&sha1);
			SHA1Update(&sha1, key, kbytelen);
			SHA1Final(kbuf, &sha1);
			kbytelen = 20;	/* SHA1 digest length */
		}
		key8 = kbuf;
	}
2804
2805 /* make initial 64 oct. string */
2806 switch (alg) {
2807 case CRYPTO_SHA1_HMAC_96:
2808 case CRYPTO_SHA1_HMAC:
2809 case CRYPTO_MD5_HMAC_96:
2810 case CRYPTO_MD5_HMAC:
2811 for (i = 0; i < kbytelen; i++) {
2812 ipad[i] = (key8[i] ^ HMAC_IPAD);
2813 opad[i] = (key8[i] ^ HMAC_OPAD);
2814 }
2815 for (; i < 64; i++) {
2816 ipad[i] = HMAC_IPAD;
2817 opad[i] = HMAC_OPAD;
2818 }
2819 break;
2820 default:
2821 break;
2822 }
2823 #ifdef MVXPSEC_DEBUG
2824 if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2825 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2826 "%s: HMAC-KEY Pre-comp:\n", __func__);
2827 mvxpsec_dump_data(__func__, key, 64);
2828 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2829 "%s: ipad:\n", __func__);
2830 mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2831 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2832 "%s: opad:\n", __func__);
2833 mvxpsec_dump_data(__func__, opad, sizeof(opad));
2834 }
2835 #endif
2836
2837 /* make iv from string */
2838 switch (alg) {
2839 case CRYPTO_SHA1_HMAC_96:
2840 case CRYPTO_SHA1_HMAC:
2841 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2842 "%s: Generate iv_in(SHA1)\n", __func__);
2843 SHA1Init(&sha1);
2844 SHA1Update(&sha1, ipad, 64);
2845 /* XXX: private state... (LE) */
2846 iv_in[0] = htobe32(sha1.state[0]);
2847 iv_in[1] = htobe32(sha1.state[1]);
2848 iv_in[2] = htobe32(sha1.state[2]);
2849 iv_in[3] = htobe32(sha1.state[3]);
2850 iv_in[4] = htobe32(sha1.state[4]);
2851
2852 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2853 "%s: Generate iv_out(SHA1)\n", __func__);
2854 SHA1Init(&sha1);
2855 SHA1Update(&sha1, opad, 64);
2856 /* XXX: private state... (LE) */
2857 iv_out[0] = htobe32(sha1.state[0]);
2858 iv_out[1] = htobe32(sha1.state[1]);
2859 iv_out[2] = htobe32(sha1.state[2]);
2860 iv_out[3] = htobe32(sha1.state[3]);
2861 iv_out[4] = htobe32(sha1.state[4]);
2862 break;
2863 case CRYPTO_MD5_HMAC_96:
2864 case CRYPTO_MD5_HMAC:
2865 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2866 "%s: Generate iv_in(MD5)\n", __func__);
2867 MD5Init(&md5);
2868 MD5Update(&md5, ipad, sizeof(ipad));
2869 /* XXX: private state... (LE) */
2870 iv_in[0] = htobe32(md5.state[0]);
2871 iv_in[1] = htobe32(md5.state[1]);
2872 iv_in[2] = htobe32(md5.state[2]);
2873 iv_in[3] = htobe32(md5.state[3]);
2874 iv_in[4] = 0;
2875
2876 MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2877 "%s: Generate iv_out(MD5)\n", __func__);
2878 MD5Init(&md5);
2879 MD5Update(&md5, opad, sizeof(opad));
2880 /* XXX: private state... (LE) */
2881 iv_out[0] = htobe32(md5.state[0]);
2882 iv_out[1] = htobe32(md5.state[1]);
2883 iv_out[2] = htobe32(md5.state[2]);
2884 iv_out[3] = htobe32(md5.state[3]);
2885 iv_out[4] = 0;
2886 break;
2887 default:
2888 break;
2889 }
2890
2891 #ifdef MVXPSEC_DEBUG
2892 if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2893 MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2894 "%s: HMAC IV-IN\n", __func__);
2895 mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2896 MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2897 "%s: HMAC IV-OUT\n", __func__);
2898 mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2899 }
2900 #endif
2901
2902 return 0;
2903 #undef HMAC_IPAD
2904 #undef HMAC_OPAD
2905 }
2906
2907 /*
2908 * AES Support routine
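 *
 * The engine loads a separate decryption key: for AES this is the
 * tail of the expanded key schedule.  mv_aes_ksched() performs the
 * Rijndael key expansion (FIPS-197) and mv_aes_deckey() extracts the
 * last-round columns as the decryption key.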
2909 */
2910 static uint8_t AES_SBOX[256] = {
2911 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
2912 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
2913 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
2914 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154,
2915 7, 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110,
2916 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237,
2917 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239,
2918 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
2919 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255,
2920 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61,
2921 100, 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238,
2922 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92,
2923 194, 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213,
2924 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46,
2925 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62,
2926 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
2927 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85,
2928 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15,
2929 176, 84, 187, 22
2930 };
2931
2932 static uint32_t AES_RCON[30] = {
2933 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2934 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2935 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2936 };
2937
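/*
 * Key expansion core, shared by mv_aes_deckey(): W[round][row][col]
 * receives (ROUNDS+1) round keys of BC=4 columns each.
 */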
2938 STATIC int
2939 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2940 uint8_t W[MAXROUNDS+1][4][MAXBC])
2941 {
2942 int KC, BC, ROUNDS;
2943 int i, j, t, rconpointer = 0;
2944 uint8_t tk[4][MAXKC];
2945
2946 switch (keyBits) {
2947 case 128:
2948 ROUNDS = 10;
2949 KC = 4;
2950 break;
2951 case 192:
2952 ROUNDS = 12;
2953 KC = 6;
2954 break;
2955 case 256:
2956 ROUNDS = 14;
2957 KC = 8;
2958 break;
2959 default:
2960 return (-1);
2961 }
2962 BC = 4; /* 128 bits */
2963
2964 for(j = 0; j < KC; j++)
2965 for(i = 0; i < 4; i++)
2966 tk[i][j] = k[i][j];
2967 t = 0;
2968
2969 /* copy values into round key array */
2970 for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2971 for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2972
2973 while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2974 /* calculate new values */
2975 for(i = 0; i < 4; i++)
2976 tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2977 tk[0][0] ^= AES_RCON[rconpointer++];
2978
2979 if (KC != 8)
2980 for(j = 1; j < KC; j++)
2981 for(i = 0; i < 4; i++)
2982 tk[i][j] ^= tk[i][j-1];
2983 else {
2984 for(j = 1; j < KC/2; j++)
2985 for(i = 0; i < 4; i++)
2986 tk[i][j] ^= tk[i][j-1];
2987 for(i = 0; i < 4; i++)
2988 tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
2989 for(j = KC/2 + 1; j < KC; j++)
2990 for(i = 0; i < 4; i++)
2991 tk[i][j] ^= tk[i][j-1];
2992 }
2993 /* copy values into round key array */
2994 for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2995 for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2996 }
2997
2998 return 0;
2999 }
3000
3001 STATIC int
3002 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3003 {
3004 uint8_t W[MAXROUNDS+1][4][MAXBC];
3005 uint8_t k[4][MAXKC];
3006 uint8_t j;
3007 int i, rounds, KC;
3008
3009 if (expandedKey == NULL)
3010 return -1;
3011
3012 if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3013 return -1;
3014
3015 if (keyMaterial == NULL)
3016 return -1;
3017
3018 /* initialize key schedule: */
3019 for (i=0; i<keyLen/8; i++) {
3020 j = keyMaterial[i];
3021 k[i % 4][i / 4] = j;
3022 }
3023
3024 mv_aes_ksched(k, keyLen, W);
3025 switch (keyLen) {
3026 case 128:
3027 rounds = 10;
3028 KC = 4;
3029 break;
3030 case 192:
3031 rounds = 12;
3032 KC = 6;
3033 break;
3034 case 256:
3035 rounds = 14;
3036 KC = 8;
3037 break;
3038 default:
3039 return -1;
3040 }
3041
3042 for(i=0; i<MAXBC; i++)
3043 for(j=0; j<4; j++)
3044 expandedKey[i*4+j] = W[rounds][j][i];
3045 for(; i<KC; i++)
3046 for(j=0; j<4; j++)
3047 expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3048
3049 return 0;
3050 }
3051
3052 /*
3053 * Clear cipher/mac operation state
3054 */
3055 INLINE void
3056 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3057 {
3058 mv_p->pkt_header.desc.acc_config = 0;
3059 mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3060 mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3061 }
3062
3063 /*
3064 * update MVXPSEC operation order
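 *
 * The engine runs at most one cipher and one MAC pass per packet;
 * the order in which the crd entries arrive selects the combined
 * mode: ENC then MAC for encryption, MAC then ENC for decryption.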
3065 */
3066 INLINE void
3067 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3068 {
3069 struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3070 uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3071
3072 KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3073 KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3074
3075 if (cur_op == 0)
3076 acc_desc->acc_config |= op;
3077 else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3078 acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3079 acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3080 /* MAC then ENC (= decryption) */
3081 }
3082 else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3083 acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3084 acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3085 /* ENC then MAC (= encryption) */
3086 }
3087 else {
		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
		    __func__,
		    (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3091 }
3092 }
3093
3094 /*
3095 * Parameter Conversions
3096 */
3097 INLINE uint32_t
3098 mvxpsec_alg2acc(uint32_t alg)
3099 {
3100 uint32_t reg;
3101
3102 switch (alg) {
3103 case CRYPTO_DES_CBC:
3104 reg = MV_ACC_CRYPTO_ENC_DES;
3105 reg |= MV_ACC_CRYPTO_CBC;
3106 break;
3107 case CRYPTO_3DES_CBC:
3108 reg = MV_ACC_CRYPTO_ENC_3DES;
3109 reg |= MV_ACC_CRYPTO_3DES_EDE;
3110 reg |= MV_ACC_CRYPTO_CBC;
3111 break;
3112 case CRYPTO_AES_CBC:
3113 reg = MV_ACC_CRYPTO_ENC_AES;
3114 reg |= MV_ACC_CRYPTO_CBC;
3115 break;
3116 case CRYPTO_SHA1_HMAC_96:
3117 reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3118 reg |= MV_ACC_CRYPTO_MAC_96;
3119 break;
3120 case CRYPTO_MD5_HMAC_96:
3121 reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3122 reg |= MV_ACC_CRYPTO_MAC_96;
3123 break;
3124 default:
3125 reg = 0;
3126 break;
3127 }
3128
3129 return reg;
3130 }
3131
3132 INLINE uint32_t
3133 mvxpsec_aesklen(int klen)
3134 {
3135 if (klen < 128)
3136 return 0;
3137 else if (klen < 192)
3138 return MV_ACC_CRYPTO_AES_KLEN_128;
3139 else if (klen < 256)
3140 return MV_ACC_CRYPTO_AES_KLEN_192;
	else
		return MV_ACC_CRYPTO_AES_KLEN_256;
3145 }
3146
3147 /*
3148 * String Conversions
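 *
 * These helpers format register values for logging.  Most return a
 * pointer to a static buffer, so they are not reentrant and are only
 * meant for debug output.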
3149 */
3150 STATIC const char *
3151 s_errreg(uint32_t v)
3152 {
3153 static char buf[80];
3154
3155 snprintf(buf, sizeof(buf),
3156 "%sMiss %sDoubleHit %sBothHit %sDataError",
3157 (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3158 (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3159 (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3160 (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3161
3162 return (const char *)buf;
3163 }
3164
3165 STATIC const char *
3166 s_winreg(uint32_t v)
3167 {
3168 static char buf[80];
3169
3170 snprintf(buf, sizeof(buf),
3171 "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3172 (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3173 MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3174 MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3175
3176 return (const char *)buf;
3177 }
3178
3179 STATIC const char *
3180 s_ctrlreg(uint32_t reg)
3181 {
3182 static char buf[80];
3183
3184 snprintf(buf, sizeof(buf),
3185 "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3186 (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3187 (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3188 MV_TDMA_CONTROL_GET_DST_BURST(reg),
3189 MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3190 (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3191 (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3192 (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3193 (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3194
3195 return (const char *)buf;
3196 }
3197
3198 _STATIC const char *
3199 s_xpsecintr(uint32_t v)
3200 {
3201 static char buf[160];
3202
3203 snprintf(buf, sizeof(buf),
3204 "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3205 "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3206 (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3207 (v & MVXPSEC_INT_DES) ? "+" : "-",
3208 (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3209 (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3210 (v & MVXPSEC_INT_ENC) ? "+" : "-",
3211 (v & MVXPSEC_INT_SA) ? "+" : "-",
3212 (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3213 (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3214 (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3215 (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3216
3217 return (const char *)buf;
3218 }
3219
3220 STATIC const char *
3221 s_ctlalg(uint32_t alg)
3222 {
3223 switch (alg) {
3224 case CRYPTO_SHA1_HMAC_96:
3225 return "HMAC-SHA1-96";
3226 case CRYPTO_SHA1_HMAC:
3227 return "HMAC-SHA1";
3228 case CRYPTO_SHA1:
3229 return "SHA1";
3230 case CRYPTO_MD5_HMAC_96:
3231 return "HMAC-MD5-96";
3232 case CRYPTO_MD5_HMAC:
3233 return "HMAC-MD5";
3234 case CRYPTO_MD5:
3235 return "MD5";
3236 case CRYPTO_DES_CBC:
3237 return "DES-CBC";
3238 case CRYPTO_3DES_CBC:
3239 return "3DES-CBC";
3240 case CRYPTO_AES_CBC:
3241 return "AES-CBC";
3242 default:
3243 break;
3244 }
3245
3246 return "Unknown";
3247 }
3248
3249 STATIC const char *
3250 s_xpsec_op(uint32_t reg)
3251 {
3252 reg &= MV_ACC_CRYPTO_OP_MASK;
3253 switch (reg) {
3254 case MV_ACC_CRYPTO_OP_ENC:
3255 return "ENC";
3256 case MV_ACC_CRYPTO_OP_MAC:
3257 return "MAC";
3258 case MV_ACC_CRYPTO_OP_ENCMAC:
3259 return "ENC-MAC";
3260 case MV_ACC_CRYPTO_OP_MACENC:
3261 return "MAC-ENC";
3262 default:
3263 break;
3264 }
3265
3266 return "Unknown";
3267 }
3268
3269 STATIC const char *
3270 s_xpsec_enc(uint32_t alg)
3271 {
3272 alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3273 switch (alg) {
3274 case MV_ACC_CRYPTO_ENC_DES:
3275 return "DES";
3276 case MV_ACC_CRYPTO_ENC_3DES:
3277 return "3DES";
3278 case MV_ACC_CRYPTO_ENC_AES:
3279 return "AES";
3280 default:
3281 break;
3282 }
3283
3284 return "Unknown";
3285 }
3286
3287 STATIC const char *
3288 s_xpsec_mac(uint32_t alg)
3289 {
3290 alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3291 switch (alg) {
3292 case MV_ACC_CRYPTO_MAC_NONE:
3293 return "Disabled";
3294 case MV_ACC_CRYPTO_MAC_MD5:
3295 return "MD5";
3296 case MV_ACC_CRYPTO_MAC_SHA1:
3297 return "SHA1";
3298 case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3299 return "HMAC-MD5";
3300 case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3301 return "HMAC-SHA1";
3302 default:
3303 break;
3304 }
3305
3306 return "Unknown";
3307 }
3308
3309 STATIC const char *
3310 s_xpsec_frag(uint32_t frag)
3311 {
3312 frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3313 switch (frag) {
3314 case MV_ACC_CRYPTO_NOFRAG:
3315 return "NoFragment";
3316 case MV_ACC_CRYPTO_FRAG_FIRST:
3317 return "FirstFragment";
3318 case MV_ACC_CRYPTO_FRAG_MID:
3319 return "MiddleFragment";
3320 case MV_ACC_CRYPTO_FRAG_LAST:
3321 return "LastFragment";
3322 default:
3323 break;
3324 }
3325
3326 return "Unknown";
3327 }
3328
3329 #ifdef MVXPSEC_DEBUG
3330 void
3331 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3332 {
3333 uint32_t reg;
3334 int i;
3335
3336 if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3337 return;
3338
3339 printf("--- Interrupt Registers ---\n");
3340 reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3341 printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3342 printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3343 reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3344 printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3345 printf("MVXPSEC INT MASKE: %s\n", s_xpsecintr(reg));
3346
3347 printf("--- DMA Configuration Registers ---\n");
3348 for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3349 reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3350 printf("TDMA BAR%d: 0x%08x\n", i, reg);
3351 reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3352 printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3353 printf(" -> %s\n", s_winreg(reg));
3354 }
3355
3356 printf("--- DMA Control Registers ---\n");
3357
3358 reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3359 printf("TDMA CONTROL: 0x%08x\n", reg);
3360 printf(" -> %s\n", s_ctrlreg(reg));
3361
3362 printf("--- DMA Current Command Descriptors ---\n");
3363
3364 reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3365 printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3366
3367 reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3368 printf("TDMA ERR MASK: 0x%08x\n", reg);
3369
3370 reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3371 printf("TDMA DATA OWNER: %s\n",
3372 (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3373 printf("TDMA DATA COUNT: %d(0x%x)\n",
3374 (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3375
3376 reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3377 printf("TDMA DATA SRC: 0x%08x\n", reg);
3378
3379 reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3380 printf("TDMA DATA DST: 0x%08x\n", reg);
3381
3382 reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3383 printf("TDMA DATA NXT: 0x%08x\n", reg);
3384
3385 reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3386 printf("TDMA DATA CUR: 0x%08x\n", reg);
3387
3388 printf("--- ACC Command Register ---\n");
3389 reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3390 printf("ACC COMMAND: 0x%08x\n", reg);
3391 printf("ACC: %sACT %sSTOP\n",
3392 (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3393 (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3394
3395 reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3396 printf("ACC CONFIG: 0x%08x\n", reg);
3397 reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3398 printf("ACC DESC: 0x%08x\n", reg);
3399
3400 printf("--- DES Key Register ---\n");
3401 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3402 printf("DES KEY0 Low: 0x%08x\n", reg);
3403 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3404 printf("DES KEY0 High: 0x%08x\n", reg);
3405 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3406 printf("DES KEY1 Low: 0x%08x\n", reg);
3407 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3408 printf("DES KEY1 High: 0x%08x\n", reg);
3409 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3410 printf("DES KEY2 Low: 0x%08x\n", reg);
3411 reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3412 printf("DES KEY2 High: 0x%08x\n", reg);
3413
3414 printf("--- AES Key Register ---\n");
3415 for (i = 0; i < 8; i++) {
3416 reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3417 printf("AES ENC KEY COL%d: %08x\n", i, reg);
3418 }
3419 for (i = 0; i < 8; i++) {
3420 reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3421 printf("AES DEC KEY COL%d: %08x\n", i, reg);
3422 }
3423
3424 return;
3425 }
3426
3427 STATIC void
3428 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3429 {
3430 uint32_t reg;
3431
3432 if (sc->sc_sram_va == NULL)
3433 return;
3434
3435 if (len == 0) {
3436 printf("\n%s NO DATA(len=0)\n", name);
3437 return;
3438 }
3439 else if (len > MV_ACC_SRAM_SIZE)
3440 len = MV_ACC_SRAM_SIZE;
3441
3442 mutex_enter(&sc->sc_dma_mtx);
3443 reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3444 if (reg & MV_TDMA_CONTROL_ACT) {
3445 printf("TDMA is active, cannot access SRAM\n");
3446 mutex_exit(&sc->sc_dma_mtx);
3447 return;
3448 }
3449 reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3450 if (reg & MV_ACC_COMMAND_ACT) {
3451 printf("SA is active, cannot access SRAM\n");
3452 mutex_exit(&sc->sc_dma_mtx);
3453 return;
3454 }
3455
3456 printf("%s: dump SRAM, %zu bytes\n", name, len);
3457 mvxpsec_dump_data(name, sc->sc_sram_va, len);
3458 mutex_exit(&sc->sc_dma_mtx);
3459 return;
3460 }
3461
3462
3463 _STATIC void
3464 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3465 {
3466 struct mvxpsec_descriptor *d =
3467 (struct mvxpsec_descriptor *)dh->_desc;
3468
3469 printf("--- DMA Command Descriptor ---\n");
3470 printf("DESC: VA=%p PA=0x%08x\n",
3471 d, (uint32_t)dh->phys_addr);
3472 printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3473 printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3474 printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3475 printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3476
3477 return;
3478 }
3479
3480 STATIC void
3481 mvxpsec_dump_data(const char *name, void *p, size_t len)
3482 {
3483 uint8_t *data = p;
3484 off_t off;
3485
3486 printf("%s: dump %p, %zu bytes", name, p, len);
3487 if (p == NULL || len == 0) {
3488 printf("\n%s: NO DATA\n", name);
3489 return;
3490 }
3491 for (off = 0; off < len; off++) {
3492 if ((off % 16) == 0) {
3493 printf("\n%s: 0x%08x:", name, (uint32_t)off);
3494 }
3495 if ((off % 4) == 0) {
3496 printf(" ");
3497 }
3498 printf("%02x", data[off]);
3499 }
3500 printf("\n");
3501
3502 return;
3503 }
3504
3505 _STATIC void
3506 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3507 {
3508 struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3509
3510 printf("%s: packet_data:\n", name);
3511 mvxpsec_dump_packet_data(name, mv_p);
3512
3513 printf("%s: SRAM:\n", name);
3514 mvxpsec_dump_sram(name, sc, 2000);
3515
3516 printf("%s: packet_descriptor:\n", name);
3517 mvxpsec_dump_packet_desc(name, mv_p);
3518 }
3519
3520 _STATIC void
3521 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3522 {
3523 static char buf[1500];
3524 int len;
3525
3526 if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3527 struct mbuf *m;
3528
3529 m = mv_p->data.mbuf;
3530 len = m->m_pkthdr.len;
3531 if (len > sizeof(buf))
3532 len = sizeof(buf);
3533 m_copydata(m, 0, len, buf);
3534 }
3535 else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3536 struct uio *uio;
3537
3538 uio = mv_p->data.uio;
3539 len = uio->uio_resid;
3540 if (len > sizeof(buf))
3541 len = sizeof(buf);
3542 cuio_copydata(uio, 0, len, buf);
3543 }
3544 else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3545 len = mv_p->data_len;
3546 if (len > sizeof(buf))
3547 len = sizeof(buf);
3548 memcpy(buf, mv_p->data.raw, len);
3549 }
3550 else
3551 return;
3552 mvxpsec_dump_data(name, buf, len);
3553
3554 return;
3555 }
3556
3557 _STATIC void
3558 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3559 {
3560 uint32_t *words;
3561
3562 if (mv_p == NULL)
3563 return;
3564
3565 words = &mv_p->pkt_header.desc.acc_desc_dword0;
3566 mvxpsec_dump_acc_config(name, words[0]);
3567 mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3568 mvxpsec_dump_acc_enclen(name, words[2]);
3569 mvxpsec_dump_acc_enckey(name, words[3]);
3570 mvxpsec_dump_acc_enciv(name, words[4]);
3571 mvxpsec_dump_acc_macsrc(name, words[5]);
3572 mvxpsec_dump_acc_macdst(name, words[6]);
3573 mvxpsec_dump_acc_maciv(name, words[7]);
3574
3575 return;
3576 }
3577
3578 _STATIC void
3579 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3580 {
3581 /* SA: Dword 0 */
3582 printf("%s: Dword0=0x%08x\n", name, w);
3583 printf("%s: OP = %s\n", name,
3584 s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3585 printf("%s: MAC = %s\n", name,
3586 s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3587 printf("%s: MAC_LEN = %s\n", name,
3588 w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3589 printf("%s: ENC = %s\n", name,
3590 s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3591 printf("%s: DIR = %s\n", name,
3592 w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3593 printf("%s: CHAIN = %s\n", name,
3594 w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3595 printf("%s: 3DES = %s\n", name,
3596 w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3597 printf("%s: FRAGMENT = %s\n", name,
3598 s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3599 return;
3600 }
3601
3602 STATIC void
3603 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3604 {
3605 /* SA: Dword 1 */
3606 printf("%s: Dword1=0x%08x\n", name, w);
3607 printf("%s: ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3608 printf("%s: ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3609 printf("%s: ENC RANGE = 0x%x - 0x%x\n", name,
3610 MV_ACC_DESC_GET_VAL_1(w),
3611 MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3612 return;
3613 }
3614
3615 STATIC void
3616 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3617 {
3618 /* SA: Dword 2 */
3619 printf("%s: Dword2=0x%08x\n", name, w);
3620 printf("%s: ENC LEN = %d\n", name,
3621 MV_ACC_DESC_GET_VAL_1(w));
3622 return;
3623 }
3624
3625 STATIC void
3626 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3627 {
3628 /* SA: Dword 3 */
3629 printf("%s: Dword3=0x%08x\n", name, w);
3630 printf("%s: EKEY = 0x%x\n", name,
3631 MV_ACC_DESC_GET_VAL_1(w));
3632 return;
3633 }
3634
3635 STATIC void
3636 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3637 {
3638 /* SA: Dword 4 */
3639 printf("%s: Dword4=0x%08x\n", name, w);
3640 printf("%s: EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3641 printf("%s: EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3642 return;
3643 }
3644
3645 STATIC void
3646 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3647 {
3648 /* SA: Dword 5 */
3649 printf("%s: Dword5=0x%08x\n", name, w);
3650 printf("%s: MAC_SRC = 0x%x\n", name,
3651 MV_ACC_DESC_GET_VAL_1(w));
3652 printf("%s: MAC_TOTAL_LEN = %d\n", name,
3653 MV_ACC_DESC_GET_VAL_3(w));
3654 printf("%s: MAC_RANGE = 0x%0x - 0x%0x\n", name,
3655 MV_ACC_DESC_GET_VAL_1(w),
3656 MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3657 return;
3658 }
3659
3660 STATIC void
3661 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3662 {
3663 /* SA: Dword 6 */
3664 printf("%s: Dword6=0x%08x\n", name, w);
3665 printf("%s: MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3666 printf("%s: MAC_BLOCK_LEN = %d\n", name,
3667 MV_ACC_DESC_GET_VAL_2(w));
3668 return;
3669 }
3670
3671 STATIC void
3672 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3673 {
3674 /* SA: Dword 7 */
3675 printf("%s: Dword7=0x%08x\n", name, w);
3676 printf("%s: MAC_INNER_IV = 0x%x\n", name,
3677 MV_ACC_DESC_GET_VAL_1(w));
3678 printf("%s: MAC_OUTER_IV = 0x%x\n", name,
3679 MV_ACC_DESC_GET_VAL_2(w));
3680 return;
3681 }
3682 #endif
3683