/* $NetBSD: mvxpsecvar.h,v 1.1.18.2 2017/12/03 11:37:05 jdolecek Exp $ */
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cryptographic Engine and Security Accelerator (CESA)
 */
#ifndef __MVXPSECVAR_H__
#define __MVXPSECVAR_H__
#include <sys/device.h>
#include <dev/marvell/mvxpsecreg.h>

/*
 * Compile time options
 */
/* use multi-packet chained mode */
#define MVXPSEC_MULTI_PACKET
#define MVXPSEC_EVENT_COUNTERS

/*
 * Memory management
 */
struct mvxpsec_devmem {
        bus_dmamap_t map;
        void *kva;
        int size;
};
#define dm_paddr dm_segs[0].ds_addr
#define devmem_va(x) ((x)->kva)
#define devmem_nseg(x) ((x)->map->dm_nsegs)
#define devmem_pa(x, s) ((x)->map->dm_segs[(s)].ds_addr)
#define devmem_palen(x, s) ((x)->map->dm_segs[(s)].ds_len)
#define devmem_size(x) ((x)->size)
#define devmem_map(x) ((x)->map)
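
/*
 * Illustrative sketch (not part of the driver, compiled out): how the
 * devmem accessors above can be used to walk the DMA segments of an
 * allocated chunk.  The helper name is hypothetical.
 */
#if 0
static void
mvxpsec_devmem_dump(struct mvxpsec_devmem *dm)
{
        int seg;

        printf("kva=%p size=%d nseg=%d\n",
            devmem_va(dm), devmem_size(dm), devmem_nseg(dm));
        for (seg = 0; seg < devmem_nseg(dm); seg++)
                printf("  seg %d: pa=%#lx len=%lu\n", seg,
                    (unsigned long)devmem_pa(dm, seg),
                    (unsigned long)devmem_palen(dm, seg));
}
#endif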

/*
 * DMA Descriptors
 */
struct mvxpsec_descriptor {
        uint32_t tdma_word0;
        uint32_t tdma_src;
        uint32_t tdma_dst;
        uint32_t tdma_nxt;
} __attribute__((__packed__));

struct mvxpsec_descriptor_handle {
        bus_dmamap_t map;
        paddr_t phys_addr;
        int off;

        void *_desc;

        SIMPLEQ_ENTRY(mvxpsec_descriptor_handle) chain;
};
SIMPLEQ_HEAD(mvxpsec_descriptor_list, mvxpsec_descriptor_handle);

struct mvxpsec_descriptor_ring {
        struct mvxpsec_descriptor_handle *dma_head;
        struct mvxpsec_descriptor_handle *dma_last;
        int dma_size;
};

#define MVXPSEC_SYNC_DESC(sc, x, f) \
do { \
        bus_dmamap_sync((sc)->sc_dmat, (x)->map, \
            (x)->off, sizeof(struct mvxpsec_descriptor), (f)); \
} while (/*CONSTCOND*/0)

typedef struct mvxpsec_descriptor_ring mvxpsec_dma_ring;

#define MV_TDMA_DEFAULT_CONTROL \
    ( MV_TDMA_CONTROL_DST_BURST_32 | \
      MV_TDMA_CONTROL_SRC_BURST_32 | \
      MV_TDMA_CONTROL_OUTS_EN | \
      MV_TDMA_CONTROL_OUTS_MODE_4OUTS | \
      MV_TDMA_CONTROL_BSWAP_DIS )
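
/*
 * Illustrative sketch (not part of the driver, compiled out): filling one
 * TDMA descriptor through its handle and flushing it to the device with
 * MVXPSEC_SYNC_DESC().  The helper name is hypothetical, the softc type is
 * defined later in this header, and the tdma_word0 layout (byte count plus
 * owner/flag bits) is an assumption based on the register definitions.
 */
#if 0
static void
mvxpsec_desc_fill(struct mvxpsec_softc *sc,
    struct mvxpsec_descriptor_handle *dh,
    uint32_t src, uint32_t dst, uint32_t len, uint32_t next)
{
        struct mvxpsec_descriptor *desc = dh->_desc;

        desc->tdma_word0 = len;         /* byte count (plus control bits) */
        desc->tdma_src = src;           /* DMA source address */
        desc->tdma_dst = dst;           /* DMA destination address */
        desc->tdma_nxt = next;          /* next descriptor, 0 terminates */

        MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
}
#endif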

/*
 * Security Accelerator Descriptors
 */
struct mvxpsec_acc_descriptor {
        uint32_t acc_config;
        uint32_t acc_encdata;
        uint32_t acc_enclen;
        uint32_t acc_enckey;
        uint32_t acc_enciv;
        uint32_t acc_macsrc;
        uint32_t acc_macdst;
        uint32_t acc_maciv;
#define acc_desc_dword0 acc_config
#define acc_desc_dword1 acc_encdata
#define acc_desc_dword2 acc_enclen
#define acc_desc_dword3 acc_enckey
#define acc_desc_dword4 acc_enciv
#define acc_desc_dword5 acc_macsrc
#define acc_desc_dword6 acc_macdst
#define acc_desc_dword7 acc_maciv
} __attribute__((aligned(4)));

struct mvxpsec_crp_key {
        uint32_t crp_key32[8];
} __attribute__((aligned(4)));

struct mvxpsec_crp_iv {
        uint32_t crp_iv32[4];
} __attribute__((aligned(4)));

struct mvxpsec_mac_iv {
        uint32_t mac_iv32[5];
        uint32_t mac_ivpad[1]; /* bit[2:0] = 0 */
} __attribute__((aligned(8)));

/* Many pointers in the descriptor must have bit[2:0] = 0 (8-byte aligned). */
struct mvxpsec_packet_header {
        struct mvxpsec_acc_descriptor desc; /* 32 oct. */
        struct mvxpsec_crp_iv crp_iv_work;  /* 16 oct. */
        struct mvxpsec_crp_iv crp_iv_ext;   /* 16 oct. */
} __attribute__((aligned(4)));              /* 64 oct. */

struct mvxpsec_session_header {
        struct mvxpsec_crp_key crp_key;     /* 32 oct. */
        struct mvxpsec_crp_key crp_key_d;   /* 32 oct. */
        struct mvxpsec_mac_iv miv_in;       /* 24 oct. */
        struct mvxpsec_mac_iv miv_out;      /* 24 oct. */
        uint8_t pad[16];                    /* 16 oct. */
} __attribute__((aligned(4)));              /* 128 oct. */
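
/*
 * Illustrative sketch (not part of the driver, compiled out): the size
 * comments above can be checked at compile time, since the SRAM layout
 * below depends on them.
 */
#if 0
__CTASSERT(sizeof(struct mvxpsec_packet_header) == 64);
__CTASSERT(sizeof(struct mvxpsec_session_header) == 128);
#endif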

/*
 * Usage of CESA internal SRAM
 *
 * +---------------+ MVXPSEC_SRAM_PKT_HDR_OFF(0)
 * |Packet Header  | contains per packet information (IV, ACC descriptor)
 * |               |
 * |               |
 * +---------------+ MVXPSEC_SRAM_SESS_HDR_OFF
 * |Session Header | contains per session information (Key, HMAC-iPad/oPad)
 * |               | may not be DMA-transferred if the session is unchanged.
 * |               |
 * +---------------+ MVXPSEC_SRAM_PAYLOAD_OFF
 * |Payload        |
 * |               |
 * .               .
 * .               .
 * .               .
 * |               |
 * +---------------+ MV_ACC_SRAM_SIZE(2048)
 *
 * The input data is transferred from system DRAM to the SRAM using TDMA,
 * and the ACC works on the SRAM.  When the ACC has finished its work,
 * TDMA copies the payload back from the SRAM to system DRAM.
 *
 * The CPU can also access the SRAM directly via the Mbus interface.  This
 * driver accesses the SRAM that way only for debugging.
 */
#define SRAM_PAYLOAD_SIZE \
    (MV_ACC_SRAM_SIZE \
     - sizeof(struct mvxpsec_packet_header) \
     - sizeof(struct mvxpsec_session_header))
struct mvxpsec_crypt_sram {
        struct mvxpsec_packet_header packet_header;   /* 64 oct. */
        struct mvxpsec_session_header session_header; /* 128 oct. */
        uint8_t payload[SRAM_PAYLOAD_SIZE];
} __attribute__((aligned(8)));                        /* Max. 2048 oct. */
#define MVXPSEC_SRAM_PKT_HDR_OFF \
    (offsetof(struct mvxpsec_crypt_sram, packet_header))
#define MVXPSEC_SRAM_DESC_OFF (MVXPSEC_SRAM_PKT_HDR_OFF + \
    offsetof(struct mvxpsec_packet_header, desc))
#define MVXPSEC_SRAM_IV_WORK_OFF (MVXPSEC_SRAM_PKT_HDR_OFF + \
    offsetof(struct mvxpsec_packet_header, crp_iv_work))
#define MVXPSEC_SRAM_IV_EXT_OFF (MVXPSEC_SRAM_PKT_HDR_OFF + \
    offsetof(struct mvxpsec_packet_header, crp_iv_ext))

#define MVXPSEC_SRAM_SESS_HDR_OFF \
    (offsetof(struct mvxpsec_crypt_sram, session_header))
#define MVXPSEC_SRAM_KEY_OFF (MVXPSEC_SRAM_SESS_HDR_OFF + \
    offsetof(struct mvxpsec_session_header, crp_key))
#define MVXPSEC_SRAM_KEY_D_OFF (MVXPSEC_SRAM_SESS_HDR_OFF + \
    offsetof(struct mvxpsec_session_header, crp_key_d))
#define MVXPSEC_SRAM_MIV_IN_OFF (MVXPSEC_SRAM_SESS_HDR_OFF + \
    offsetof(struct mvxpsec_session_header, miv_in))
#define MVXPSEC_SRAM_MIV_OUT_OFF (MVXPSEC_SRAM_SESS_HDR_OFF + \
    offsetof(struct mvxpsec_session_header, miv_out))

#define MVXPSEC_SRAM_PAYLOAD_OFF \
    (offsetof(struct mvxpsec_crypt_sram, payload))

/* CESA device address (CESA internal SRAM address space) */
#define MVXPSEC_SRAM_DESC_DA MVXPSEC_SRAM_DESC_OFF
#define MVXPSEC_SRAM_IV_WORK_DA MVXPSEC_SRAM_IV_WORK_OFF
#define MVXPSEC_SRAM_IV_EXT_DA MVXPSEC_SRAM_IV_EXT_OFF
#define MVXPSEC_SRAM_KEY_DA MVXPSEC_SRAM_KEY_OFF
#define MVXPSEC_SRAM_KEY_D_DA MVXPSEC_SRAM_KEY_D_OFF
#define MVXPSEC_SRAM_MIV_IN_DA MVXPSEC_SRAM_MIV_IN_OFF
#define MVXPSEC_SRAM_MIV_OUT_DA MVXPSEC_SRAM_MIV_OUT_OFF
#define MVXPSEC_SRAM_PAYLOAD_DA(offset) \
    (MVXPSEC_SRAM_PAYLOAD_OFF + (offset))
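
/*
 * Illustrative sketch (not part of the driver, compiled out): the SRAM
 * offsets above double as the CESA device addresses (*_DA) programmed into
 * the accelerator descriptor.  The helper name is hypothetical.
 */
#if 0
static void
mvxpsec_sram_layout_print(void)
{
        printf("pkt_hdr  @ %zu\n", (size_t)MVXPSEC_SRAM_PKT_HDR_OFF);
        printf("sess_hdr @ %zu\n", (size_t)MVXPSEC_SRAM_SESS_HDR_OFF);
        printf("payload  @ %zu (size %zu)\n",
            (size_t)MVXPSEC_SRAM_PAYLOAD_OFF, (size_t)SRAM_PAYLOAD_SIZE);
}
#endif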

/*
 * Session management
 */
enum mvxpsec_data_type {
        MVXPSEC_DATA_NONE,
        MVXPSEC_DATA_RAW,
        MVXPSEC_DATA_MBUF,
        MVXPSEC_DATA_UIO,
        MVXPSEC_DATA_LAST,
};

/* session flags */
#define RDY_DATA (1 << 0)
#define RDY_CRP_KEY (1 << 1)
#define RDY_CRP_IV (1 << 2)
#define RDY_MAC_KEY (1 << 3)
#define RDY_MAC_IV (1 << 4)
#define CRP_EXT_IV (1 << 5)

#define SETUP_DONE (1 << 10)
#define DELETED (1 << 11)
#define DIR_ENCRYPT (1 << 12)
#define DIR_DECRYPT (1 << 13)

#define HW_RUNNING (1 << 16)

/* 64 peers * 2 ways (in/out) * 2 families (inet/inet6) * 2 states (mature/dying) */
#define MVXPSEC_MAX_SESSIONS 512
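
/*
 * Illustrative sketch (not part of the driver, compiled out): the RDY_*
 * bits mark which pieces of a request have been staged.  Which bits are
 * actually required depends on the configured algorithms; the mask below
 * is only an example and the helper name is hypothetical.
 */
#if 0
static int
mvxpsec_flags_ready(uint32_t flags)
{
        const uint32_t need = RDY_DATA | RDY_CRP_KEY | RDY_CRP_IV;

        return (flags & need) == need && (flags & DELETED) == 0;
}
#endif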

struct mvxpsec_session {
        struct mvxpsec_softc *sc;
        uint32_t sid;

        uint32_t sflags;
        uint32_t refs;

        /*
         * Header of Security Accelerator
         * - includes key entity for ciphers
         * - includes IV for HMAC
         */
        bus_dmamap_t session_header_map;
        struct mvxpsec_session_header session_header;

        /* Key length for variable key length algorithms [bits] */
        int enc_klen;
        int mac_klen;

        /* IV Store */
        struct mvxpsec_crp_iv session_iv;

        /* debug */
        int cipher_alg;
        int hmac_alg;
};

struct mvxpsec_packet {
        struct mvxpsec_session *mv_s;
        struct cryptop *crp;
        int flags;

        mvxpsec_dma_ring dma_ring;

        bus_dmamap_t pkt_header_map;
        struct mvxpsec_packet_header pkt_header;

        bus_dmamap_t data_map;
        enum mvxpsec_data_type data_type;
        uint32_t data_len;
        union {
                /* payload buffer comes from the opencrypto API */
                void *ptr;
                void *raw;
                struct mbuf *mbuf;
                struct uio *uio;
        } data;

        /* IV placeholder for an explicit IV */
        void *ext_iv;
        int ext_ivlen;

        uint32_t enc_off;
        uint32_t enc_len;
        uint32_t enc_ivoff;
        uint32_t mac_off;
        uint32_t mac_len;
        uint32_t mac_dst;
#define data_ptr data.ptr
#define data_raw data.raw
#define data_mbuf data.mbuf
#define data_uio data.uio

        /* list */
        SIMPLEQ_ENTRY(mvxpsec_packet) queue;
        SLIST_ENTRY(mvxpsec_packet) free_list;
};
typedef SIMPLEQ_HEAD(mvxpsec_packet_queue, mvxpsec_packet) mvxpsec_queue_t;
typedef SLIST_HEAD(mvxpsec_packet_list, mvxpsec_packet) mvxpsec_list_t;
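
/*
 * Illustrative sketch (not part of the driver, compiled out): the data
 * union above is selected by data_type, so the payload pointer is always
 * recovered through the same switch.  The helper name is hypothetical.
 */
#if 0
static void *
mvxpsec_packet_data(struct mvxpsec_packet *mv_p)
{
        switch (mv_p->data_type) {
        case MVXPSEC_DATA_RAW:
                return mv_p->data_raw;
        case MVXPSEC_DATA_MBUF:
                return mv_p->data_mbuf;  /* struct mbuf * */
        case MVXPSEC_DATA_UIO:
                return mv_p->data_uio;   /* struct uio * */
        default:
                return NULL;
        }
}
#endif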

/*
 * DMA Configuration
 */
#define MVXPSEC_DMA_DESC_PAGES 16
#define MVXPSEC_DMA_MAX_SEGS 30
#define MVXPSEC_DMA_MAX_SIZE 2048 /* = SRAM size */

/*
 * Interrupt Configuration
 */
#define MVXPSEC_ALL_INT (0xffffffff)
#define MVXPSEC_ALL_ERR (0xffffffff)
#define MVXPSEC_DEFAULT_INT (MVXPSEC_INT_ACCTDMA)
#define MVXPSEC_DEFAULT_ERR (MVXPSEC_ALL_ERR)

/*
 * Queue Configuration
 */
#define MVXPSEC_MAX_QLEN 512
#define MVXPSEC_QLEN_HIWAT 256
#define MVXPSEC_QLEN_DEF_LOWAT 16
#define MVXPSEC_DEF_PENDING 0

/*
 * Event counters
 */
struct mvxpsec_evcnt {
        /* interrupts */
        struct evcnt intr_all;
        struct evcnt intr_auth;
        struct evcnt intr_des;
        struct evcnt intr_aes_enc;
        struct evcnt intr_aes_dec;
        struct evcnt intr_enc;
        struct evcnt intr_sa;
        struct evcnt intr_acctdma;
        struct evcnt intr_comp;
        struct evcnt intr_own;
        struct evcnt intr_acctdma_cont;

        /* session counters */
        struct evcnt session_new;
        struct evcnt session_free;

        /* packet counters */
        struct evcnt packet_ok;
        struct evcnt packet_err;

        /* queue */
        struct evcnt dispatch_packets;
        struct evcnt dispatch_queue;
        struct evcnt queue_full;
        struct evcnt max_dispatch;
        struct evcnt max_done;
};
#ifdef MVXPSEC_EVENT_COUNTERS
#define MVXPSEC_EVCNT_INCR(sc, name) do { \
        (sc)->sc_ev.name.ev_count++; \
} while (/*CONSTCOND*/0)
#define MVXPSEC_EVCNT_ADD(sc, name, val) do { \
        (sc)->sc_ev.name.ev_count += (val); \
} while (/*CONSTCOND*/0)
#define MVXPSEC_EVCNT_MAX(sc, name, val) do { \
        if ((val) > (sc)->sc_ev.name.ev_count) \
                (sc)->sc_ev.name.ev_count = (val); \
} while (/*CONSTCOND*/0)
#else
#define MVXPSEC_EVCNT_INCR(sc, name) /* nothing */
#define MVXPSEC_EVCNT_ADD(sc, name, val) /* nothing */
#define MVXPSEC_EVCNT_MAX(sc, name, val) /* nothing */
#endif
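
/*
 * Illustrative sketch (not part of the driver, compiled out): typical
 * counter updates after a dispatch run.  The helper name and "ndispatched"
 * are hypothetical; the softc type is defined below.
 */
#if 0
static void
mvxpsec_count_dispatch(struct mvxpsec_softc *sc, int ndispatched)
{
        MVXPSEC_EVCNT_ADD(sc, dispatch_packets, ndispatched);
        MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
        MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatched);
}
#endif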

struct mvxpsec_softc {
        device_t sc_dev;
        uint32_t sc_cid;
        bus_space_tag_t sc_iot;
        bus_space_handle_t sc_ioh;
        bus_dma_tag_t sc_dmat;

        /* Memory Pools */
        struct mvxpsec_devmem *sc_devmem_desc;
        struct mvxpsec_devmem *sc_devmem_mmap;
        pool_cache_t sc_session_pool;
        pool_cache_t sc_packet_pool;

        /* Event Counters */
#ifdef MVXPSEC_EVENT_COUNTERS
        struct mvxpsec_evcnt sc_ev;
#endif

        /* SRAM mappings */
        paddr_t sc_sram_pa;
        void *sc_sram_va;

        /* Interrupts and Timers */
        callout_t sc_timeout;
        void *sc_done_ih;
        void *sc_error_ih;

        /* DMA Descriptors */
        kmutex_t sc_dma_mtx;
        struct mvxpsec_descriptor_handle *sc_desc_ring;
        int sc_desc_ring_size;
        int sc_desc_ring_prod;
        int sc_desc_ring_cons;

        /* Session */
        kmutex_t sc_session_mtx;
        struct mvxpsec_session *sc_sessions[MVXPSEC_MAX_SESSIONS];
        int sc_nsessions;
        struct mvxpsec_session *sc_last_session;

        /* Packet queue */
        kmutex_t sc_queue_mtx;
        mvxpsec_queue_t sc_wait_queue;
        int sc_wait_qlen;
        int sc_wait_qlimit;
        mvxpsec_queue_t sc_run_queue;
        mvxpsec_list_t sc_free_list;
        int sc_free_qlen;
        uint32_t sc_flags;

        /* Debug */
        int sc_craft_conf;
        int sc_craft_p0;
};
/* SRAM parameter accessors */
#define MVXPSEC_SRAM_BASE(sc) ((sc)->sc_sram_pa)
#define MVXPSEC_SRAM_SIZE(sc) (sizeof(struct mvxpsec_crypt_sram))
#define MVXPSEC_SRAM_PA(sc, offset) \
    (MVXPSEC_SRAM_BASE(sc) + (offset))
#define MVXPSEC_SRAM_LIMIT(sc) \
    (MVXPSEC_SRAM_BASE(sc) + MVXPSEC_SRAM_SIZE(sc))
#define MVXPSEC_SRAM_PKT_HDR_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_PKT_HDR_OFF)
#define MVXPSEC_SRAM_DESC_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_DESC_OFF)
#define MVXPSEC_SRAM_IV_WORK_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_IV_WORK_OFF)
#define MVXPSEC_SRAM_SESS_HDR_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_SESS_HDR_OFF)
#define MVXPSEC_SRAM_KEY_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_KEY_OFF)
#define MVXPSEC_SRAM_KEY_D_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_KEY_D_OFF)
#define MVXPSEC_SRAM_MIV_IN_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_MIV_IN_OFF)
#define MVXPSEC_SRAM_MIV_OUT_PA(sc) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_MIV_OUT_OFF)
#define MVXPSEC_SRAM_PAYLOAD_PA(sc, offset) \
    MVXPSEC_SRAM_PA((sc), MVXPSEC_SRAM_PAYLOAD_OFF + (offset))
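
/*
 * Illustrative sketch (not part of the driver, compiled out): a bounds
 * check built from the accessors above, verifying that a payload slice
 * stays inside the mapped SRAM window.  The helper name is hypothetical.
 */
#if 0
static int
mvxpsec_sram_payload_fits(struct mvxpsec_softc *sc, size_t offset, size_t len)
{
        return MVXPSEC_SRAM_PAYLOAD_PA(sc, offset) + len <=
            MVXPSEC_SRAM_LIMIT(sc);
}
#endif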

/*
 * OpenCrypto API
 */
extern int mvxpsec_register(struct mvxpsec_softc *);
extern int mvxpsec_newsession(void *, uint32_t *, struct cryptoini *);
extern int mvxpsec_freesession(void *, uint64_t);
extern int mvxpsec_dispatch(void *, struct cryptop *, int);
extern void mvxpsec_done(void *);

/* debug flags */
#define MVXPSEC_DEBUG_DMA __BIT(0)
#define MVXPSEC_DEBUG_IOCTL __BIT(1)
#define MVXPSEC_DEBUG_INTR __BIT(2)
#define MVXPSEC_DEBUG_SRAM __BIT(3)
#define MVXPSEC_DEBUG_OPENCRYPTO __BIT(4)
#define MVXPSEC_DEBUG_PAYLOAD __BIT(5)
#define MVXPSEC_DEBUG_HASH_IV __BIT(6)
#define MVXPSEC_DEBUG_HASH_VAL __BIT(7)
#define MVXPSEC_DEBUG_DESC __BIT(8) /* descriptors and registers */
#define MVXPSEC_DEBUG_INPUT __BIT(9)
#define MVXPSEC_DEBUG_ENC_IV __BIT(10)
#define MVXPSEC_DEBUG_QUEUE __BIT(11)

#define MVXPSEC_DEBUG_ALL __BITS(11,0)

#ifdef MVXPSEC_DEBUG
#define MVXPSEC_PRINTF(level, fmt, ...) \
do { \
        if (mvxpsec_debug & (level)) { \
                printf("%s: ", __func__); \
                printf((fmt), ##__VA_ARGS__); \
        } \
} while (/*CONSTCOND*/0)
#else
#define MVXPSEC_PRINTF(level, fmt, ...) /* nothing */
#endif
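
/*
 * Illustrative sketch (not part of the driver, compiled out): a typical
 * debug print gated on one of the flags above.  "mvxpsec_debug" is the
 * driver-global debug mask defined in the .c file.
 */
#if 0
        MVXPSEC_PRINTF(MVXPSEC_DEBUG_QUEUE, "wait_qlen=%d free_qlen=%d\n",
            sc->sc_wait_qlen, sc->sc_free_qlen);
#endif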


#endif /* __MVXPSECVAR_H__ */