/* $NetBSD: qatvar.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DEV_PCI_QATVAR_H_
#define _DEV_PCI_QATVAR_H_

#include <sys/malloc.h> /* for cryptodev.h */
#include <opencrypto/cryptodev.h>

#define QAT_NSYMREQ 256
#define QAT_NSYMCOOKIE ((QAT_NSYMREQ * 2 + 1) * 2) /* XXX why? */
#define QAT_NASYMREQ 64
#define QAT_BATCH_SUBMIT_FREE_SPACE 2
#define QAT_NSESSION 16384

#define QAT_EV_NAME_SIZE 32
#define QAT_RING_NAME_SIZE 32

#define QAT_MAXSEG 32 /* max segments for sg dma */
#define QAT_MAXLEN 65535 /* IP_MAXPACKET */

#define QAT_HB_INTERVAL 500 /* heartbeat msec */
#define QAT_SSM_WDT 100

#if !defined(SET)
#define SET(t, f) ((t) |= (f))
#define ISSET(t, f) ((t) & (f))
#define CLR(t, f) ((t) &= ~(f))
#endif

#define QAT_EVENT_COUNTERS

#ifdef QAT_EVENT_COUNTERS
#define QAT_EVCNT_ATTACH(sc, ev, type, name, fmt, args...) \
do { \
	snprintf((name), sizeof((name)), fmt, ##args); \
	evcnt_attach_dynamic((ev), (type), NULL, \
	    device_xname((sc)->sc_dev), (name)); \
} while (0)
#define QAT_EVCNT_INCR(ev) (ev)->ev_count++
#else
#define QAT_EVCNT_ATTACH(sc, ev, type, name, fmt, args...) /* nothing */
#define QAT_EVCNT_INCR(ev) /* nothing */
#endif
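
/*
 * Usage sketch (illustrative comment only, not part of the driver API):
 * for an object that carries an evcnt and a matching name buffer, such as
 * qr_ev_rxintr and qr_ev_rxintr_name in struct qat_ring below, the pattern
 * would look roughly like
 *
 *	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
 *	    qr->qr_ev_rxintr_name, "bank%d ring%d rxintr",
 *	    qr->qr_bank, qr->qr_ring);
 *	...
 *	QAT_EVCNT_INCR(&qr->qr_ev_rxintr);
 *
 * The format string and arguments above are made up for illustration; the
 * real call sites live elsewhere in the driver.  When QAT_EVENT_COUNTERS
 * is not defined, both macros compile away to nothing.
 */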

enum qat_chip_type {
	QAT_CHIP_C2XXX = 0, /* NanoQAT: Atom C2000 */
	QAT_CHIP_C2XXX_IOV,
	QAT_CHIP_C3XXX, /* Atom C3000 */
	QAT_CHIP_C3XXX_IOV,
	QAT_CHIP_C62X,
	QAT_CHIP_C62X_IOV,
	QAT_CHIP_D15XX,
	QAT_CHIP_D15XX_IOV,
};

enum qat_sku {
	QAT_SKU_UNKNOWN = 0,
	QAT_SKU_1,
	QAT_SKU_2,
	QAT_SKU_3,
	QAT_SKU_4,
	QAT_SKU_VF,
};

enum qat_ae_status {
	QAT_AE_ENABLED = 1,
	QAT_AE_ACTIVE,
	QAT_AE_DISABLED
};

#define TIMEOUT_AE_RESET 100
#define TIMEOUT_AE_CHECK 10000
#define TIMEOUT_AE_CSR 500
#define AE_EXEC_CYCLE 20

#define QAT_UOF_MAX_PAGE 1
#define QAT_UOF_MAX_PAGE_REGION 1

struct qat_dmamem {
	bus_dmamap_t qdm_dma_map;
	bus_size_t qdm_dma_size;
	bus_dma_segment_t qdm_dma_seg;
	void *qdm_dma_vaddr;
};

/* Valid internal ring size values */
#define QAT_RING_SIZE_128 0x01
#define QAT_RING_SIZE_256 0x02
#define QAT_RING_SIZE_512 0x03
#define QAT_RING_SIZE_4K 0x06
#define QAT_RING_SIZE_16K 0x08
#define QAT_RING_SIZE_4M 0x10
#define QAT_MIN_RING_SIZE QAT_RING_SIZE_128
#define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M
#define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K

/* Valid internal msg size values */
#define QAT_MSG_SIZE_32 0x01
#define QAT_MSG_SIZE_64 0x02
#define QAT_MSG_SIZE_128 0x04
#define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32
#define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128

/* Size to bytes conversion macros for ring and msg size values */
#define QAT_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define QAT_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)

/* Minimum ring buffer size for memory allocation */
#define QAT_RING_SIZE_BYTES_MIN(SIZE) \
	((SIZE < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ? \
	QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : SIZE)
#define QAT_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
	SIZE) & ~0x4)
/* Max outstanding requests */
#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
	((((1 << (RING_SIZE - 1)) << 3) >> QAT_SIZE_TO_POW(MSG_SIZE)) - 1)
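
/*
 * Worked example of the encodings above (explanatory comment only; the
 * numbers follow directly from the macros).  A msg size code is the message
 * length in bytes divided by 32, so QAT_MSG_SIZE_64 (0x02) maps to
 * QAT_MSG_SIZE_TO_BYTES(0x02) = 64 bytes.  A ring size code n stands for
 * (1 << (n - 1)) * 128 bytes of ring memory, so the default
 * QAT_RING_SIZE_16K (0x08) maps to QAT_SIZE_TO_RING_SIZE_IN_BYTES(0x08) =
 * 16384 bytes.  With that default ring and 64-byte messages,
 * QAT_MAX_INFLIGHTS(QAT_RING_SIZE_16K, QAT_MSG_SIZE_64) = 255, i.e. all
 * 256 slots of the ring minus one slot that is kept free.
 */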

#define QAT_RING_PATTERN 0x7f

struct qat_softc;

typedef int (*qat_cb_t)(struct qat_softc *, void *, void *);

struct qat_ring {
	struct qat_dmamem qr_dma;
	bus_addr_t qr_ring_paddr;
	void *qr_ring_vaddr;
	uint32_t * volatile qr_inflight; /* tx/rx shared */
	uint32_t qr_head;
	uint32_t qr_tail;
	uint8_t qr_msg_size;
	uint8_t qr_ring_size;
	uint32_t qr_ring; /* ring number in bank */
	uint32_t qr_bank; /* bank number in device */
	uint32_t qr_ring_id;
	uint32_t qr_ring_mask;
	qat_cb_t qr_cb;
	void *qr_cb_arg;

	const char *qr_name;
	kmutex_t qr_ring_mtx; /* Lock per ring */

#ifdef QAT_EVENT_COUNTERS
	char qr_ev_rxintr_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_rxintr;
	char qr_ev_rxmsg_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_rxmsg;
	char qr_ev_txmsg_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_txmsg;
	char qr_ev_txfull_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_txfull;
#endif
};

struct qat_bank {
	struct qat_softc *qb_sc; /* back pointer to softc */
	uint32_t qb_intr_mask; /* current interrupt mask */
	uint32_t qb_allocated_rings; /* current allocated ring bitfield */
	uint32_t qb_coalescing_time; /* timer in nano sec, 0: disabled */
#define COALESCING_TIME_INTERVAL_DEFAULT 10000
#define COALESCING_TIME_INTERVAL_MIN 500
#define COALESCING_TIME_INTERVAL_MAX 0xfffff
	uint32_t qb_bank; /* bank index */
	kmutex_t qb_bank_mtx;
	void *qb_ih_cookie;

#ifdef QAT_EVENT_COUNTERS
	char qb_ev_rxintr_name[QAT_EV_NAME_SIZE];
	struct evcnt qb_ev_rxintr;
#endif

	struct qat_ring qb_et_rings[MAX_RING_PER_BANK];

};

struct qat_ap_bank {
	uint32_t qab_nf_mask;
	uint32_t qab_nf_dest;
	uint32_t qab_ne_mask;
	uint32_t qab_ne_dest;
};

struct qat_ae_page {
	struct qat_ae_page *qap_next;
	struct qat_uof_page *qap_page;
	struct qat_ae_region *qap_region;
	u_int qap_flags;
};

#define QAT_AE_PAGA_FLAG_WAITING (1 << 0)

struct qat_ae_region {
	struct qat_ae_page *qar_loaded_page;
	SIMPLEQ_HEAD(, qat_ae_page) qar_waiting_pages;
};

struct qat_ae_slice {
	u_int qas_assigned_ctx_mask;
	struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION];
	struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE];
	struct qat_ae_page *qas_cur_pages[MAX_AE_CTX];
	struct qat_uof_image *qas_image;
};

#define QAT_AE(sc, ae) \
	((sc)->sc_ae[ae])

struct qat_ae {
	u_int qae_state; /* AE state */
	u_int qae_ustore_size; /* micro-store size */
	u_int qae_free_addr; /* free micro-store address */
	u_int qae_free_size; /* free micro-store size */
	u_int qae_live_ctx_mask; /* live context mask */
	u_int qae_ustore_dram_addr; /* micro-store DRAM address */
	u_int qae_reload_size; /* reloadable code size */

	/* aefw */
	u_int qae_num_slices;
	struct qat_ae_slice qae_slices[MAX_AE_CTX];
	u_int qae_reloc_ustore_dram; /* reloadable ustore-dram address */
	u_int qae_effect_ustore_size; /* effective AE ustore size */
	u_int qae_shareable_ustore;
};

struct qat_mof {
	void *qmf_sym; /* SYM_OBJS in sc_fw_mof */
	size_t qmf_sym_size;
	void *qmf_uof_objs; /* UOF_OBJS in sc_fw_mof */
	size_t qmf_uof_objs_size;
	void *qmf_suof_objs; /* SUOF_OBJS in sc_fw_mof */
	size_t qmf_suof_objs_size;
};

struct qat_ae_batch_init {
	u_int qabi_ae;
	u_int qabi_addr;
	u_int *qabi_value;
	u_int qabi_size;
	SIMPLEQ_ENTRY(qat_ae_batch_init) qabi_next;
};

SIMPLEQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init);

/* overwritten struct uof_uword_block */
struct qat_uof_uword_block {
	u_int quub_start_addr; /* start address */
	u_int quub_num_words; /* number of microwords */
	uint64_t quub_micro_words; /* pointer to the uwords */
};

struct qat_uof_page {
	u_int qup_page_num; /* page number */
	u_int qup_def_page; /* default page */
	u_int qup_page_region; /* region of page */
	u_int qup_beg_vaddr; /* begin virtual address */
	u_int qup_beg_paddr; /* begin physical address */

	u_int qup_num_uc_var; /* num of uC var in array */
	struct uof_uword_fixup *qup_uc_var;
					/* array of uC variables */
	u_int qup_num_imp_var; /* num of import var in array */
	struct uof_import_var *qup_imp_var;
					/* array of import variables */
	u_int qup_num_imp_expr; /* num of import expr in array */
	struct uof_uword_fixup *qup_imp_expr;
					/* array of import expressions */
	u_int qup_num_neigh_reg; /* num of neigh-reg in array */
	struct uof_uword_fixup *qup_neigh_reg;
					/* array of neigh-reg assignments */
	u_int qup_num_micro_words; /* number of microwords in the seg */

	u_int qup_num_uw_blocks; /* number of uword blocks */
	struct qat_uof_uword_block *qup_uw_blocks;
					/* array of uword blocks */
};

struct qat_uof_image {
	struct uof_image *qui_image; /* image pointer */
	struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE];
					/* array of pages */

	u_int qui_num_ae_reg; /* num of registers */
	struct uof_ae_reg *qui_ae_reg; /* array of registers */

	u_int qui_num_init_reg_sym; /* num of reg/sym init values */
	struct uof_init_reg_sym *qui_init_reg_sym;
					/* array of reg/sym init values */

	u_int qui_num_sbreak; /* num of sbreak values */
	struct qui_sbreak *qui_sbreak; /* array of sbreak values */

	u_int qui_num_uwords_used;
					/* highest uword address referenced + 1 */
};

struct qat_aefw_uof {
	size_t qafu_size; /* uof size */
	struct uof_obj_hdr *qafu_obj_hdr; /* UOF_OBJS */

	void *qafu_str_tab;
	size_t qafu_str_tab_size;

	u_int qafu_num_init_mem;
	struct uof_init_mem *qafu_init_mem;
	size_t qafu_init_mem_size;

	struct uof_var_mem_seg *qafu_var_mem_seg;

	struct qat_ae_batch_init_list qafu_lm_init[MAX_AE];
	size_t qafu_num_lm_init[MAX_AE];
	size_t qafu_num_lm_init_inst[MAX_AE];

	u_int qafu_num_imgs; /* number of uof images */
	struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX];
					/* uof images */
};

#define QAT_SERVICE_CRYPTO_A (1 << 0)
#define QAT_SERVICE_CRYPTO_B (1 << 1)

struct qat_admin_rings {
	uint32_t qadr_active_aes_per_accel;
	uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL];

	struct qat_dmamem qadr_dma;
	struct fw_init_ring_table *qadr_master_ring_tbl;
	struct fw_init_ring_table *qadr_cya_ring_tbl;
	struct fw_init_ring_table *qadr_cyb_ring_tbl;

	struct qat_ring *qadr_admin_tx;
	struct qat_ring *qadr_admin_rx;
};

struct qat_accel_init_cb {
	int qaic_status;
};

struct qat_admin_comms {
	struct qat_dmamem qadc_dma;
	struct qat_dmamem qadc_const_tbl_dma;
	struct qat_dmamem qadc_hb_dma;
};

#define QAT_PID_MINOR_REV 0xf
#define QAT_PID_MAJOR_REV (0xf << 4)

struct qat_suof_image {
	char *qsi_simg_buf;
	u_long qsi_simg_len;
	char *qsi_css_header;
	char *qsi_css_key;
	char *qsi_css_signature;
	char *qsi_css_simg;
	u_long qsi_simg_size;
	u_int qsi_ae_num;
	u_int qsi_ae_mask;
	u_int qsi_fw_type;
	u_long qsi_simg_name;
	u_long qsi_appmeta_data;
	struct qat_dmamem qsi_dma;
};

struct qat_aefw_suof {
	u_int qafs_file_id;
	u_int qafs_check_sum;
	char qafs_min_ver;
	char qafs_maj_ver;
	char qafs_fw_type;
	char *qafs_suof_buf;
	u_int qafs_suof_size;
	char *qafs_sym_str;
	u_int qafs_sym_size;
	u_int qafs_num_simgs;
	struct qat_suof_image *qafs_simg;
};

enum qat_sym_hash_algorithm {
	QAT_SYM_HASH_NONE = 0,
	QAT_SYM_HASH_MD5,
	QAT_SYM_HASH_SHA1,
	QAT_SYM_HASH_SHA224,
	QAT_SYM_HASH_SHA256,
	QAT_SYM_HASH_SHA384,
	QAT_SYM_HASH_SHA512,
	QAT_SYM_HASH_AES_XCBC,
	QAT_SYM_HASH_AES_CCM,
	QAT_SYM_HASH_AES_GCM,
	QAT_SYM_HASH_KASUMI_F9,
	QAT_SYM_HASH_SNOW3G_UIA2,
	QAT_SYM_HASH_AES_CMAC,
	QAT_SYM_HASH_AES_GMAC,
	QAT_SYM_HASH_AES_CBC_MAC
};

#define QAT_HASH_MD5_BLOCK_SIZE 64
#define QAT_HASH_MD5_DIGEST_SIZE 16
#define QAT_HASH_MD5_STATE_SIZE 16
#define QAT_HASH_SHA1_BLOCK_SIZE 64
#define QAT_HASH_SHA1_DIGEST_SIZE 20
#define QAT_HASH_SHA1_STATE_SIZE 20
#define QAT_HASH_SHA224_BLOCK_SIZE 64
#define QAT_HASH_SHA224_DIGEST_SIZE 28
#define QAT_HASH_SHA224_STATE_SIZE 32
#define QAT_HASH_SHA256_BLOCK_SIZE 64
#define QAT_HASH_SHA256_DIGEST_SIZE 32
#define QAT_HASH_SHA256_STATE_SIZE 32
#define QAT_HASH_SHA384_BLOCK_SIZE 128
#define QAT_HASH_SHA384_DIGEST_SIZE 48
#define QAT_HASH_SHA384_STATE_SIZE 64
#define QAT_HASH_SHA512_BLOCK_SIZE 128
#define QAT_HASH_SHA512_DIGEST_SIZE 64
#define QAT_HASH_SHA512_STATE_SIZE 64
#define QAT_HASH_XCBC_PRECOMP_KEY_NUM 3
#define QAT_HASH_XCBC_MAC_BLOCK_SIZE 16
#define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE 16
#define QAT_HASH_CMAC_BLOCK_SIZE 16
#define QAT_HASH_CMAC_128_DIGEST_SIZE 16
#define QAT_HASH_AES_CCM_BLOCK_SIZE 16
#define QAT_HASH_AES_CCM_DIGEST_SIZE 16
#define QAT_HASH_AES_GCM_BLOCK_SIZE 16
#define QAT_HASH_AES_GCM_DIGEST_SIZE 16
#define QAT_HASH_KASUMI_F9_BLOCK_SIZE 8
#define QAT_HASH_KASUMI_F9_DIGEST_SIZE 4
#define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE 8
#define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE 4
#define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE 16
#define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE 16
#define QAT_HASH_AES_GCM_ICV_SIZE_8 8
#define QAT_HASH_AES_GCM_ICV_SIZE_12 12
#define QAT_HASH_AES_GCM_ICV_SIZE_16 16
#define QAT_HASH_AES_CCM_ICV_SIZE_MIN 4
#define QAT_HASH_AES_CCM_ICV_SIZE_MAX 16
#define QAT_HASH_IPAD_BYTE 0x36
#define QAT_HASH_OPAD_BYTE 0x5c
#define QAT_HASH_IPAD_4_BYTES 0x36363636
#define QAT_HASH_OPAD_4_BYTES 0x5c5c5c5c
#define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA

#define QAT_SYM_XCBC_STATE_SIZE ((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3)
#define QAT_SYM_CMAC_STATE_SIZE ((QAT_HASH_CMAC_BLOCK_SIZE) * 3)

struct qat_sym_hash_alg_info {
	uint32_t qshai_digest_len; /* Digest length in bytes */
	uint32_t qshai_block_len; /* Block length in bytes */
	const uint8_t *qshai_init_state; /* Initialiser state for hash
					  * algorithm */
	uint32_t qshai_state_size; /* size of above state in bytes */

	const struct swcr_auth_hash *qshai_sah; /* software auth hash */
	uint32_t qshai_state_offset; /* offset to state in *_CTX */
	uint32_t qshai_state_word;
};

struct qat_sym_hash_qat_info {
	uint32_t qshqi_algo_enc; /* QAT Algorithm encoding */
	uint32_t qshqi_auth_counter; /* Counter value for Auth */
	uint32_t qshqi_state1_len; /* QAT state1 length in bytes */
	uint32_t qshqi_state2_len; /* QAT state2 length in bytes */
};

struct qat_sym_hash_def {
	const struct qat_sym_hash_alg_info *qshd_alg;
	const struct qat_sym_hash_qat_info *qshd_qat;
};

#define QAT_SYM_REQ_PARAMS_SIZE_MAX (24 + 32)
/* Reserve enough space for cipher and authentication request params */
/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */

#define QAT_SYM_REQ_PARAMS_SIZE_PADDED \
	roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN)
/* Pad out to 64-byte multiple to ensure optimal alignment of next field */
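
/*
 * Worked example (explanatory comment only): QAT_SYM_REQ_PARAMS_SIZE_MAX is
 * 24 + 32 = 56 bytes, and QAT_OPTIMAL_ALIGN is the 64-byte alignment referred
 * to above and checked by the CTASSERTs below, so
 * QAT_SYM_REQ_PARAMS_SIZE_PADDED rounds 56 up to 64.  The extra bytes are
 * padding only; the underlying request parameter sizes themselves are
 * guaranteed by the CTASSERTs in qat_hw*var.h.
 */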

#define QAT_SYM_KEY_TLS_PREFIX_SIZE (128)
/* Hash Prefix size in bytes for TLS (128 = MAX = SHA2 (384, 512)) */

#define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER \
	(QAT_SYM_KEY_TLS_PREFIX_SIZE * 2)
/* hash state prefix buffer structure that holds the maximum sized secret */

#define QAT_SYM_HASH_BUFFER_LEN QAT_HASH_SHA512_STATE_SIZE
/* Buffer length to hold 16 byte MD5 key and 20 byte SHA1 key */

struct qat_sym_bulk_cookie {
	uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
	/* memory block reserved for request params
	 * NOTE: Field must be correctly aligned in memory for access by QAT
	 * engine */
	struct qat_crypto *qsbc_crypto;
	struct qat_session *qsbc_session;
	/* Session context */
	void *qsbc_cb_tag;
	/* correlator supplied by the client */
	uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
	/* QAT request message */
} __aligned(QAT_OPTIMAL_ALIGN);

struct qat_sym_cookie {
	union qat_sym_cookie_u {
		/* should be 64-byte aligned */
		struct qat_sym_bulk_cookie qsc_bulk_cookie;
		/* symmetric bulk cookie */
#ifdef notyet
		struct qat_sym_key_cookie qsc_key_cookie;
		/* symmetric key cookie */
		struct qat_sym_nrbg_cookie qsc_nrbg_cookie;
		/* symmetric NRBG cookie */
#endif
	} u;

	/* should be 64-byte aligned */
	struct buffer_list_desc qsc_buf_list;
	struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* should be here */

	bus_dmamap_t *qsc_self_dmamap; /* self DMA mapping and
					  end of DMA region */

	uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];

	bus_dmamap_t qsc_buf_dmamap; /* qsc_flat_bufs DMA mapping */
	void *qsc_buf;

	bus_addr_t qsc_bulk_req_params_buf_paddr;
	bus_addr_t qsc_buffer_list_desc_paddr;
	bus_addr_t qsc_iv_buf_paddr;

#ifdef notyet
	uint64_t qsc_key_content_desc_paddr;
	uint64_t qsc_key_hash_state_buf_paddr;
	uint64_t qsc_key_ssl_key_in_paddr;
	uint64_t qsc_key_tls_key_in_paddr;
#endif
};

CTASSERT(offsetof(struct qat_sym_cookie,
    u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(sizeof(struct buffer_list_desc) == 16);

#define MAX_CIPHER_SETUP_BLK_SZ \
	(sizeof(struct hw_cipher_config) + \
	2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ)
#define MAX_HASH_SETUP_BLK_SZ sizeof(union hw_auth_algo_blk)

/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */
#define HASH_CONTENT_DESC_SIZE 176
#define CIPHER_CONTENT_DESC_SIZE 64

#define CONTENT_DESC_MAX_SIZE roundup( \
	HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, \
	QAT_OPTIMAL_ALIGN)
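
/*
 * Worked example (explanatory comment only): with the sizes above,
 * CONTENT_DESC_MAX_SIZE is roundup(176 + 64, QAT_OPTIMAL_ALIGN) =
 * roundup(240, 64) = 256 bytes, i.e. room for a hash content descriptor
 * followed by a cipher content descriptor, padded to the 64-byte alignment
 * used throughout this header.
 */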

#define QAT_MAX_AAD_SIZE_BYTES 256

struct qat_crypto_desc {
	uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE];
	/* used only for qat 1.5 */
	uint8_t qcd_hash_state_prefix_buf[QAT_MAX_AAD_SIZE_BYTES];

	enum fw_slice qcd_slices[MAX_FW_SLICE];
	enum fw_la_cmd_id qcd_cmd_id;
	enum hw_cipher_dir qcd_cipher_dir;

	bus_addr_t qcd_desc_paddr;
	bus_addr_t qcd_hash_state_paddr;

	/* content desc info */
	uint8_t qcd_hdr_sz; /* in quad words */
	uint8_t qcd_hw_blk_sz; /* in quad words */
	/* hash info */
	uint8_t qcd_state_storage_sz; /* in quad words */
	/* cipher info */
	uint16_t qcd_cipher_blk_sz; /* in bytes */
	uint16_t qcd_auth_sz; /* in bytes */

	uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
} __aligned(QAT_OPTIMAL_ALIGN);

/* should be aligned to 64 bytes */
struct qat_session {
	struct qat_crypto_desc qs_dec_desc; /* should be at top of struct */
	/* decrypt, or auth then decrypt, or auth */

	struct qat_crypto_desc qs_enc_desc;
	/* encrypt, or encrypt then auth */

	uint32_t qs_lid;
	uint32_t qs_status;
#define QAT_SESSION_STATUS_ACTIVE (1 << 0)
#define QAT_SESSION_STATUS_FREEING (1 << 1)
	uint32_t qs_inflight;

	kmutex_t qs_session_mtx;
};

CTASSERT(offsetof(struct qat_session, qs_dec_desc) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_session, qs_enc_desc) % QAT_OPTIMAL_ALIGN == 0);

struct qat_crypto_bank {
	uint16_t qcb_bank;

	struct qat_ring *qcb_sym_tx;
	struct qat_ring *qcb_sym_rx;

	struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE];
	struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE];
	uint32_t qcb_symck_free_count;

	kmutex_t qcb_bank_mtx;

	struct qat_crypto *qcb_crypto;

	char qcb_ring_names[2][QAT_RING_NAME_SIZE]; /* sym tx,rx */
#ifdef QAT_EVENT_COUNTERS
	char qcb_ev_no_symck_name[QAT_EV_NAME_SIZE];
	struct evcnt qcb_ev_no_symck;
#endif
};


struct qat_crypto {
	struct qat_softc *qcy_sc;
	uint32_t qcy_bank_mask;
	uint16_t qcy_num_banks;

	int32_t qcy_cid; /* OpenCrypto driver ID */

	struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */

	struct qat_dmamem qcy_session_dmamems[QAT_NSESSION];
	struct qat_session *qcy_sessions[QAT_NSESSION];
	struct qat_session *qcy_session_free[QAT_NSESSION];
	uint32_t qcy_session_free_count;

	kmutex_t qcy_crypto_mtx;

#ifdef QAT_EVENT_COUNTERS
	char qcy_ev_new_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_new_sess;
	char qcy_ev_free_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_free_sess;
	char qcy_ev_no_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_no_sess;
#endif
};

struct qat_hw {
	int8_t qhw_sram_bar_id;
	int8_t qhw_misc_bar_id;
	int8_t qhw_etr_bar_id;

	bus_size_t qhw_cap_global_offset;
	bus_size_t qhw_ae_offset;
	bus_size_t qhw_ae_local_offset;
	bus_size_t qhw_etr_bundle_size;

	/* crypto processing callbacks */
	size_t qhw_crypto_opaque_offset;
	void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *,
	    struct qat_session *, struct qat_crypto_desc const *,
	    struct qat_sym_cookie *, struct cryptodesc *, struct cryptodesc *,
	    bus_addr_t);
	void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *,
	    struct qat_crypto_desc *, struct cryptoini *, struct cryptoini *);

	uint8_t qhw_num_banks; /* max number of banks */
	uint8_t qhw_num_ap_banks; /* max number of AutoPush banks */
	uint8_t qhw_num_rings_per_bank; /* rings per bank */
	uint8_t qhw_num_accel; /* max number of accelerators */
	uint8_t qhw_num_engines; /* max number of accelerator engines */
	uint8_t qhw_tx_rx_gap;
	uint32_t qhw_tx_rings_mask;
	uint32_t qhw_clock_per_sec;
	bool qhw_fw_auth;
	uint32_t qhw_fw_req_size;
	uint32_t qhw_fw_resp_size;

	uint8_t qhw_ring_sym_tx;
	uint8_t qhw_ring_sym_rx;
	uint8_t qhw_ring_asym_tx;
	uint8_t qhw_ring_asym_rx;

	/* MSIx */
	uint32_t qhw_msix_ae_vec_gap; /* gap to ae vec from bank */

	const char *qhw_mof_fwname;
	const char *qhw_mmp_fwname;

	uint32_t qhw_prod_type; /* cpu type */

	/* setup callbacks */
	uint32_t (*qhw_get_accel_mask)(struct qat_softc *);
	uint32_t (*qhw_get_ae_mask)(struct qat_softc *);
	enum qat_sku (*qhw_get_sku)(struct qat_softc *);
	uint32_t (*qhw_get_accel_cap)(struct qat_softc *);
	const char *(*qhw_get_fw_uof_name)(struct qat_softc *);
	void (*qhw_enable_intr)(struct qat_softc *);
	void (*qhw_init_etr_intr)(struct qat_softc *, int);
	int (*qhw_init_admin_comms)(struct qat_softc *);
	int (*qhw_send_admin_init)(struct qat_softc *);
	int (*qhw_init_arb)(struct qat_softc *);
	void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **);
	void (*qhw_enable_error_correction)(struct qat_softc *);
	int (*qhw_check_uncorrectable_error)(struct qat_softc *);
	void (*qhw_print_err_registers)(struct qat_softc *);
	void (*qhw_disable_error_interrupts)(struct qat_softc *);
	int (*qhw_check_slice_hang)(struct qat_softc *);
	int (*qhw_set_ssm_wdtimer)(struct qat_softc *);
};


/* sc_flags */
#define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT (1 << 0)
#define QAT_FLAG_SHRAM_WAIT_READY (1 << 1)

/* sc_accel_cap */
#define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC (1 << 0)
#define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC (1 << 1)
#define QAT_ACCEL_CAP_CIPHER (1 << 2)
#define QAT_ACCEL_CAP_AUTHENTICATION (1 << 3)
#define QAT_ACCEL_CAP_REGEX (1 << 4)
#define QAT_ACCEL_CAP_COMPRESSION (1 << 5)
#define QAT_ACCEL_CAP_LZS_COMPRESSION (1 << 6)
#define QAT_ACCEL_CAP_RANDOM_NUMBER (1 << 7)
#define QAT_ACCEL_CAP_ZUC (1 << 8)
#define QAT_ACCEL_CAP_SHA3 (1 << 9)
#define QAT_ACCEL_CAP_KPT (1 << 10)

#define QAT_ACCEL_CAP_BITS \
	"\177\020" \
	"b\x0a" "KPT\0" \
	"b\x09" "SHA3\0" \
	"b\x08" "ZUC\0" \
	"b\x07" "RANDOM_NUMBER\0" \
	"b\x06" "LZS_COMPRESSION\0" \
	"b\x05" "COMPRESSION\0" \
	"b\x04" "REGEX\0" \
	"b\x03" "AUTHENTICATION\0" \
	"b\x02" "CIPHER\0" \
	"b\x01" "CRYPTO_ASYMMETRIC\0" \
	"b\x00" "CRYPTO_SYMMETRIC\0"

#define QAT_HI_PRIO_RING_WEIGHT 0xfc
#define QAT_LO_PRIO_RING_WEIGHT 0xfe
#define QAT_DEFAULT_RING_WEIGHT 0xff
#define QAT_DEFAULT_PVL 0

struct qat_softc {
	struct device *sc_dev;

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	bus_space_tag_t sc_csrt[MAX_BARS];
	bus_space_handle_t sc_csrh[MAX_BARS];
	bus_size_t sc_csrs[MAX_BARS];

	bus_dma_tag_t sc_dmat;

	uint32_t sc_ae_num;
	uint32_t sc_ae_mask;

	struct qat_crypto sc_crypto; /* crypto services */

	struct qat_hw sc_hw;

	uint8_t sc_rev;
	enum qat_sku sc_sku;
	uint32_t sc_flags;

	uint32_t sc_accel_num;
	uint32_t sc_accel_mask;
	uint32_t sc_accel_cap;

	struct qat_admin_rings sc_admin_rings; /* use only for qat 1.5 */
	struct qat_admin_comms sc_admin_comms; /* use only for qat 1.7 */

	/* ETR */
	struct qat_bank *sc_etr_banks; /* array of etr banks */
	struct qat_ap_bank *sc_etr_ap_banks; /* array of etr auto push banks */

	/* AE */
	struct qat_ae sc_ae[MAX_NUM_AE];

	/* Interrupt */
	pci_intr_handle_t *sc_ih; /* banks and ae cluster ih */
	void *sc_ae_ih_cookie; /* ae cluster ih cookie */

	/* Firmware */
	void *sc_fw_mof; /* mof via firmload(9) */
	size_t sc_fw_mof_size; /* mof size */
	struct qat_mof sc_mof; /* mof sections */

	const char *sc_fw_uof_name; /* uof/suof name in mof */

	void *sc_fw_uof; /* uof head */
	size_t sc_fw_uof_size; /* uof size */
	struct qat_aefw_uof sc_aefw_uof; /* UOF_OBJS in uof */

	void *sc_fw_suof; /* suof head */
	size_t sc_fw_suof_size; /* suof size */
	struct qat_aefw_suof sc_aefw_suof; /* suof context */

	void *sc_fw_mmp; /* mmp via firmload(9) */
	size_t sc_fw_mmp_size; /* mmp size */
};

#define QAT_DUMP_DESC __BIT(0)
#define QAT_DUMP_RING __BIT(1)
#define QAT_DUMP_RING_MSG __BIT(2)
#define QAT_DUMP_PCI __BIT(3)
#define QAT_DUMP_AEFW __BIT(4)

//#define QAT_DUMP (__BITS(0, 4))

#ifdef QAT_DUMP

#include <sys/endian.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#endif

/*
 * To avoid spinout detection in mutex_enter, yield the CPU to other
 * threads when QAT_DUMP is defined: the printf output for QAT_DUMP_PCI
 * takes a lot of CPU time, and the configroot thread running qat_init()
 * holds kernel_lock while the uvm scheduler is not yet working at that
 * point.
 */
#define QAT_YIELD() yield()

extern int qat_dump;

void qat_dump_raw(int, const char *, void *, size_t);
void qat_dump_ring(int, int);
void qat_dump_mbuf(struct mbuf *, int, int);

static inline void
qat_print_sym(uintptr_t pc)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;

	db_find_sym_and_offset((db_expr_t)pc, &name, &offset);

	if (name != NULL) {
		printf("%zx (%s+%zx)", (size_t)pc, name, (size_t)offset);
		return;
	}
#endif
	printf("%zx", (size_t)pc);
}

static inline void
qat_dump_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
    int value)
{
pc:
	if ((qat_dump & QAT_DUMP_PCI) == 0)
		return;
	printf("[qat_pci]: w %02x+%04zx %08x ", baroff, (size_t)offset, value);
	qat_print_sym((uintptr_t)&&pc);
	printf("\n");
}

#else /* QAT_DUMP */
#define QAT_YIELD()
#endif /* QAT_DUMP */

static inline void
qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
    uint32_t value)
{

	KASSERT(baroff >= 0 && baroff < MAX_BARS);

	bus_space_write_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset, value);
#ifdef QAT_DUMP
	qat_dump_bar_write_4(sc, baroff, offset, value);
#endif
}

static inline uint32_t
qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset)
{

	KASSERT(baroff >= 0 && baroff < MAX_BARS);

	return bus_space_read_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset);
}

static inline void
qat_misc_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value);
}

static inline uint32_t
qat_misc_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset);
}

static inline void
qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t value)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg |= value;
	qat_misc_write_4(sc, offset, reg);
}

static inline void
qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t mask)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg &= mask;
	qat_misc_write_4(sc, offset, reg);
}
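
/*
 * Usage note (explanatory comment only): qat_misc_read_write_or_4() is a
 * read-modify-write that sets the given bits, while
 * qat_misc_read_write_and_4() ANDs the register with the given mask, so a
 * caller that wants to clear bits is expected to pass the complement,
 * e.g. qat_misc_read_write_and_4(sc, offset, ~bits).  Neither helper takes
 * a lock, so concurrent updates of the same register presumably have to be
 * serialized by the caller.
 */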

static inline void
qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value);
}

static inline uint32_t
qat_etr_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset);
}

static inline void
qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
    uint32_t value)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset,
	    value);
}

static inline uint32_t
qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset);
}

static inline void
qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
    uint32_t value)
{
	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) |
	    __SHIFTIN(offset, AE_XFER_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value);
}

static inline void
qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value);
}

static inline uint32_t
qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset);
}


static inline void
qat_etr_bank_write_4(struct qat_softc *sc, int bank,
    bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset,
	    value);
}

static inline uint32_t
qat_etr_bank_read_4(struct qat_softc *sc, int bank,
    bus_size_t offset)
{

	return qat_etr_read_4(sc,
	    sc->sc_hw.qhw_etr_bundle_size * bank + offset);
}

static inline void
qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank,
    bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value);
}

static inline uint32_t
qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank,
    bus_size_t offset)
{

	return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset);
}


static inline void
qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring,
    bus_size_t offset, uint32_t value)
{

	qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value);
}

static inline uint32_t
qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring,
    bus_size_t offset)
{

	return qat_etr_bank_read_4(sc, bank, (ring << 2) + offset);
}

static inline void
qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring,
    uint64_t value)
{
	uint32_t lo, hi;

	lo = (uint32_t)(value & 0xffffffff);
	hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi);
}

static inline void
qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET +
	    (ARB_REG_SLOT * index), value);
}

static inline void
qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

static inline void
qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

void *		qat_alloc_mem(size_t);
void		qat_free_mem(void *);
void		qat_free_dmamem(struct qat_softc *, struct qat_dmamem *);
int		qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *,
		    bus_size_t, bus_size_t);

int		qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t,
		    uint32_t, qat_cb_t, void *, const char *,
		    struct qat_ring **);
int		qat_etr_put_msg(struct qat_softc *, struct qat_ring *,
		    uint32_t *);

void		qat_memcpy_htobe64(void *, const void *, size_t);
void		qat_memcpy_htobe32(void *, const void *, size_t);
void		qat_memcpy_htobe(void *, const void *, size_t, uint32_t);
void		qat_crypto_hmac_precompute(struct qat_crypto_desc *,
		    struct cryptoini *cria, struct qat_sym_hash_def const *,
		    uint8_t *, uint8_t *);
uint16_t	qat_crypto_load_cipher_cryptoini(
		    struct qat_crypto_desc *, struct cryptoini *);
uint16_t	qat_crypto_load_auth_cryptoini(
		    struct qat_crypto_desc *, struct cryptoini *,
		    struct qat_sym_hash_def const **);

#endif