/*	$NetBSD: qat.c,v 1.2 2019/12/02 03:06:51 msaitoh Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.2 2019/12/02 03:06:51 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/module.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/bitops.h>
#include <sys/atomic.h>
#include <sys/mbuf.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/md5.h>
#include <sys/sha1.h>
#include <sys/sha2.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

/* XXX same as sys/arch/x86/x86/via_padlock.c */
#include <opencrypto/cryptosoft_xform.c>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "qatreg.h"
#include "qatvar.h"
#include "qat_aevar.h"

extern struct qat_hw qat_hw_c2xxx;
extern struct qat_hw qat_hw_c3xxx;
extern struct qat_hw qat_hw_c62x;
extern struct qat_hw qat_hw_d15xx;

static const struct qat_product {
	pci_vendor_id_t qatp_vendor;
	pci_product_id_t qatp_product;
	const char *qatp_name;
	enum qat_chip_type qatp_chip;
	const struct qat_hw *qatp_hw;
} qat_products[] = {

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
	  "Intel C2000 QuickAssist Physical Function",
	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
	  "Intel C3000 QuickAssist Physical Function",
	  QAT_CHIP_C3XXX, &qat_hw_c3xxx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF,
	  "Intel C3000 QuickAssist Virtual Function",
	  QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
	  "Intel C620/Xeon D-2100 QuickAssist Physical Function",
	  QAT_CHIP_C62X, &qat_hw_c62x },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF,
	  "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
	  QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
	  "Intel Xeon D-1500 QuickAssist Physical Function",
	  QAT_CHIP_D15XX, &qat_hw_d15xx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF,
	  "Intel Xeon D-1500 QuickAssist Virtual Function",
	  QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
#endif
	{ 0, 0, NULL, 0, NULL },
};

/* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */
static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
	0x01, 0x23, 0x45, 0x67,
	0x89, 0xab, 0xcd, 0xef,
	0xfe, 0xdc, 0xba, 0x98,
	0x76, 0x54, 0x32, 0x10,
};

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
	0x67, 0x45, 0x23, 0x01,
	0xef, 0xcd, 0xab, 0x89,
	0x98, 0xba, 0xdc, 0xfe,
	0x10, 0x32, 0x54, 0x76,
	0xc3, 0xd2, 0xe1, 0xf0
};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67,
	0xbb, 0x67, 0xae, 0x85,
	0x3c, 0x6e, 0xf3, 0x72,
	0xa5, 0x4f, 0xf5, 0x3a,
	0x51, 0x0e, 0x52, 0x7f,
	0x9b, 0x05, 0x68, 0x8c,
	0x1f, 0x83, 0xd9, 0xab,
	0x5b, 0xe0, 0xcd, 0x19
};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
};

/* Hash Algorithm specific structure */

static const struct qat_sym_hash_alg_info md5_info = {
	QAT_HASH_MD5_DIGEST_SIZE,
	QAT_HASH_MD5_BLOCK_SIZE,
	md5_initial_state,
	QAT_HASH_MD5_STATE_SIZE,
	&swcr_auth_hash_hmac_md5_96,
	offsetof(MD5_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha1_info = {
	QAT_HASH_SHA1_DIGEST_SIZE,
	QAT_HASH_SHA1_BLOCK_SIZE,
	sha1_initial_state,
	QAT_HASH_SHA1_STATE_SIZE,
	&swcr_auth_hash_hmac_sha1_96,
	offsetof(SHA1_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha256_info = {
	QAT_HASH_SHA256_DIGEST_SIZE,
	QAT_HASH_SHA256_BLOCK_SIZE,
	sha256_initial_state,
	QAT_HASH_SHA256_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_256,
	offsetof(SHA256_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha384_info = {
	QAT_HASH_SHA384_DIGEST_SIZE,
	QAT_HASH_SHA384_BLOCK_SIZE,
	sha384_initial_state,
	QAT_HASH_SHA384_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_384,
	offsetof(SHA384_CTX, state),
	8,
};

static const struct qat_sym_hash_alg_info sha512_info = {
	QAT_HASH_SHA512_DIGEST_SIZE,
	QAT_HASH_SHA512_BLOCK_SIZE,
	sha512_initial_state,
	QAT_HASH_SHA512_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_512,
	offsetof(SHA512_CTX, state),
	8,
};

static const struct qat_sym_hash_alg_info aes_gcm_info = {
	QAT_HASH_AES_GCM_DIGEST_SIZE,
	QAT_HASH_AES_GCM_BLOCK_SIZE,
	NULL, 0,
	NULL, 0, 0, /* XXX */
};

/* Hash QAT specific structures */

static const struct qat_sym_hash_qat_info md5_config = {
	HW_AUTH_ALGO_MD5,
	QAT_HASH_MD5_BLOCK_SIZE,
	HW_MD5_STATE1_SZ,
	HW_MD5_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha1_config = {
	HW_AUTH_ALGO_SHA1,
	QAT_HASH_SHA1_BLOCK_SIZE,
	HW_SHA1_STATE1_SZ,
	HW_SHA1_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha256_config = {
	HW_AUTH_ALGO_SHA256,
	QAT_HASH_SHA256_BLOCK_SIZE,
	HW_SHA256_STATE1_SZ,
	HW_SHA256_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha384_config = {
	HW_AUTH_ALGO_SHA384,
	QAT_HASH_SHA384_BLOCK_SIZE,
	HW_SHA384_STATE1_SZ,
	HW_SHA384_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha512_config = {
	HW_AUTH_ALGO_SHA512,
	QAT_HASH_SHA512_BLOCK_SIZE,
	HW_SHA512_STATE1_SZ,
	HW_SHA512_STATE2_SZ
};

static const struct qat_sym_hash_qat_info aes_gcm_config = {
	HW_AUTH_ALGO_GALOIS_128,
	0,
	HW_GALOIS_128_STATE1_SZ,
	HW_GALOIS_H_SZ +
	HW_GALOIS_LEN_A_SZ +
	HW_GALOIS_E_CTR0_SZ
};

static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
	[QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
};

const struct qat_product *
		qat_lookup(const struct pci_attach_args *);
int		qat_match(struct device *, struct cfdata *, void *);
void		qat_attach(struct device *, struct device *, void *);
void		qat_init(struct device *);
int		qat_start(struct device *);
int		qat_detach(struct device *, int);

int		qat_alloc_msix_intr(struct qat_softc *,
		    struct pci_attach_args *);
void *		qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
		    int (*)(void *), void *, const char *, int);
int		qat_setup_msix_intr(struct qat_softc *);

int		qat_etr_init(struct qat_softc *);
int		qat_etr_bank_init(struct qat_softc *, int);

int		qat_etr_ap_bank_init(struct qat_softc *);
void		qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
void		qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
		    uint32_t, int);
void		qat_etr_ap_bank_setup_ring(struct qat_softc *,
		    struct qat_ring *);
int		qat_etr_verify_ring_size(uint32_t, uint32_t);

int		qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
		    struct qat_ring *);
int		qat_etr_bank_intr(void *);

void		qat_arb_update(struct qat_softc *, struct qat_bank *);

struct qat_sym_cookie *
		qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
void		qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
		    struct qat_sym_cookie *);
int		qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
		    struct qat_sym_cookie *, struct qat_crypto_desc const *,
		    uint8_t *, int, bus_addr_t *);
int		qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
		    struct cryptodesc *, struct qat_crypto_desc const *);
int		qat_crypto_process(void *, struct cryptop *, int);
int		qat_crypto_setup_ring(struct qat_softc *,
		    struct qat_crypto_bank *);
int		qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
int		qat_crypto_free_session0(struct qat_crypto *,
		    struct qat_session *);
void		qat_crypto_check_free_session(struct qat_crypto *,
		    struct qat_session *);
int		qat_crypto_free_session(void *, uint64_t);
int		qat_crypto_bank_init(struct qat_softc *,
		    struct qat_crypto_bank *);
int		qat_crypto_init(struct qat_softc *);
int		qat_crypto_start(struct qat_softc *);
int		qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);

CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
    qat_match, qat_attach, qat_detach, NULL);

struct qat_softc *gsc = NULL;

#ifdef QAT_DUMP
int qat_dump = QAT_DUMP;
#endif

const struct qat_product *
qat_lookup(const struct pci_attach_args *pa)
{
	const struct qat_product *qatp;

	for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
		if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
			return qatp;
	}
	return NULL;
}

int
qat_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (qat_lookup(pa) != NULL)
		return 1;

	return 0;
}

void
qat_attach(struct device *parent, struct device *self, void *aux)
{
	struct qat_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct qat_product *qatp;
	char cap[256];
	pcireg_t cmd, memtype, msixoff, fusectl;
	bus_size_t msixtbl_offset;
	int i, bar, msixtbl_bar;

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_pcitag = pa->pa_tag;

	gsc = sc; /* for debug */

	qatp = qat_lookup(pa);
	KASSERT(qatp != NULL);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	aprint_naive(": Crypto processor\n");
	sc->sc_rev = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);

	memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));

	/* Determine active accelerators and engines */
	sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
	sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);

	sc->sc_accel_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
		if (sc->sc_accel_mask & (1 << i))
			sc->sc_accel_num++;
	}
	sc->sc_ae_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
		if (sc->sc_ae_mask & (1 << i)) {
			sc->sc_ae_num++;
		}
	}

	if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
		aprint_error_dev(sc->sc_dev, "couldn't find acceleration\n");
		goto fail;
	}

	KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
	KASSERT(sc->sc_ae_num <= MAX_NUM_AE);

	/* Determine SKU and capabilities */
	sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
	sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
	sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);

	aprint_normal_dev(sc->sc_dev,
	    "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
	    sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
	    sc->sc_ae_num, sc->sc_ae_mask);
	snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
	aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);

	/* Map BARs */

	msixtbl_bar = 0;
	msixtbl_offset = 0;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
		pcireg_t msixtbl;
		msixtbl = pci_conf_read(pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		msixtbl_bar = PCI_MAPREG_START +
		    ((msixtbl & PCI_MSIX_TBLBIR_MASK) << 2);
	}

	i = 0;
	if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
		KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
		fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
		/* Skip SRAM BAR */
		i = (fusectl & FUSECTL_MASK) ? 1 : 0;
	}
	for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
		bus_size_t size;
		bus_addr_t addr;

		if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
			continue;

		if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
			continue;

		/* MSI-X table will be mapped by pci_msix_alloc_map */
		if (bar == msixtbl_bar)
			size = msixtbl_offset;
		else
			size = 0;

		if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
		    &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't map bar 0x%02x\n", bar);
			goto fail;
		}

		aprint_verbose_dev(sc->sc_dev,
		    "region #%d bar 0x%02x size 0x%x at 0x%llx"
		    " mapped to %p\n", i, bar,
		    (int)sc->sc_csrs[i], (unsigned long long)addr,
		    bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));

		i++;
		if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
			bar += 4;
	}

	/* XXX Enable advanced error reporting */

	/* Enable bus mastering */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (qat_alloc_msix_intr(sc, pa))
		goto fail;

	config_mountroot(self, qat_init);

fail:
	/* XXX */
	return;
}

void
qat_init(struct device *self)
{
	int error;
	struct qat_softc *sc = device_private(self);

	aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
	error = qat_etr_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize ETR: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize admin comms: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
	if (sc->sc_hw.qhw_init_arb != NULL &&
	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize hw arbiter: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
	error = qat_ae_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize Acceleration Engine: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
	error = qat_aefw_load(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not load firmware: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
	error = qat_setup_msix_intr(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not setup interrupts: %d\n", error);
		return;
	}

	sc->sc_hw.qhw_enable_intr(sc);

	error = qat_crypto_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize service: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
	if (sc->sc_hw.qhw_enable_error_correction != NULL)
		sc->sc_hw.qhw_enable_error_correction(sc);

	aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize watchdog timer: %d\n", error);
		return;
	}

	error = qat_start(self);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not start: %d\n", error);
		return;
	}
}

int
qat_start(struct device *self)
{
	struct qat_softc *sc = device_private(self);
	int error;

	error = qat_ae_start(sc);
	if (error)
		return error;

	if (sc->sc_hw.qhw_send_admin_init != NULL &&
	    (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
		return error;
	}

	error = qat_crypto_start(sc);
	if (error)
		return error;

	return 0;
}

int
qat_detach(struct device *self, int flags)
{

	return 0;
}

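/*
 * qat_alloc_mem()/qat_free_mem() wrap kmem_zalloc()/kmem_free(): the
 * allocation size is stored in a size_t header just before the pointer
 * returned to the caller, so qat_free_mem() can recover it without the
 * callers having to track sizes themselves.
 */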
void *
qat_alloc_mem(size_t size)
{
	size_t *sptr;
	sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
	*sptr = size;
	return ++sptr;
}

void
qat_free_mem(void *ptr)
{
	size_t *sptr = ptr, size;
	size = *(--sptr);
	kmem_free(sptr, size + sizeof(size));
}

void
qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
{

	bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
	explicit_memset(qdm, 0, sizeof(*qdm));
}

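/*
 * Allocate a physically contiguous DMA area in the usual four bus_dma
 * steps (alloc, map, create, load), unwinding all prior steps on
 * failure.  The area is mapped coherently and as a single segment.
 */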
int
qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
    bus_size_t size, bus_size_t alignment)
{
	int error = 0, nseg;

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
	    0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't allocate dmamem, error = %d\n", error);
		goto fail_0;
	}
	KASSERT(nseg == 1);
	error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
	    nseg, size, &qdm->qdm_dma_vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't map dmamem, error = %d\n", error);
		goto fail_1;
	}
	qdm->qdm_dma_size = size;
	error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
	    0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't create dmamem map, error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
	    qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't load dmamem map, error = %d\n", error);
		goto fail_3;
	}

	return 0;
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	qdm->qdm_dma_map = NULL;
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
	qdm->qdm_dma_vaddr = NULL;
	qdm->qdm_dma_size = 0;
fail_1:
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
fail_0:
	return error;
}

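/*
 * Allocate one MSI-X vector per ring bank plus one for the AE cluster,
 * leaving the hardware-specific vector gap (qhw_msix_ae_vec_gap)
 * between the bank vectors and the AE vector.
 */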
int
qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
{
	u_int *ih_map, vec;
	int error, count, ihi;

	count = sc->sc_hw.qhw_num_banks + 1;
	ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
	ihi = 0;

	for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
		ih_map[ihi++] = vec;

	vec += sc->sc_hw.qhw_msix_ae_vec_gap;
	ih_map[ihi++] = vec;

	error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
	qat_free_mem(ih_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
		    count, error);
	}

	return error;
}

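/*
 * Establish a single MSI-X vector with an instance-qualified name and
 * spread the vectors round-robin across CPUs via interrupt_distribute();
 * a distribution failure is logged but not fatal.
 */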
void *
qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
    int (*func)(void *), void *arg,
    const char *name, int index)
{
	kcpuset_t *affinity;
	int error;
	char buf[PCI_INTRSTR_LEN];
	char intrxname[INTRDEVNAMEBUF];
	const char *intrstr;
	void *cookie;

	snprintf(intrxname, sizeof(intrxname), "%s%s%d",
	    device_xname(sc->sc_dev), name, index);

	intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));

	pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);

	cookie = pci_intr_establish_xname(sc->sc_pc, ih,
	    IPL_NET, func, arg, intrxname);

	aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
	    name, index, intrstr);

	kcpuset_create(&affinity, true);
	kcpuset_set(affinity, index % ncpu);
	error = interrupt_distribute(cookie, affinity, NULL);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't distribute interrupt: %s%d\n", name, index);
	}
	kcpuset_destroy(affinity);

	return cookie;
}

int
qat_setup_msix_intr(struct qat_softc *sc)
{
	int i;
	pci_intr_handle_t ih;

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		struct qat_bank *qb = &sc->sc_etr_banks[i];
		ih = sc->sc_ih[i];

		qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
		    qat_etr_bank_intr, qb, "bank", i);
		if (qb->qb_ih_cookie == NULL)
			return ENOMEM;
	}

	sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
	    qat_ae_cluster_intr, sc, "aeclust", 0);
	if (sc->sc_ae_ih_cookie == NULL)
		return ENOMEM;

	return 0;
}

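/*
 * Allocate and initialize all ring banks and, where the device has
 * them, the AP banks whose nearly-full/nearly-empty masks steer ring
 * events to AE mailboxes.
 */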
int
qat_etr_init(struct qat_softc *sc)
{
	int i;
	int error = 0;

	sc->sc_etr_banks = qat_alloc_mem(
	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		error = qat_etr_bank_init(sc, i);
		if (error) {
			goto fail;
		}
	}

	if (sc->sc_hw.qhw_num_ap_banks) {
		sc->sc_etr_ap_banks = qat_alloc_mem(
		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
		error = qat_etr_ap_bank_init(sc);
		if (error) {
			goto fail;
		}
	}

	return 0;

fail:
	if (sc->sc_etr_banks != NULL) {
		qat_free_mem(sc->sc_etr_banks);
		sc->sc_etr_banks = NULL;
	}
	if (sc->sc_etr_ap_banks != NULL) {
		qat_free_mem(sc->sc_etr_ap_banks);
		sc->sc_etr_ap_banks = NULL;
	}
	return error;
}

int
qat_etr_bank_init(struct qat_softc *sc, int bank)
{
	struct qat_bank *qb = &sc->sc_etr_banks[bank];
	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;

	KASSERT(bank < sc->sc_hw.qhw_num_banks);

	mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);

	qb->qb_sc = sc;
	qb->qb_bank = bank;
	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
	QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
	    qb->qb_ev_rxintr_name, "bank%d rxintr", bank);

	/* Clean CSRs for all rings within the bank */
	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
		struct qat_ring *qr = &qb->qb_et_rings[i];

		qat_etr_bank_ring_write_4(sc, bank, i,
		    ETR_RING_CONFIG, 0);
		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);

		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
		} else if (sc->sc_hw.qhw_tx_rings_mask &
		    (1 << (i - tx_rx_gap))) {
			/* Share the inflight counter between tx and rx */
			qr->qr_inflight =
			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
		}
	}

	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
		sc->sc_hw.qhw_init_etr_intr(sc, bank);
	} else {
		/* common code in qat 1.7 */
		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
		    ETR_INT_REG_CLEAR_MASK);
		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
		    ETR_RINGS_PER_INT_SRCSEL; i++) {
			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
			    ETR_INT_SRCSEL_MASK);
		}
	}

	return 0;
}

int
qat_etr_ap_bank_init(struct qat_softc *sc)
{
	int ap_bank;

	for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
		struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];

		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
		    ETR_AP_NF_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
		    ETR_AP_NE_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);

		memset(qab, 0, sizeof(*qab));
	}

	return 0;
}

void
qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
{
	if (set_mask)
		*ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
	else
		*ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
}

void
qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
    uint32_t ring, int set_dest)
{
	uint32_t ae_mask;
	uint8_t mailbox, ae, nae;
	uint8_t *dest = (uint8_t *)ap_dest;

	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);

	nae = 0;
	ae_mask = sc->sc_ae_mask;
	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
		if ((ae_mask & (1 << ae)) == 0)
			continue;

		if (set_dest) {
			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
			    ETR_AP_DEST_ENABLE;
		} else {
			dest[nae] = 0;
		}
		nae++;
		if (nae == ETR_MAX_AE_PER_MAILBOX)
			break;
	}
}

void
qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
{
	struct qat_ap_bank *qab;
	int ap_bank;

	if (sc->sc_hw.qhw_num_ap_banks == 0)
		return;

	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
	KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
	qab = &sc->sc_etr_ap_banks[ap_bank];

	if (qr->qr_cb == NULL) {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
		if (!qab->qab_ne_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
			    qab->qab_ne_dest);
		}
	} else {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
		if (!qab->qab_nf_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
			    qab->qab_nf_dest);
		}
	}
}

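/*
 * Map a (message size, message count) pair back to the hardware ring
 * size encoding; fall back to the default encoding when the product
 * does not exactly match a supported ring size.
 */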
int
qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
{
	int i = QAT_MIN_RING_SIZE;

	for (; i <= QAT_MAX_RING_SIZE; i++)
		if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return QAT_DEFAULT_RING_SIZE;
}

int
qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
    uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
    const char *name, struct qat_ring **rqr)
{
	struct qat_bank *qb;
	struct qat_ring *qr = NULL;
	int error;
	uint32_t ring_size_bytes, ring_config;
	uint64_t ring_base;
	uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
	uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;

	KASSERT(bank < sc->sc_hw.qhw_num_banks);

	/* Allocate a ring from the specified bank */
	qb = &sc->sc_etr_banks[bank];

	if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
		return EINVAL;
	if (qb->qb_allocated_rings & (1 << ring))
		return ENOENT;
	qr = &qb->qb_et_rings[ring];
	qb->qb_allocated_rings |= 1 << ring;

	/* Initialize the allocated ring */
	qr->qr_ring = ring;
	qr->qr_bank = bank;
	qr->qr_name = name;
	qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
	qr->qr_ring_mask = (1 << ring);
	qr->qr_cb = cb;
	qr->qr_cb_arg = cb_arg;
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
	    qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
	    qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
	    qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
	    qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);

	/* Setup the shadow variables */
	qr->qr_head = 0;
	qr->qr_tail = 0;
	qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
	qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);

	/*
	 * To make sure that the ring is aligned to the ring size, allocate
	 * at least 4k and then tell the user it is smaller.
	 */
	ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
	ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
	error = qat_alloc_dmamem(sc, &qr->qr_dma,
	    ring_size_bytes, ring_size_bytes);
	if (error)
		return error;

	KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);

	qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
	qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;

	aprint_verbose_dev(sc->sc_dev,
	    "allocate ring %d of bank %d for %s "
	    "size %d %d at vaddr %p paddr 0x%llx\n",
	    ring, bank, name, ring_size_bytes,
	    (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
	    qr->qr_ring_vaddr,
	    (unsigned long long)qr->qr_ring_paddr);

	memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
	    qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
	    qr->qr_dma.qdm_dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
		return EFAULT;
	}

	if (cb == NULL) {
		ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
	} else {
		ring_config =
		    ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
	}
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);

	ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
	qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);

	if (sc->sc_hw.qhw_init_arb != NULL)
		qat_arb_update(sc, qb);

	mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);

	qat_etr_ap_bank_setup_ring(sc, qr);

	if (cb != NULL) {
		uint32_t intr_mask;

		qb->qb_intr_mask |= qr->qr_ring_mask;
		intr_mask = qb->qb_intr_mask;

		aprint_verbose_dev(sc->sc_dev,
		    "update intr mask for bank %d "
		    "(coalescing time %dns): 0x%08x\n",
		    bank, qb->qb_coalescing_time, intr_mask);
		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
		    intr_mask);
		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
		    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
	}

	*rqr = qr;

	return 0;
}

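/*
 * qat_modulo(data, shift) computes data % (1 << shift) without a
 * division; ring sizes are powers of two, so this yields the
 * wrap-around offset within the ring.
 */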
static inline u_int
qat_modulo(u_int data, u_int shift)
{
	u_int div = data >> shift;
	u_int mult = div << shift;
	return data - mult;
}

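/*
 * Enqueue one request on a transmit ring: reserve an in-flight slot,
 * copy the message at the shadow tail, sync it for the device, then
 * advance the tail and publish it through the ring tail CSR.
 */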
int
qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
{
	uint32_t inflight;
	uint32_t *addr;

	mutex_spin_enter(&qr->qr_ring_mtx);

	inflight = atomic_inc_32_nv(qr->qr_inflight);
	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_txfull);
		mutex_spin_exit(&qr->qr_ring_mtx);
		return EBUSY;
	}
	QAT_EVCNT_INCR(&qr->qr_ev_txmsg);

	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);

	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#endif

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_PREWRITE);

	qr->qr_tail = qat_modulo(qr->qr_tail +
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));

	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_TAIL_OFFSET, qr->qr_tail);

	mutex_spin_exit(&qr->qr_ring_mtx);

	return 0;
}

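/*
 * Drain one response ring: process entries until the empty-entry
 * signature is found, restore the signature behind each consumed
 * message, and push the new head to hardware once at the end.
 */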
int
qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
    struct qat_ring *qr)
{
	int handled = 0;
	uint32_t *msg;
	uint32_t nmsg = 0;

	mutex_spin_enter(&qr->qr_ring_mtx);

	QAT_EVCNT_INCR(&qr->qr_ev_rxintr);

	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);

		if (qr->qr_cb != NULL) {
			mutex_spin_exit(&qr->qr_ring_mtx);
			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
			mutex_spin_enter(&qr->qr_ring_mtx);
		}

		*msg = ETR_RING_EMPTY_ENTRY_SIG;

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		qr->qr_head = qat_modulo(qr->qr_head +
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
		nmsg++;

		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (nmsg > 0) {
		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
		    ETR_RING_HEAD_OFFSET, qr->qr_head);
	}

	mutex_spin_exit(&qr->qr_ring_mtx);

	return handled;
}

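/*
 * Bank-level MSI-X handler: interrupt coalescing is disabled while the
 * empty-ring status is sampled and re-armed before the per-ring
 * handlers drain the responses flagged by the inverted E_STAT bits.
 */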
int
qat_etr_bank_intr(void *arg)
{
	struct qat_bank *qb = arg;
	struct qat_softc *sc = qb->qb_sc;
	uint32_t estat;
	int i, handled = 0;

	mutex_spin_enter(&qb->qb_bank_mtx);

	QAT_EVCNT_INCR(&qb->qb_ev_rxintr);

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);

	/* Now handle all the responses */
	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
	estat &= qb->qb_intr_mask;

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);

	mutex_spin_exit(&qb->qb_bank_mtx);

	while ((i = ffs32(estat)) != 0) {
		struct qat_ring *qr = &qb->qb_et_rings[--i];
		estat &= ~(1 << i);
		handled |= qat_etr_ring_intr(sc, qb, qr);
	}

	return handled;
}

void
qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
{

	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
	    qb->qb_allocated_rings & 0xff);
}

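/*
 * Symmetric-request cookies are kept on a per-bank free stack guarded
 * by the bank mutex; allocation returns NULL when the stack is
 * exhausted and the caller is expected to back off and retry.
 */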
struct qat_sym_cookie *
qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
{
	struct qat_sym_cookie *qsc;

	mutex_spin_enter(&qcb->qcb_bank_mtx);

	if (qcb->qcb_symck_free_count == 0) {
		QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
		mutex_spin_exit(&qcb->qcb_bank_mtx);
		return NULL;
	}

	qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];

	mutex_spin_exit(&qcb->qcb_bank_mtx);

	return qsc;
}

void
qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc)
{

	mutex_spin_enter(&qcb->qcb_bank_mtx);
	qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
	mutex_spin_exit(&qcb->qcb_bank_mtx);
}

void
qat_memcpy_htobe64(void *dst, const void *src, size_t len)
{
	uint64_t *dst0 = dst;
	const uint64_t *src0 = src;
	size_t i;

	KASSERT(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe64(*(src0 + i));
}

void
qat_memcpy_htobe32(void *dst, const void *src, size_t len)
{
	uint32_t *dst0 = dst;
	const uint32_t *src0 = src;
	size_t i;

	KASSERT(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe32(*(src0 + i));
}

void
qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
{
	switch (wordbyte) {
	case 4:
		qat_memcpy_htobe32(dst, src, len);
		break;
	case 8:
		qat_memcpy_htobe64(dst, src, len);
		break;
	default:
		KASSERT(0);
	}
}

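/*
 * Precompute the HMAC inner and outer digests: hash one block of
 * key XOR ipad/opad each, then save the intermediate states
 * (byte-swapped to big-endian when the kernel hash keeps them in
 * host order) so the hardware can resume the HMAC per request.
 */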
1344 1.1 hikaru void
1345 1.1 hikaru qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
1346 1.1 hikaru struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
1347 1.1 hikaru {
1348 1.1 hikaru int i, state_swap;
1349 1.1 hikaru struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
1350 1.1 hikaru uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
1351 1.1 hikaru uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
1352 1.1 hikaru uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
1353 1.1 hikaru uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
1354 1.1 hikaru uint32_t keylen = cria->cri_klen / 8;
1355 1.1 hikaru uint32_t padlen = blklen - keylen;
1356 1.1 hikaru uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
1357 1.1 hikaru uint8_t *opad = desc->qcd_hash_state_prefix_buf +
1358 1.1 hikaru sizeof(desc->qcd_hash_state_prefix_buf) / 2;
1359 1.1 hikaru /* XXX
1360 1.1 hikaru * For "stack protector not protecting local variables" error,
1361 1.1 hikaru * use constant variable.
1362 1.1 hikaru * Currently, the max length is sizeof(aesxcbc_ctx) used by
1363 1.1 hikaru * swcr_auth_hash_aes_xcbc_mac
1364 1.1 hikaru */
1365 1.1 hikaru uint8_t ctx[sizeof(aesxcbc_ctx)];
1366 1.1 hikaru
1367 1.1 hikaru memcpy(ipad, cria->cri_key, keylen);
1368 1.1 hikaru memcpy(opad, cria->cri_key, keylen);
1369 1.1 hikaru
1370 1.1 hikaru if (padlen > 0) {
1371 1.1 hikaru memset(ipad + keylen, 0, padlen);
1372 1.1 hikaru memset(opad + keylen, 0, padlen);
1373 1.1 hikaru }
1374 1.1 hikaru for (i = 0; i < blklen; i++) {
1375 1.1 hikaru ipad[i] ^= 0x36;
1376 1.1 hikaru opad[i] ^= 0x5c;
1377 1.1 hikaru }
1378 1.1 hikaru
1379 1.1 hikaru /* ipad */
1380 1.1 hikaru sah->Init(ctx);
1381 1.1 hikaru /* Check the endianness of the kernel built-in hash state */
1382 1.1 hikaru state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
1383 1.1 hikaru ((uint8_t *)ctx) + state_offset, state_word);
1384 1.1 hikaru sah->Update(ctx, ipad, blklen);
1385 1.1 hikaru if (state_swap == 0) {
1386 1.1 hikaru memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
1387 1.1 hikaru } else {
1388 1.1 hikaru qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
1389 1.1 hikaru state_size, state_word);
1390 1.1 hikaru }
1391 1.1 hikaru
1392 1.1 hikaru /* opad */
1393 1.1 hikaru sah->Init(ctx);
1394 1.1 hikaru sah->Update(ctx, opad, blklen);
1395 1.1 hikaru if (state_swap == 0) {
1396 1.1 hikaru memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
1397 1.1 hikaru } else {
1398 1.1 hikaru qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
1399 1.1 hikaru state_size, state_word);
1400 1.1 hikaru }
1401 1.1 hikaru }
1402 1.1 hikaru
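/*
 * Translate an opencrypto cipher descriptor into the QAT hardware
 * cipher configuration word, recording the cipher block size in the
 * session descriptor on the way.
 */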
1403 1.1 hikaru uint16_t
1404 1.1 hikaru qat_crypto_load_cipher_cryptoini(
1405 1.1 hikaru struct qat_crypto_desc *desc, struct cryptoini *crie)
1406 1.1 hikaru {
1407 1.1 hikaru enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
1408 1.1 hikaru enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
1409 1.1 hikaru enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;
1410 1.1 hikaru
1411 1.1 hikaru switch (crie->cri_alg) {
1412 1.1 hikaru case CRYPTO_DES_CBC:
1413 1.1 hikaru algo = HW_CIPHER_ALGO_DES;
1414 1.1 hikaru desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
1415 1.1 hikaru break;
1416 1.1 hikaru case CRYPTO_3DES_CBC:
1417 1.1 hikaru algo = HW_CIPHER_ALGO_3DES;
1418 1.1 hikaru desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
1419 1.1 hikaru break;
1420 1.1 hikaru case CRYPTO_AES_CBC:
1421 1.1 hikaru switch (crie->cri_klen / 8) {
1422 1.1 hikaru case HW_AES_128_KEY_SZ:
1423 1.1 hikaru algo = HW_CIPHER_ALGO_AES128;
1424 1.1 hikaru break;
1425 1.1 hikaru case HW_AES_192_KEY_SZ:
1426 1.1 hikaru algo = HW_CIPHER_ALGO_AES192;
1427 1.1 hikaru break;
1428 1.1 hikaru case HW_AES_256_KEY_SZ:
1429 1.1 hikaru algo = HW_CIPHER_ALGO_AES256;
1430 1.1 hikaru break;
1431 1.1 hikaru default:
1432 1.1 hikaru KASSERT(0);
1433 1.1 hikaru break;
1434 1.1 hikaru }
1435 1.1 hikaru desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
1436 1.1 hikaru /*
1437 1.1 hikaru * The AES decrypt key needs to be reversed.
1438 1.1 hikaru * Instead of reversing the key at session registration,
1439 1.1 hikaru * it is reversed on-the-fly by setting the KEY_CONVERT
1440 1.1 hikaru * bit here.
1441 1.1 hikaru */
1442 1.1 hikaru if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
1443 1.1 hikaru key_convert = HW_CIPHER_KEY_CONVERT;
1444 1.1 hikaru
1445 1.1 hikaru break;
1446 1.1 hikaru default:
1447 1.1 hikaru KASSERT(0);
1448 1.1 hikaru break;
1449 1.1 hikaru }
1450 1.1 hikaru
1451 1.1 hikaru return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
1452 1.1 hikaru desc->qcd_cipher_dir);
1453 1.1 hikaru }
1454 1.1 hikaru
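/*
 * Translate an opencrypto auth descriptor into the QAT hardware
 * auth configuration word and hand back the matching hash
 * definition for state precomputation.
 */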
1455 1.1 hikaru uint16_t
1456 1.1 hikaru qat_crypto_load_auth_cryptoini(
1457 1.1 hikaru struct qat_crypto_desc *desc, struct cryptoini *cria,
1458 1.1 hikaru struct qat_sym_hash_def const **hash_def)
1459 1.1 hikaru {
1460 1.1 hikaru const struct swcr_auth_hash *sah;
1461 1.1 hikaru enum qat_sym_hash_algorithm algo = 0;
1462 1.1 hikaru
1463 1.1 hikaru switch (cria->cri_alg) {
1464 1.1 hikaru case CRYPTO_MD5_HMAC_96:
1465 1.1 hikaru algo = QAT_SYM_HASH_MD5;
1466 1.1 hikaru break;
1467 1.1 hikaru case CRYPTO_SHA1_HMAC_96:
1468 1.1 hikaru algo = QAT_SYM_HASH_SHA1;
1469 1.1 hikaru break;
1470 1.1 hikaru case CRYPTO_SHA2_256_HMAC:
1471 1.1 hikaru algo = QAT_SYM_HASH_SHA256;
1472 1.1 hikaru break;
1473 1.1 hikaru case CRYPTO_SHA2_384_HMAC:
1474 1.1 hikaru algo = QAT_SYM_HASH_SHA384;
1475 1.1 hikaru break;
1476 1.1 hikaru case CRYPTO_SHA2_512_HMAC:
1477 1.1 hikaru algo = QAT_SYM_HASH_SHA512;
1478 1.1 hikaru break;
1479 1.1 hikaru default:
1480 1.1 hikaru KASSERT(0);
1481 1.1 hikaru break;
1482 1.1 hikaru }
1483 1.1 hikaru *hash_def = &qat_sym_hash_defs[algo];
1484 1.1 hikaru sah = (*hash_def)->qshd_alg->qshai_sah;
1485 1.1 hikaru KASSERT(sah != NULL);
1486 1.1 hikaru desc->qcd_auth_sz = sah->auth_hash->authsize;
1487 1.1 hikaru
1488 1.1 hikaru return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
1489 1.1 hikaru (*hash_def)->qshd_qat->qshqi_algo_enc,
1490 1.1 hikaru (*hash_def)->qshd_alg->qshai_digest_len);
1491 1.1 hikaru }
1492 1.1 hikaru
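/*
 * DMA-load the request buffer (mbuf chain, uio or plain pointer)
 * and describe it to the firmware as a flat buffer list.  If an
 * ICV offset is given, also resolve it to a physical address.
 */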
1493 1.1 hikaru int
1494 1.1 hikaru qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
1495 1.1 hikaru struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
1496 1.1 hikaru uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
1497 1.1 hikaru {
1498 1.1 hikaru int error, i, nsegs;
1499 1.1 hikaru
1500 1.1 hikaru if (crp->crp_flags & CRYPTO_F_IMBUF) {
1501 1.1 hikaru struct mbuf *m = (struct mbuf *)crp->crp_buf;
1502 1.1 hikaru
1503 1.1 hikaru if (icv_offset >= 0) {
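/*
 * If the ICV sits exactly at the end of the chain, extend
 * the chain with m_copyback(); an unchanged length means
 * the allocation failed.  Otherwise make the ICV region
 * contiguous with m_pulldown().
 */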
1504 1.1 hikaru if (m_length(m) == icv_offset) {
1505 1.1 hikaru m_copyback(m, icv_offset, desc->qcd_auth_sz,
1506 1.1 hikaru icv_buf);
1507 1.1 hikaru if (m_length(m) == icv_offset)
1508 1.1 hikaru return ENOBUFS;
1509 1.1 hikaru } else {
1510 1.1 hikaru struct mbuf *m0;
1511 1.1 hikaru m0 = m_pulldown(m, icv_offset,
1512 1.1 hikaru desc->qcd_auth_sz, NULL);
1513 1.1 hikaru if (m0 == NULL)
1514 1.1 hikaru return ENOBUFS;
1515 1.1 hikaru }
1516 1.1 hikaru }
1517 1.1 hikaru
1518 1.1 hikaru error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
1519 1.1 hikaru m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
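/* Too many DMA segments: defragment the chain and retry once. */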
1520 1.1 hikaru if (error == EFBIG) {
1521 1.1 hikaru struct mbuf *m_new;
1522 1.1 hikaru m_new = m_defrag(m, M_DONTWAIT);
1523 1.1 hikaru if (m_new != NULL) {
1524 1.1 hikaru crp->crp_buf = m_new;
1525 1.1 hikaru qsc->qsc_buf = m_new;
1526 1.1 hikaru error = bus_dmamap_load_mbuf(sc->sc_dmat,
1527 1.1 hikaru qsc->qsc_buf_dmamap, m_new,
1528 1.1 hikaru BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1529 1.1 hikaru if (error) {
1530 1.1 hikaru m_freem(m_new);
1531 1.1 hikaru crp->crp_buf = NULL;
1532 1.1 hikaru }
1533 1.1 hikaru }
1534 1.1 hikaru }
1535 1.1 hikaru
1536 1.1 hikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
1537 1.1 hikaru error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
1538 1.1 hikaru (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1539 1.1 hikaru } else {
1540 1.1 hikaru error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
1541 1.1 hikaru crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
1542 1.1 hikaru }
1543 1.1 hikaru if (error) {
1544 1.1 hikaru aprint_debug_dev(sc->sc_dev,
1545 1.1 hikaru "can't load crp_buf, error %d\n", error);
1546 1.1 hikaru crp->crp_etype = error;
1547 1.1 hikaru return error;
1548 1.1 hikaru }
1549 1.1 hikaru
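/*
 * Walk the DMA segments to fill in the firmware buffer list, and
 * convert the remaining ICV offset into a physical address as we
 * pass the segment that contains it.
 */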
1550 1.1 hikaru nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
1551 1.1 hikaru qsc->qsc_buf_list.num_buffers = nsegs;
1552 1.1 hikaru for (i = 0; i < nsegs; i++) {
1553 1.1 hikaru struct flat_buffer_desc *flatbuf =
1554 1.1 hikaru &qsc->qsc_buf_list.phy_buffers[i];
1555 1.1 hikaru bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
1556 1.1 hikaru bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;
1557 1.1 hikaru
1558 1.1 hikaru flatbuf->data_len_in_bytes = len;
1559 1.1 hikaru flatbuf->phy_buffer = (uint64_t)paddr;
1560 1.1 hikaru
1561 1.1 hikaru if (icv_offset >= 0) {
1562 1.1 hikaru if (icv_offset < len)
1563 1.1 hikaru *icv_paddr = paddr + icv_offset;
1564 1.1 hikaru else
1565 1.1 hikaru icv_offset -= len;
1566 1.1 hikaru }
1567 1.1 hikaru }
1568 1.1 hikaru
1569 1.1 hikaru bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
1570 1.1 hikaru qsc->qsc_buf_dmamap->dm_mapsize,
1571 1.1 hikaru BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1572 1.1 hikaru
1573 1.1 hikaru return 0;
1574 1.1 hikaru }
1575 1.1 hikaru
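/*
 * Set up the IV: take it from crd_iv if explicit, generate a random
 * one when encrypting, or read it from the packet when decrypting;
 * for encryption the IV is also written back into the packet unless
 * the caller already placed it there.
 */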
1576 1.1 hikaru int
1577 1.1 hikaru qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
1578 1.1 hikaru struct cryptodesc *crde, struct qat_crypto_desc const *desc)
1579 1.1 hikaru {
1580 1.1 hikaru uint32_t rand;
1581 1.1 hikaru uint32_t ivlen = desc->qcd_cipher_blk_sz;
1582 1.1 hikaru int i;
1583 1.1 hikaru
1584 1.1 hikaru if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
1585 1.1 hikaru memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
1586 1.1 hikaru } else {
1587 1.1 hikaru if (crde->crd_flags & CRD_F_ENCRYPT) {
1588 1.1 hikaru for (i = 0; i + sizeof(rand) <= ivlen;
1589 1.1 hikaru i += sizeof(rand)) {
1590 1.1 hikaru rand = cprng_fast32();
1591 1.1 hikaru memcpy(qsc->qsc_iv_buf + i, &rand, sizeof(rand));
1592 1.1 hikaru }
1593 1.1 hikaru 			if (ivlen % sizeof(rand) != 0) {
1594 1.1 hikaru 				rand = cprng_fast32();
1595 1.1 hikaru 				memcpy(qsc->qsc_iv_buf + i, &rand,
1596 1.1 hikaru 				    ivlen - i);
1597 1.1 hikaru 			}
1598 1.1 hikaru } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1599 1.1 hikaru /* get iv from buf */
1600 1.1 hikaru m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1601 1.1 hikaru qsc->qsc_iv_buf);
1602 1.1 hikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
1603 1.1 hikaru cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1604 1.1 hikaru qsc->qsc_iv_buf);
1605 1.1 hikaru }
1606 1.1 hikaru }
1607 1.1 hikaru
1608 1.1 hikaru if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
1609 1.1 hikaru (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
1610 1.1 hikaru if (crp->crp_flags & CRYPTO_F_IMBUF) {
1611 1.1 hikaru m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1612 1.1 hikaru qsc->qsc_iv_buf);
1613 1.1 hikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
1614 1.1 hikaru cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1615 1.1 hikaru qsc->qsc_iv_buf);
1616 1.1 hikaru }
1617 1.1 hikaru }
1618 1.1 hikaru
1619 1.1 hikaru return 0;
1620 1.1 hikaru }
1621 1.1 hikaru
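/*
 * Spread requests across banks by CPU index so concurrent callers
 * mostly use different rings.
 */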
1622 1.1 hikaru static inline struct qat_crypto_bank *
1623 1.1 hikaru qat_crypto_select_bank(struct qat_crypto *qcy)
1624 1.1 hikaru {
1625 1.1 hikaru u_int cpuid = cpu_index(curcpu());
1626 1.1 hikaru
1627 1.1 hikaru return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
1628 1.1 hikaru }
1629 1.1 hikaru
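/*
 * opencrypto process entry point: validate the descriptor chain,
 * pick the encrypt or decrypt session descriptor, load the IV and
 * data buffer, then build and enqueue the firmware request on the
 * bank's sym tx ring.  Completion is reported via
 * qat_crypto_sym_rxintr().
 */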
1630 1.1 hikaru int
1631 1.1 hikaru qat_crypto_process(void *arg, struct cryptop *crp, int hint)
1632 1.1 hikaru {
1633 1.1 hikaru struct qat_crypto *qcy = arg;
1634 1.1 hikaru struct qat_crypto_bank *qcb;
1635 1.1 hikaru struct qat_session *qs = NULL;
1636 1.1 hikaru struct qat_crypto_desc const *desc;
1637 1.1 hikaru struct qat_sym_cookie *qsc = NULL;
1638 1.1 hikaru struct qat_sym_bulk_cookie *qsbc;
1639 1.1 hikaru struct cryptodesc *crd, *crda = NULL, *crde = NULL;
1640 1.1 hikaru bus_addr_t icv_paddr = 0;
1641 1.1 hikaru int error, icv_offset = -1;
1642 1.1 hikaru uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];
1643 1.1 hikaru
1644 1.1 hikaru qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
1645 1.1 hikaru mutex_spin_enter(&qs->qs_session_mtx);
1646 1.1 hikaru KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
1647 1.1 hikaru qs->qs_inflight++;
1648 1.1 hikaru mutex_spin_exit(&qs->qs_session_mtx);
1649 1.1 hikaru
1650 1.1 hikaru qcb = qat_crypto_select_bank(qcy);
1651 1.1 hikaru
1652 1.1 hikaru qsc = qat_crypto_alloc_sym_cookie(qcb);
1653 1.1 hikaru if (qsc == NULL) {
1654 1.1 hikaru error = ENOBUFS;
1655 1.1 hikaru goto fail;
1656 1.1 hikaru }
1657 1.1 hikaru
1658 1.1 hikaru error = 0;
1659 1.1 hikaru desc = &qs->qs_dec_desc;
1660 1.1 hikaru crd = crp->crp_desc;
1661 1.1 hikaru while (crd != NULL) {
1662 1.1 hikaru switch (crd->crd_alg) {
1663 1.1 hikaru case CRYPTO_DES_CBC:
1664 1.1 hikaru case CRYPTO_3DES_CBC:
1665 1.1 hikaru case CRYPTO_AES_CBC:
1666 1.1 hikaru if (crde != NULL)
1667 1.1 hikaru error = EINVAL;
1668 1.1 hikaru if (crd->crd_flags & CRD_F_ENCRYPT) {
1669 1.1 hikaru /* use encrypt desc */
1670 1.1 hikaru desc = &qs->qs_enc_desc;
1671 1.1 hikaru if (crda != NULL)
1672 1.1 hikaru error = ENOTSUP;
1673 1.1 hikaru }
1674 1.1 hikaru crde = crd;
1675 1.1 hikaru break;
1676 1.1 hikaru case CRYPTO_MD5_HMAC_96:
1677 1.1 hikaru case CRYPTO_SHA1_HMAC_96:
1678 1.1 hikaru case CRYPTO_SHA2_256_HMAC:
1679 1.1 hikaru case CRYPTO_SHA2_384_HMAC:
1680 1.1 hikaru case CRYPTO_SHA2_512_HMAC:
1681 1.1 hikaru if (crda != NULL)
1682 1.1 hikaru error = EINVAL;
1683 1.1 hikaru if (crde != NULL &&
1684 1.1 hikaru (crde->crd_flags & CRD_F_ENCRYPT) == 0)
1685 1.1 hikaru error = EINVAL;
1686 1.1 hikaru crda = crd;
1687 1.1 hikaru icv_offset = crd->crd_inject;
1688 1.1 hikaru break;
1689 1.1 hikaru }
1690 1.1 hikaru if (error)
1691 1.1 hikaru goto fail;
1692 1.1 hikaru
1693 1.1 hikaru crd = crd->crd_next;
1694 1.1 hikaru }
1695 1.1 hikaru
1696 1.1 hikaru qsc->qsc_buf = crp->crp_buf;
1697 1.1 hikaru
1698 1.1 hikaru if (crde != NULL) {
1699 1.1 hikaru error = qat_crypto_load_iv(qsc, crp, crde, desc);
1700 1.1 hikaru if (error)
1701 1.1 hikaru goto fail;
1702 1.1 hikaru }
1703 1.1 hikaru
1704 1.1 hikaru error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
1705 1.1 hikaru icv_offset, &icv_paddr);
1706 1.1 hikaru if (error)
1707 1.1 hikaru goto fail;
1708 1.1 hikaru
1709 1.1 hikaru qsbc = &qsc->u.qsc_bulk_cookie;
1710 1.1 hikaru
1711 1.1 hikaru qsbc->qsbc_crypto = qcy;
1712 1.1 hikaru qsbc->qsbc_session = qs;
1713 1.1 hikaru qsbc->qsbc_cb_tag = crp;
1714 1.1 hikaru
1715 1.1 hikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
1716 1.1 hikaru crde, crda, icv_paddr);
1717 1.1 hikaru
1718 1.1 hikaru bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
1719 1.1 hikaru offsetof(struct qat_sym_cookie, qsc_self_dmamap),
1720 1.1 hikaru BUS_DMASYNC_PREWRITE);
1721 1.1 hikaru
1722 1.1 hikaru error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
1723 1.1 hikaru (uint32_t *)qsbc->qsbc_msg);
1724 1.1 hikaru if (error)
1725 1.1 hikaru goto fail;
1726 1.1 hikaru
1727 1.1 hikaru return 0;
1728 1.1 hikaru fail:
1729 1.1 hikaru if (qsc)
1730 1.1 hikaru qat_crypto_free_sym_cookie(qcb, qsc);
1731 1.1 hikaru mutex_spin_enter(&qs->qs_session_mtx);
1732 1.1 hikaru qs->qs_inflight--;
1733 1.1 hikaru qat_crypto_check_free_session(qcy, qs);
1734 1.1 hikaru crp->crp_etype = error;
1735 1.1 hikaru crypto_done(crp);
1736 1.1 hikaru return 0;
1737 1.1 hikaru }
1738 1.1 hikaru
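/*
 * Create the bank's sym tx/rx rings and preallocate the DMA-able
 * symmetric crypto cookies, seeding the bank's free list.
 */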
1739 1.1 hikaru int
1740 1.1 hikaru qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1741 1.1 hikaru {
1742 1.1 hikaru int error, i, bank;
1743 1.1 hikaru int curname = 0;
1744 1.1 hikaru char *name;
1745 1.1 hikaru
1746 1.1 hikaru bank = qcb->qcb_bank;
1747 1.1 hikaru
1748 1.1 hikaru name = qcb->qcb_ring_names[curname++];
1749 1.1 hikaru snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
1750 1.1 hikaru error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1751 1.1 hikaru sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
1752 1.1 hikaru NULL, NULL, name, &qcb->qcb_sym_tx);
1753 1.1 hikaru if (error)
1754 1.1 hikaru return error;
1755 1.1 hikaru
1756 1.1 hikaru name = qcb->qcb_ring_names[curname++];
1757 1.1 hikaru snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
1758 1.1 hikaru error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1759 1.1 hikaru sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
1760 1.1 hikaru qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
1761 1.1 hikaru if (error)
1762 1.1 hikaru return error;
1763 1.1 hikaru
1764 1.1 hikaru for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1765 1.1 hikaru struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
1766 1.1 hikaru struct qat_sym_cookie *qsc;
1767 1.1 hikaru
1768 1.1 hikaru error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
1769 1.1 hikaru QAT_OPTIMAL_ALIGN);
1770 1.1 hikaru if (error)
1771 1.1 hikaru return error;
1772 1.1 hikaru
1773 1.1 hikaru qsc = qdm->qdm_dma_vaddr;
1774 1.1 hikaru qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
1775 1.1 hikaru qsc->qsc_bulk_req_params_buf_paddr =
1776 1.1 hikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1777 1.1 hikaru u.qsc_bulk_cookie.qsbc_req_params_buf);
1778 1.1 hikaru qsc->qsc_buffer_list_desc_paddr =
1779 1.1 hikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1780 1.1 hikaru qsc_buf_list);
1781 1.1 hikaru qsc->qsc_iv_buf_paddr =
1782 1.1 hikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1783 1.1 hikaru qsc_iv_buf);
1784 1.1 hikaru qcb->qcb_symck_free[i] = qsc;
1785 1.1 hikaru qcb->qcb_symck_free_count++;
1786 1.1 hikaru
1787 1.1 hikaru error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
1788 1.1 hikaru QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
1789 1.1 hikaru if (error)
1790 1.1 hikaru return error;
1791 1.1 hikaru }
1792 1.1 hikaru
1793 1.1 hikaru return 0;
1794 1.1 hikaru }
1795 1.1 hikaru
1796 1.1 hikaru int
1797 1.1 hikaru qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1798 1.1 hikaru {
1799 1.1 hikaru int error;
1800 1.1 hikaru
1801 1.1 hikaru mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
1802 1.1 hikaru
1803 1.1 hikaru QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
1804 1.1 hikaru qcb->qcb_ev_no_symck_name, "crypto no_symck");
1805 1.1 hikaru
1806 1.1 hikaru error = qat_crypto_setup_ring(sc, qcb);
1807 1.1 hikaru if (error)
1808 1.1 hikaru return error;
1809 1.1 hikaru
1810 1.1 hikaru return 0;
1811 1.1 hikaru }
1812 1.1 hikaru
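/*
 * Allocate the per-bank structures and the DMA-able session pool.
 * When the hardware arbiter is available (qhw_init_arb), the bank
 * count is capped by ncpu; otherwise one bank per acceleration
 * engine is used.
 */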
1813 1.1 hikaru int
1814 1.1 hikaru qat_crypto_init(struct qat_softc *sc)
1815 1.1 hikaru {
1816 1.1 hikaru struct qat_crypto *qcy = &sc->sc_crypto;
1817 1.1 hikaru int error, bank, i;
1818 1.1 hikaru int num_banks;
1819 1.1 hikaru
1820 1.1 hikaru qcy->qcy_sc = sc;
1821 1.1 hikaru
1822 1.1 hikaru if (sc->sc_hw.qhw_init_arb != NULL)
1823 1.1 hikaru num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
1824 1.1 hikaru else
1825 1.1 hikaru num_banks = sc->sc_ae_num;
1826 1.1 hikaru
1827 1.1 hikaru qcy->qcy_num_banks = num_banks;
1828 1.1 hikaru
1829 1.1 hikaru qcy->qcy_banks =
1830 1.1 hikaru qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
1831 1.1 hikaru
1832 1.1 hikaru for (bank = 0; bank < num_banks; bank++) {
1833 1.1 hikaru struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
1834 1.1 hikaru qcb->qcb_bank = bank;
1835 1.1 hikaru qcb->qcb_crypto = qcy;
1836 1.1 hikaru error = qat_crypto_bank_init(sc, qcb);
1837 1.1 hikaru if (error)
1838 1.1 hikaru return error;
1839 1.1 hikaru }
1840 1.1 hikaru
1841 1.1 hikaru mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);
1842 1.1 hikaru
1843 1.1 hikaru for (i = 0; i < QAT_NSESSION; i++) {
1844 1.1 hikaru struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
1845 1.1 hikaru struct qat_session *qs;
1846 1.1 hikaru
1847 1.1 hikaru error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
1848 1.1 hikaru QAT_OPTIMAL_ALIGN);
1849 1.1 hikaru if (error)
1850 1.1 hikaru return error;
1851 1.1 hikaru
1852 1.1 hikaru qs = qdm->qdm_dma_vaddr;
1853 1.1 hikaru qs->qs_lid = i;
1854 1.1 hikaru qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
1855 1.1 hikaru qs->qs_dec_desc.qcd_hash_state_paddr =
1856 1.1 hikaru qs->qs_dec_desc.qcd_desc_paddr +
1857 1.1 hikaru offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1858 1.1 hikaru qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
1859 1.1 hikaru offsetof(struct qat_session, qs_enc_desc);
1860 1.1 hikaru qs->qs_enc_desc.qcd_hash_state_paddr =
1861 1.1 hikaru qs->qs_enc_desc.qcd_desc_paddr +
1862 1.1 hikaru offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1863 1.1 hikaru
1864 1.1 hikaru mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);
1865 1.1 hikaru
1866 1.1 hikaru qcy->qcy_sessions[i] = qs;
1867 1.1 hikaru qcy->qcy_session_free[i] = qs;
1868 1.1 hikaru qcy->qcy_session_free_count++;
1869 1.1 hikaru }
1870 1.1 hikaru
1871 1.1 hikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
1872 1.1 hikaru qcy->qcy_ev_new_sess_name, "crypto new_sess");
1873 1.1 hikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
1874 1.1 hikaru qcy->qcy_ev_free_sess_name, "crypto free_sess");
1875 1.1 hikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
1876 1.1 hikaru qcy->qcy_ev_no_sess_name, "crypto no_sess");
1877 1.1 hikaru
1878 1.1 hikaru return 0;
1879 1.1 hikaru }
1880 1.1 hikaru
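/*
 * Allocate a session from the free list, classify the cryptoini
 * chain into at most one cipher and one auth algorithm, and program
 * the firmware slice chains for both directions.
 */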
1881 1.1 hikaru int
1882 1.1 hikaru qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
1883 1.1 hikaru {
1884 1.1 hikaru struct qat_crypto *qcy = arg;
1885 1.1 hikaru struct qat_session *qs = NULL;
1886 1.1 hikaru struct cryptoini *crie = NULL;
1887 1.1 hikaru struct cryptoini *cria = NULL;
1888 1.1 hikaru int slice, error;
1889 1.1 hikaru
1890 1.1 hikaru mutex_spin_enter(&qcy->qcy_crypto_mtx);
1891 1.1 hikaru
1892 1.1 hikaru if (qcy->qcy_session_free_count == 0) {
1893 1.1 hikaru QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
1894 1.1 hikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
1895 1.1 hikaru return ENOBUFS;
1896 1.1 hikaru }
1897 1.1 hikaru qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
1898 1.1 hikaru QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);
1899 1.1 hikaru
1900 1.1 hikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
1901 1.1 hikaru
1902 1.1 hikaru qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
1903 1.1 hikaru qs->qs_inflight = 0;
1904 1.1 hikaru *lid = qs->qs_lid;
1905 1.1 hikaru
1906 1.1 hikaru error = 0;
1907 1.1 hikaru while (cri) {
1908 1.1 hikaru switch (cri->cri_alg) {
1909 1.1 hikaru case CRYPTO_DES_CBC:
1910 1.1 hikaru case CRYPTO_3DES_CBC:
1911 1.1 hikaru case CRYPTO_AES_CBC:
1912 1.1 hikaru if (crie != NULL)
1913 1.1 hikaru error = EINVAL;
1914 1.1 hikaru crie = cri;
1915 1.1 hikaru break;
1916 1.1 hikaru case CRYPTO_MD5_HMAC_96:
1917 1.1 hikaru case CRYPTO_SHA1_HMAC_96:
1918 1.1 hikaru case CRYPTO_SHA2_256_HMAC:
1919 1.1 hikaru case CRYPTO_SHA2_384_HMAC:
1920 1.1 hikaru case CRYPTO_SHA2_512_HMAC:
1921 1.1 hikaru if (cria != NULL)
1922 1.1 hikaru error = EINVAL;
1923 1.1 hikaru cria = cri;
1924 1.1 hikaru break;
1925 1.1 hikaru default:
1926 1.1 hikaru error = EINVAL;
1927 1.1 hikaru }
1928 1.1 hikaru if (error)
1929 1.1 hikaru goto fail;
1930 1.1 hikaru cri = cri->cri_next;
1931 1.1 hikaru }
1932 1.1 hikaru
1933 1.1 hikaru slice = 1;
1934 1.1 hikaru if (crie != NULL && cria != NULL) {
1935 1.1 hikaru slice = 2;
1936 1.1 hikaru /* auth then decrypt */
1937 1.1 hikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1938 1.1 hikaru qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
1939 1.1 hikaru qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1940 1.1 hikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
1941 1.1 hikaru /* encrypt then auth */
1942 1.1 hikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1943 1.1 hikaru qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
1944 1.1 hikaru qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1945 1.1 hikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
1946 1.1 hikaru } else if (crie != NULL) {
1947 1.1 hikaru /* decrypt */
1948 1.1 hikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1949 1.1 hikaru qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1950 1.1 hikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1951 1.1 hikaru /* encrypt */
1952 1.1 hikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1953 1.1 hikaru qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1954 1.1 hikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1955 1.1 hikaru } else if (cria != NULL) {
1956 1.1 hikaru /* auth */
1957 1.1 hikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1958 1.1 hikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1959 1.1 hikaru /* auth */
1960 1.1 hikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
1961 1.1 hikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1962 1.1 hikaru } else {
1963 1.1 hikaru error = EINVAL;
1964 1.1 hikaru goto fail;
1965 1.1 hikaru }
1966 1.1 hikaru qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1967 1.1 hikaru qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1968 1.1 hikaru
1969 1.1 hikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc, crie, cria);
1970 1.1 hikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc, crie, cria);
1971 1.1 hikaru
1972 1.1 hikaru membar_producer();
1973 1.1 hikaru
1974 1.1 hikaru return 0;
1975 1.1 hikaru fail:
1976 1.1 hikaru if (qs != NULL) {
1977 1.1 hikaru mutex_spin_enter(&qs->qs_session_mtx);
1978 1.1 hikaru qat_crypto_free_session0(qcy, qs);
1979 1.1 hikaru }
1980 1.1 hikaru return error;
1981 1.1 hikaru }
1982 1.1 hikaru
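/*
 * Scrub key material from a session descriptor before the session
 * is returned to the free list.
 */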
1983 1.1 hikaru static inline void
1984 1.1 hikaru qat_crypto_clean_desc(struct qat_crypto_desc *desc)
1985 1.1 hikaru {
1986 1.1 hikaru explicit_memset(desc->qcd_content_desc, 0,
1987 1.1 hikaru sizeof(desc->qcd_content_desc));
1988 1.1 hikaru explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
1989 1.1 hikaru sizeof(desc->qcd_hash_state_prefix_buf));
1990 1.1 hikaru explicit_memset(desc->qcd_req_cache, 0,
1991 1.1 hikaru sizeof(desc->qcd_req_cache));
1992 1.1 hikaru }
1993 1.1 hikaru
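/*
 * Called with qs_session_mtx held; releases it before returning the
 * session to the free list.
 */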
1994 1.1 hikaru int
1995 1.1 hikaru qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
1996 1.1 hikaru {
1997 1.1 hikaru
1998 1.1 hikaru qat_crypto_clean_desc(&qs->qs_dec_desc);
1999 1.1 hikaru qat_crypto_clean_desc(&qs->qs_enc_desc);
2000 1.1 hikaru qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;
2001 1.1 hikaru
2002 1.1 hikaru mutex_spin_exit(&qs->qs_session_mtx);
2003 1.1 hikaru
2004 1.1 hikaru mutex_spin_enter(&qcy->qcy_crypto_mtx);
2005 1.1 hikaru
2006 1.1 hikaru qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
2007 1.1 hikaru QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);
2008 1.1 hikaru
2009 1.1 hikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
2010 1.1 hikaru
2011 1.1 hikaru return 0;
2012 1.1 hikaru }
2013 1.1 hikaru
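/*
 * Called with qs_session_mtx held; releases it.  Completes a
 * deferred free once no requests remain in flight.
 */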
2014 1.1 hikaru void
2015 1.1 hikaru qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
2016 1.1 hikaru {
2017 1.1 hikaru
2018 1.1 hikaru if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
2019 1.1 hikaru qs->qs_inflight == 0) {
2020 1.1 hikaru qat_crypto_free_session0(qcy, qs);
2021 1.1 hikaru } else {
2022 1.1 hikaru mutex_spin_exit(&qs->qs_session_mtx);
2023 1.1 hikaru }
2024 1.1 hikaru }
2025 1.1 hikaru
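/*
 * opencrypto free-session entry point.  If requests are still in
 * flight, the free is deferred to the last completion.
 */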
2026 1.1 hikaru int
2027 1.1 hikaru qat_crypto_free_session(void *arg, uint64_t sid)
2028 1.1 hikaru {
2029 1.1 hikaru struct qat_crypto *qcy = arg;
2030 1.1 hikaru struct qat_session *qs;
2031 1.1 hikaru int error;
2032 1.1 hikaru
2033 1.1 hikaru qs = qcy->qcy_sessions[CRYPTO_SESID2LID(sid)];
2034 1.1 hikaru
2035 1.1 hikaru mutex_spin_enter(&qs->qs_session_mtx);
2036 1.1 hikaru
2037 1.1 hikaru if (qs->qs_inflight > 0) {
2038 1.1 hikaru qs->qs_status |= QAT_SESSION_STATUS_FREEING;
2039 1.1 hikaru mutex_spin_exit(&qs->qs_session_mtx);
2040 1.1 hikaru return 0;
2041 1.1 hikaru }
2042 1.1 hikaru
2043 1.1 hikaru error = qat_crypto_free_session0(qcy, qs);
2044 1.1 hikaru
2045 1.1 hikaru return error;
2046 1.1 hikaru }
2047 1.1 hikaru
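/*
 * Register the supported algorithms with opencrypto.
 */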
2048 1.1 hikaru int
2049 1.1 hikaru qat_crypto_start(struct qat_softc *sc)
2050 1.1 hikaru {
2051 1.1 hikaru struct qat_crypto *qcy = &sc->sc_crypto;
2052 1.1 hikaru int error, i;
2053 1.1 hikaru static const int algs[] = {
2054 1.1 hikaru CRYPTO_DES_CBC, CRYPTO_3DES_CBC, CRYPTO_AES_CBC,
2055 1.1 hikaru CRYPTO_MD5_HMAC_96, CRYPTO_SHA1_HMAC_96, CRYPTO_SHA2_256_HMAC,
2056 1.1 hikaru CRYPTO_SHA2_384_HMAC, CRYPTO_SHA2_512_HMAC,
2057 1.1 hikaru };
2058 1.1 hikaru
2059 1.1 hikaru /* opencrypto */
2060 1.1 hikaru qcy->qcy_cid = crypto_get_driverid(0);
2061 1.1 hikaru if (qcy->qcy_cid < 0) {
2062 1.1 hikaru aprint_error_dev(sc->sc_dev,
2063 1.1 hikaru "could not get opencrypto driver id\n");
2064 1.1 hikaru return ENOENT;
2065 1.1 hikaru }
2066 1.1 hikaru
2067 1.1 hikaru for (i = 0; i < __arraycount(algs); i++) {
2068 1.1 hikaru error = crypto_register(qcy->qcy_cid, algs[i], 0, 0,
2069 1.1 hikaru qat_crypto_new_session, qat_crypto_free_session,
2070 1.1 hikaru qat_crypto_process, qcy);
2071 1.1 hikaru if (error) {
2072 1.1 hikaru aprint_error_dev(sc->sc_dev,
2073 1.1 hikaru "could not register crypto: %d\n", error);
2074 1.1 hikaru return error;
2075 1.1 hikaru }
2076 1.1 hikaru }
2077 1.1 hikaru
2078 1.1 hikaru return 0;
2079 1.1 hikaru }
2080 1.1 hikaru
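/*
 * sym rx ring completion handler: recover the cookie from the
 * response's opaque field, unload the data buffer, hand the request
 * back to opencrypto and decrement the session's inflight count.
 */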
2081 1.1 hikaru int
2082 1.1 hikaru qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
2083 1.1 hikaru {
2084 1.1 hikaru struct qat_crypto_bank *qcb = arg;
2085 1.1 hikaru struct qat_crypto *qcy;
2086 1.1 hikaru struct qat_session *qs;
2087 1.1 hikaru struct qat_sym_cookie *qsc;
2088 1.1 hikaru struct qat_sym_bulk_cookie *qsbc;
2089 1.1 hikaru struct cryptop *crp;
2090 1.1 hikaru
2091 1.1 hikaru qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
2092 1.1 hikaru
2093 1.1 hikaru qsbc = &qsc->u.qsc_bulk_cookie;
2094 1.1 hikaru qcy = qsbc->qsbc_crypto;
2095 1.1 hikaru qs = qsbc->qsbc_session;
2096 1.1 hikaru crp = qsbc->qsbc_cb_tag;
2097 1.1 hikaru
2098 1.1 hikaru bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
2099 1.1 hikaru qsc->qsc_buf_dmamap->dm_mapsize,
2100 1.1 hikaru BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2101 1.1 hikaru bus_dmamap_unload(sc->sc_dmat, qsc->qsc_buf_dmamap);
2102 1.1 hikaru qat_crypto_free_sym_cookie(qcb, qsc);
2103 1.1 hikaru
2104 1.1 hikaru crp->crp_etype = 0;
2105 1.1 hikaru crypto_done(crp);
2106 1.1 hikaru
2107 1.1 hikaru mutex_spin_enter(&qs->qs_session_mtx);
2108 1.1 hikaru KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
2109 1.1 hikaru qs->qs_inflight--;
2110 1.1 hikaru qat_crypto_check_free_session(qcy, qs);
2111 1.1 hikaru
2112 1.1 hikaru return 1;
2113 1.1 hikaru }
2114 1.1 hikaru
2115 1.1 hikaru #ifdef QAT_DUMP
2116 1.1 hikaru
2117 1.1 hikaru void
2118 1.1 hikaru qat_dump_raw(int flag, const char *label, void *d, size_t len)
2119 1.1 hikaru {
2120 1.1 hikaru uintptr_t pc;
2121 1.1 hikaru size_t pos;
2122 1.1 hikaru uint8_t *dp = (uint8_t *)d;
2123 1.1 hikaru
2124 1.1 hikaru if ((qat_dump & flag) == 0)
2125 1.1 hikaru return;
2126 1.1 hikaru
2127 1.1 hikaru printf("dumping %s at %p len %zu\n", label, d, len);
2128 1.1 hikaru
2129 1.1 hikaru pc = __RETURN_ADDRESS;
2130 1.1 hikaru printf("\tcallpc ");
2131 1.1 hikaru qat_print_sym(pc);
2132 1.1 hikaru printf("\n");
2133 1.1 hikaru
2134 1.1 hikaru for (pos = 0; pos < len; pos++) {
2135 1.1 hikaru if (pos % 32 == 0)
2136 1.1 hikaru printf("%8zx: ", pos);
2137 1.1 hikaru else if (pos % 4 == 0)
2138 1.1 hikaru printf(" ");
2139 1.1 hikaru
2140 1.1 hikaru printf("%02x", dp[pos]);
2141 1.1 hikaru
2142 1.1 hikaru if (pos % 32 == 31 || pos + 1 == len)
2143 1.1 hikaru printf("\n");
2144 1.1 hikaru }
2145 1.1 hikaru }
2146 1.1 hikaru
2147 1.1 hikaru void
2148 1.1 hikaru qat_dump_ring(int bank, int ring)
2149 1.1 hikaru {
2150 1.1 hikaru struct qat_softc *sc = gsc;
2151 1.1 hikaru struct qat_bank *qb = &sc->sc_etr_banks[bank];
2152 1.1 hikaru struct qat_ring *qr = &qb->qb_et_rings[ring];
2153 1.1 hikaru u_int offset;
2154 1.1 hikaru int i;
2155 1.1 hikaru uint32_t msg;
2156 1.1 hikaru
2157 1.1 hikaru printf("dumping bank %d ring %d\n", bank, ring);
2158 1.1 hikaru printf("\tid %d name %s msg size %d ring size %d\n",
2159 1.1 hikaru qr->qr_ring_id, qr->qr_name,
2160 1.1 hikaru QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
2161 1.1 hikaru qr->qr_ring_size);
2162 1.1 hikaru printf("\thost head 0x%08x tail 0x%08x\n", qr->qr_head, qr->qr_tail);
2163 1.1 hikaru printf("\ttarget head 0x%08x tail 0x%08x\n",
2164 1.1 hikaru qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
2165 1.1 hikaru ETR_RING_HEAD_OFFSET),
2166 1.1 hikaru qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
2167 1.1 hikaru ETR_RING_TAIL_OFFSET));
2168 1.1 hikaru
2169 1.1 hikaru printf("\n");
2170 1.1 hikaru i = 0;
2171 1.1 hikaru offset = 0;
2172 1.1 hikaru do {
2173 1.1 hikaru if (i % 8 == 0)
2174 1.1 hikaru printf("%8x:", offset);
2175 1.1 hikaru
2176 1.1 hikaru if (offset == qr->qr_head) {
2177 1.1 hikaru printf("*");
2178 1.1 hikaru } else if (offset == qr->qr_tail) {
2179 1.1 hikaru printf("v");
2180 1.1 hikaru } else {
2181 1.1 hikaru printf(" ");
2182 1.1 hikaru }
2183 1.1 hikaru
2184 1.1 hikaru msg = *(uint32_t *)((uintptr_t)qr->qr_ring_vaddr + offset);
2185 1.1 hikaru printf("%08x", htobe32(msg));
2186 1.1 hikaru
2187 1.1 hikaru if (i % 8 == 7)
2188 1.1 hikaru printf("\n");
2189 1.1 hikaru
2190 1.1 hikaru i++;
2191 1.1 hikaru offset = qat_modulo(offset +
2192 1.1 hikaru QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
2193 1.1 hikaru QAT_RING_SIZE_MODULO(qr->qr_ring_size));
2194 1.1 hikaru } while (offset != 0);
2195 1.1 hikaru }
2196 1.1 hikaru
2197 1.1 hikaru void
2198 1.1 hikaru qat_dump_mbuf(struct mbuf *m0, int pre, int post)
2199 1.1 hikaru {
2200 1.1 hikaru struct mbuf *m;
2201 1.1 hikaru
2202 1.1 hikaru for (m = m0; m != NULL; m = m->m_next) {
2203 1.1 hikaru size_t pos, len;
2204 1.1 hikaru uint8_t *buf_start, *data_start, *data_end, *buf_end;
2205 1.1 hikaru uint8_t *start, *end, *dp;
2206 1.1 hikaru bool skip_ind;
2207 1.1 hikaru const char *ind;
2208 1.1 hikaru
2209 1.1 hikaru printf("dumping mbuf %p len %d flags 0x%08x\n",
2210 1.1 hikaru m, m->m_len, m->m_flags);
2211 1.1 hikaru if (m->m_len == 0)
2212 1.1 hikaru continue;
2213 1.1 hikaru
2214 1.1 hikaru data_start = (uint8_t *)m->m_data;
2215 1.1 hikaru data_end = data_start + m->m_len;
2216 1.1 hikaru switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
2217 1.1 hikaru case 0:
2218 1.1 hikaru buf_start = (uint8_t *)M_BUFADDR(m);
2219 1.1 hikaru buf_end = buf_start +
2220 1.1 hikaru ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
2221 1.1 hikaru break;
2222 1.1 hikaru case M_EXT|M_EXT_CLUSTER:
2223 1.1 hikaru buf_start = (uint8_t *)m->m_ext.ext_buf;
2224 1.1 hikaru 			buf_end = buf_start + m->m_ext.ext_size;
2225 1.1 hikaru break;
2226 1.1 hikaru default:
2227 1.1 hikaru /* XXX */
2228 1.1 hikaru buf_start = data_start;
2229 1.1 hikaru buf_end = data_end;
2230 1.1 hikaru break;
2231 1.1 hikaru }
2232 1.1 hikaru
2233 1.1 hikaru start = data_start - pre;
2234 1.1 hikaru if (start < buf_start)
2235 1.1 hikaru start = buf_start;
2236 1.1 hikaru end = data_end + post;
2237 1.1 hikaru if (end > buf_end)
2238 1.1 hikaru end = buf_end;
2239 1.1 hikaru
2240 1.1 hikaru dp = start;
2241 1.1 hikaru len = (size_t)(end - start);
2242 1.1 hikaru skip_ind = false;
2243 1.1 hikaru for (pos = 0; pos < len; pos++) {
2244 1.1 hikaru
2245 1.1 hikaru if (skip_ind)
2246 1.1 hikaru ind = "";
2247 1.1 hikaru else if (&dp[pos] == data_start)
2248 1.1 hikaru ind = "`";
2249 1.1 hikaru else
2250 1.1 hikaru ind = " ";
2251 1.1 hikaru
2252 1.1 hikaru if (pos % 32 == 0)
2253 1.1 hikaru printf("%8zx:%s", pos, ind);
2254 1.1 hikaru else if (pos % 2 == 0)
2255 1.1 hikaru printf("%s", ind);
2256 1.1 hikaru
2257 1.1 hikaru printf("%02x", dp[pos]);
2258 1.1 hikaru
2259 1.1 hikaru skip_ind = false;
2260 1.1 hikaru if (&dp[pos + 1] == data_end) {
2261 1.1 hikaru skip_ind = true;
2262 1.1 hikaru printf("'");
2263 1.1 hikaru }
2264 1.1 hikaru
2265 1.1 hikaru if (pos % 32 == 31 || pos + 1 == len) {
2266 1.1 hikaru printf("\n");
2267 1.1 hikaru skip_ind = false;
2268 1.1 hikaru }
2269 1.1 hikaru }
2270 1.1 hikaru }
2271 1.1 hikaru }
2272 1.1 hikaru
2273 1.1 hikaru #endif /* QAT_DUMP */
2274 1.1 hikaru
2275 1.1 hikaru MODULE(MODULE_CLASS_DRIVER, qat, "pci,opencrypto");
2276 1.1 hikaru
2277 1.1 hikaru #ifdef _MODULE
2278 1.1 hikaru #include "ioconf.c"
2279 1.1 hikaru #endif
2280 1.1 hikaru
2281 1.1 hikaru int
2282 1.1 hikaru qat_modcmd(modcmd_t cmd, void *data)
2283 1.1 hikaru {
2284 1.1 hikaru int error = 0;
2285 1.1 hikaru
2286 1.1 hikaru switch (cmd) {
2287 1.1 hikaru case MODULE_CMD_INIT:
2288 1.1 hikaru #ifdef _MODULE
2289 1.1 hikaru error = config_init_component(cfdriver_ioconf_qat,
2290 1.1 hikaru cfattach_ioconf_qat, cfdata_ioconf_qat);
2291 1.1 hikaru #endif
2292 1.1 hikaru return error;
2293 1.1 hikaru case MODULE_CMD_FINI:
2294 1.1 hikaru #ifdef _MODULE
2295 1.1 hikaru error = config_fini_component(cfdriver_ioconf_qat,
2296 1.1 hikaru cfattach_ioconf_qat, cfdata_ioconf_qat);
2297 1.1 hikaru #endif
2298 1.1 hikaru return error;
2299 1.1 hikaru default:
2300 1.1 hikaru return ENOTTY;
2301 1.1 hikaru }
2302 1.1 hikaru }
2303