      1 /*	$NetBSD: qat.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2019 Internet Initiative Japan, Inc.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  * POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 /*
     30  *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
     31  *
     32  *   Redistribution and use in source and binary forms, with or without
     33  *   modification, are permitted provided that the following conditions
     34  *   are met:
     35  *
     36  *     * Redistributions of source code must retain the above copyright
     37  *       notice, this list of conditions and the following disclaimer.
     38  *     * Redistributions in binary form must reproduce the above copyright
     39  *       notice, this list of conditions and the following disclaimer in
     40  *       the documentation and/or other materials provided with the
     41  *       distribution.
     42  *     * Neither the name of Intel Corporation nor the names of its
     43  *       contributors may be used to endorse or promote products derived
     44  *       from this software without specific prior written permission.
     45  *
     46  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     47  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     48  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     49  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     50  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     51  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     52  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     53  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     54  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     55  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     56  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     57  */
     58 
     59 #include <sys/cdefs.h>
     60 __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
     61 
     62 #include <sys/param.h>
     63 #include <sys/systm.h>
     64 #include <sys/kernel.h>
     65 #include <sys/device.h>
     66 #include <sys/module.h>
     67 #include <sys/kmem.h>
     68 #include <sys/mutex.h>
     69 #include <sys/bitops.h>
     70 #include <sys/atomic.h>
     71 #include <sys/mbuf.h>
     72 #include <sys/cprng.h>
     73 #include <sys/cpu.h>
     74 #include <sys/interrupt.h>
     75 #include <sys/md5.h>
     76 #include <sys/sha1.h>
     77 #include <sys/sha2.h>
     78 
     79 #include <opencrypto/cryptodev.h>
     80 #include <opencrypto/cryptosoft.h>
     81 #include <opencrypto/xform.h>
     82 
     83 /* XXX same as sys/arch/x86/x86/via_padlock.c */
     84 #include <opencrypto/cryptosoft_xform.c>
     85 
     86 #include <dev/pci/pcireg.h>
     87 #include <dev/pci/pcivar.h>
     88 #include <dev/pci/pcidevs.h>
     89 
     90 #include "qatreg.h"
     91 #include "qatvar.h"
     92 #include "qat_aevar.h"
     93 
     94 extern struct qat_hw qat_hw_c2xxx;
     95 extern struct qat_hw qat_hw_c3xxx;
     96 extern struct qat_hw qat_hw_c62x;
     97 extern struct qat_hw qat_hw_d15xx;
     98 
     99 static const struct qat_product {
    100 	pci_vendor_id_t qatp_vendor;
    101 	pci_product_id_t qatp_product;
    102 	const char *qatp_name;
    103 	enum qat_chip_type qatp_chip;
    104 	const struct qat_hw *qatp_hw;
    105 } qat_products[] = {
    106 
    107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
    108 	  "Intel C2000 QuickAssist Physical Function",
    109 	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },
    110 
    111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C3K_QAT,
    112 	  "Intel C3000 QuickAssist Physical Function",
    113 	  QAT_CHIP_C3XXX, &qat_hw_c3xxx },
    114 #ifdef notyet
    115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C3K_QAT_VF,
    116 	  "Intel C3000 QuickAssist Virtual Function",
    117 	  QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
    118 #endif
    119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C620_QAT,
    120 	  "Intel C620/Xeon D-2100 QuickAssist Physical Function",
    121 	  QAT_CHIP_C62X, &qat_hw_c62x },
    122 #ifdef notyet
    123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C620_QAT_VF,
    124 	  "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
    125 	  QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
    126 #endif
    127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XEOND_QAT,
    128 	  "Intel Xeon D-1500 QuickAssist Physical Function",
    129 	  QAT_CHIP_D15XX, &qat_hw_d15xx },
    130 #ifdef notyet
    131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XEOND_QAT_VF,
    132 	  "Intel Xeon D-1500 QuickAssist Virtual Function",
    133 	  QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
    134 #endif
    135 	{ 0, 0, NULL, 0, NULL },
    136 };
    137 
    138 /* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */
    139 static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
    140 	0x01, 0x23, 0x45, 0x67,
    141 	0x89, 0xab, 0xcd, 0xef,
    142 	0xfe, 0xdc, 0xba, 0x98,
    143 	0x76, 0x54, 0x32, 0x10,
    144 };
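
        /*
         * Byte-order note: the words above are RFC 1321's A=0x67452301,
         * B=0xefcdab89, C=0x98badcfe, D=0x10325476 stored little-endian
         * (A becomes 01 23 45 67).  The SHA initial states below store
         * their FIPS 180-2 H words big-endian instead, so SHA-1's
         * H0=0x67452301 appears as 67 45 23 01.
         */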
    145 
    146 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
    147 static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
    148 	0x67, 0x45, 0x23, 0x01,
    149 	0xef, 0xcd, 0xab, 0x89,
    150 	0x98, 0xba, 0xdc, 0xfe,
    151 	0x10, 0x32, 0x54, 0x76,
    152 	0xc3, 0xd2, 0xe1, 0xf0
    153 };
    154 
    155 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
    156 static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
    157 	0x6a, 0x09, 0xe6, 0x67,
    158 	0xbb, 0x67, 0xae, 0x85,
    159 	0x3c, 0x6e, 0xf3, 0x72,
    160 	0xa5, 0x4f, 0xf5, 0x3a,
    161 	0x51, 0x0e, 0x52, 0x7f,
    162 	0x9b, 0x05, 0x68, 0x8c,
    163 	0x1f, 0x83, 0xd9, 0xab,
    164 	0x5b, 0xe0, 0xcd, 0x19
    165 };
    166 
    167 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
    168 static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
    169 	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
    170 	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
    171 	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
    172 	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
    173 	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
    174 	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
    175 	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
    176 	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
    177 };
    178 
    179 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
    180 static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
    181 	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
    182 	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
    183 	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
    184 	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
    185 	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
    186 	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
    187 	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
    188 	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
    189 };
    190 
    191 /* Hash algorithm specific structures */
    192 
    193 static const struct qat_sym_hash_alg_info md5_info = {
    194 	QAT_HASH_MD5_DIGEST_SIZE,
    195 	QAT_HASH_MD5_BLOCK_SIZE,
    196 	md5_initial_state,
    197 	QAT_HASH_MD5_STATE_SIZE,
    198 	&swcr_auth_hash_hmac_md5_96,
    199 	offsetof(MD5_CTX, state),
    200 	4,
    201 };
    202 
    203 static const struct qat_sym_hash_alg_info sha1_info = {
    204 	QAT_HASH_SHA1_DIGEST_SIZE,
    205 	QAT_HASH_SHA1_BLOCK_SIZE,
    206 	sha1_initial_state,
    207 	QAT_HASH_SHA1_STATE_SIZE,
    208 	&swcr_auth_hash_hmac_sha1_96,
    209 	offsetof(SHA1_CTX, state),
    210 	4,
    211 };
    212 
    213 static const struct qat_sym_hash_alg_info sha256_info = {
    214 	QAT_HASH_SHA256_DIGEST_SIZE,
    215 	QAT_HASH_SHA256_BLOCK_SIZE,
    216 	sha256_initial_state,
    217 	QAT_HASH_SHA256_STATE_SIZE,
    218 	&swcr_auth_hash_hmac_sha2_256,
    219 	offsetof(SHA256_CTX, state),
    220 	4,
    221 };
    222 
    223 static const struct qat_sym_hash_alg_info sha384_info = {
    224 	QAT_HASH_SHA384_DIGEST_SIZE,
    225 	QAT_HASH_SHA384_BLOCK_SIZE,
    226 	sha384_initial_state,
    227 	QAT_HASH_SHA384_STATE_SIZE,
    228 	&swcr_auth_hash_hmac_sha2_384,
    229 	offsetof(SHA384_CTX, state),
    230 	8,
    231 };
    232 
    233 static const struct qat_sym_hash_alg_info sha512_info = {
    234 	QAT_HASH_SHA512_DIGEST_SIZE,
    235 	QAT_HASH_SHA512_BLOCK_SIZE,
    236 	sha512_initial_state,
    237 	QAT_HASH_SHA512_STATE_SIZE,
    238 	&swcr_auth_hash_hmac_sha2_512,
    239 	offsetof(SHA512_CTX, state),
    240 	8,
    241 };
    242 
    243 static const struct qat_sym_hash_alg_info aes_gcm_info = {
    244 	QAT_HASH_AES_GCM_DIGEST_SIZE,
    245 	QAT_HASH_AES_GCM_BLOCK_SIZE,
    246 	NULL, 0,
    247 	NULL, 0, 0, /* XXX */
    248 };
    249 
    250 /* Hash QAT specific structures */
    251 
    252 static const struct qat_sym_hash_qat_info md5_config = {
    253 	HW_AUTH_ALGO_MD5,
    254 	QAT_HASH_MD5_BLOCK_SIZE,
    255 	HW_MD5_STATE1_SZ,
    256 	HW_MD5_STATE2_SZ
    257 };
    258 
    259 static const struct qat_sym_hash_qat_info sha1_config = {
    260 	HW_AUTH_ALGO_SHA1,
    261 	QAT_HASH_SHA1_BLOCK_SIZE,
    262 	HW_SHA1_STATE1_SZ,
    263 	HW_SHA1_STATE2_SZ
    264 };
    265 
    266 static const struct qat_sym_hash_qat_info sha256_config = {
    267 	HW_AUTH_ALGO_SHA256,
    268 	QAT_HASH_SHA256_BLOCK_SIZE,
    269 	HW_SHA256_STATE1_SZ,
    270 	HW_SHA256_STATE2_SZ
    271 };
    272 
    273 static const struct qat_sym_hash_qat_info sha384_config = {
    274 	HW_AUTH_ALGO_SHA384,
    275 	QAT_HASH_SHA384_BLOCK_SIZE,
    276 	HW_SHA384_STATE1_SZ,
    277 	HW_SHA384_STATE2_SZ
    278 };
    279 
    280 static const struct qat_sym_hash_qat_info sha512_config = {
    281 	HW_AUTH_ALGO_SHA512,
    282 	QAT_HASH_SHA512_BLOCK_SIZE,
    283 	HW_SHA512_STATE1_SZ,
    284 	HW_SHA512_STATE2_SZ
    285 };
    286 
    287 static const struct qat_sym_hash_qat_info aes_gcm_config = {
    288 	HW_AUTH_ALGO_GALOIS_128,
    289 	0,
    290 	HW_GALOIS_128_STATE1_SZ,
    291 	HW_GALOIS_H_SZ +
    292 	HW_GALOIS_LEN_A_SZ +
    293 	HW_GALOIS_E_CTR0_SZ
    294 };
    295 
    296 static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
    297 	[QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
    298 	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
    299 	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
    300 	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
    301 	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
    302 	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
    303 };
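
        /*
         * Usage sketch for the table above (illustrative only; the real
         * consumer is qat_crypto_load_auth_cryptoini() below):
         *
         *	const struct qat_sym_hash_def *def =
         *	    &qat_sym_hash_defs[QAT_SYM_HASH_SHA256];
         *	def->qshd_alg->qshai_digest_len;  // QAT_HASH_SHA256_DIGEST_SIZE
         *	def->qshd_qat->qshqi_algo_enc;    // HW_AUTH_ALGO_SHA256
         */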
    304 
    305 const struct qat_product *
    306 		qat_lookup(const struct pci_attach_args *);
    307 int		qat_match(struct device *, struct cfdata *, void *);
    308 void		qat_attach(struct device *, struct device *, void *);
    309 void		qat_init(struct device *);
    310 int		qat_start(struct device *);
    311 int		qat_detach(struct device *, int);
    312 
    313 int		qat_alloc_msix_intr(struct qat_softc *,
    314 		    struct pci_attach_args *);
    315 void *		qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
    316 			int (*)(void *), void *, const char *, int);
    317 int		qat_setup_msix_intr(struct qat_softc *);
    318 
    319 int		qat_etr_init(struct qat_softc *);
    320 int		qat_etr_bank_init(struct qat_softc *, int);
    321 
    322 int		qat_etr_ap_bank_init(struct qat_softc *);
    323 void		qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
    324 void		qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
    325 		    uint32_t, int);
    326 void		qat_etr_ap_bank_setup_ring(struct qat_softc *,
    327 		    struct qat_ring *);
    328 int		qat_etr_verify_ring_size(uint32_t, uint32_t);
    329 
    330 int		qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
    331 		    struct qat_ring *);
    332 int		qat_etr_bank_intr(void *);
    333 
    334 void		qat_arb_update(struct qat_softc *, struct qat_bank *);
    335 
    336 struct qat_sym_cookie *
    337 		qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
    338 void		qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
    339 		    struct qat_sym_cookie *);
    340 int		qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
    341 		    struct qat_sym_cookie *, struct qat_crypto_desc const *,
    342 		    uint8_t *, int, bus_addr_t *);
    343 int		qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
    344 		    struct cryptodesc *, struct qat_crypto_desc const *);
    345 int		qat_crypto_process(void *, struct cryptop *, int);
    346 int		qat_crypto_setup_ring(struct qat_softc *,
    347 		    struct qat_crypto_bank *);
    348 int		qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
    349 int		qat_crypto_free_session0(struct qat_crypto *,
    350 		    struct qat_session *);
    351 void		qat_crypto_check_free_session(struct qat_crypto *,
    352 		    struct qat_session *);
    353 int		qat_crypto_free_session(void *, uint64_t);
    354 int		qat_crypto_bank_init(struct qat_softc *,
    355 		    struct qat_crypto_bank *);
    356 int		qat_crypto_init(struct qat_softc *);
    357 int		qat_crypto_start(struct qat_softc *);
    358 int		qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
    359 
    360 CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
    361     qat_match, qat_attach, qat_detach, NULL);
    362 
    363 struct qat_softc *gsc = NULL;
    364 
    365 #ifdef QAT_DUMP
    366 int qat_dump = QAT_DUMP;
    367 #endif
    368 
    369 const struct qat_product *
    370 qat_lookup(const struct pci_attach_args *pa)
    371 {
    372 	const struct qat_product *qatp;
    373 
    374 	for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
    375 		if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
    376 		    PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
    377 			return qatp;
    378 	}
    379 	return NULL;
    380 }
    381 
    382 int
    383 qat_match(struct device *parent, struct cfdata *cf, void *aux)
    384 {
    385 	struct pci_attach_args *pa = aux;
    386 
    387 	if (qat_lookup(pa) != NULL)
    388 		return 1;
    389 
    390 	return 0;
    391 }
    392 
    393 void
    394 qat_attach(struct device *parent, struct device *self, void *aux)
    395 {
    396 	struct qat_softc *sc = device_private(self);
    397 	struct pci_attach_args *pa = aux;
    398 	pci_chipset_tag_t pc = pa->pa_pc;
    399 	const struct qat_product *qatp;
    400 	char cap[256];
    401 	pcireg_t cmd, memtype, msixoff, fusectl;
    402 	bus_size_t msixtbl_offset;
    403 	int i, bar, msixtbl_bar;
    404 
    405 	sc->sc_dev = self;
    406 	sc->sc_pc = pc;
    407 	sc->sc_pcitag = pa->pa_tag;
    408 
    409 	gsc = sc; /* for debug */
    410 
    411 	qatp = qat_lookup(pa);
    412 	KASSERT(qatp != NULL);
    413 
    414 	if (pci_dma64_available(pa))
    415 		sc->sc_dmat = pa->pa_dmat64;
    416 	else
    417 		sc->sc_dmat = pa->pa_dmat;
    418 
    419 	aprint_naive(": Crypto processor\n");
    420 	sc->sc_rev = PCI_REVISION(pa->pa_class);
    421 	aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);
    422 
    423 	memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
    424 
    425 	/* Determine active accelerators and engines */
    426 	sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
    427 	sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
    428 
    429 	sc->sc_accel_num = 0;
    430 	for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
    431 		if (sc->sc_accel_mask & (1 << i))
    432 			sc->sc_accel_num++;
    433 	}
    434 	sc->sc_ae_num = 0;
    435 	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
    436 		if (sc->sc_ae_mask & (1 << i)) {
    437 			sc->sc_ae_num++;
    438 		}
    439 	}
    440 
    441 	if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
    442 		aprint_error_dev(sc->sc_dev, "couldn't find acceleration\n");
    443 		goto fail;
    444 	}
    445 
    446 	KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
    447 	KASSERT(sc->sc_ae_num <= MAX_NUM_AE);
    448 
    449 	/* Determine SKU and capabilities */
    450 	sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
    451 	sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
    452 	sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
    453 
    454 	aprint_normal_dev(sc->sc_dev,
    455 	    "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
    456 	    sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
    457 	    sc->sc_ae_num, sc->sc_ae_mask);
    458 	snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
    459 	aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);
    460 
    461 	/* Map BARs */
    462 
    463 	msixtbl_bar = 0;
    464 	msixtbl_offset = 0;
    465 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
    466 		pcireg_t msixtbl;
    467 		msixtbl = pci_conf_read(pc, pa->pa_tag,
    468 		    msixoff + PCI_MSIX_TBLOFFSET);
    469 		msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
    470 		msixtbl_bar = PCI_MAPREG_START +
    471 		    ((msixtbl & PCI_MSIX_PBABIR_MASK) << 2);
    472 	}
    473 
    474 	i = 0;
    475 	if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
    476 		KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
    477 		fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
    478 		/* Skip SRAM BAR */
    479 		i = (fusectl & FUSECTL_MASK) ? 1 : 0;
    480 	}
    481 	for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
    482 		bus_size_t size;
    483 		bus_addr_t addr;
    484 
    485 		if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
    486 			continue;
    487 
    488 		if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
    489 			continue;
    490 
    491 		/* MSI-X table will be mapped by pci_msix_alloc_map */
    492 		if (bar == msixtbl_bar)
    493 			size = msixtbl_offset;
    494 		else
    495 			size = 0;
    496 
    497 		if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
    498 		    &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
    499 			aprint_error_dev(sc->sc_dev,
    500 			    "couldn't map bar 0x%02x\n", bar);
    501 			goto fail;
    502 		}
    503 
    504 		aprint_verbose_dev(sc->sc_dev,
    505 		    "region #%d bar 0x%02x size 0x%x at 0x%llx"
    506 		    " mapped to %p\n", i, bar,
    507 		    (int)sc->sc_csrs[i], (unsigned long long)addr,
    508 		    bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));
    509 
    510 		i++;
    511 		if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
    512 			bar += 4;
    513 	}
    514 
    515 	/* XXX Enable advanced error reporting */
    516 
    517 	/* Enable bus mastering */
    518 	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    519 	cmd |= PCI_COMMAND_MASTER_ENABLE;
    520 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
    521 
    522 	if (qat_alloc_msix_intr(sc, pa))
    523 		goto fail;
    524 
    525 	config_mountroot(self, qat_init);
    526 
    527 fail:
    528 	/* XXX */
    529 	return;
    530 }
    531 
    532 void
    533 qat_init(struct device *self)
    534 {
    535 	int error;
    536 	struct qat_softc *sc = device_private(self);
    537 
    538 	aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
    539 	error = qat_etr_init(sc);
    540 	if (error) {
    541 		aprint_error_dev(sc->sc_dev,
    542 		    "Could not initialize ETR: %d\n", error);
    543 		return;
    544 	}
    545 
    546 	aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
    547 	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
    548 	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
    549 		aprint_error_dev(sc->sc_dev,
    550 		    "Could not initialize admin comms: %d\n", error);
    551 		return;
    552 	}
    553 
    554 	aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
    555 	if (sc->sc_hw.qhw_init_arb != NULL &&
    556 	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
    557 		aprint_error_dev(sc->sc_dev,
    558 		    "Could not initialize hw arbiter: %d\n", error);
    559 		return;
    560 	}
    561 
    562 	aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
    563 	error = qat_ae_init(sc);
    564 	if (error) {
    565 		aprint_error_dev(sc->sc_dev,
    566 		    "Could not initialize Acceleration Engine: %d\n", error);
    567 		return;
    568 	}
    569 
    570 	aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
    571 	error = qat_aefw_load(sc);
    572 	if (error) {
    573 		aprint_error_dev(sc->sc_dev,
    574 		    "Could not load firmware: %d\n", error);
    575 		return;
    576 	}
    577 
    578 	aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
    579 	error = qat_setup_msix_intr(sc);
    580 	if (error) {
    581 		aprint_error_dev(sc->sc_dev,
    582 		    "Could not setup interrupts: %d\n", error);
    583 		return;
    584 	}
    585 
    586 	sc->sc_hw.qhw_enable_intr(sc);
    587 
    588 	error = qat_crypto_init(sc);
    589 	if (error) {
    590 		aprint_error_dev(sc->sc_dev,
    591 		    "Could not initialize service: %d\n", error);
    592 		return;
    593 	}
    594 
    595 	aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
    596 	if (sc->sc_hw.qhw_enable_error_correction != NULL)
    597 		sc->sc_hw.qhw_enable_error_correction(sc);
    598 
    599 	aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
    600 	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
    601 	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
    602 		aprint_error_dev(sc->sc_dev,
    603 		    "Could not initialize watchdog timer: %d\n", error);
    604 		return;
    605 	}
    606 
    607 	error = qat_start(self);
    608 	if (error) {
    609 		aprint_error_dev(sc->sc_dev,
    610 		    "Could not start: %d\n", error);
    611 		return;
    612 	}
    613 }
    614 
    615 int
    616 qat_start(struct device *self)
    617 {
    618 	struct qat_softc *sc = device_private(self);
    619 	int error;
    620 
    621 	error = qat_ae_start(sc);
    622 	if (error)
    623 		return error;
    624 
    625 	if (sc->sc_hw.qhw_send_admin_init != NULL &&
    626 	    (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
    627 		return error;
    628 	}
    629 
    630 	error = qat_crypto_start(sc);
    631 	if (error)
    632 		return error;
    633 
    634 	return 0;
    635 }
    636 
    637 int
    638 qat_detach(struct device *self, int flags)
    639 {
    640 
    641 	return 0;
    642 }
    643 
    644 void *
    645 qat_alloc_mem(size_t size)
    646 {
    647 	size_t *sptr;
    648 	sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
    649 	*sptr = size;
    650 	return ++sptr;
    651 }
    652 
    653 void
    654 qat_free_mem(void *ptr)
    655 {
    656 	size_t *sptr = ptr, size;
    657 	size = *(--sptr);
    658 	kmem_free(sptr, size + sizeof(size));
    659 }
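
        /*
         * qat_alloc_mem() stores the allocation size immediately before
         * the buffer it hands out, so qat_free_mem() can recover the
         * original kmem allocation without separate bookkeeping:
         *
         *	+--------+-----------------------+
         *	| size_t | size bytes for caller |
         *	+--------+-----------------------+
         *	^kmem     ^returned pointer
         */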
    660 
    661 void
    662 qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
    663 {
    664 
    665 	bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
    666 	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
    667 	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
    668 	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
    669 	explicit_memset(qdm, 0, sizeof(*qdm));
    670 }
    671 
    672 int
    673 qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
    674 	bus_size_t size, bus_size_t alignment)
    675 {
    676 	int error = 0, nseg;
    677 
    678 	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
    679 	    0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
    680 	if (error) {
    681 		aprint_error_dev(sc->sc_dev,
    682 		    "couldn't allocate dmamem, error = %d\n", error);
    683 		goto fail_0;
    684 	}
    685 	KASSERT(nseg == 1);
    686 	error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
    687 	    nseg, size, &qdm->qdm_dma_vaddr,
    688 	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
    689 	if (error) {
    690 		aprint_error_dev(sc->sc_dev,
    691 		    "couldn't map dmamem, error = %d\n", error);
    692 		goto fail_1;
    693 	}
    694 	qdm->qdm_dma_size = size;
    695 	error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
    696 	    0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
    697 	if (error) {
    698 		aprint_error_dev(sc->sc_dev,
    699 		    "couldn't create dmamem map, error = %d\n", error);
    700 		goto fail_2;
    701 	}
    702 	error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
    703 	    qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
    704 	if (error) {
    705 		aprint_error_dev(sc->sc_dev,
    706 		    "couldn't load dmamem map, error = %d\n", error);
    707 		goto fail_3;
    708 	}
    709 
    710 	return 0;
    711 fail_3:
    712 	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
    713 	qdm->qdm_dma_map = NULL;
    714 fail_2:
    715 	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
    716 	qdm->qdm_dma_vaddr = NULL;
    717 	qdm->qdm_dma_size = 0;
    718 fail_1:
    719 	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
    720 fail_0:
    721 	return error;
    722 }
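
        /*
         * The fail_* labels above unwind in reverse order of setup
         * (load -> create -> map -> alloc), releasing only what was
         * established before the failure; qat_free_dmamem() runs the
         * same four steps in reverse for a fully constructed object.
         */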
    723 
    724 int
    725 qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
    726 {
    727 	u_int *ih_map, vec;
    728 	int error, count, ihi;
    729 
    730 	count = sc->sc_hw.qhw_num_banks + 1;
    731 	ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
    732 	ihi = 0;
    733 
    734 	for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
    735 		ih_map[ihi++] = vec;
    736 
    737 	vec += sc->sc_hw.qhw_msix_ae_vec_gap;
    738 	ih_map[ihi++] = vec;
    739 
    740 	error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
    741 	qat_free_mem(ih_map);
    742 	if (error) {
    743 		aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
    744 		    count, error);
    745 	}
    746 
    747 	return error;
    748 }
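
        /*
         * Vector map sketch with hypothetical numbers: for
         * qhw_num_banks = 8 and qhw_msix_ae_vec_gap = 8, ih_map becomes
         * {0, 1, ..., 7, 16}: one MSI-X vector per ETR bank, then one
         * vector for the AE cluster after the hardware's reserved gap.
         */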
    749 
    750 void *
    751 qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
    752 	int (*func)(void *), void *arg,
    753 	const char *name, int index)
    754 {
    755 	kcpuset_t *affinity;
    756 	int error;
    757 	char buf[PCI_INTRSTR_LEN];
    758 	char intrxname[INTRDEVNAMEBUF];
    759 	const char *intrstr;
    760 	void *cookie;
    761 
    762 	snprintf(intrxname, sizeof(intrxname), "%s%s%d",
    763 	    device_xname(sc->sc_dev), name, index);
    764 
    765 	intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));
    766 
    767 	pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);
    768 
    769 	cookie = pci_intr_establish_xname(sc->sc_pc, ih,
    770 	    IPL_NET, func, arg, intrxname);
    771 
    772 	aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
    773 	    name, index, intrstr);
    774 
    775 	kcpuset_create(&affinity, true);
    776 	kcpuset_set(affinity, index % ncpu);
    777 	error = interrupt_distribute(cookie, affinity, NULL);
    778 	if (error) {
    779 		aprint_error_dev(sc->sc_dev,
    780 		    "couldn't distribute interrupt: %s%d\n", name, index);
    781 	}
    782 	kcpuset_destroy(affinity);
    783 
    784 	return cookie;
    785 }
    786 
    787 int
    788 qat_setup_msix_intr(struct qat_softc *sc)
    789 {
    790 	int i;
    791 	pci_intr_handle_t ih;
    792 
    793 	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
    794 		struct qat_bank *qb = &sc->sc_etr_banks[i];
    795 		ih = sc->sc_ih[i];
    796 
    797 		qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
    798 		    qat_etr_bank_intr, qb, "bank", i);
    799 		if (qb->qb_ih_cookie == NULL)
    800 			return ENOMEM;
    801 	}
    802 
    803 	sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
    804 	    qat_ae_cluster_intr, sc, "aeclust", 0);
    805 	if (sc->sc_ae_ih_cookie == NULL)
    806 		return ENOMEM;
    807 
    808 	return 0;
    809 }
    810 
    811 int
    812 qat_etr_init(struct qat_softc *sc)
    813 {
    814 	int i;
    815 	int error = 0;
    816 
    817 	sc->sc_etr_banks = qat_alloc_mem(
    818 	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
    819 
    820 	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
    821 		error = qat_etr_bank_init(sc, i);
    822 		if (error) {
    823 			goto fail;
    824 		}
    825 	}
    826 
    827 	if (sc->sc_hw.qhw_num_ap_banks) {
    828 		sc->sc_etr_ap_banks = qat_alloc_mem(
    829 		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
    830 		error = qat_etr_ap_bank_init(sc);
    831 		if (error) {
    832 			goto fail;
    833 		}
    834 	}
    835 
    836 	return 0;
    837 
    838 fail:
    839 	if (sc->sc_etr_banks != NULL) {
    840 		qat_free_mem(sc->sc_etr_banks);
    841 		sc->sc_etr_banks = NULL;
    842 	}
    843 	if (sc->sc_etr_ap_banks != NULL) {
    844 		qat_free_mem(sc->sc_etr_ap_banks);
    845 		sc->sc_etr_ap_banks = NULL;
    846 	}
    847 	return error;
    848 }
    849 
    850 int
    851 qat_etr_bank_init(struct qat_softc *sc, int bank)
    852 {
    853 	struct qat_bank *qb = &sc->sc_etr_banks[bank];
    854 	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
    855 
    856 	KASSERT(bank < sc->sc_hw.qhw_num_banks);
    857 
    858 	mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
    859 
    860 	qb->qb_sc = sc;
    861 	qb->qb_bank = bank;
    862 	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
    863 	QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
    864 	    qb->qb_ev_rxintr_name, "bank%d rxintr", bank);
    865 
    866 	/* Clean CSRs for all rings within the bank */
    867 	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
    868 		struct qat_ring *qr = &qb->qb_et_rings[i];
    869 
    870 		qat_etr_bank_ring_write_4(sc, bank, i,
    871 		    ETR_RING_CONFIG, 0);
    872 		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
    873 
    874 		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
    875 			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
    876 		} else if (sc->sc_hw.qhw_tx_rings_mask &
    877 		    (1 << (i - tx_rx_gap))) {
    878 			/* Share inflight counter with rx and tx */
    879 			qr->qr_inflight =
    880 			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
    881 		}
    882 	}
    883 
    884 	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
    885 		sc->sc_hw.qhw_init_etr_intr(sc, bank);
    886 	} else {
    887 		/* common code in QAT 1.7 */
    888 		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
    889 		    ETR_INT_REG_CLEAR_MASK);
    890 		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
    891 		    ETR_RINGS_PER_INT_SRCSEL; i++) {
    892 			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
    893 			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
    894 			    ETR_INT_SRCSEL_MASK);
    895 		}
    896 	}
    897 
    898 	return 0;
    899 }
    900 
    901 int
    902 qat_etr_ap_bank_init(struct qat_softc *sc)
    903 {
    904 	int ap_bank;
    905 
    906 	for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
    907 		struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
    908 
    909 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
    910 		    ETR_AP_NF_MASK_INIT);
    911 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
    912 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
    913 		    ETR_AP_NE_MASK_INIT);
    914 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
    915 
    916 		memset(qab, 0, sizeof(*qab));
    917 	}
    918 
    919 	return 0;
    920 }
    921 
    922 void
    923 qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
    924 {
    925 	if (set_mask)
    926 		*ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
    927 	else
    928 		*ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
    929 }
    930 
    931 void
    932 qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
    933 	uint32_t ring, int set_dest)
    934 {
    935 	uint32_t ae_mask;
    936 	uint8_t mailbox, ae, nae;
    937 	uint8_t *dest = (uint8_t *)ap_dest;
    938 
    939 	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
    940 
    941 	nae = 0;
    942 	ae_mask = sc->sc_ae_mask;
    943 	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
    944 		if ((ae_mask & (1 << ae)) == 0)
    945 			continue;
    946 
    947 		if (set_dest) {
    948 			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
    949 			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
    950 			    ETR_AP_DEST_ENABLE;
    951 		} else {
    952 			dest[nae] = 0;
    953 		}
    954 		nae++;
    955 		if (nae == ETR_MAX_AE_PER_MAILBOX)
    956 			break;
    957 
    958 	}
    959 }
    960 
    961 void
    962 qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
    963 {
    964 	struct qat_ap_bank *qab;
    965 	int ap_bank;
    966 
    967 	if (sc->sc_hw.qhw_num_ap_banks == 0)
    968 		return;
    969 
    970 	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
    971 	KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
    972 	qab = &sc->sc_etr_ap_banks[ap_bank];
    973 
    974 	if (qr->qr_cb == NULL) {
    975 		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
    976 		if (!qab->qab_ne_dest) {
    977 			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
    978 			    qr->qr_ring, 1);
    979 			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
    980 			    qab->qab_ne_dest);
    981 		}
    982 	} else {
    983 		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
    984 		if (!qab->qab_nf_dest) {
    985 			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
    986 			    qr->qr_ring, 1);
    987 			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
    988 			    qab->qab_nf_dest);
    989 		}
    990 	}
    991 }
    992 
    993 int
    994 qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
    995 {
    996 	int i = QAT_MIN_RING_SIZE;
    997 
    998 	for (; i <= QAT_MAX_RING_SIZE; i++)
    999 		if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
   1000 			return i;
   1001 
   1002 	return QAT_DEFAULT_RING_SIZE;
   1003 }
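
        /*
         * Worked example, assuming QAT_SIZE_TO_RING_SIZE_IN_BYTES(i)
         * expands to (1 << i) as the name suggests (the real macro lives
         * in qatvar.h): 64 messages of 64 bytes occupy 4096 bytes, so
         * the loop returns 12.  A product that matches no exact ring
         * size falls back to QAT_DEFAULT_RING_SIZE.
         */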
   1004 
   1005 int
   1006 qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
   1007 	uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
   1008 	const char *name, struct qat_ring **rqr)
   1009 {
   1010 	struct qat_bank *qb;
   1011 	struct qat_ring *qr = NULL;
   1012 	int error;
   1013 	uint32_t ring_size_bytes, ring_config;
   1014 	uint64_t ring_base;
   1015 	uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
   1016 	uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
   1017 
   1018 	KASSERT(bank < sc->sc_hw.qhw_num_banks);
   1019 
   1020 	/* Allocate a ring from specified bank */
   1021 	qb = &sc->sc_etr_banks[bank];
   1022 
   1023 	if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
   1024 		return EINVAL;
   1025 	if (qb->qb_allocated_rings & (1 << ring))
   1026 		return ENOENT;
   1027 	qr = &qb->qb_et_rings[ring];
   1028 	qb->qb_allocated_rings |= 1 << ring;
   1029 
   1030 	/* Initialize allocated ring */
   1031 	qr->qr_ring = ring;
   1032 	qr->qr_bank = bank;
   1033 	qr->qr_name = name;
   1034 	qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
   1035 	qr->qr_ring_mask = (1 << ring);
   1036 	qr->qr_cb = cb;
   1037 	qr->qr_cb_arg = cb_arg;
   1038 	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
   1039 	    qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
   1040 	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
   1041 	    qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
   1042 	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
   1043 	    qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
   1044 	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
   1045 	    qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);
   1046 
   1047 	/* Setup the shadow variables */
   1048 	qr->qr_head = 0;
   1049 	qr->qr_tail = 0;
   1050 	qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
   1051 	qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
   1052 
   1053 	/*
   1054 	 * To make sure that the ring is aligned to the ring size,
   1055 	 * allocate at least 4k and then tell the user it is smaller.
   1056 	 */
   1057 	ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
   1058 	ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
   1059 	error = qat_alloc_dmamem(sc, &qr->qr_dma,
   1060 	    ring_size_bytes, ring_size_bytes);
   1061 	if (error)
   1062 		return error;
   1063 
   1064 	KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);
   1065 
   1066 	qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
   1067 	qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;
   1068 
   1069 	aprint_verbose_dev(sc->sc_dev,
   1070 	    "allocate ring %d of bank %d for %s "
   1071 	    "size %d %d at vaddr %p paddr 0x%llx\n",
   1072 	    ring, bank, name, ring_size_bytes,
   1073 	    (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
   1074 	    qr->qr_ring_vaddr,
   1075 	    (unsigned long long)qr->qr_ring_paddr);
   1076 
   1077 	memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
   1078 	    qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);
   1079 
   1080 	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
   1081 	    qr->qr_dma.qdm_dma_map->dm_mapsize,
   1082 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
   1083 
   1084 	if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
   1085 		aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
   1086 		return EFAULT;
   1087 	}
   1088 
   1089 	if (cb == NULL) {
   1090 		ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
   1091 	} else {
   1092 		ring_config =
   1093 		    ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
   1094 	}
   1095 	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
   1096 
   1097 	ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
   1098 	qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
   1099 
   1100 	if (sc->sc_hw.qhw_init_arb != NULL)
   1101 		qat_arb_update(sc, qb);
   1102 
   1103 	mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);
   1104 
   1105 	qat_etr_ap_bank_setup_ring(sc, qr);
   1106 
   1107 	if (cb != NULL) {
   1108 		uint32_t intr_mask;
   1109 
   1110 		qb->qb_intr_mask |= qr->qr_ring_mask;
   1111 		intr_mask = qb->qb_intr_mask;
   1112 
   1113 		aprint_verbose_dev(sc->sc_dev,
   1114 		    "update intr mask for bank %d "
   1115 		    "(coalescing time %dns): 0x%08x\n",
   1116 		    bank, qb->qb_coalescing_time, intr_mask);
   1117 		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
   1118 		    intr_mask);
   1119 		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
   1120 		    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
   1121 	}
   1122 
   1123 	*rqr = qr;
   1124 
   1125 	return 0;
   1126 }
   1127 
   1128 static inline u_int
   1129 qat_modulo(u_int data, u_int shift)
   1130 {
   1131 	u_int div = data >> shift;
   1132 	u_int mult = div << shift;
   1133 	return data - mult;
   1134 }
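
        /*
         * qat_modulo(data, shift) is data % (1 << shift) without a
         * division; e.g. qat_modulo(300, 8) == 300 - 256 == 44.  This
         * is exact because ring sizes are powers of two.
         */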
   1135 
   1136 int
   1137 qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
   1138 {
   1139 	uint32_t inflight;
   1140 	uint32_t *addr;
   1141 
   1142 	mutex_spin_enter(&qr->qr_ring_mtx);
   1143 
   1144 	inflight = atomic_inc_32_nv(qr->qr_inflight);
   1145 	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
   1146 		atomic_dec_32(qr->qr_inflight);
   1147 		QAT_EVCNT_INCR(&qr->qr_ev_txfull);
   1148 		mutex_spin_exit(&qr->qr_ring_mtx);
   1149 		return EBUSY;
   1150 	}
   1151 	QAT_EVCNT_INCR(&qr->qr_ev_txmsg);
   1152 
   1153 	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
   1154 
   1155 	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
   1156 #ifdef QAT_DUMP
   1157 	qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
   1158 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
   1159 #endif
   1160 
   1161 	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
   1162 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1163 	    BUS_DMASYNC_PREWRITE);
   1164 
   1165 	qr->qr_tail = qat_modulo(qr->qr_tail +
   1166 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1167 	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
   1168 
   1169 	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
   1170 	    ETR_RING_TAIL_OFFSET, qr->qr_tail);
   1171 
   1172 	mutex_spin_exit(&qr->qr_ring_mtx);
   1173 
   1174 	return 0;
   1175 }
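
        /*
         * Submission in short: reserve a slot by bumping the shared
         * inflight counter (undone with EBUSY on overflow so callers
         * can back off and retry), copy the request to the shadow tail,
         * sync the DMA map, advance the tail modulo the ring size, and
         * only then publish the new tail to the device CSR.
         */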
   1176 
   1177 int
   1178 qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
   1179     struct qat_ring *qr)
   1180 {
   1181 	int handled = 0;
   1182 	uint32_t *msg;
   1183 	uint32_t nmsg = 0;
   1184 
   1185 	mutex_spin_enter(&qr->qr_ring_mtx);
   1186 
   1187 	QAT_EVCNT_INCR(&qr->qr_ev_rxintr);
   1188 
   1189 	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
   1190 
   1191 	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
   1192 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1193 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1194 
   1195 	while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
   1196 		atomic_dec_32(qr->qr_inflight);
   1197 		QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);
   1198 
   1199 		if (qr->qr_cb != NULL) {
   1200 			mutex_spin_exit(&qr->qr_ring_mtx);
   1201 			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
   1202 			mutex_spin_enter(&qr->qr_ring_mtx);
   1203 		}
   1204 
   1205 		*msg = ETR_RING_EMPTY_ENTRY_SIG;
   1206 
   1207 		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
   1208 		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1209 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
   1210 
   1211 		qr->qr_head = qat_modulo(qr->qr_head +
   1212 		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1213 		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
   1214 		nmsg++;
   1215 
   1216 		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
   1217 
   1218 		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
   1219 		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   1220 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1221 	}
   1222 
   1223 	if (nmsg > 0) {
   1224 		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
   1225 		    ETR_RING_HEAD_OFFSET, qr->qr_head);
   1226 	}
   1227 
   1228 	mutex_spin_exit(&qr->qr_ring_mtx);
   1229 
   1230 	return handled;
   1231 }
   1232 
   1233 int
   1234 qat_etr_bank_intr(void *arg)
   1235 {
   1236 	struct qat_bank *qb = arg;
   1237 	struct qat_softc *sc = qb->qb_sc;
   1238 	uint32_t estat;
   1239 	int i, handled = 0;
   1240 
   1241 	mutex_spin_enter(&qb->qb_bank_mtx);
   1242 
   1243 	QAT_EVCNT_INCR(&qb->qb_ev_rxintr);
   1244 
   1245 	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
   1246 
   1247 	/* Now handle all the responses */
   1248 	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
   1249 	estat &= qb->qb_intr_mask;
   1250 
   1251 	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
   1252 	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
   1253 
   1254 	mutex_spin_exit(&qb->qb_bank_mtx);
   1255 
   1256 	while ((i = ffs32(estat)) != 0) {
   1257 		struct qat_ring *qr = &qb->qb_et_rings[--i];
   1258 		estat &= ~(1 << i);
   1259 		handled |= qat_etr_ring_intr(sc, qb, qr);
   1260 	}
   1261 
   1262 	return handled;
   1263 }
   1264 
   1265 void
   1266 qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
   1267 {
   1268 
   1269 	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
   1270 	    qb->qb_allocated_rings & 0xff);
   1271 }
   1272 
   1273 struct qat_sym_cookie *
   1274 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
   1275 {
   1276 	struct qat_sym_cookie *qsc;
   1277 
   1278 	mutex_spin_enter(&qcb->qcb_bank_mtx);
   1279 
   1280 	if (qcb->qcb_symck_free_count == 0) {
   1281 		QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
   1282 		mutex_spin_exit(&qcb->qcb_bank_mtx);
   1283 		return NULL;
   1284 	}
   1285 
   1286 	qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
   1287 
   1288 	mutex_spin_exit(&qcb->qcb_bank_mtx);
   1289 
   1290 	return qsc;
   1291 }
   1292 
   1293 void
   1294 qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc)
   1295 {
   1296 
   1297 	mutex_spin_enter(&qcb->qcb_bank_mtx);
   1298 	qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
   1299 	mutex_spin_exit(&qcb->qcb_bank_mtx);
   1300 }
   1301 
   1302 
   1303 void
   1304 qat_memcpy_htobe64(void *dst, const void *src, size_t len)
   1305 {
   1306 	uint64_t *dst0 = dst;
   1307 	const uint64_t *src0 = src;
   1308 	size_t i;
   1309 
   1310 	KASSERT(len % sizeof(*dst0) == 0);
   1311 
   1312 	for (i = 0; i < len / sizeof(*dst0); i++)
   1313 		*(dst0 + i) = htobe64(*(src0 + i));
   1314 }
   1315 
   1316 void
   1317 qat_memcpy_htobe32(void *dst, const void *src, size_t len)
   1318 {
   1319 	uint32_t *dst0 = dst;
   1320 	const uint32_t *src0 = src;
   1321 	size_t i;
   1322 
   1323 	KASSERT(len % sizeof(*dst0) == 0);
   1324 
   1325 	for (i = 0; i < len / sizeof(*dst0); i++)
   1326 		*(dst0 + i) = htobe32(*(src0 + i));
   1327 }
   1328 
   1329 void
   1330 qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
   1331 {
   1332 	switch (wordbyte) {
   1333 	case 4:
   1334 		qat_memcpy_htobe32(dst, src, len);
   1335 		break;
   1336 	case 8:
   1337 		qat_memcpy_htobe64(dst, src, len);
   1338 		break;
   1339 	default:
   1340 		KASSERT(0);
   1341 	}
   1342 }
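
        /*
         * Example on a little-endian host: qat_memcpy_htobe32() turns
         * the in-memory word 67 45 23 01 (uint32_t 0x01234567) into
         * 01 23 45 67; on big-endian hosts the helpers are plain copies.
         * wordbyte is the hash's native word width: 4 for MD5, SHA1 and
         * SHA256, 8 for SHA384 and SHA512.
         */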
   1343 
   1344 void
   1345 qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
   1346     struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
   1347 {
   1348 	int i, state_swap;
   1349 	struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
   1350 	uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
   1351 	uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
   1352 	uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
   1353 	uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
   1354 	uint32_t keylen = cria->cri_klen / 8;
   1355 	uint32_t padlen = blklen - keylen;
   1356 	uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
   1357 	uint8_t *opad = desc->qcd_hash_state_prefix_buf +
   1358 	    sizeof(desc->qcd_hash_state_prefix_buf) / 2;
   1359 	/* XXX
   1360 	 * To avoid the "stack protector not protecting local variables"
   1361 	 * warning, use a buffer of constant size instead of one sized per
   1362 	 * algorithm.  Currently the largest context is sizeof(aesxcbc_ctx),
   1363 	 * used by swcr_auth_hash_aes_xcbc_mac.
   1364 	 */
   1365 	uint8_t ctx[sizeof(aesxcbc_ctx)];
   1366 
   1367 	memcpy(ipad, cria->cri_key, keylen);
   1368 	memcpy(opad, cria->cri_key, keylen);
   1369 
   1370 	if (padlen > 0) {
   1371 		memset(ipad + keylen, 0, padlen);
   1372 		memset(opad + keylen, 0, padlen);
   1373 	}
   1374 	for (i = 0; i < blklen; i++) {
   1375 		ipad[i] ^= 0x36;
   1376 		opad[i] ^= 0x5c;
   1377 	}
   1378 
   1379 	/* ipad */
   1380 	sah->Init(ctx);
   1381 	/* Check the endian of kernel built-in hash state */
   1382 	state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
   1383 	    ((uint8_t *)ctx) + state_offset, state_word);
   1384 	sah->Update(ctx, ipad, blklen);
   1385 	if (state_swap == 0) {
   1386 		memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
   1387 	} else {
   1388 		qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
   1389 		    state_size, state_word);
   1390 	}
   1391 
   1392 	/* opad */
   1393 	sah->Init(ctx);
   1394 	sah->Update(ctx, opad, blklen);
   1395 	if (state_swap == 0) {
   1396 		memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
   1397 	} else {
   1398 		qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
   1399 		    state_size, state_word);
   1400 	}
   1401 }
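
        /*
         * Why this works (RFC 2104): HMAC(K, m) =
         * H((K ^ opad) || H((K ^ ipad) || m)).  K ^ ipad and K ^ opad
         * each fill exactly one hash block, so compressing them yields
         * a fixed per-key midstate.  state1/state2 hold those midstates,
         * byte-swapped to the engine's big-endian layout when the
         * kernel's software state differs, letting the hardware resume
         * at the message data without reprocessing the key.
         */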
   1402 
   1403 uint16_t
   1404 qat_crypto_load_cipher_cryptoini(
   1405     struct qat_crypto_desc *desc, struct cryptoini *crie)
   1406 {
   1407 	enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
   1408 	enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
   1409 	enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;
   1410 
   1411 	switch (crie->cri_alg) {
   1412 	case CRYPTO_DES_CBC:
   1413 		algo = HW_CIPHER_ALGO_DES;
   1414 		desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
   1415 		break;
   1416 	case CRYPTO_3DES_CBC:
   1417 		algo = HW_CIPHER_ALGO_3DES;
   1418 		desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
   1419 		break;
   1420 	case CRYPTO_AES_CBC:
   1421 		switch (crie->cri_klen / 8) {
   1422 		case HW_AES_128_KEY_SZ:
   1423 			algo = HW_CIPHER_ALGO_AES128;
   1424 			break;
   1425 		case HW_AES_192_KEY_SZ:
   1426 			algo = HW_CIPHER_ALGO_AES192;
   1427 			break;
   1428 		case HW_AES_256_KEY_SZ:
   1429 			algo = HW_CIPHER_ALGO_AES256;
   1430 			break;
   1431 		default:
   1432 			KASSERT(0);
   1433 			break;
   1434 		}
   1435 		desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
   1436 		/*
   1437 		 * The AES decrypt key needs to be reversed.
   1438 		 * Instead of reversing the key at session registration,
   1439 		 * it is reversed on-the-fly here by setting the
   1440 		 * KEY_CONVERT bit.
   1441 		 */
   1442 		if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
   1443 			key_convert = HW_CIPHER_KEY_CONVERT;
   1444 
   1445 		break;
   1446 	default:
   1447 		KASSERT(0);
   1448 		break;
   1449 	}
   1450 
   1451 	return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
   1452 	    desc->qcd_cipher_dir);
   1453 }
   1454 
   1455 uint16_t
   1456 qat_crypto_load_auth_cryptoini(
   1457     struct qat_crypto_desc *desc, struct cryptoini *cria,
   1458     struct qat_sym_hash_def const **hash_def)
   1459 {
   1460 	const struct swcr_auth_hash *sah;
   1461 	enum qat_sym_hash_algorithm algo = 0;
   1462 
   1463 	switch (cria->cri_alg) {
   1464 	case CRYPTO_MD5_HMAC_96:
   1465 		algo = QAT_SYM_HASH_MD5;
   1466 		break;
   1467 	case CRYPTO_SHA1_HMAC_96:
   1468 		algo = QAT_SYM_HASH_SHA1;
   1469 		break;
   1470 	case CRYPTO_SHA2_256_HMAC:
   1471 		algo = QAT_SYM_HASH_SHA256;
   1472 		break;
   1473 	case CRYPTO_SHA2_384_HMAC:
   1474 		algo = QAT_SYM_HASH_SHA384;
   1475 		break;
   1476 	case CRYPTO_SHA2_512_HMAC:
   1477 		algo = QAT_SYM_HASH_SHA512;
   1478 		break;
   1479 	default:
   1480 		KASSERT(0);
   1481 		break;
   1482 	}
   1483 	*hash_def = &qat_sym_hash_defs[algo];
   1484 	sah = (*hash_def)->qshd_alg->qshai_sah;
   1485 	KASSERT(sah != NULL);
   1486 	desc->qcd_auth_sz = sah->auth_hash->authsize;
   1487 
   1488 	return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
   1489 	    (*hash_def)->qshd_qat->qshqi_algo_enc,
   1490 	    (*hash_def)->qshd_alg->qshai_digest_len);
   1491 }
   1492 
   1493 int
   1494 qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
   1495     struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
   1496     uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
   1497 {
   1498 	int error, i, nsegs;
   1499 
   1500 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
   1501 		struct mbuf *m = (struct mbuf *)crp->crp_buf;
   1502 
   1503 		if (icv_offset >= 0) {
   1504 			if (m_length(m) == icv_offset) {
   1505 				m_copyback(m, icv_offset, desc->qcd_auth_sz,
   1506 				    icv_buf);
   1507 				if (m_length(m) == icv_offset)
   1508 					return ENOBUFS;
   1509 			} else {
   1510 				struct mbuf *m0;
   1511 				m0 = m_pulldown(m, icv_offset,
   1512 				    desc->qcd_auth_sz, NULL);
   1513 				if (m0 == NULL)
   1514 					return ENOBUFS;
   1515 			}
   1516 		}
   1517 
   1518 		error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
   1519 		    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1520 		if (error == EFBIG) {
   1521 			struct mbuf *m_new;
   1522 			m_new = m_defrag(m, M_DONTWAIT);
   1523 			if (m_new != NULL) {
   1524 				crp->crp_buf = m_new;
   1525 				qsc->qsc_buf = m_new;
   1526 				error = bus_dmamap_load_mbuf(sc->sc_dmat,
   1527 				    qsc->qsc_buf_dmamap, m_new,
   1528 				    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1529 				if (error) {
   1530 					m_freem(m_new);
   1531 					crp->crp_buf = NULL;
   1532 				}
   1533 			}
   1534 		}
   1535 
   1536 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
   1537 		error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
   1538 		    (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1539 	} else {
   1540 		error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
   1541 		    crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
   1542 	}
   1543 	if (error) {
   1544 		aprint_debug_dev(sc->sc_dev,
   1545 		    "can't load crp_buf, error %d\n", error);
   1546 		crp->crp_etype = error;
   1547 		return error;
   1548 	}
   1549 
   1550 	nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
   1551 	qsc->qsc_buf_list.num_buffers = nsegs;
   1552 	for (i = 0; i < nsegs; i++) {
   1553 		struct flat_buffer_desc *flatbuf =
   1554 		    &qsc->qsc_buf_list.phy_buffers[i];
   1555 		bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
   1556 		bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;
   1557 
   1558 		flatbuf->data_len_in_bytes = len;
   1559 		flatbuf->phy_buffer = (uint64_t)paddr;
   1560 
   1561 		if (icv_offset >= 0) {
   1562 			if (icv_offset < len)
   1563 				*icv_paddr = paddr + icv_offset;
   1564 			else
   1565 				icv_offset -= len;
   1566 		}
   1567 	}
   1568 
   1569 	bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
   1570 	    qsc->qsc_buf_dmamap->dm_mapsize,
   1571 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1572 
   1573 	return 0;
   1574 }
   1575 
   1576 int
   1577 qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
   1578     struct cryptodesc *crde, struct qat_crypto_desc const *desc)
   1579 {
   1580 	uint32_t rand;
   1581 	uint32_t ivlen = desc->qcd_cipher_blk_sz;
   1582 	int i;
   1583 
   1584 	if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
   1585 		memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
   1586 	} else {
   1587 		if (crde->crd_flags & CRD_F_ENCRYPT) {
   1588 			for (i = 0; i + sizeof(rand) <= ivlen;
   1589 			    i += sizeof(rand)) {
   1590 				rand = cprng_fast32();
   1591 				memcpy(qsc->qsc_iv_buf + i, &rand, sizeof(rand));
   1592 			}
   1593 			if (sizeof(qsc->qsc_iv_buf) % sizeof(rand) != 0) {
   1594 				rand = cprng_fast32();
   1595 				memcpy(qsc->qsc_iv_buf + i, &rand,
   1596 				       sizeof(qsc->qsc_iv_buf) - i);
   1597 			}
   1598 		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
   1599 			/* get iv from buf */
   1600 			m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
   1601 			    qsc->qsc_iv_buf);
   1602 		} else if (crp->crp_flags & CRYPTO_F_IOV) {
   1603 			cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
   1604 			    qsc->qsc_iv_buf);
   1605 		}
   1606 	}
   1607 
   1608 	if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
   1609 	    (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
   1610 		if (crp->crp_flags & CRYPTO_F_IMBUF) {
   1611 			m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
   1612 			    qsc->qsc_iv_buf);
   1613 		} else if (crp->crp_flags & CRYPTO_F_IOV) {
   1614 			cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
   1615 			    qsc->qsc_iv_buf);
   1616 		}
   1617 	}
   1618 
   1619 	return 0;
   1620 }
   1621 
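         /*
          * qat_crypto_select_bank:
          *
          *	Pick a ring bank for the current CPU; the modulo spreads
          *	requests across banks when there are fewer banks than CPUs.
          */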
   1622 static inline struct qat_crypto_bank *
   1623 qat_crypto_select_bank(struct qat_crypto *qcy)
   1624 {
   1625 	u_int cpuid = cpu_index(curcpu());
   1626 
   1627 	return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
   1628 }
   1629 
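         /*
          * qat_crypto_process:
          *
          *	opencrypto dispatch entry point.  Validate the descriptor
          *	chain, pick the encrypt or decrypt session descriptor, load
          *	the IV and the data buffer, then build the firmware request
          *	and enqueue it on the bank's sym_tx ring.  Completion is
          *	reported from qat_crypto_sym_rxintr().
          */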
   1630 int
   1631 qat_crypto_process(void *arg, struct cryptop *crp, int hint)
   1632 {
   1633 	struct qat_crypto *qcy = arg;
   1634 	struct qat_crypto_bank *qcb;
   1635 	struct qat_session *qs = NULL;
   1636 	struct qat_crypto_desc const *desc;
   1637 	struct qat_sym_cookie *qsc = NULL;
   1638 	struct qat_sym_bulk_cookie *qsbc;
   1639 	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
   1640 	bus_addr_t icv_paddr = 0;
   1641 	int error, icv_offset = -1;
   1642 	uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];
   1643 
   1644 	qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
   1645 	mutex_spin_enter(&qs->qs_session_mtx);
   1646 	KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
   1647 	qs->qs_inflight++;
   1648 	mutex_spin_exit(&qs->qs_session_mtx);
   1649 
   1650 	qcb = qat_crypto_select_bank(qcy);
   1651 
   1652 	qsc = qat_crypto_alloc_sym_cookie(qcb);
   1653 	if (qsc == NULL) {
   1654 		error = ENOBUFS;
   1655 		goto fail;
   1656 	}
   1657 
   1658 	error = 0;
   1659 	desc = &qs->qs_dec_desc;
   1660 	crd = crp->crp_desc;
   1661 	while (crd != NULL) {
   1662 		switch (crd->crd_alg) {
   1663 		case CRYPTO_DES_CBC:
   1664 		case CRYPTO_3DES_CBC:
   1665 		case CRYPTO_AES_CBC:
   1666 			if (crde != NULL)
   1667 				error = EINVAL;
   1668 			if (crd->crd_flags & CRD_F_ENCRYPT) {
   1669 				/* use encrypt desc */
   1670 				desc = &qs->qs_enc_desc;
   1671 				if (crda != NULL)
   1672 					error = ENOTSUP;
   1673 			}
   1674 			crde = crd;
   1675 			break;
   1676 		case CRYPTO_MD5_HMAC_96:
   1677 		case CRYPTO_SHA1_HMAC_96:
   1678 		case CRYPTO_SHA2_256_HMAC:
   1679 		case CRYPTO_SHA2_384_HMAC:
   1680 		case CRYPTO_SHA2_512_HMAC:
   1681 			if (crda != NULL)
   1682 				error = EINVAL;
   1683 			if (crde != NULL &&
   1684 			    (crde->crd_flags & CRD_F_ENCRYPT) == 0)
   1685 				error = EINVAL;
   1686 			crda = crd;
   1687 			icv_offset = crd->crd_inject;
   1688 			break;
   1689 		}
   1690 		if (error)
   1691 			goto fail;
   1692 
   1693 		crd = crd->crd_next;
   1694 	}
   1695 
   1696 	qsc->qsc_buf = crp->crp_buf;
   1697 
   1698 	if (crde != NULL) {
   1699 		error = qat_crypto_load_iv(qsc, crp, crde, desc);
   1700 		if (error)
   1701 			goto fail;
   1702 	}
   1703 
   1704 	error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
   1705 	    icv_offset, &icv_paddr);
   1706 	if (error)
   1707 		goto fail;
   1708 
   1709 	qsbc = &qsc->u.qsc_bulk_cookie;
   1710 
   1711 	qsbc->qsbc_crypto = qcy;
   1712 	qsbc->qsbc_session = qs;
   1713 	qsbc->qsbc_cb_tag = crp;
   1714 
   1715 	qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
   1716 	    crde, crda, icv_paddr);
   1717 
   1718 	bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
   1719 	    offsetof(struct qat_sym_cookie, qsc_self_dmamap),
   1720 	    BUS_DMASYNC_PREWRITE);
   1721 
   1722 	error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
   1723 	    (uint32_t *)qsbc->qsbc_msg);
   1724 	if (error)
   1725 		goto fail;
   1726 
   1727 	return 0;
   1728 fail:
   1729 	if (qsc)
   1730 		qat_crypto_free_sym_cookie(qcb, qsc);
   1731 	mutex_spin_enter(&qs->qs_session_mtx);
   1732 	qs->qs_inflight--;
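         	/* qat_crypto_check_free_session() releases qs_session_mtx. */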
   1733 	qat_crypto_check_free_session(qcy, qs);
   1734 	crp->crp_etype = error;
   1735 	crypto_done(crp);
   1736 	return 0;
   1737 }
   1738 
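         /*
          * qat_crypto_setup_ring:
          *
          *	Create the sym_tx/sym_rx ring pair for a bank and preallocate
          *	QAT_NSYMCOOKIE DMA-able symmetric request cookies together
          *	with their payload DMA maps.
          */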
   1739 int
   1740 qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
   1741 {
   1742 	int error, i, bank;
   1743 	int curname = 0;
   1744 	char *name;
   1745 
   1746 	bank = qcb->qcb_bank;
   1747 
   1748 	name = qcb->qcb_ring_names[curname++];
   1749 	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
   1750 	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
   1751 	    sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
   1752 	    NULL, NULL, name, &qcb->qcb_sym_tx);
   1753 	if (error)
   1754 		return error;
   1755 
   1756 	name = qcb->qcb_ring_names[curname++];
   1757 	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
   1758 	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
   1759 	    sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
   1760 	    qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
   1761 	if (error)
   1762 		return error;
   1763 
   1764 	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
   1765 		struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
   1766 		struct qat_sym_cookie *qsc;
   1767 
   1768 		error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
   1769 		    QAT_OPTIMAL_ALIGN);
   1770 		if (error)
   1771 			return error;
   1772 
   1773 		qsc = qdm->qdm_dma_vaddr;
   1774 		qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
   1775 		qsc->qsc_bulk_req_params_buf_paddr =
   1776 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
   1777 		    u.qsc_bulk_cookie.qsbc_req_params_buf);
   1778 		qsc->qsc_buffer_list_desc_paddr =
   1779 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
   1780 		    qsc_buf_list);
   1781 		qsc->qsc_iv_buf_paddr =
   1782 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
   1783 		    qsc_iv_buf);
   1784 		qcb->qcb_symck_free[i] = qsc;
   1785 		qcb->qcb_symck_free_count++;
   1786 
   1787 		error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
   1788 		    QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
   1789 		if (error)
   1790 			return error;
   1791 	}
   1792 
   1793 	return 0;
   1794 }
   1795 
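         /*
          * qat_crypto_bank_init:
          *
          *	Initialize one crypto bank: its lock, its event counter and
          *	its transmit/receive rings.
          */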
   1796 int
   1797 qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
   1798 {
   1799 	int error;
   1800 
   1801 	mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
   1802 
   1803 	QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
   1804 	    qcb->qcb_ev_no_symck_name, "crypto no_symck");
   1805 
   1806 	error = qat_crypto_setup_ring(sc, qcb);
   1807 	if (error)
   1808 		return error;
   1809 
   1810 	return 0;
   1811 }
   1812 
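         /*
          * qat_crypto_init:
          *
          *	Allocate the banks (at most one per CPU with an arbiter,
          *	otherwise one per AE) and the QAT_NSESSION session slots,
          *	precomputing the physical addresses of each session's
          *	encrypt and decrypt descriptors.
          */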
   1813 int
   1814 qat_crypto_init(struct qat_softc *sc)
   1815 {
   1816 	struct qat_crypto *qcy = &sc->sc_crypto;
   1817 	int error, bank, i;
   1818 	int num_banks;
   1819 
   1820 	qcy->qcy_sc = sc;
   1821 
   1822 	if (sc->sc_hw.qhw_init_arb != NULL)
   1823 		num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
   1824 	else
   1825 		num_banks = sc->sc_ae_num;
   1826 
   1827 	qcy->qcy_num_banks = num_banks;
   1828 
   1829 	qcy->qcy_banks =
   1830 	    qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
   1831 
   1832 	for (bank = 0; bank < num_banks; bank++) {
   1833 		struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
   1834 		qcb->qcb_bank = bank;
   1835 		qcb->qcb_crypto = qcy;
   1836 		error = qat_crypto_bank_init(sc, qcb);
   1837 		if (error)
   1838 			return error;
   1839 	}
   1840 
   1841 	mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);
   1842 
   1843 	for (i = 0; i < QAT_NSESSION; i++) {
   1844 		struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
   1845 		struct qat_session *qs;
   1846 
   1847 		error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
   1848 		    QAT_OPTIMAL_ALIGN);
   1849 		if (error)
   1850 			return error;
   1851 
   1852 		qs = qdm->qdm_dma_vaddr;
   1853 		qs->qs_lid = i;
   1854 		qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
   1855 		qs->qs_dec_desc.qcd_hash_state_paddr =
   1856 		    qs->qs_dec_desc.qcd_desc_paddr +
   1857 		    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
   1858 		qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
   1859 		    offsetof(struct qat_session, qs_enc_desc);
   1860 		qs->qs_enc_desc.qcd_hash_state_paddr =
   1861 		    qs->qs_enc_desc.qcd_desc_paddr +
   1862 		    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
   1863 
   1864 		mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);
   1865 
   1866 		qcy->qcy_sessions[i] = qs;
   1867 		qcy->qcy_session_free[i] = qs;
   1868 		qcy->qcy_session_free_count++;
   1869 	}
   1870 
   1871 	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
   1872 	    qcy->qcy_ev_new_sess_name, "crypto new_sess");
   1873 	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
   1874 	    qcy->qcy_ev_free_sess_name, "crypto free_sess");
   1875 	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
   1876 	    qcy->qcy_ev_no_sess_name, "crypto no_sess");
   1877 
   1878 	return 0;
   1879 }
   1880 
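         /*
          * qat_crypto_new_session:
          *
          *	opencrypto newsession entry point.  Take a session slot off
          *	the free list, parse the cryptoini chain (at most one cipher
          *	and one HMAC), and program the firmware slice order of the
          *	encrypt and decrypt descriptors accordingly.
          */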
   1881 int
   1882 qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
   1883 {
   1884 	struct qat_crypto *qcy = arg;
   1885 	struct qat_session *qs = NULL;
   1886 	struct cryptoini *crie = NULL;
   1887 	struct cryptoini *cria = NULL;
   1888 	int slice, error;
   1889 
   1890 	mutex_spin_enter(&qcy->qcy_crypto_mtx);
   1891 
   1892 	if (qcy->qcy_session_free_count == 0) {
   1893 		QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
   1894 		mutex_spin_exit(&qcy->qcy_crypto_mtx);
   1895 		return ENOBUFS;
   1896 	}
   1897 	qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
   1898 	QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);
   1899 
   1900 	mutex_spin_exit(&qcy->qcy_crypto_mtx);
   1901 
   1902 	qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
   1903 	qs->qs_inflight = 0;
   1904 	*lid = qs->qs_lid;
   1905 
   1906 	error = 0;
   1907 	while (cri) {
   1908 		switch (cri->cri_alg) {
   1909 		case CRYPTO_DES_CBC:
   1910 		case CRYPTO_3DES_CBC:
   1911 		case CRYPTO_AES_CBC:
   1912 			if (crie != NULL)
   1913 				error = EINVAL;
   1914 			crie = cri;
   1915 			break;
   1916 		case CRYPTO_MD5_HMAC_96:
   1917 		case CRYPTO_SHA1_HMAC_96:
   1918 		case CRYPTO_SHA2_256_HMAC:
   1919 		case CRYPTO_SHA2_384_HMAC:
   1920 		case CRYPTO_SHA2_512_HMAC:
   1921 			if (cria != NULL)
   1922 				error = EINVAL;
   1923 			cria = cri;
   1924 			break;
   1925 		default:
   1926 			error = EINVAL;
   1927 		}
   1928 		if (error)
   1929 			goto fail;
   1930 		cri = cri->cri_next;
   1931 	}
   1932 
   1933 	slice = 1;
   1934 	if (crie != NULL && cria != NULL) {
   1935 		slice = 2;
   1936 		/* auth then decrypt */
   1937 		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
   1938 		qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
   1939 		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
   1940 		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
   1941 		/* encrypt then auth */
   1942 		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
   1943 		qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
   1944 		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
   1945 		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
   1946 	} else if (crie != NULL) {
   1947 		/* decrypt */
   1948 		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
   1949 		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
   1950 		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
   1951 		/* encrypt */
   1952 		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
   1953 		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
   1954 		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
   1955 	} else if (cria != NULL) {
   1956 		/* auth */
   1957 		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
   1958 		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
   1959 		/* auth */
   1960 		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
   1961 		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
   1962 	} else {
   1963 		error = EINVAL;
   1964 		goto fail;
   1965 	}
   1966 	qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
   1967 	qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
   1968 
    1969 	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc,
         	    crie, cria);
    1970 	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc,
         	    crie, cria);
   1971 
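         	/*
         	 * Make the descriptor stores visible before the session is
         	 * handed back to opencrypto for use by other CPUs.
         	 */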
   1972 	membar_producer();
   1973 
   1974 	return 0;
   1975 fail:
   1976 	if (qs != NULL) {
   1977 		mutex_spin_enter(&qs->qs_session_mtx);
   1978 		qat_crypto_free_session0(qcy, qs);
   1979 	}
   1980 	return error;
   1981 }
   1982 
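         /*
          * qat_crypto_clean_desc:
          *
          *	Scrub a descriptor's cached content descriptor, hash state
          *	and request template (which may hold key material) with
          *	explicit_memset().
          */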
   1983 static inline void
   1984 qat_crypto_clean_desc(struct qat_crypto_desc *desc)
   1985 {
   1986 	explicit_memset(desc->qcd_content_desc, 0,
   1987 	    sizeof(desc->qcd_content_desc));
   1988 	explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
   1989 	    sizeof(desc->qcd_hash_state_prefix_buf));
   1990 	explicit_memset(desc->qcd_req_cache, 0,
   1991 	    sizeof(desc->qcd_req_cache));
   1992 }
   1993 
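         /*
          * qat_crypto_free_session0:
          *
          *	Scrub both descriptors, mark the session inactive and put it
          *	back on the free list.  Called with qs_session_mtx held;
          *	releases it.
          */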
   1994 int
   1995 qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
   1996 {
   1997 
   1998 	qat_crypto_clean_desc(&qs->qs_dec_desc);
   1999 	qat_crypto_clean_desc(&qs->qs_enc_desc);
   2000 	qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;
   2001 
   2002 	mutex_spin_exit(&qs->qs_session_mtx);
   2003 
   2004 	mutex_spin_enter(&qcy->qcy_crypto_mtx);
   2005 
   2006 	qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
   2007 	QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);
   2008 
   2009 	mutex_spin_exit(&qcy->qcy_crypto_mtx);
   2010 
   2011 	return 0;
   2012 }
   2013 
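         /*
          * qat_crypto_check_free_session:
          *
          *	Complete a deferred free once the last in-flight request has
          *	drained.  Called with qs_session_mtx held; always releases it.
          */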
   2014 void
   2015 qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
   2016 {
   2017 
   2018 	if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
   2019 	    qs->qs_inflight == 0) {
   2020 		qat_crypto_free_session0(qcy, qs);
   2021 	} else {
   2022 		mutex_spin_exit(&qs->qs_session_mtx);
   2023 	}
   2024 }
   2025 
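         /*
          * qat_crypto_free_session:
          *
          *	opencrypto freesession entry point.  If requests are still in
          *	flight, just mark the session as freeing; the final reply in
          *	qat_crypto_sym_rxintr() will complete the free.
          */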
   2026 int
   2027 qat_crypto_free_session(void *arg, uint64_t sid)
   2028 {
   2029 	struct qat_crypto *qcy = arg;
   2030 	struct qat_session *qs;
   2031 	int error;
   2032 
   2033 	qs = qcy->qcy_sessions[CRYPTO_SESID2LID(sid)];
   2034 
   2035 	mutex_spin_enter(&qs->qs_session_mtx);
   2036 
   2037 	if (qs->qs_inflight > 0) {
   2038 		qs->qs_status |= QAT_SESSION_STATUS_FREEING;
   2039 		mutex_spin_exit(&qs->qs_session_mtx);
   2040 		return 0;
   2041 	}
   2042 
   2043 	error = qat_crypto_free_session0(qcy, qs);
   2044 
   2045 	return error;
   2046 }
   2047 
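         /*
          * qat_crypto_start:
          *
          *	Register the supported cipher and HMAC algorithms with
          *	opencrypto(9).
          */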
   2048 int
   2049 qat_crypto_start(struct qat_softc *sc)
   2050 {
   2051 	struct qat_crypto *qcy = &sc->sc_crypto;
   2052 	int error, i;
   2053 	static const int algs[] = {
   2054 	    CRYPTO_DES_CBC, CRYPTO_3DES_CBC, CRYPTO_AES_CBC,
   2055 	    CRYPTO_MD5_HMAC_96, CRYPTO_SHA1_HMAC_96, CRYPTO_SHA2_256_HMAC,
   2056 	    CRYPTO_SHA2_384_HMAC, CRYPTO_SHA2_512_HMAC,
   2057 	};
   2058 
   2059 	/* opencrypto */
   2060 	qcy->qcy_cid = crypto_get_driverid(0);
   2061 	if (qcy->qcy_cid < 0) {
   2062 		aprint_error_dev(sc->sc_dev,
   2063 		    "could not get opencrypto driver id\n");
   2064 		return ENOENT;
   2065 	}
   2066 
   2067 	for (i = 0; i < __arraycount(algs); i++) {
   2068 		error = crypto_register(qcy->qcy_cid, algs[i], 0, 0,
   2069 		    qat_crypto_new_session, qat_crypto_free_session,
   2070 		    qat_crypto_process, qcy);
   2071 		if (error) {
   2072 			aprint_error_dev(sc->sc_dev,
   2073 			    "could not register crypto: %d\n", error);
   2074 			return error;
   2075 		}
   2076 	}
   2077 
   2078 	return 0;
   2079 }
   2080 
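         /*
          * qat_crypto_sym_rxintr:
          *
          *	Handle one reply on a bank's sym_rx ring: recover the cookie
          *	from the opaque field of the firmware response, unload the
          *	payload DMA map, complete the cryptop and drop the session's
          *	in-flight count.
          */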
   2081 int
   2082 qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
   2083 {
   2084 	struct qat_crypto_bank *qcb = arg;
   2085 	struct qat_crypto *qcy;
   2086 	struct qat_session *qs;
   2087 	struct qat_sym_cookie *qsc;
   2088 	struct qat_sym_bulk_cookie *qsbc;
   2089 	struct cryptop *crp;
   2090 
   2091 	qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
   2092 
   2093 	qsbc = &qsc->u.qsc_bulk_cookie;
   2094 	qcy = qsbc->qsbc_crypto;
   2095 	qs = qsbc->qsbc_session;
   2096 	crp = qsbc->qsbc_cb_tag;
   2097 
   2098 	bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
   2099 	    qsc->qsc_buf_dmamap->dm_mapsize,
   2100 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2101 	bus_dmamap_unload(sc->sc_dmat, qsc->qsc_buf_dmamap);
   2102 	qat_crypto_free_sym_cookie(qcb, qsc);
   2103 
   2104 	crp->crp_etype = 0;
   2105 	crypto_done(crp);
   2106 
   2107 	mutex_spin_enter(&qs->qs_session_mtx);
   2108 	KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
   2109 	qs->qs_inflight--;
   2110 	qat_crypto_check_free_session(qcy, qs);
   2111 
   2112 	return 1;
   2113 }
   2114 
   2115 #ifdef QAT_DUMP
   2116 
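         /*
          * qat_dump_raw:
          *
          *	Hex-dump a buffer together with the caller's return address,
          *	gated on the qat_dump flag mask.
          */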
   2117 void
   2118 qat_dump_raw(int flag, const char *label, void *d, size_t len)
   2119 {
   2120 	uintptr_t pc;
   2121 	size_t pos;
   2122 	uint8_t *dp = (uint8_t *)d;
   2123 
   2124 	if ((qat_dump & flag) == 0)
   2125 		return;
   2126 
   2127 	printf("dumping %s at %p len %zu\n", label, d, len);
   2128 
   2129 	pc = __RETURN_ADDRESS;
   2130 	printf("\tcallpc ");
   2131 	qat_print_sym(pc);
   2132 	printf("\n");
   2133 
   2134 	for (pos = 0; pos < len; pos++) {
   2135 		if (pos % 32 == 0)
   2136 			printf("%8zx: ", pos);
   2137 		else if (pos % 4 == 0)
    2138 			printf(" ");
   2139 
   2140 		printf("%02x", dp[pos]);
   2141 
   2142 		if (pos % 32 == 31 || pos + 1 == len)
   2143 			printf("\n");
   2144 	}
   2145 }
   2146 
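         /*
          * qat_dump_ring:
          *
          *	Dump one transport ring: host and device head/tail pointers
          *	followed by the ring contents, marking the head with '*' and
          *	the tail with 'v'.
          */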
   2147 void
   2148 qat_dump_ring(int bank, int ring)
   2149 {
   2150 	struct qat_softc *sc = gsc;
   2151 	struct qat_bank *qb = &sc->sc_etr_banks[bank];
   2152 	struct qat_ring *qr = &qb->qb_et_rings[ring];
   2153 	u_int offset;
   2154 	int i;
   2155 	uint32_t msg;
   2156 
   2157 	printf("dumping bank %d ring %d\n", bank, ring);
   2158 	printf("\tid %d name %s msg size %d ring size %d\n",
   2159 	    qr->qr_ring_id, qr->qr_name,
   2160 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   2161 	    qr->qr_ring_size);
   2162 	printf("\thost   head 0x%08x tail 0x%08x\n", qr->qr_head, qr->qr_tail);
   2163 	printf("\ttarget head 0x%08x tail 0x%08x\n",
   2164 	    qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
   2165 	        ETR_RING_HEAD_OFFSET),
   2166 	    qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
   2167 	        ETR_RING_TAIL_OFFSET));
   2168 
   2169 	printf("\n");
   2170 	i = 0;
   2171 	offset = 0;
   2172 	do {
   2173 		if (i % 8 == 0)
   2174 			printf("%8x:", offset);
   2175 
   2176 		if (offset == qr->qr_head) {
   2177 			printf("*");
   2178 		} else if (offset == qr->qr_tail) {
   2179 			printf("v");
   2180 		} else {
   2181 			printf(" ");
   2182 		}
   2183 
   2184 		msg = *(uint32_t *)((uintptr_t)qr->qr_ring_vaddr + offset);
   2185 		printf("%08x", htobe32(msg));
   2186 
   2187 		if (i % 8 == 7)
   2188 			printf("\n");
   2189 
   2190 		i++;
   2191 		offset = qat_modulo(offset +
   2192 		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
   2193 		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
   2194 	} while (offset != 0);
   2195 }
   2196 
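         /*
          * qat_dump_mbuf:
          *
          *	Hex-dump each mbuf in a chain, including up to 'pre' bytes
          *	before and 'post' bytes after the data area, delimiting the
          *	valid data with '`' and '\''.
          */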
   2197 void
   2198 qat_dump_mbuf(struct mbuf *m0, int pre, int post)
   2199 {
   2200 	struct mbuf *m;
   2201 
   2202 	for (m = m0; m != NULL; m = m->m_next) {
   2203 		size_t pos, len;
   2204 		uint8_t *buf_start, *data_start, *data_end, *buf_end;
   2205 		uint8_t *start, *end, *dp;
   2206 		bool skip_ind;
   2207 		const char *ind;
   2208 
   2209 		printf("dumping mbuf %p len %d flags 0x%08x\n",
   2210 		    m, m->m_len, m->m_flags);
   2211 		if (m->m_len == 0)
   2212 			continue;
   2213 
   2214 		data_start = (uint8_t *)m->m_data;
   2215 		data_end = data_start + m->m_len;
   2216 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
   2217 		case 0:
   2218 			buf_start = (uint8_t *)M_BUFADDR(m);
   2219 			buf_end = buf_start +
   2220 			    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
   2221 			break;
   2222 		case M_EXT|M_EXT_CLUSTER:
   2223 			buf_start = (uint8_t *)m->m_ext.ext_buf;
    2224 			buf_end = buf_start + m->m_ext.ext_size;
   2225 			break;
   2226 		default:
   2227 			/* XXX */
   2228 			buf_start = data_start;
   2229 			buf_end = data_end;
   2230 			break;
   2231 		}
   2232 
   2233 		start = data_start - pre;
   2234 		if (start < buf_start)
   2235 			start = buf_start;
   2236 		end = data_end + post;
   2237 		if (end > buf_end)
   2238 			end = buf_end;
   2239 
   2240 		dp = start;
   2241 		len = (size_t)(end - start);
   2242 		skip_ind = false;
   2243 		for (pos = 0; pos < len; pos++) {
   2244 
   2245 			if (skip_ind)
   2246 				ind = "";
   2247 			else if (&dp[pos] == data_start)
   2248 				ind = "`";
   2249 			else
   2250 				ind = " ";
   2251 
   2252 			if (pos % 32 == 0)
   2253 				printf("%8zx:%s", pos, ind);
   2254 			else if (pos % 2 == 0)
   2255 				printf("%s", ind);
   2256 
   2257 			printf("%02x", dp[pos]);
   2258 
   2259 			skip_ind = false;
   2260 			if (&dp[pos + 1] == data_end) {
   2261 				skip_ind = true;
   2262 				printf("'");
   2263 			}
   2264 
   2265 			if (pos % 32 == 31 || pos + 1 == len) {
   2266 				printf("\n");
   2267 				skip_ind = false;
   2268 			}
   2269 		}
   2270 	}
   2271 }
   2272 
   2273 #endif /* QAT_DUMP */
   2274 
   2275 MODULE(MODULE_CLASS_DRIVER, qat, "pci,opencrypto");
   2276 
   2277 #ifdef _MODULE
   2278 #include "ioconf.c"
   2279 #endif
   2280 
   2281 int
   2282 qat_modcmd(modcmd_t cmd, void *data)
   2283 {
   2284 	int error = 0;
   2285 
   2286 	switch (cmd) {
   2287 	case MODULE_CMD_INIT:
   2288 #ifdef _MODULE
   2289 		error = config_init_component(cfdriver_ioconf_qat,
   2290 		    cfattach_ioconf_qat, cfdata_ioconf_qat);
   2291 #endif
   2292 		return error;
   2293 	case MODULE_CMD_FINI:
   2294 #ifdef _MODULE
   2295 		error = config_fini_component(cfdriver_ioconf_qat,
   2296 		    cfattach_ioconf_qat, cfdata_ioconf_qat);
   2297 #endif
   2298 		return error;
   2299 	default:
   2300 		return ENOTTY;
   2301 	}
   2302 }
   2303