1 /* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ 2 3 /* 4 * Copyright (c) 2019 Internet Initiative Japan, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * Copyright(c) 2014 Intel Corporation. 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 35 * * Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * * Redistributions in binary form must reproduce the above copyright 38 * notice, this list of conditions and the following disclaimer in 39 * the documentation and/or other materials provided with the 40 * distribution. 41 * * Neither the name of Intel Corporation nor the names of its 42 * contributors may be used to endorse or promote products derived 43 * from this software without specific prior written permission. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 46 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 47 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 48 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 49 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 51 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 55 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 56 */ 57 58 #include <sys/cdefs.h> 59 __KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/proc.h> 64 65 #include <opencrypto/xform.h> 66 67 /* XXX same as sys/arch/x86/x86/via_padlock.c */ 68 #include <opencrypto/cryptosoft_xform.c> 69 70 #include <dev/pci/pcireg.h> 71 #include <dev/pci/pcivar.h> 72 73 #include "qatreg.h" 74 #include "qat_hw17reg.h" 75 #include "qatvar.h" 76 #include "qat_hw17var.h" 77 78 int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t, 79 void *, void *); 80 int qat_adm_mailbox_send(struct qat_softc *, 81 struct fw_init_admin_req *, struct fw_init_admin_resp *); 82 int qat_adm_mailbox_send_init_me(struct qat_softc *); 83 int qat_adm_mailbox_send_hb_timer(struct qat_softc *); 84 int qat_adm_mailbox_send_fw_status(struct qat_softc *); 85 int qat_adm_mailbox_send_constants(struct qat_softc *); 86 87 uint32_t qat_hw17_crypto_setup_cipher_desc(struct qat_session *, 88 struct qat_crypto_desc *, struct cryptoini *, 89 union hw_cipher_algo_blk *, uint32_t, struct fw_la_bulk_req *, 90 enum fw_slice); 91 uint32_t qat_hw17_crypto_setup_auth_desc(struct qat_session *, 92 struct qat_crypto_desc *, struct cryptoini *, 93 union hw_auth_algo_blk *, uint32_t, struct fw_la_bulk_req *, 94 enum fw_slice); 95 void qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *, 96 struct fw_la_bulk_req *); 97 98 int 99 qat_adm_mailbox_init(struct qat_softc *sc) 100 { 101 uint64_t addr; 102 int error; 103 struct qat_dmamem *qdm; 104 105 error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 106 PAGE_SIZE, PAGE_SIZE); 107 if (error) 108 return error; 109 110 qdm = &sc->sc_admin_comms.qadc_const_tbl_dma; 111 error = qat_alloc_dmamem(sc, qdm, PAGE_SIZE, PAGE_SIZE); 112 if (error) 113 return error; 114 115 memcpy(qdm->qdm_dma_vaddr, 116 mailbox_const_tab, sizeof(mailbox_const_tab)); 117 118 bus_dmamap_sync(sc->sc_dmat, qdm->qdm_dma_map, 0, 119 qdm->qdm_dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 120 121 error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 122 PAGE_SIZE, PAGE_SIZE); 123 if (error) 124 return error; 125 126 addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr; 127 qat_misc_write_4(sc, ADMINMSGUR, addr >> 32); 128 qat_misc_write_4(sc, ADMINMSGLR, addr); 129 130 return 0; 131 } 132 133 int 134 qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae, 135 void *in, void *out) 136 { 137 uint32_t mailbox; 138 bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE); 139 int offset = ae * ADMINMSG_LEN * 2; 140 int times, received; 141 uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset; 142 143 mailbox = qat_misc_read_4(sc, mb_offset); 144 if (mailbox == 1) 145 return EAGAIN; 146 147 memcpy(buf, in, ADMINMSG_LEN); 148 bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0, 149 sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize, 150 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 151 qat_misc_write_4(sc, mb_offset, 1); 152 153 received = 0; 154 for (times = 0; times < 50; times++) { 155 delay(20000); 156 if (qat_misc_read_4(sc, mb_offset) == 0) { 157 received = 1; 158 break; 159 } 160 } 161 if (received) { 162 bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0, 163 sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize, 164 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 165 memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN); 166 } else { 167 aprint_error_dev(sc->sc_dev, 168 "Failed to send admin msg to accelerator\n"); 169 } 170 171 return received ? 0 : EFAULT; 172 } 173 174 int 175 qat_adm_mailbox_send(struct qat_softc *sc, 176 struct fw_init_admin_req *req, struct fw_init_admin_resp *resp) 177 { 178 int error; 179 uint32_t mask; 180 uint8_t ae; 181 182 for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { 183 if (!(mask & 1)) 184 continue; 185 186 error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp); 187 if (error) 188 return error; 189 if (resp->init_resp_hdr.status) { 190 aprint_error_dev(sc->sc_dev, 191 "Failed to send admin msg: cmd %d\n", 192 req->init_admin_cmd_id); 193 return EFAULT; 194 } 195 } 196 197 return 0; 198 } 199 200 int 201 qat_adm_mailbox_send_init_me(struct qat_softc *sc) 202 { 203 struct fw_init_admin_req req; 204 struct fw_init_admin_resp resp; 205 206 memset(&req, 0, sizeof(req)); 207 req.init_admin_cmd_id = FW_INIT_ME; 208 209 return qat_adm_mailbox_send(sc, &req, &resp); 210 } 211 212 int 213 qat_adm_mailbox_send_hb_timer(struct qat_softc *sc) 214 { 215 struct fw_init_admin_req req; 216 struct fw_init_admin_resp resp; 217 218 memset(&req, 0, sizeof(req)); 219 req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET; 220 221 req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr; 222 req.heartbeat_ticks = 223 sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL; 224 225 return qat_adm_mailbox_send(sc, &req, &resp); 226 } 227 228 int 229 qat_adm_mailbox_send_fw_status(struct qat_softc *sc) 230 { 231 int error; 232 struct fw_init_admin_req req; 233 struct fw_init_admin_resp resp; 234 235 memset(&req, 0, sizeof(req)); 236 req.init_admin_cmd_id = FW_STATUS_GET; 237 238 error = qat_adm_mailbox_send(sc, &req, &resp); 239 if (error) 240 return error; 241 242 aprint_normal_dev(sc->sc_dev, 243 "loaded firmware: version %d.%d.%d\n", 244 resp.u.s.version_major_num, 245 resp.u.s.version_minor_num, 246 resp.init_resp_pars.u.s1.version_patch_num); 247 248 return 0; 249 } 250 251 int 252 qat_adm_mailbox_send_constants(struct qat_softc *sc) 253 { 254 struct fw_init_admin_req req; 255 struct fw_init_admin_resp resp; 256 257 memset(&req, 0, sizeof(req)); 258 req.init_admin_cmd_id = FW_CONSTANTS_CFG; 259 260 req.init_cfg_sz = 1024; 261 req.init_cfg_ptr = 262 sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr; 263 264 return qat_adm_mailbox_send(sc, &req, &resp); 265 } 266 267 int 268 qat_adm_mailbox_send_init(struct qat_softc *sc) 269 { 270 int error; 271 272 error = qat_adm_mailbox_send_init_me(sc); 273 if (error) 274 return error; 275 276 error = qat_adm_mailbox_send_hb_timer(sc); 277 if (error) 278 return error; 279 280 error = qat_adm_mailbox_send_fw_status(sc); 281 if (error) 282 return error; 283 284 return qat_adm_mailbox_send_constants(sc); 285 } 286 287 int 288 qat_arb_init(struct qat_softc *sc) 289 { 290 uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; 291 uint32_t arb, i; 292 const uint32_t *thd_2_arb_cfg; 293 294 /* Service arb configured for 32 bytes responses and 295 * ring flow control check enabled. */ 296 for (arb = 0; arb < MAX_ARB; arb++) 297 qat_arb_sarconfig_write_4(sc, arb, arb_cfg); 298 299 /* Map worker threads to service arbiters */ 300 sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg); 301 302 if (!thd_2_arb_cfg) 303 return EINVAL; 304 305 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) 306 qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i)); 307 308 return 0; 309 } 310 311 int 312 qat_set_ssm_wdtimer(struct qat_softc *sc) 313 { 314 uint32_t timer; 315 u_int mask; 316 int i; 317 318 timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT; 319 for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { 320 if (!(mask & 1)) 321 continue; 322 qat_misc_write_4(sc, SSMWDT(i), timer); 323 qat_misc_write_4(sc, SSMWDTPKE(i), timer); 324 } 325 326 return 0; 327 } 328 329 int 330 qat_check_slice_hang(struct qat_softc *sc) 331 { 332 int handled = 0; 333 334 return handled; 335 } 336 337 uint32_t 338 qat_hw17_crypto_setup_cipher_desc(struct qat_session *qs, 339 struct qat_crypto_desc *desc, struct cryptoini *crie, 340 union hw_cipher_algo_blk *cipher, uint32_t cd_blk_offset, 341 struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice) 342 { 343 struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = 344 (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl; 345 int keylen = crie->cri_klen / 8; 346 347 cipher->max.cipher_config.val = 348 qat_crypto_load_cipher_cryptoini(desc, crie); 349 memcpy(cipher->max.key, crie->cri_key, keylen); 350 351 cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3; 352 cipher_cd_ctrl->cipher_key_sz = keylen >> 3; 353 cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3; 354 FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER); 355 FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice); 356 357 return roundup(sizeof(struct hw_cipher_config) + keylen, 8); 358 } 359 360 uint32_t 361 qat_hw17_crypto_setup_auth_desc(struct qat_session *qs, 362 struct qat_crypto_desc *desc, struct cryptoini *cria, 363 union hw_auth_algo_blk *auth, uint32_t cd_blk_offset, 364 struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice) 365 { 366 struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl = 367 (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl; 368 struct qat_sym_hash_def const *hash_def; 369 uint8_t *state1, *state2; 370 371 auth->max.inner_setup.auth_config.config = 372 qat_crypto_load_auth_cryptoini(desc, cria, &hash_def); 373 auth->max.inner_setup.auth_counter.counter = 374 htonl(hash_def->qshd_qat->qshqi_auth_counter); 375 376 auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3; 377 auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED; 378 auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len; 379 auth_cd_ctrl->final_sz = desc->qcd_auth_sz; 380 381 auth_cd_ctrl->inner_state1_sz = 382 roundup(hash_def->qshd_qat->qshqi_state1_len, 8); 383 auth_cd_ctrl->inner_state2_sz = 384 roundup(hash_def->qshd_qat->qshqi_state2_len, 8); 385 auth_cd_ctrl->inner_state2_offset = 386 auth_cd_ctrl->hash_cfg_offset + 387 ((sizeof(struct hw_auth_setup) + 388 auth_cd_ctrl->inner_state1_sz) >> 3); 389 390 state1 = auth->max.state1; 391 state2 = auth->max.state1 + auth_cd_ctrl->inner_state1_sz; 392 qat_crypto_hmac_precompute(desc, cria, hash_def, state1, state2); 393 394 FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH); 395 FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice); 396 397 return roundup(auth_cd_ctrl->inner_state1_sz + 398 auth_cd_ctrl->inner_state2_sz + 399 sizeof(struct hw_auth_setup), 8); 400 } 401 402 void 403 qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc, 404 struct fw_la_bulk_req *req) 405 { 406 union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; 407 struct fw_comn_req_hdr *req_hdr = &req->comn_hdr; 408 409 req_hdr->service_cmd_id = desc->qcd_cmd_id; 410 req_hdr->hdr_flags = FW_COMN_VALID; 411 req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA; 412 req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD( 413 COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL); 414 req_hdr->serv_specif_flags = 0; 415 cd_pars->s.content_desc_addr = desc->qcd_desc_paddr; 416 } 417 418 void 419 qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs, 420 struct qat_crypto_desc *desc, 421 struct cryptoini *crie, struct cryptoini *cria) 422 { 423 union hw_cipher_algo_blk *cipher; 424 union hw_auth_algo_blk *auth; 425 struct fw_la_bulk_req *req_tmpl; 426 struct fw_comn_req_hdr *req_hdr; 427 union fw_comn_req_hdr_cd_pars *cd_pars; 428 uint32_t cd_blk_offset = 0; 429 int i; 430 uint8_t *cd_blk_ptr; 431 432 req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache; 433 req_hdr = &req_tmpl->comn_hdr; 434 cd_pars = &req_tmpl->cd_pars; 435 cd_blk_ptr = desc->qcd_content_desc; 436 437 memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req)); 438 qat_hw17_init_comn_req_hdr(desc, req_tmpl); 439 440 for (i = 0; i < MAX_FW_SLICE; i++) { 441 switch (desc->qcd_slices[i]) { 442 case FW_SLICE_CIPHER: 443 cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr + 444 cd_blk_offset); 445 cd_blk_offset += qat_hw17_crypto_setup_cipher_desc( 446 qs, desc, crie, cipher, cd_blk_offset, req_tmpl, 447 desc->qcd_slices[i + 1]); 448 break; 449 case FW_SLICE_AUTH: 450 auth = (union hw_auth_algo_blk *)(cd_blk_ptr + 451 cd_blk_offset); 452 cd_blk_offset += qat_hw17_crypto_setup_auth_desc( 453 qs, desc, cria, auth, cd_blk_offset, req_tmpl, 454 desc->qcd_slices[i + 1]); 455 req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES; 456 /* no digest verify */ 457 break; 458 case FW_SLICE_DRAM_WR: 459 i = MAX_FW_SLICE; /* end of chain */ 460 break; 461 default: 462 KASSERT(0); 463 break; 464 } 465 } 466 467 cd_pars->s.content_desc_params_sz = 468 roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3; 469 470 #ifdef QAT_DUMP 471 qat_dump_raw(QAT_DUMP_DESC, "qcd_content_desc", 472 desc->qcd_content_desc, cd_pars->s.content_desc_params_sz << 3); 473 qat_dump_raw(QAT_DUMP_DESC, "qcd_req_cache", 474 &desc->qcd_req_cache, sizeof(desc->qcd_req_cache)); 475 #endif 476 477 bus_dmamap_sync(qcy->qcy_sc->sc_dmat, 478 qcy->qcy_session_dmamems[qs->qs_lid].qdm_dma_map, 0, 479 sizeof(struct qat_session), 480 BUS_DMASYNC_PREWRITE); 481 } 482 483 void 484 qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb, struct qat_session *qs, 485 struct qat_crypto_desc const *desc, struct qat_sym_cookie *qsc, 486 struct cryptodesc *crde, struct cryptodesc *crda, bus_addr_t icv_paddr) 487 { 488 struct qat_sym_bulk_cookie *qsbc; 489 struct fw_la_bulk_req *bulk_req; 490 struct fw_la_cipher_req_params *cipher_param; 491 struct fw_la_auth_req_params *auth_param; 492 uint32_t req_params_offset = 0; 493 uint8_t *req_params_ptr; 494 enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id; 495 496 qsbc = &qsc->u.qsc_bulk_cookie; 497 bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg; 498 499 memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req)); 500 bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc; 501 bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr; 502 bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr; 503 504 if (icv_paddr != 0) 505 bulk_req->comn_hdr.serv_specif_flags |= FW_LA_DIGEST_IN_BUFFER; 506 507 req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars; 508 509 if (cmd_id != FW_LA_CMD_AUTH) { 510 cipher_param = (struct fw_la_cipher_req_params *) 511 (req_params_ptr + req_params_offset); 512 req_params_offset += sizeof(struct fw_la_cipher_req_params); 513 514 cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr; 515 cipher_param->cipher_offset = crde->crd_skip; 516 cipher_param->cipher_length = crde->crd_len; 517 } 518 519 if (cmd_id != FW_LA_CMD_CIPHER) { 520 auth_param = (struct fw_la_auth_req_params *) 521 (req_params_ptr + req_params_offset); 522 req_params_offset += sizeof(struct fw_la_auth_req_params); 523 524 auth_param->auth_off = crda->crd_skip; 525 auth_param->auth_len = crda->crd_len; 526 auth_param->auth_res_addr = icv_paddr; 527 auth_param->auth_res_sz = 0; /* XXX no digest verify */ 528 auth_param->hash_state_sz = 0; 529 auth_param->u1.auth_partial_st_prefix = 0; 530 auth_param->u2.aad_sz = 0; 531 } 532 533 #ifdef QAT_DUMP 534 qat_dump_raw(QAT_DUMP_DESC, "req_params", req_params_ptr, req_params_offset); 535 #endif 536 } 537 538