/*	$NetBSD: qat_ae.c,v 1.2 2021/12/05 07:28:20 msaitoh Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.2 2021/12/05 07:28:20 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/firmload.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "qatreg.h"
#include "qatvar.h"
#include "qat_aevar.h"

int		qat_ae_write_4(struct qat_softc *, u_char, bus_size_t,
		    uint32_t);
int		qat_ae_read_4(struct qat_softc *, u_char, bus_size_t,
		    uint32_t *);
void		qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t,
		    bus_size_t, uint32_t);
int		qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t,
		    bus_size_t, uint32_t *);

u_short		qat_aereg_get_10bit_addr(enum aereg_type, u_short);
int		qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t *);
int		qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short,
		    u_short *, u_char *);
int		qat_aereg_abs_data_write(struct qat_softc *, u_char,
		    enum aereg_type, u_short, uint32_t);

void		qat_ae_enable_ctx(struct qat_softc *, u_char, u_int);
void		qat_ae_disable_ctx(struct qat_softc *, u_char, u_int);
void		qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char);
void		qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char);
void		qat_ae_write_lm_mode(struct qat_softc *, u_char,
		    enum aereg_type, u_char);
void		qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char,
		    u_char);
void		qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char);
int		qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int,
		    u_int);

enum qat_ae_status
		qat_ae_get_status(struct qat_softc *, u_char);
int		qat_ae_is_active(struct qat_softc *, u_char);
int		qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int);

int		qat_ae_clear_reset(struct qat_softc *);
int		qat_ae_check(struct qat_softc *);
int		qat_ae_reset_timestamp(struct qat_softc *);
void		qat_ae_clear_xfer(struct qat_softc *);
int		qat_ae_clear_gprs(struct qat_softc *);

void		qat_ae_get_shared_ustore_ae(u_char, u_char *);
u_int		qat_ae_ucode_parity64(uint64_t);
uint64_t	qat_ae_ucode_set_ecc(uint64_t);
int		qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int,
		    const uint64_t *);
int		qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int,
		    uint64_t *);
u_int		qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *);
int		qat_ae_exec_ucode(struct qat_softc *, u_char, u_char,
		    uint64_t *, u_int, int, u_int, u_int *);
int		qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char,
		    int *, uint64_t *, u_int,
		    u_int *, u_int *, u_int *, u_int *, u_int *);
int		qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char,
		    u_int, u_int, u_int, u_int, u_int);
int		qat_ae_get_inst_num(int);
int		qat_ae_batch_put_lm(struct qat_softc *, u_char,
		    struct qat_ae_batch_init_list *, size_t);
int		qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int);

u_int		qat_aefw_csum(char *, int);
const char *	qat_aefw_uof_string(struct qat_softc *, size_t);
struct uof_chunk_hdr *
		qat_aefw_uof_find_chunk(struct qat_softc *, const char *,
		    struct uof_chunk_hdr *);

int		qat_aefw_load_mof(struct qat_softc *);
int		qat_aefw_load_mmp(struct qat_softc *);

int		qat_aefw_mof_find_uof0(struct qat_softc *,
		    struct mof_uof_hdr *, struct mof_uof_chunk_hdr *,
		    u_int, size_t, const char *,
		    size_t *, void **);
int		qat_aefw_mof_find_uof(struct qat_softc *);
int		qat_aefw_mof_parse(struct qat_softc *);

int		qat_aefw_uof_parse_image(struct qat_softc *,
		    struct qat_uof_image *, struct uof_chunk_hdr *uch);
int		qat_aefw_uof_parse_images(struct qat_softc *);
int		qat_aefw_uof_parse(struct qat_softc *);

int		qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t,
		    struct qat_dmamem *);
int		qat_aefw_auth(struct qat_softc *, struct qat_dmamem *);
int		qat_aefw_suof_load(struct qat_softc *sc,
		    struct qat_dmamem *dma);
int		qat_aefw_suof_parse_image(struct qat_softc *,
		    struct qat_suof_image *, struct suof_chunk_hdr *);
int		qat_aefw_suof_parse(struct qat_softc *);
int		qat_aefw_suof_write(struct qat_softc *);

int		qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *,
		    struct qat_uof_image *);
int		qat_aefw_uof_init_ae(struct qat_softc *, u_char);
int		qat_aefw_uof_init(struct qat_softc *);

int		qat_aefw_init_memory_one(struct qat_softc *,
		    struct uof_init_mem *);
void		qat_aefw_free_lm_init(struct qat_softc *, u_char);
int		qat_aefw_init_ustore(struct qat_softc *);
int		qat_aefw_init_reg(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, u_int);
int		qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char,
		    struct qat_uof_image *);
int		qat_aefw_init_memory(struct qat_softc *);
int		qat_aefw_init_globals(struct qat_softc *);
uint64_t	qat_aefw_get_uof_inst(struct qat_softc *,
		    struct qat_uof_page *, u_int);
int		qat_aefw_do_pagein(struct qat_softc *, u_char,
		    struct qat_uof_page *);
int		qat_aefw_uof_write_one(struct qat_softc *,
		    struct qat_uof_image *);
int		qat_aefw_uof_write(struct qat_softc *);

int
qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
    uint32_t value)
{
	int times = TIMEOUT_AE_CSR;

	do {
		qat_ae_local_write_4(sc, ae, offset, value);
		if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
		    LOCAL_CSR_STATUS_STATUS) == 0)
			return 0;

	} while (times--);

	aprint_error_dev(sc->sc_dev,
	    "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n",
	    ae, (long)offset);
	return EFAULT;
}

int
qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
    uint32_t *value)
{
	int times = TIMEOUT_AE_CSR;
	uint32_t v;

	do {
		v = qat_ae_local_read_4(sc, ae, offset);
		if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
		    LOCAL_CSR_STATUS_STATUS) == 0) {
			*value = v;
			return 0;
		}
	} while (times--);

	aprint_error_dev(sc->sc_dev,
	    "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n",
	    ae, (long)offset);
	return EFAULT;
}
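
/*
 * Indirect context CSR access: a per-context CSR (for example CTX_STS or
 * the LM address registers) is reached by first selecting the context
 * through CSR_CTX_POINTER and then accessing the CSR offset.  The write
 * helper below loops over every context set in ctx_mask and restores the
 * original pointer afterwards; the read helper only switches the pointer
 * when the requested context is not already selected.
 */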
void
qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask,
    bus_size_t offset, uint32_t value)
{
	int ctx;
	uint32_t ctxptr;

	KASSERT(offset == CTX_FUTURE_COUNT_INDIRECT ||
	    offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
	    offset == CTX_STS_INDIRECT ||
	    offset == CTX_WAKEUP_EVENTS_INDIRECT ||
	    offset == CTX_SIG_EVENTS_INDIRECT ||
	    offset == LM_ADDR_0_INDIRECT ||
	    offset == LM_ADDR_1_INDIRECT ||
	    offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
	    offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);

	qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
	for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
		if ((ctx_mask & (1 << ctx)) == 0)
			continue;
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
		qat_ae_write_4(sc, ae, offset, value);
	}
	qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
}

int
qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx,
    bus_size_t offset, uint32_t *value)
{
	int error;
	uint32_t ctxptr;

	KASSERT(offset == CTX_FUTURE_COUNT_INDIRECT ||
	    offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
	    offset == CTX_STS_INDIRECT ||
	    offset == CTX_WAKEUP_EVENTS_INDIRECT ||
	    offset == CTX_SIG_EVENTS_INDIRECT ||
	    offset == LM_ADDR_0_INDIRECT ||
	    offset == LM_ADDR_1_INDIRECT ||
	    offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
	    offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);

	/* save the ctx ptr */
	qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
	if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
	    (ctx & CSR_CTX_POINTER_CONTEXT))
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);

	error = qat_ae_read_4(sc, ae, offset, value);

	/* restore ctx ptr */
	if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
	    (ctx & CSR_CTX_POINTER_CONTEXT))
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);

	return error;
}

u_short
qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg)
{
	u_short addr;

	switch (regtype) {
	case AEREG_GPA_ABS:
	case AEREG_GPB_ABS:
		addr = (reg & 0x7f) | 0x80;
		break;
	case AEREG_GPA_REL:
	case AEREG_GPB_REL:
		addr = reg & 0x1f;
		break;
	case AEREG_SR_RD_REL:
	case AEREG_SR_WR_REL:
	case AEREG_SR_REL:
		addr = 0x180 | (reg & 0x1f);
		break;
	case AEREG_SR_INDX:
		addr = 0x140 | ((reg & 0x3) << 1);
		break;
	case AEREG_DR_RD_REL:
	case AEREG_DR_WR_REL:
	case AEREG_DR_REL:
		addr = 0x1c0 | (reg & 0x1f);
		break;
	case AEREG_DR_INDX:
		addr = 0x100 | ((reg & 0x3) << 1);
		break;
	case AEREG_NEIGH_INDX:
		addr = 0x241 | ((reg & 0x3) << 1);
		break;
	case AEREG_NEIGH_REL:
		addr = 0x280 | (reg & 0x1f);
		break;
	case AEREG_LMEM0:
		addr = 0x200;
		break;
	case AEREG_LMEM1:
		addr = 0x220;
		break;
	case AEREG_NO_DEST:
		addr = 0x300 | (reg & 0xff);
		break;
	default:
		addr = AEREG_BAD_REGADDR;
		break;
	}
	return (addr);
}
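
/*
 * qat_aereg_rel_data_write() writes a 32-bit value into a context-relative
 * GPR or LM index register by assembling a tiny microcode snippet
 * (immed_w1/immed_w0 to load the upper and lower 16 bits, then nop and
 * ctx_arb[kill]) and running it on the target context with
 * qat_ae_exec_ucode().  The 10-bit source/destination register addresses
 * and the immediate bytes are patched directly into the instruction words;
 * the bit positions used below follow the microword encoding this driver
 * assumes.
 */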
int
qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{
	uint16_t srchi, srclo, destaddr, data16hi, data16lo;
	uint64_t inst[] = {
		0x0F440000000ull,	/* immed_w1[reg, val_hi16] */
		0x0F040000000ull,	/* immed_w0[reg, val_lo16] */
		0x0F0000C0300ull,	/* nop */
		0x0E000010000ull	/* ctx_arb[kill] */
	};
	const int ninst = __arraycount(inst);
	const int imm_w1 = 0, imm_w0 = 1;
	unsigned int ctxen;
	uint16_t mask;

	/*
	 * This logic only works for GPRs and LM index registers,
	 * not NN or XFER registers!
	 */
	KASSERT(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);

	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) {
		/* determine the context mode */
		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
			/* 4-ctx mode */
			if (ctx & 0x1)
				return EINVAL;
			mask = 0x1f;
		} else {
			/* 8-ctx mode */
			mask = 0x0f;
		}
		if (relreg & ~mask)
			return EINVAL;
	}
	if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
	    AEREG_BAD_REGADDR) {
		return EINVAL;
	}

	data16lo = 0xffff & value;
	data16hi = 0xffff & (value >> 16);
	srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
	    (uint16_t)(0xff & data16hi));
	srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
	    (uint16_t)(0xff & data16lo));

	switch (regtype) {
	case AEREG_GPA_REL:	/* A rel source */
		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
		    ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff);
		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
		    ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff);
		break;
	default:
		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
		    ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff);
		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
		    ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff);
		break;
	}

	return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL);
}
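
/*
 * qat_aereg_rel_data_read() reads a context-relative register indirectly:
 * it saves the microword at ustore address 0, writes an
 * "alu[--, --, B, reg]" instruction there (with USTORE_ADDRESS_ECS set),
 * waits for the AE to execute it, and then samples the ALU_OUT CSR, which
 * holds the ALU result and therefore the register contents.  Every piece
 * of state touched on the way (saved microword, context selection,
 * CTX_ARB_CNTL, AE_MISC_CONTROL, CTX_ENABLES) is restored before
 * returning.
 */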
int
qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t *value)
{
	uint64_t inst, savucode;
	uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi;
	u_int uaddr, ustore_addr;
	int error;
	u_short mask, regaddr;
	u_char nae;

	KASSERT(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
	    regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL ||
	    regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL ||
	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);

	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) ||
	    (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) ||
	    (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL)) {
		/* determine the context mode */
		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
			/* 4-ctx mode */
			if (ctx & 0x1)
				return EINVAL;
			mask = 0x1f;
		} else {
			/* 8-ctx mode */
			mask = 0x0f;
		}
		if (relreg & ~mask)
			return EINVAL;
	}
	if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
	    AEREG_BAD_REGADDR) {
		return EINVAL;
	}

	/* instruction -- alu[--, --, B, reg] */
	switch (regtype) {
	case AEREG_GPA_REL:
		/* A rel source */
		inst = 0xA070000000ull | (regaddr & 0x3ff);
		break;
	default:
		inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10));
		break;
	}

	/* backup shared control store bit, and force AE to
	 * non-shared mode before executing ucode snippet */
	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
	if (misc & AE_MISC_CONTROL_SHARE_CS) {
		qat_ae_get_shared_ustore_ae(ae, &nae);
		if ((1 << nae) & sc->sc_ae_mask && qat_ae_is_active(sc, nae))
			return EBUSY;
	}

	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);

	/* read current context */
	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	/* prevent clearing the W1C bits: the breakpoint bit,
	 * ECC error bit, and Parity error bit */
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;

	/* change the context */
	if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO))
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
		    ctx & ACTIVE_CTX_STATUS_ACNO);
	/* save a ustore location */
	if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) {
		/* restore AE_MISC_CONTROL csr */
		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);

		/* restore the context */
		if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
			qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
			    savctx & ACTIVE_CTX_STATUS_ACNO);
		}
		qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);

		return (error);
	}

	/* turn off ustore parity */
	qat_ae_write_4(sc, ae, CTX_ENABLES,
	    ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE));

	/* save ustore-addr csr */
	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);

	/* write the ALU instruction to ustore, enable ecs bit */
	uaddr = 0 | USTORE_ADDRESS_ECS;

	/* set the uaddress */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
	inst = qat_ae_ucode_set_ecc(inst);

	ulo = (uint32_t)(inst & 0xffffffff);
	uhi = (uint32_t)(inst >> 32);

	qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);

	/* this will auto increment the address */
	qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);

	/* set the uaddress */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);

	/* delay for at least 8 cycles */
	qat_ae_wait_num_cycles(sc, ae, 0x8, 0);

	/* read ALU output -- the instruction should have been executed
	 * prior to clearing the ECS in putUwords */
	qat_ae_read_4(sc, ae, ALU_OUT, value);

	/* restore ustore-addr csr */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);

	/* restore the ustore */
	error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode);

	/* restore the context */
	if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
		    savctx & ACTIVE_CTX_STATUS_ACNO);
	}

	qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);

	/* restore AE_MISC_CONTROL csr */
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);

	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);

	return error;
}
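
/*
 * Relative transfer registers are addressed through the AE's transfer
 * register window: the offset is the relative register number plus the
 * context number shifted into the per-context bank (ctx << 5).  The code
 * below assumes that in 4-ctx mode each context owns 32 registers and the
 * DR bank starts 0x20 entries above the SR bank, while in 8-ctx mode each
 * context owns 16 and the DR bank starts at 0x10; hence the mask/dr_offset
 * selection.
 */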
int
qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{
	bus_size_t addr;
	int error;
	uint32_t ctxen;
	u_short mask;
	u_short dr_offset;

	KASSERT(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL ||
	    regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL);

	QAT_YIELD();

	error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		if (ctx & 0x1) {
			aprint_error_dev(sc->sc_dev,
			    "bad ctx argument in 4-ctx mode, ctx=0x%x\n", ctx);
			return EINVAL;
		}
		mask = 0x1f;
		dr_offset = 0x20;

	} else {
		mask = 0x0f;
		dr_offset = 0x10;
	}

	if (relreg & ~mask)
		return EINVAL;

	addr = relreg + (ctx << 0x5);

	switch (regtype) {
	case AEREG_SR_REL:
	case AEREG_SR_RD_REL:
		qat_ae_xfer_write_4(sc, ae, addr, value);
		break;
	case AEREG_DR_REL:
	case AEREG_DR_RD_REL:
		qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value);
		break;
	default:
		error = EINVAL;
	}

	return error;
}

int
qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{

	panic("notyet");

	return 0;
}

int
qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{

	panic("notyet");

	return 0;
}

int
qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae,
    u_short absreg, u_short *relreg, u_char *ctx)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		/* 4-ctx mode */
		*relreg = absreg & 0x1f;
		*ctx = (absreg >> 0x4) & 0x6;
	} else {
		/* 8-ctx mode */
		*relreg = absreg & 0x0f;
		*ctx = (absreg >> 0x4) & 0x7;
	}

	return 0;
}

int
qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae,
    enum aereg_type regtype, u_short absreg, uint32_t value)
{
	int error;
	u_short relreg;
	u_char ctx;

	qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx);

	switch (regtype) {
	case AEREG_GPA_ABS:
		KASSERT(absreg < MAX_GPR_REG);
		error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL,
		    relreg, value);
		break;
	case AEREG_GPB_ABS:
		KASSERT(absreg < MAX_GPR_REG);
		error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL,
		    relreg, value);
		break;
	case AEREG_DR_RD_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL,
		    relreg, value);
		break;
	case AEREG_SR_RD_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL,
		    relreg, value);
		break;
	case AEREG_DR_WR_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL,
		    relreg, value);
		break;
	case AEREG_SR_WR_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL,
		    relreg, value);
		break;
	case AEREG_NEIGH_ABS:
		KASSERT(absreg < MAX_NN_REG);
		if (absreg >= MAX_NN_REG)
			return EINVAL;
		error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL,
		    relreg, value);
		break;
	default:
		panic("Invalid Register Type");
	}

	return error;
}

void
qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		ctx_mask &= 0x55;
	} else {
		ctx_mask &= 0xff;
	}

	ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE);
	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
}

void
qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
	ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE));
	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
}

void
qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (mode == 4)
		nval = val | CTX_ENABLES_INUSE_CONTEXTS;
	else
		nval = val & ~CTX_ENABLES_INUSE_CONTEXTS;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}
void
qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (mode)
		nval = val | CTX_ENABLES_NN_MODE;
	else
		nval = val & ~CTX_ENABLES_NN_MODE;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}

void
qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae,
    enum aereg_type lm, u_char mode)
{
	uint32_t val, nval;
	uint32_t bit;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	switch (lm) {
	case AEREG_LMEM0:
		bit = CTX_ENABLES_LMADDR_0_GLOBAL;
		break;
	case AEREG_LMEM1:
		bit = CTX_ENABLES_LMADDR_1_GLOBAL;
		break;
	default:
		panic("invalid lmem reg type");
		break;
	}

	if (mode)
		nval = val | bit;
	else
		nval = val & ~bit;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}

void
qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);

	if (mode == 1)
		nval = val | AE_MISC_CONTROL_SHARE_CS;
	else
		nval = val & ~AE_MISC_CONTROL_SHARE_CS;

	if (val != nval)
		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval);
}

void
qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	u_char nae;

	qat_ae_get_shared_ustore_ae(ae, &nae);

	qat_ae_write_shared_cs_mode0(sc, ae, mode);

	if ((sc->sc_ae_mask & (1 << nae))) {
		qat_ae_write_shared_cs_mode0(sc, nae, mode);
	}
}

int
qat_ae_set_reload_ustore(struct qat_softc *sc, u_char ae,
    u_int reload_size, int shared_mode, u_int ustore_dram_addr)
{
	uint32_t val, cs_reload;

	switch (reload_size) {
	case 0:
		cs_reload = 0x0;
		break;
	case QAT_2K:
		cs_reload = 0x1;
		break;
	case QAT_4K:
		cs_reload = 0x2;
		break;
	case QAT_8K:
		cs_reload = 0x3;
		break;
	default:
		return EINVAL;
	}

	if (cs_reload)
		QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr;

	QAT_AE(sc, ae).qae_reload_size = reload_size;

	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
	val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD |
	    AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS);
	val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) |
	    __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD);
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);

	return 0;
}

enum qat_ae_status
qat_ae_get_status(struct qat_softc *sc, u_char ae)
{
	int error;
	uint32_t val = 0;

	error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	if (error || val & CTX_ENABLES_ENABLE)
		return QAT_AE_ENABLED;

	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
	if (val & ACTIVE_CTX_STATUS_ABO)
		return QAT_AE_ACTIVE;

	return QAT_AE_DISABLED;
}

int
qat_ae_is_active(struct qat_softc *sc, u_char ae)
{
	uint32_t val;

	if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
		return 1;

	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
	if (val & ACTIVE_CTX_STATUS_ABO)
		return 1;
	else
		return 0;
}
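
/*
 * qat_ae_wait_num_cycles() measures elapsed AE cycles with the 16-bit
 * PROFILE_COUNT counter, so the computed delta is corrected by 0x10000
 * when the counter wraps.  With 'check' set, the wait is cut short once
 * the AE is no longer executing (ACTIVE_CTX_STATUS_ABO clear) after the
 * context has had time to start.
 */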
/* returns 1 if actually waited for specified number of cycles */
int
qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check)
{
	uint32_t cnt, actx;
	int pcnt, ccnt, elapsed, times;

	qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
	pcnt = cnt & 0xffff;

	times = TIMEOUT_AE_CHECK;
	do {
		qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
		ccnt = cnt & 0xffff;

		elapsed = ccnt - pcnt;
		if (elapsed == 0) {
			times--;
			aprint_debug_dev(sc->sc_dev,
			    "qat_ae_wait_num_cycles elapsed 0 times %d\n",
			    times);
		}
		if (times <= 0) {
			aprint_error_dev(sc->sc_dev,
			    "qat_ae_wait_num_cycles timeout\n");
			return -1;
		}

		if (elapsed < 0)
			elapsed += 0x10000;

		if (elapsed >= CYCLES_FROM_READY2EXE && check) {
			if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS,
			    &actx) == 0) {
				if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
					return 0;
			}
		}
	} while (cycles > elapsed);

	if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) {
		if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
			return 0;
	}

	return 1;
}

int
qat_ae_init(struct qat_softc *sc)
{
	int error;
	uint32_t mask, val = 0;
	u_char ae;

	/* XXX adf_initSysMemInfo */

	/* XXX Disable clock gating for some chip if debug mode */

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		struct qat_ae *qae = &sc->sc_ae[ae];
		if (!(mask & 1))
			continue;

		qae->qae_ustore_size = USTORE_SIZE;

		qae->qae_free_addr = 0;
		qae->qae_free_size = USTORE_SIZE;
		qae->qae_live_ctx_mask = AE_ALL_CTX;
		qae->qae_ustore_dram_addr = 0;
		qae->qae_reload_size = 0;
	}

	/* XXX Enable attention interrupt */

	error = qat_ae_clear_reset(sc);
	if (error)
		return error;

	qat_ae_clear_xfer(sc);

	if (!sc->sc_hw.qhw_fw_auth) {
		error = qat_ae_clear_gprs(sc);
		if (error)
			return error;
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val);
		val |= 0x1;
		qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val);
	}

	error = qat_ae_clear_reset(sc);
	if (error)
		return error;

	/* XXX XXX XXX Clean MMP memory if mem scrub is supported */
	/* halMem_ScrubMMPMemory */

	return 0;
}

int
qat_ae_start(struct qat_softc *sc)
{
	int error;
	u_char ae;

	for (ae = 0; ae < sc->sc_ae_num; ae++) {
		if ((sc->sc_ae_mask & (1 << ae)) == 0)
			continue;

		error = qat_aefw_start(sc, ae, 0xff);
		if (error)
			return error;

		aprint_verbose_dev(sc->sc_dev, "Started AE %d\n", ae);
	}

	return 0;
}

int
qat_ae_cluster_intr(void *arg)
{
	/* XXX */
	printf("qat_ae_cluster_intr\n");

	return 1;
}
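
/*
 * Bring the AEs out of reset: clear the per-AE and per-accelerator reset
 * bits in CAP_GLOBAL_CTL_RESET (retrying until they read back as clear),
 * enable their clocks, verify the AEs respond, and then program sane
 * defaults into the context CSRs of every enabled AE.
 */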
int
qat_ae_clear_reset(struct qat_softc *sc)
{
	int error;
	uint32_t times, reset, clock, reg, mask;
	u_char ae;

	reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
	reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK));
	reset &= ~(__SHIFTIN(sc->sc_accel_mask,
	    CAP_GLOBAL_CTL_RESET_ACCEL_MASK));
	times = TIMEOUT_AE_RESET;
	do {
		qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset);
		if ((times--) == 0) {
			aprint_error_dev(sc->sc_dev, "couldn't reset AEs\n");
			return EBUSY;
		}
		reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
	} while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) |
	    __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK))
	    & reg);

	/* Enable clock for AE and QAT */
	clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN);
	clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK);
	clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK);
	qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock);

	error = qat_ae_check(sc);
	if (error)
		return error;

	/*
	 * Set undefined power-up/reset states to reasonable default values...
	 * just to make sure we're starting from a known point
	 */
	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		/* init the ctx_enable */
		qat_ae_write_4(sc, ae, CTX_ENABLES,
		    CTX_ENABLES_INIT);

		/* initialize the PCs */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_STS_INDIRECT,
		    UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);

		/* init the ctx_arb */
		qat_ae_write_4(sc, ae, CTX_ARB_CNTL,
		    CTX_ARB_CNTL_INIT);

		/* enable cc */
		qat_ae_write_4(sc, ae, CC_ENABLE,
		    CC_ENABLE_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_WAKEUP_EVENTS_INDIRECT,
		    CTX_WAKEUP_EVENTS_INDIRECT_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_SIG_EVENTS_INDIRECT,
		    CTX_SIG_EVENTS_INDIRECT_INIT);
	}

	if ((sc->sc_ae_mask != 0) &&
	    sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) {
		/* XXX XXX XXX init eSram only when this is boot time */
	}

	if ((sc->sc_ae_mask != 0) &&
	    sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) {
		/* XXX XXX XXX wait shram to complete initialization */
	}

	qat_ae_reset_timestamp(sc);

	return 0;
}

int
qat_ae_check(struct qat_softc *sc)
{
	int error, times, ae;
	uint32_t cnt, pcnt, mask;

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		times = TIMEOUT_AE_CHECK;
		error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't access AE %d CSR\n", ae);
			return error;
		}
		pcnt = cnt & 0xffff;

		while (1) {
			error = qat_ae_read_4(sc, ae,
			    PROFILE_COUNT, &cnt);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't access AE %d CSR\n", ae);
				return error;
			}
			cnt &= 0xffff;
			if (cnt == pcnt)
				times--;
			else
				break;
			if (times <= 0) {
				aprint_error_dev(sc->sc_dev,
				    "AE %d CSR is useless\n", ae);
				return EFAULT;
			}
		}
	}

	return 0;
}

int
qat_ae_reset_timestamp(struct qat_softc *sc)
{
	uint32_t misc, mask;
	u_char ae;

	/* stop the timestamp timers */
	misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC);
	if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) {
		qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
		    misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN));
	}

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0);
		qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0);
	}

	/* start timestamp timers */
	qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
	    misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN);

	return 0;
}
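
/*
 * qat_ae_clear_xfer() zeroes every read transfer register through plain
 * CSR writes.  qat_ae_clear_gprs() instead loads a small canned microcode
 * program (ae_clear_gprs_inst) into each AE's control store, runs it on
 * all contexts to clear the GPRs, and then puts the context CSRs back to
 * their initial values.
 */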
void
qat_ae_clear_xfer(struct qat_softc *sc)
{
	u_int mask, reg;
	u_char ae;

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		for (reg = 0; reg < MAX_GPR_REG; reg++) {
			qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS,
			    reg, 0);
			qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS,
			    reg, 0);
		}
	}
}

int
qat_ae_clear_gprs(struct qat_softc *sc)
{
	uint32_t val;
	uint32_t saved_ctx = 0;
	int times = TIMEOUT_AE_CHECK, rv;
	u_char ae;
	u_int mask;

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		/* turn off share control store bit */
		qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
		val &= ~AE_MISC_CONTROL_SHARE_CS;
		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);

		/* turn off ucode parity */
		/* make sure nn_mode is set to self */
		qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
		val &= CTX_ENABLES_IGNORE_W1C_MASK;
		val |= CTX_ENABLES_NN_MODE;
		val &= ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE;
		qat_ae_write_4(sc, ae, CTX_ENABLES, val);

		/* copy instructions to ustore */
		qat_ae_ucode_write(sc, ae, 0, __arraycount(ae_clear_gprs_inst),
		    ae_clear_gprs_inst);

		/* set PC */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT,
		    UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);

		/* save current context */
		qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx);
		/* change the active context */
		/* start the context from ctx 0 */
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0);

		/* wakeup-event voluntary */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_WAKEUP_EVENTS_INDIRECT,
		    CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
		/* clean signals */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_SIG_EVENTS_INDIRECT, 0);
		qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);

		qat_ae_enable_ctx(sc, ae, AE_ALL_CTX);
	}

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* wait for AE to finish */
		do {
			rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1);
		} while (rv && times--);
		if (times <= 0) {
			aprint_error_dev(sc->sc_dev,
			    "qat_ae_clear_gprs timeout\n");
			return ETIMEDOUT;
		}
		qat_ae_disable_ctx(sc, ae, AE_ALL_CTX);
		/* change the active context */
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
		    saved_ctx & ACTIVE_CTX_STATUS_ACNO);
		/* init the ctx_enable */
		qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT);
		/* initialize the PCs */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
		/* init the ctx_arb */
		qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT);
		/* enable cc */
		qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_WAKEUP_EVENTS_INDIRECT,
		    CTX_WAKEUP_EVENTS_INDIRECT_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_SIG_EVENTS_INDIRECT, CTX_SIG_EVENTS_INDIRECT_INIT);
	}

	return 0;
}

void
qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae)
{
	if (ae & 0x1)
		*nae = ae - 1;
	else
		*nae = ae + 1;
}
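
/*
 * Microwords are stored in the control store together with ECC check bits.
 * qat_ae_ucode_parity64() folds a 64-bit value down to its XOR parity, and
 * qat_ae_ucode_set_ecc() recomputes the seven check bits by taking the
 * parity of the microword under seven fixed masks and placing the results
 * at USTORE_ECC_BIT_0..USTORE_ECC_BIT_6; the masks encode which data bits
 * participate in each check bit.
 */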
u_int
qat_ae_ucode_parity64(uint64_t ucode)
{

	ucode ^= ucode >> 1;
	ucode ^= ucode >> 2;
	ucode ^= ucode >> 4;
	ucode ^= ucode >> 8;
	ucode ^= ucode >> 16;
	ucode ^= ucode >> 32;

	return ((u_int)(ucode & 1));
}

uint64_t
qat_ae_ucode_set_ecc(uint64_t ucode)
{
	static const uint64_t
		bit0mask = 0xff800007fffULL, bit1mask = 0x1f801ff801fULL,
		bit2mask = 0xe387e0781e1ULL, bit3mask = 0x7cb8e388e22ULL,
		bit4mask = 0xaf5b2c93244ULL, bit5mask = 0xf56d5525488ULL,
		bit6mask = 0xdaf69a46910ULL;

	/* clear the ecc bits */
	ucode &= ~(0x7fULL << USTORE_ECC_BIT_0);

	ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) <<
	    USTORE_ECC_BIT_0;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) <<
	    USTORE_ECC_BIT_1;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) <<
	    USTORE_ECC_BIT_2;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) <<
	    USTORE_ECC_BIT_3;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) <<
	    USTORE_ECC_BIT_4;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) <<
	    USTORE_ECC_BIT_5;
	ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) <<
	    USTORE_ECC_BIT_6;

	return (ucode);
}

int
qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
    const uint64_t *ucode)
{
	uint64_t tmp;
	uint32_t ustore_addr, ulo, uhi;
	int i;

	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr |= USTORE_ADDRESS_ECS;

	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < ninst; i++) {
		tmp = qat_ae_ucode_set_ecc(ucode[i]);
		ulo = (uint32_t)(tmp & 0xffffffff);
		uhi = (uint32_t)(tmp >> 32);

		qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
		/* this will auto increment the address */
		qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);

		QAT_YIELD();
	}
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);

	return 0;
}

int
qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
    uint64_t *ucode)
{
	uint32_t misc, ustore_addr, ulo, uhi;
	u_int ii;
	u_char nae;

	if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
		return EBUSY;

	/* determine whether the neighbour AE is running in shared control
	 * store mode */
	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
	if (misc & AE_MISC_CONTROL_SHARE_CS) {
		qat_ae_get_shared_ustore_ae(ae, &nae);
		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
			return EBUSY;
	}

	/* if reloadable, then get it all from dram-ustore */
	if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD))
		panic("notyet"); /* XXX getReloadUwords */

	/* disable SHARE_CS bit to workaround silicon bug */
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb);

	KASSERT(uaddr + ninst <= USTORE_SIZE);

	/* save ustore-addr csr */
	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);

	uaddr |= USTORE_ADDRESS_ECS;	/* enable ecs bit */
	for (ii = 0; ii < ninst; ii++) {
		qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);

		uaddr++;
		qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo);
		qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi);
		ucode[ii] = uhi;
		ucode[ii] = (ucode[ii] << 32) | ulo;
	}

	/* restore SHARE_CS bit to workaround silicon bug */
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);

	return 0;
}
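
/*
 * qat_ae_concat_ucode() appends, to an existing microcode buffer, the
 * instructions needed to store 'size' bytes of 'value' at local-memory
 * address 'addr'.  It starts from one of the canned templates
 * (ae_inst_1b/2b/3b/4b, selected by how many bytes remain; sizes of four
 * or more use the 4-byte template), patches the address and the 16-bit
 * halves of each word into the template's immed instructions via
 * INSERT_IMMED_GPRA/B_CONST, and recurses for a tail shorter than a word.
 * The return value is the number of instructions appended.
 */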
u_int
qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr,
    u_int *value)
{
	const uint64_t *inst_arr;
	u_int ninst0, curvalue;
	int ii, vali, fixup, usize = 0;

	if (size == 0)
		return 0;

	ninst0 = ninst;
	vali = 0;
	curvalue = value[vali++];

	switch (size) {
	case 0x1:
		inst_arr = ae_inst_1b;
		usize = __arraycount(ae_inst_1b);
		break;
	case 0x2:
		inst_arr = ae_inst_2b;
		usize = __arraycount(ae_inst_2b);
		break;
	case 0x3:
		inst_arr = ae_inst_3b;
		usize = __arraycount(ae_inst_3b);
		break;
	default:
		inst_arr = ae_inst_4b;
		usize = __arraycount(ae_inst_4b);
		break;
	}

	fixup = ninst;
	for (ii = 0; ii < usize; ii++)
		ucode[ninst++] = inst_arr[ii];

	INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr));
	fixup++;
	INSERT_IMMED_GPRA_CONST(ucode[fixup], 0);
	fixup++;
	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
	fixup++;
	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
	/* XXX fixup++ ? */

	if (size <= 0x4)
		return (ninst - ninst0);

	size -= sizeof(u_int);
	while (size >= sizeof(u_int)) {
		curvalue = value[vali++];
		fixup = ninst;
		ucode[ninst++] = ae_inst_4b[0x2];
		ucode[ninst++] = ae_inst_4b[0x3];
		ucode[ninst++] = ae_inst_4b[0x8];
		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
		fixup++;
		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
		/* XXX fixup++ ? */

		addr += sizeof(u_int);
		size -= sizeof(u_int);
	}
	/* call this function recursively when the remaining size is
	 * less than 4 */
	ninst +=
	    qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali);

	return (ninst - ninst0);
}
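
/*
 * qat_ae_exec_ucode() runs an arbitrary microcode snippet on one context
 * of an idle AE.  It saves everything the snippet may disturb (the
 * overwritten control-store words when the snippet fits in MAX_EXEC_INST,
 * the PC, wakeup events, signal state, condition codes, active context and
 * shared control-store mode), loads the snippet at address 0, starts the
 * chosen context, waits up to max_cycles for it to go idle again,
 * optionally reports the final PC through endpc, and then restores the
 * saved state.
 */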
int
qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx,
    uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles,
    u_int *endpc)
{
	int error = 0, share_cs = 0;
	uint64_t savucode[MAX_EXEC_INST];
	uint32_t indr_lm_addr_0, indr_lm_addr_1;
	uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1;
	uint32_t indr_future_cnt_sig;
	uint32_t indr_sig, active_sig;
	uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl;
	uint32_t misc, nmisc, ctxen;
	u_char nae;

	KASSERT(ninst <= USTORE_SIZE);

	if (qat_ae_is_active(sc, ae))
		return EBUSY;

#if 0
	printf("%s: ae %d ctx %d ninst %d code 0x%016llx 0x%016llx\n",
	    __func__, ae, ctx, ninst, ucode[0], ucode[ninst-1]);
#endif

	/* save current LM addr */
	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0);
	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1);
	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
	    &indr_lm_addr_byte_0);
	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
	    &indr_lm_addr_byte_1);

	/* backup shared control store bit, and force AE to
	 * non-shared mode before executing ucode snippet */
	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
	if (misc & AE_MISC_CONTROL_SHARE_CS) {
		share_cs = 1;
		qat_ae_get_shared_ustore_ae(ae, &nae);
		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
			return EBUSY;
	}
	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);

	/* save current states: */
	if (ninst <= MAX_EXEC_INST) {
		error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode);
		if (error) {
			qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
			return error;
		}
	}

	/* save wakeup-events */
	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT,
	    &wakeup_ev);
	/* save PC */
	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc &= UPC_MASK;

	/* save ctx enables */
	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
	/* save conditional-code */
	qat_ae_read_4(sc, ae, CC_ENABLE, &savcc);
	/* save current context */
	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);

	/* save indirect csrs */
	qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
	    &indr_future_cnt_sig);
	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig);
	qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig);

	/* turn off ucode parity */
	qat_ae_write_4(sc, ae, CTX_ENABLES,
	    ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE);

	/* copy instructions to ustore */
	qat_ae_ucode_write(sc, ae, 0, ninst, ucode);
	/* set PC */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0);
	/* change the active context */
	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
	    ctx & ACTIVE_CTX_STATUS_ACNO);

	if (cond_code_off) {
		/* disable conditional-code*/
		qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff);
	}

	/* wakeup-event voluntary */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx,
	    CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);

	/* clean signals */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0);
	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);

	/* enable context */
	qat_ae_enable_ctx(sc, ae, 1 << ctx);

	/* wait for it to finish */
	if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) != 0)
		error = ETIMEDOUT;

	/* see if we need to get the current PC */
	if (endpc != NULL) {
		uint32_t ctx_status;

		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
		    &ctx_status);
		*endpc = ctx_status & UPC_MASK;
	}
#if 0
	{
		uint32_t ctx_status;

		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
		    &ctx_status);
		printf("%s: endpc 0x%08x\n", __func__,
		    ctx_status & UPC_MASK);
	}
#endif

	/* restore to previous states: */
	/* disable context */
	qat_ae_disable_ctx(sc, ae, 1 << ctx);
	if (ninst <= MAX_EXEC_INST) {
		/* instructions */
		qat_ae_ucode_write(sc, ae, 0, ninst, savucode);
	}
	/* wakeup-events */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT,
	    wakeup_ev);
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc);

	/* only restore shared control store bit,
	 * other bits might be changed by AE code snippet */
	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
	if (share_cs)
		nmisc = misc | AE_MISC_CONTROL_SHARE_CS;
	else
		nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
	/* conditional-code */
	qat_ae_write_4(sc, ae, CC_ENABLE, savcc);
	/* change the active context */
	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
	    savctx & ACTIVE_CTX_STATUS_ACNO);
	/* restore the nxt ctx to run */
	qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
	/* restore current LM addr */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT,
	    indr_lm_addr_0);
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT,
	    indr_lm_addr_1);
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
	    indr_lm_addr_byte_0);
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
	    indr_lm_addr_byte_1);

	/* restore indirect csrs */
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
	    indr_future_cnt_sig);
	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT,
	    indr_sig);
	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig);

	/* ctx-enables */
	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);

	return error;
}
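
/*
 * The LM-init helpers below run generated microcode that clobbers GPRs
 * A0-A2 and B0-B1 of the chosen context, so the first execution saves
 * those registers and qat_ae_restore_init_lm_gprs() puts them back once
 * the batch has been executed.
 */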
int
qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx,
    int *first_exec, uint64_t *ucode, u_int ninst,
    u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1)
{

	if (*first_exec) {
		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
		*first_exec = 0;
	}

	return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL);
}

int
qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx,
    u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1)
{
	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);

	return 0;
}

int
qat_ae_get_inst_num(int lmsize)
{
	int ninst, left;

	if (lmsize == 0)
		return 0;

	left = lmsize % sizeof(u_int);

	if (left) {
		ninst = __arraycount(ae_inst_1b) +
		    qat_ae_get_inst_num(lmsize - left);
	} else {
		/* 3 instructions are needed for each further word */
		ninst = (lmsize - sizeof(u_int)) * 3 / 4 +
		    __arraycount(ae_inst_4b);
	}

	return (ninst);
}
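
/*
 * qat_ae_batch_put_lm() walks the queued local-memory init entries and
 * concatenates the microcode for as many of them as fit in one
 * ustore-sized buffer; when the buffer fills up (or the list ends) it
 * appends a ctx_arb[kill] and executes the accumulated program on
 * context 0 through qat_ae_exec_ucode_init_lm(), restoring the scratch
 * GPRs afterwards.
 */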
int
qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae,
    struct qat_ae_batch_init_list *qabi_list, size_t nqabi)
{
	struct qat_ae_batch_init *qabi;
	size_t alloc_ninst, ninst;
	uint64_t *ucode;
	u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1;
	int insnsz, error = 0, execed = 0, first_exec = 1;

	if (SIMPLEQ_FIRST(qabi_list) == NULL)
		return 0;

	alloc_ninst = uimin(USTORE_SIZE, nqabi);
	ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst);

	ninst = 0;
	SIMPLEQ_FOREACH(qabi, qabi_list, qabi_next) {
		insnsz = qat_ae_get_inst_num(qabi->qabi_size);
		if (insnsz + ninst > alloc_ninst) {
			aprint_debug_dev(sc->sc_dev,
			    "code page is full, call execution unit\n");
			/* add ctx_arb[kill] */
			ucode[ninst++] = 0x0E000010000ull;
			execed = 1;

			error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
			    &first_exec, ucode, ninst,
			    &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
			if (error) {
				qat_ae_restore_init_lm_gprs(sc, ae, 0,
				    gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
				qat_free_mem(ucode);
				return error;
			}
			/* run microExec to execute the microcode */
			ninst = 0;
		}
		ninst += qat_ae_concat_ucode(ucode, ninst,
		    qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value);
	}

	if (ninst > 0) {
		ucode[ninst++] = 0x0E000010000ull;
		execed = 1;

		error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
		    &first_exec, ucode, ninst,
		    &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
	}
	if (execed) {
		qat_ae_restore_init_lm_gprs(sc, ae, 0,
		    gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
	}

	qat_free_mem(ucode);

	return error;
}

int
qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc)
{

	if (qat_ae_is_active(sc, ae))
		return EBUSY;

	qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT,
	    UPC_MASK & upc);
	return 0;
}
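
/*
 * qat_aefw_csum() computes the checksum carried in the MOF header: a
 * bitwise CRC of width CRC_WIDTH with polynomial CRC_POLY, processed one
 * byte at a time over the firmware image starting at the mfh_min_ver
 * field.
 */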
static inline u_int
qat_aefw_csum_calc(u_int reg, int ch)
{
	int i;
	u_int topbit = CRC_BITMASK(CRC_WIDTH - 1);
	u_int inbyte = (u_int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << (CRC_WIDTH - 0x8);
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ CRC_POLY;
		else
			reg <<= 1;
	}

	return (reg & CRC_WIDTHMASK(CRC_WIDTH));
}

u_int
qat_aefw_csum(char *buf, int size)
{
	u_int csum = 0;

	while (size--) {
		csum = qat_aefw_csum_calc(csum, *buf++);
	}

	return csum;
}

const char *
qat_aefw_uof_string(struct qat_softc *sc, size_t offset)
{
	if (offset >= sc->sc_aefw_uof.qafu_str_tab_size)
		return NULL;
	if (sc->sc_aefw_uof.qafu_str_tab == NULL)
		return NULL;

	return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset);
}

struct uof_chunk_hdr *
qat_aefw_uof_find_chunk(struct qat_softc *sc,
    const char *id, struct uof_chunk_hdr *cur)
{
	struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr;
	struct uof_chunk_hdr *uch;
	int i;

	uch = (struct uof_chunk_hdr *)(uoh + 1);
	for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) {
		if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size)
			return NULL;

		if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN))
			return uch;
	}

	return NULL;
}

int
qat_aefw_load_mof(struct qat_softc *sc)
{
	int error = 0;
	firmware_handle_t fh = NULL;
	off_t fwsize;

	/* load MOF firmware */
	error = firmware_open("qat", sc->sc_hw.qhw_mof_fwname, &fh);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't load mof firmware %s\n",
		    sc->sc_hw.qhw_mof_fwname);
		goto fail;
	}

	fwsize = firmware_get_size(fh);
	if (fwsize == 0 || fwsize > SIZE_MAX) {
		error = EINVAL;
		goto fail;
	}
	sc->sc_fw_mof_size = fwsize;
	sc->sc_fw_mof = firmware_malloc(sc->sc_fw_mof_size);

	error = firmware_read(fh, 0, sc->sc_fw_mof, sc->sc_fw_mof_size);
	if (error)
		goto fail;

out:
	if (fh != NULL)
		firmware_close(fh);
	return error;
fail:
	if (sc->sc_fw_mof != NULL) {
		firmware_free(sc->sc_fw_mof, sc->sc_fw_mof_size);
		sc->sc_fw_mof = NULL;
	}
	goto out;
}

int
qat_aefw_load_mmp(struct qat_softc *sc)
{
	int error = 0;
	firmware_handle_t fh = NULL;
	off_t fwsize;

	error = firmware_open("qat", sc->sc_hw.qhw_mmp_fwname, &fh);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't load mmp firmware %s\n",
		    sc->sc_hw.qhw_mmp_fwname);
		goto fail;
	}

	fwsize = firmware_get_size(fh);
	if (fwsize == 0 || fwsize > SIZE_MAX) {
		error = EINVAL;
		goto fail;
	}
	sc->sc_fw_mmp_size = fwsize;
	sc->sc_fw_mmp = firmware_malloc(sc->sc_fw_mmp_size);

	error = firmware_read(fh, 0, sc->sc_fw_mmp, sc->sc_fw_mmp_size);
	if (error)
		goto fail;

out:
	if (fh != NULL)
		firmware_close(fh);
	return error;
fail:
	if (sc->sc_fw_mmp != NULL) {
		firmware_free(sc->sc_fw_mmp, sc->sc_fw_mmp_size);
		sc->sc_fw_mmp = NULL;
	}
	goto out;
}

int
qat_aefw_mof_find_uof0(struct qat_softc *sc,
    struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head,
    u_int nchunk, size_t size, const char *id,
    size_t *fwsize, void **fwptr)
{
	int i;
	char *uof_name;

	for (i = 0; i < nchunk; i++) {
		struct mof_uof_chunk_hdr *much = &head[i];

		if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN))
			return EINVAL;

		if (much->much_offset + much->much_size > size)
			return EINVAL;

		if (sc->sc_mof.qmf_sym_size <= much->much_name)
			return EINVAL;

		uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym +
		    much->much_name);

		if (!strcmp(uof_name, sc->sc_fw_uof_name)) {
			*fwptr = (void *)((uintptr_t)muh +
			    (uintptr_t)much->much_offset);
			*fwsize = (size_t)much->much_size;
			aprint_verbose_dev(sc->sc_dev,
			    "%s obj %s at %p size 0x%lx\n",
			    id, uof_name, *fwptr, *fwsize);
			return 0;
		}
	}

	return ENOENT;
}

int
qat_aefw_mof_find_uof(struct qat_softc *sc)
{
	struct mof_uof_hdr *uof_hdr, *suof_hdr;
	u_int nuof_chunks = 0, nsuof_chunks = 0;
	int error;

	uof_hdr = sc->sc_mof.qmf_uof_objs;
	suof_hdr = sc->sc_mof.qmf_suof_objs;

	if (uof_hdr != NULL) {
		if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) {
			return EINVAL;
		}
		nuof_chunks = uof_hdr->muh_num_chunks;
	}
	if (suof_hdr != NULL) {
		if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks)
			return EINVAL;
		nsuof_chunks = suof_hdr->muh_num_chunks;
	}

	if (nuof_chunks + nsuof_chunks == 0)
		return EINVAL;

	if (uof_hdr != NULL) {
		error = qat_aefw_mof_find_uof0(sc, uof_hdr,
		    (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks,
		    sc->sc_mof.qmf_uof_objs_size, UOF_IMAG,
		    &sc->sc_fw_uof_size, &sc->sc_fw_uof);
		if (error && error != ENOENT)
			return error;
	}

	if (suof_hdr != NULL) {
		error = qat_aefw_mof_find_uof0(sc, suof_hdr,
		    (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks,
		    sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG,
		    &sc->sc_fw_suof_size, &sc->sc_fw_suof);
		if (error && error != ENOENT)
			return error;
	}

	if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL)
		return ENOENT;

	return 0;
}
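
/*
 * MOF layout, as parsed below: a mof_file_hdr (with magic, version and a
 * checksum covering everything from mfh_min_ver onwards) is followed by
 * chunk headers.  The SYM_OBJS chunk carries a NUL-terminated symbol
 * table prefixed by its length, and the UOF_OBJS/SUOF_OBJS chunks carry
 * the embedded UOF/SUOF objects from which the firmware image named by
 * sc_fw_uof_name is picked.
 */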
2036 2037 mfh = sc->sc_fw_mof; 2038 2039 if (mfh->mfh_fid != MOF_FID) 2040 return EINVAL; 2041 2042 csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof + 2043 offsetof(struct mof_file_hdr, mfh_min_ver)), 2044 sc->sc_fw_mof_size - 2045 offsetof(struct mof_file_hdr, mfh_min_ver)); 2046 if (mfh->mfh_csum != csum) 2047 return EINVAL; 2048 2049 if (mfh->mfh_min_ver != MOF_MIN_VER || 2050 mfh->mfh_maj_ver != MOF_MAJ_VER) 2051 return EINVAL; 2052 2053 if (mfh->mfh_max_chunks < mfh->mfh_num_chunks) 2054 return EINVAL; 2055 2056 if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks) 2057 return EINVAL; 2058 mfch = (struct mof_file_chunk_hdr *)(mfh + 1); 2059 2060 for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) { 2061 if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size) 2062 return EINVAL; 2063 2064 if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) { 2065 if (sc->sc_mof.qmf_sym != NULL) 2066 return EINVAL; 2067 2068 sc->sc_mof.qmf_sym = 2069 (void *)((uintptr_t)sc->sc_fw_mof + 2070 (uintptr_t)mfch->mfch_offset + sizeof(u_int)); 2071 sc->sc_mof.qmf_sym_size = 2072 *(u_int *)((uintptr_t)sc->sc_fw_mof + 2073 (uintptr_t)mfch->mfch_offset); 2074 2075 if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0) 2076 return EINVAL; 2077 if (mfch->mfch_size != sc->sc_mof.qmf_sym_size + 2078 sizeof(u_int) || mfch->mfch_size == 0) 2079 return EINVAL; 2080 if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym + 2081 sc->sc_mof.qmf_sym_size - 1) != '\0') 2082 return EINVAL; 2083 2084 } else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) { 2085 if (sc->sc_mof.qmf_uof_objs != NULL) 2086 return EINVAL; 2087 2088 sc->sc_mof.qmf_uof_objs = 2089 (void *)((uintptr_t)sc->sc_fw_mof + 2090 (uintptr_t)mfch->mfch_offset); 2091 sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size; 2092 2093 } else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) { 2094 if (sc->sc_mof.qmf_suof_objs != NULL) 2095 return EINVAL; 2096 2097 sc->sc_mof.qmf_suof_objs = 2098 (void *)((uintptr_t)sc->sc_fw_mof + 2099 (uintptr_t)mfch->mfch_offset); 2100 sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size; 2101 } 2102 } 2103 2104 if (sc->sc_mof.qmf_sym == NULL || 2105 (sc->sc_mof.qmf_uof_objs == NULL && 2106 sc->sc_mof.qmf_suof_objs == NULL)) 2107 return EINVAL; 2108 2109 error = qat_aefw_mof_find_uof(sc); 2110 if (error) 2111 return error; 2112 return 0; 2113 } 2114 2115 int 2116 qat_aefw_uof_parse_image(struct qat_softc *sc, 2117 struct qat_uof_image *qui, struct uof_chunk_hdr *uch) 2118 { 2119 struct uof_image *image; 2120 struct uof_code_page *page; 2121 uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; 2122 size_t lim = uch->uch_offset + uch->uch_size, size; 2123 int i, p; 2124 2125 size = uch->uch_size; 2126 if (size < sizeof(struct uof_image)) 2127 return EINVAL; 2128 size -= sizeof(struct uof_image); 2129 2130 qui->qui_image = image = 2131 (struct uof_image *)(base + uch->uch_offset); 2132 2133 aprint_verbose_dev(sc->sc_dev, 2134 "uof_image name %s\n", 2135 qat_aefw_uof_string(sc, image->ui_name)); 2136 aprint_verbose_dev(sc->sc_dev, 2137 "uof_image ae_assign 0x%08x ctx_assign 0x%08x cpu_type 0x%08x\n", 2138 image->ui_ae_assigned, image->ui_ctx_assigned, image->ui_cpu_type); 2139 aprint_verbose_dev(sc->sc_dev, 2140 "uof_image max_ver 0x%08x min_ver 0x%08x ae_mode 0x%08x\n", 2141 image->ui_max_ver, image->ui_min_ver, image->ui_ae_mode); 2142 aprint_verbose_dev(sc->sc_dev, 2143 "uof_image pages 0x%08x page regions 0x%08x\n", 2144 image->ui_num_pages, image->ui_num_page_regions); 2145 2146 #define ASSIGN_OBJ_TAB(np, typep, 
type, base, off, lim) \ 2147 do { \ 2148 u_int nent; \ 2149 nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\ 2150 if ((lim) < off + sizeof(struct uof_obj_table) + \ 2151 sizeof(type) * nent) \ 2152 return EINVAL; \ 2153 *(np) = nent; \ 2154 if (nent > 0) \ 2155 *(typep) = (type)((struct uof_obj_table *) \ 2156 ((base) + (off)) + 1); \ 2157 else \ 2158 *(typep) = NULL; \ 2159 } while (0) 2160 2161 ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg, 2162 struct uof_ae_reg *, base, image->ui_reg_tab, lim); 2163 ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym, 2164 struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim); 2165 ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak, 2166 struct qui_sbreak *, base, image->ui_sbreak_tab, lim); 2167 2168 if (size < sizeof(struct uof_code_page) * image->ui_num_pages) 2169 return EINVAL; 2170 if (__arraycount(qui->qui_pages) < image->ui_num_pages) 2171 return EINVAL; 2172 2173 page = (struct uof_code_page *)(image + 1); 2174 2175 for (p = 0; p < image->ui_num_pages; p++, page++) { 2176 struct qat_uof_page *qup = &qui->qui_pages[p]; 2177 struct uof_code_area *uca; 2178 2179 qup->qup_page_num = page->ucp_page_num; 2180 qup->qup_def_page = page->ucp_def_page; 2181 qup->qup_page_region = page->ucp_page_region; 2182 qup->qup_beg_vaddr = page->ucp_beg_vaddr; 2183 qup->qup_beg_paddr = page->ucp_beg_paddr; 2184 2185 ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var, 2186 struct uof_uword_fixup *, base, 2187 page->ucp_uc_var_tab, lim); 2188 ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var, 2189 struct uof_import_var *, base, 2190 page->ucp_imp_var_tab, lim); 2191 ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr, 2192 struct uof_uword_fixup *, base, 2193 page->ucp_imp_expr_tab, lim); 2194 ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg, 2195 struct uof_uword_fixup *, base, 2196 page->ucp_neigh_reg_tab, lim); 2197 2198 if (lim < page->ucp_code_area + sizeof(struct uof_code_area)) 2199 return EINVAL; 2200 2201 uca = (struct uof_code_area *)(base + page->ucp_code_area); 2202 qup->qup_num_micro_words = uca->uca_num_micro_words; 2203 2204 ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks, 2205 struct qat_uof_uword_block *, base, 2206 uca->uca_uword_block_tab, lim); 2207 2208 for (i = 0; i < qup->qup_num_uw_blocks; i++) { 2209 u_int uwordoff = ((struct uof_uword_block *)( 2210 &qup->qup_uw_blocks[i]))->uub_uword_offset; 2211 2212 if (lim < uwordoff) 2213 return EINVAL; 2214 2215 qup->qup_uw_blocks[i].quub_micro_words = 2216 (base + uwordoff); 2217 } 2218 } 2219 2220 #undef ASSIGN_OBJ_TAB 2221 2222 return 0; 2223 } 2224 2225 int 2226 qat_aefw_uof_parse_images(struct qat_softc *sc) 2227 { 2228 struct uof_chunk_hdr *uch = NULL; 2229 u_int assigned_ae; 2230 int i, error; 2231 2232 for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) { 2233 uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch); 2234 if (uch == NULL) 2235 break; 2236 2237 if (i >= __arraycount(sc->sc_aefw_uof.qafu_imgs)) 2238 return ENOENT; 2239 2240 error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch); 2241 if (error) 2242 return error; 2243 2244 sc->sc_aefw_uof.qafu_num_imgs++; 2245 } 2246 2247 assigned_ae = 0; 2248 for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { 2249 assigned_ae |= sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned; 2250 } 2251 2252 return 0; 2253 } 2254 2255 int 2256 qat_aefw_uof_parse(struct qat_softc *sc) 2257 { 2258 struct uof_file_hdr *ufh; 2259 struct uof_file_chunk_hdr 
*ufch; 2260 struct uof_obj_hdr *uoh; 2261 struct uof_chunk_hdr *uch; 2262 void *uof = NULL; 2263 size_t size, uof_size, hdr_size; 2264 uintptr_t base; 2265 u_int csum; 2266 int i; 2267 2268 size = sc->sc_fw_uof_size; 2269 if (size < MIN_UOF_SIZE) 2270 return EINVAL; 2271 size -= sizeof(struct uof_file_hdr); 2272 2273 ufh = sc->sc_fw_uof; 2274 2275 if (ufh->ufh_id != UOF_FID) 2276 return EINVAL; 2277 if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER) 2278 return EINVAL; 2279 2280 if (ufh->ufh_max_chunks < ufh->ufh_num_chunks) 2281 return EINVAL; 2282 if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks) 2283 return EINVAL; 2284 ufch = (struct uof_file_chunk_hdr *)(ufh + 1); 2285 2286 uof_size = 0; 2287 for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) { 2288 if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size) 2289 return EINVAL; 2290 2291 if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) { 2292 if (uof != NULL) 2293 return EINVAL; 2294 2295 uof = 2296 (void *)((uintptr_t)sc->sc_fw_uof + 2297 ufch->ufch_offset); 2298 uof_size = ufch->ufch_size; 2299 2300 csum = qat_aefw_csum(uof, uof_size); 2301 if (csum != ufch->ufch_csum) 2302 return EINVAL; 2303 2304 aprint_verbose_dev(sc->sc_dev, 2305 "uof at %p size 0x%lx\n", 2306 uof, uof_size); 2307 } 2308 } 2309 2310 if (uof == NULL) 2311 return ENOENT; 2312 2313 size = uof_size; 2314 if (size < sizeof(struct uof_obj_hdr)) 2315 return EINVAL; 2316 size -= sizeof(struct uof_obj_hdr); 2317 2318 uoh = uof; 2319 2320 aprint_verbose_dev(sc->sc_dev, 2321 "uof cpu_type 0x%08x min_cpu_ver 0x%04x max_cpu_ver 0x%04x\n", 2322 uoh->uoh_cpu_type, uoh->uoh_min_cpu_ver, uoh->uoh_max_cpu_ver); 2323 2324 if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks) 2325 return EINVAL; 2326 2327 /* Check if the UOF objects are compatible with the chip */ 2328 if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0) 2329 return ENOTSUP; 2330 2331 if (uoh->uoh_min_cpu_ver > sc->sc_rev || 2332 uoh->uoh_max_cpu_ver < sc->sc_rev) 2333 return ENOTSUP; 2334 2335 sc->sc_aefw_uof.qafu_size = uof_size; 2336 sc->sc_aefw_uof.qafu_obj_hdr = uoh; 2337 2338 base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; 2339 2340 /* map uof string-table */ 2341 uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL); 2342 if (uch != NULL) { 2343 hdr_size = offsetof(struct uof_str_tab, ust_strings); 2344 sc->sc_aefw_uof.qafu_str_tab = 2345 (void *)(base + uch->uch_offset + hdr_size); 2346 sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size; 2347 } 2348 2349 /* get ustore mem inits table -- should be only one */ 2350 uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL); 2351 if (uch != NULL) { 2352 if (uch->uch_size < sizeof(struct uof_obj_table)) 2353 return EINVAL; 2354 sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)(base + 2355 uch->uch_offset))->uot_nentries; 2356 if (sc->sc_aefw_uof.qafu_num_init_mem) { 2357 sc->sc_aefw_uof.qafu_init_mem = 2358 (struct uof_init_mem *)(base + uch->uch_offset + 2359 sizeof(struct uof_obj_table)); 2360 sc->sc_aefw_uof.qafu_init_mem_size = 2361 uch->uch_size - sizeof(struct uof_obj_table); 2362 } 2363 } 2364 2365 uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL); 2366 if (uch != NULL) { 2367 if (uch->uch_size < sizeof(struct uof_obj_table) + 2368 sizeof(struct uof_var_mem_seg)) 2369 return EINVAL; 2370 sc->sc_aefw_uof.qafu_var_mem_seg = 2371 (struct uof_var_mem_seg *)(base + uch->uch_offset + 2372 sizeof(struct uof_obj_table)); 2373 } 2374 2375 return qat_aefw_uof_parse_images(sc); 2376 } 2377 2378 int 
2379 qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi, 2380 struct suof_chunk_hdr *sch) 2381 { 2382 struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; 2383 struct simg_ae_mode *ae_mode; 2384 u_int maj_ver; 2385 2386 qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset + 2387 sizeof(struct suof_obj_hdr); 2388 qsi->qsi_simg_len = 2389 ((struct suof_obj_hdr *) 2390 (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length; 2391 2392 qsi->qsi_css_header = qsi->qsi_simg_buf; 2393 qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr); 2394 qsi->qsi_css_signature = qsi->qsi_css_key + 2395 CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; 2396 qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN; 2397 2398 ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg; 2399 qsi->qsi_ae_mask = ae_mode->sam_ae_mask; 2400 qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name; 2401 qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data; 2402 qsi->qsi_fw_type = ae_mode->sam_fw_type; 2403 2404 if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type) 2405 return EINVAL; 2406 2407 maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff; 2408 if ((maj_ver > ae_mode->sam_devmax_ver) || 2409 (maj_ver < ae_mode->sam_devmin_ver)) { 2410 return EINVAL; 2411 } 2412 2413 return 0; 2414 } 2415 2416 int 2417 qat_aefw_suof_parse(struct qat_softc *sc) 2418 { 2419 struct suof_file_hdr *sfh; 2420 struct suof_chunk_hdr *sch; 2421 struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; 2422 struct qat_suof_image *qsi; 2423 size_t size; 2424 u_int csum; 2425 int ae0_img = MAX_AE; 2426 int i, error; 2427 2428 size = sc->sc_fw_suof_size; 2429 if (size < sizeof(struct suof_file_hdr)) 2430 return EINVAL; 2431 2432 sfh = sc->sc_fw_suof; 2433 2434 if (sfh->sfh_file_id != SUOF_FID) 2435 return EINVAL; 2436 if (sfh->sfh_fw_type != 0) 2437 return EINVAL; 2438 if (sfh->sfh_num_chunks <= 1) 2439 return EINVAL; 2440 if (sfh->sfh_min_ver != SUOF_MIN_VER || 2441 sfh->sfh_maj_ver != SUOF_MAJ_VER) 2442 return EINVAL; 2443 2444 csum = qat_aefw_csum((char *)&sfh->sfh_min_ver, 2445 size - offsetof(struct suof_file_hdr, sfh_min_ver)); 2446 if (csum != sfh->sfh_check_sum) 2447 return EINVAL; 2448 2449 size -= sizeof(struct suof_file_hdr); 2450 2451 qafs->qafs_file_id = SUOF_FID; 2452 qafs->qafs_suof_buf = sc->sc_fw_suof; 2453 qafs->qafs_suof_size = sc->sc_fw_suof_size; 2454 qafs->qafs_check_sum = sfh->sfh_check_sum; 2455 qafs->qafs_min_ver = sfh->sfh_min_ver; 2456 qafs->qafs_maj_ver = sfh->sfh_maj_ver; 2457 qafs->qafs_fw_type = sfh->sfh_fw_type; 2458 2459 if (size < sizeof(struct suof_chunk_hdr)) 2460 return EINVAL; 2461 sch = (struct suof_chunk_hdr *)(sfh + 1); 2462 size -= sizeof(struct suof_chunk_hdr); 2463 2464 if (size < sizeof(struct suof_str_tab)) 2465 return EINVAL; 2466 size -= offsetof(struct suof_str_tab, sst_strings); 2467 2468 qafs->qafs_sym_size = ((struct suof_str_tab *) 2469 (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length; 2470 if (size < qafs->qafs_sym_size) 2471 return EINVAL; 2472 qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset + 2473 offsetof(struct suof_str_tab, sst_strings); 2474 2475 qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1; 2476 if (qafs->qafs_num_simgs == 0) 2477 return EINVAL; 2478 2479 qsi = qat_alloc_mem( 2480 sizeof(struct qat_suof_image) * qafs->qafs_num_simgs); 2481 qafs->qafs_simg = qsi; 2482 2483 for (i = 0; i < qafs->qafs_num_simgs; i++) { 2484 error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]); 2485 if (error) 2486 return error; 2487 
if ((qsi[i].qsi_ae_mask & 0x1) != 0) 2488 ae0_img = i; 2489 } 2490 2491 if (ae0_img != qafs->qafs_num_simgs - 1) { 2492 struct qat_suof_image last_qsi; 2493 2494 memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1], 2495 sizeof(struct qat_suof_image)); 2496 memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img], 2497 sizeof(struct qat_suof_image)); 2498 memcpy(&qsi[ae0_img], &last_qsi, 2499 sizeof(struct qat_suof_image)); 2500 } 2501 2502 return 0; 2503 } 2504 2505 int 2506 qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size, 2507 struct qat_dmamem *dma) 2508 { 2509 struct css_hdr *css = (struct css_hdr *)image; 2510 struct auth_chunk *auth_chunk; 2511 struct fw_auth_desc *auth_desc; 2512 size_t mapsize, simg_offset = sizeof(struct auth_chunk); 2513 bus_size_t bus_addr; 2514 uintptr_t virt_addr; 2515 int error; 2516 2517 if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN) 2518 return EINVAL; 2519 2520 mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ? 2521 CSS_AE_SIMG_LEN + simg_offset : 2522 size + CSS_FWSK_PAD_LEN + simg_offset; 2523 error = qat_alloc_dmamem(sc, dma, mapsize, PAGE_SIZE); 2524 if (error) 2525 return error; 2526 2527 memset(dma->qdm_dma_vaddr, 0, mapsize); 2528 2529 auth_chunk = dma->qdm_dma_vaddr; 2530 auth_chunk->ac_chunk_size = mapsize; 2531 auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_map->dm_segs[0].ds_addr; 2532 2533 virt_addr = (uintptr_t)dma->qdm_dma_vaddr; 2534 virt_addr += simg_offset; 2535 bus_addr = auth_chunk->ac_chunk_bus_addr; 2536 bus_addr += simg_offset; 2537 2538 auth_desc = &auth_chunk->ac_fw_auth_desc; 2539 auth_desc->fad_css_hdr_high = bus_addr >> 32; 2540 auth_desc->fad_css_hdr_low = bus_addr; 2541 2542 memcpy((void *)virt_addr, image, sizeof(struct css_hdr)); 2543 /* pub key */ 2544 virt_addr += sizeof(struct css_hdr); 2545 bus_addr += sizeof(struct css_hdr); 2546 image += sizeof(struct css_hdr); 2547 2548 auth_desc->fad_fwsk_pub_high = bus_addr >> 32; 2549 auth_desc->fad_fwsk_pub_low = bus_addr; 2550 2551 memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN); 2552 memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN); 2553 memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN), 2554 image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t)); 2555 2556 virt_addr += CSS_FWSK_PUB_LEN; 2557 bus_addr += CSS_FWSK_PUB_LEN; 2558 image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; 2559 2560 auth_desc->fad_signature_high = bus_addr >> 32; 2561 auth_desc->fad_signature_low = bus_addr; 2562 2563 memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN); 2564 #ifdef QAT_DUMP 2565 qat_dump_raw(QAT_DUMP_AEFW, "aefw signature", image, CSS_SIGNATURE_LEN); 2566 #endif 2567 2568 virt_addr += CSS_SIGNATURE_LEN; 2569 bus_addr += CSS_SIGNATURE_LEN; 2570 image += CSS_SIGNATURE_LEN; 2571 2572 auth_desc->fad_img_high = bus_addr >> 32; 2573 auth_desc->fad_img_low = bus_addr; 2574 auth_desc->fad_img_len = size - AE_IMG_OFFSET; 2575 2576 memcpy((void *)virt_addr, image, auth_desc->fad_img_len); 2577 2578 if (css->css_fw_type == CSS_AE_FIRMWARE) { 2579 auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high; 2580 auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low; 2581 2582 bus_addr += sizeof(struct simg_ae_mode); 2583 2584 auth_desc->fad_img_ae_init_data_high = bus_addr >> 32; 2585 auth_desc->fad_img_ae_init_data_low = bus_addr; 2586 2587 bus_addr += SIMG_AE_INIT_SEQ_LEN; 2588 2589 auth_desc->fad_img_ae_insts_high = bus_addr >> 32; 2590 auth_desc->fad_img_ae_insts_low = bus_addr; 2591 } else { 2592 auth_desc->fad_img_ae_insts_high = 
auth_desc->fad_img_high; 2593 auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low; 2594 } 2595 2596 bus_dmamap_sync(sc->sc_dmat, dma->qdm_dma_map, 0, 2597 dma->qdm_dma_map->dm_mapsize, 2598 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2599 2600 return 0; 2601 } 2602 2603 int 2604 qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma) 2605 { 2606 bus_addr_t addr = dma->qdm_dma_map->dm_segs[0].ds_addr; 2607 uint32_t fcu, sts; 2608 int retry = 0; 2609 2610 qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, addr >> 32); 2611 qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr); 2612 qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH); 2613 2614 do { 2615 delay(FW_AUTH_WAIT_PERIOD * 1000); 2616 fcu = qat_cap_global_read_4(sc, FCU_STATUS); 2617 sts = __SHIFTOUT(fcu, FCU_STATUS_STS); 2618 if (sts == FCU_STATUS_STS_VERI_FAIL) 2619 goto fail; 2620 if (fcu & FCU_STATUS_AUTHFWLD && 2621 sts == FCU_STATUS_STS_VERI_DONE) { 2622 return 0; 2623 } 2624 } while (retry++ < FW_AUTH_MAX_RETRY); 2625 2626 fail: 2627 aprint_error_dev(sc->sc_dev, 2628 "firmware authentication error: status 0x%08x retry %d\n", 2629 fcu, retry); 2630 return EINVAL; 2631 } 2632 2633 int 2634 qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma) 2635 { 2636 struct simg_ae_mode *ae_mode; 2637 uint32_t fcu, sts, loaded; 2638 u_int mask; 2639 u_char ae; 2640 int retry = 0; 2641 2642 ae_mode = (struct simg_ae_mode *)((uintptr_t)dma->qdm_dma_vaddr + 2643 sizeof(struct auth_chunk) + sizeof(struct css_hdr) + 2644 CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN); 2645 2646 for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { 2647 if (!(mask & 1)) 2648 continue; 2649 if (!((ae_mode->sam_ae_mask >> ae) & 0x1)) 2650 continue; 2651 if (qat_ae_is_active(sc, ae)) { 2652 aprint_error_dev(sc->sc_dev, "AE %d is active\n", ae); 2653 return EINVAL; 2654 } 2655 qat_cap_global_write_4(sc, FCU_CTRL, 2656 FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE)); 2657 do { 2658 delay(FW_AUTH_WAIT_PERIOD * 1000); 2659 fcu = qat_cap_global_read_4(sc, FCU_STATUS); 2660 sts = __SHIFTOUT(fcu, FCU_STATUS_STS); 2661 loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE); 2662 if (sts == FCU_STATUS_STS_LOAD_DONE && 2663 (loaded & (1 << ae))) { 2664 break; 2665 } 2666 } while (retry++ < FW_AUTH_MAX_RETRY); 2667 2668 if (retry > FW_AUTH_MAX_RETRY) { 2669 aprint_error_dev(sc->sc_dev, 2670 "firmware load timeout: status %08x\n", fcu); 2671 return EINVAL; 2672 } 2673 } 2674 2675 return 0; 2676 } 2677 2678 int 2679 qat_aefw_suof_write(struct qat_softc *sc) 2680 { 2681 struct qat_suof_image *qsi = NULL; 2682 int i, error = 0; 2683 2684 for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) { 2685 qsi = &sc->sc_aefw_suof.qafs_simg[i]; 2686 error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf, 2687 qsi->qsi_simg_len, &qsi->qsi_dma); 2688 if (error) 2689 return error; 2690 error = qat_aefw_auth(sc, &qsi->qsi_dma); 2691 if (error) 2692 goto fail; 2693 error = qat_aefw_suof_load(sc, &qsi->qsi_dma); 2694 if (error) 2695 goto fail; 2696 2697 qat_free_dmamem(sc, &qsi->qsi_dma); 2698 } 2699 2700 return 0; 2701 fail: 2702 if (qsi != NULL) 2703 qat_free_dmamem(sc, &qsi->qsi_dma); 2704 return error; 2705 } 2706 2707 int 2708 qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae, 2709 struct qat_uof_image *qui) 2710 { 2711 struct qat_ae_slice *slice; 2712 int i, npages, nregions; 2713 2714 if (qae->qae_num_slices >= __arraycount(qae->qae_slices)) 2715 return ENOENT; 2716 2717 if (qui->qui_image->ui_ae_mode & 2718 (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) { 
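		/*
		 * Loading an image that wants a shared or reloadable
		 * ustore is not implemented; refuse it here instead of
		 * programming the AE with a layout we do not support.
		 */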
2719 /* XXX */ 2720 aprint_error_dev(sc->sc_dev, 2721 "shared ae mode is not supported yet\n"); 2722 return ENOTSUP; 2723 } 2724 2725 qae->qae_shareable_ustore = 0; /* XXX */ 2726 qae->qae_effect_ustore_size = USTORE_SIZE; 2727 2728 slice = &qae->qae_slices[qae->qae_num_slices]; 2729 2730 slice->qas_image = qui; 2731 slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned; 2732 2733 nregions = qui->qui_image->ui_num_page_regions; 2734 npages = qui->qui_image->ui_num_pages; 2735 2736 if (nregions > __arraycount(slice->qas_regions)) 2737 return ENOENT; 2738 if (npages > __arraycount(slice->qas_pages)) 2739 return ENOENT; 2740 2741 for (i = 0; i < nregions; i++) { 2742 SIMPLEQ_INIT(&slice->qas_regions[i].qar_waiting_pages); 2743 } 2744 for (i = 0; i < npages; i++) { 2745 struct qat_ae_page *page = &slice->qas_pages[i]; 2746 int region; 2747 2748 page->qap_page = &qui->qui_pages[i]; 2749 region = page->qap_page->qup_page_region; 2750 if (region >= nregions) 2751 return EINVAL; 2752 2753 page->qap_region = &slice->qas_regions[region]; 2754 aprint_verbose_dev(sc->sc_dev, 2755 "ae %p slice %d page %d assign region %d\n", 2756 qae, qae->qae_num_slices, i, region); 2757 } 2758 2759 qae->qae_num_slices++; 2760 2761 return 0; 2762 } 2763 2764 int 2765 qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae) 2766 { 2767 struct uof_image *image; 2768 struct qat_ae *qae = &(QAT_AE(sc, ae)); 2769 int s; 2770 u_char nn_mode; 2771 2772 for (s = 0; s < qae->qae_num_slices; s++) { 2773 if (qae->qae_slices[s].qas_image == NULL) 2774 continue; 2775 2776 image = qae->qae_slices[s].qas_image->qui_image; 2777 qat_ae_write_ctx_mode(sc, ae, 2778 __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE)); 2779 2780 nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE); 2781 if (nn_mode != AE_MODE_NN_MODE_DONTCARE) 2782 qat_ae_write_nn_mode(sc, ae, nn_mode); 2783 2784 qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0, 2785 __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0)); 2786 qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1, 2787 __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1)); 2788 2789 qat_ae_write_shared_cs_mode(sc, ae, 2790 __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE)); 2791 qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size, 2792 __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED), 2793 qae->qae_reloc_ustore_dram); 2794 } 2795 2796 return 0; 2797 } 2798 2799 int 2800 qat_aefw_uof_init(struct qat_softc *sc) 2801 { 2802 int ae, i, error; 2803 uint32_t mask; 2804 2805 for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { 2806 struct qat_ae *qae; 2807 2808 if (!(mask & 1)) 2809 continue; 2810 2811 qae = &(QAT_AE(sc, ae)); 2812 2813 for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { 2814 if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned & 2815 (1 << ae)) == 0) 2816 continue; 2817 2818 error = qat_aefw_uof_assign_image(sc, qae, 2819 &sc->sc_aefw_uof.qafu_imgs[i]); 2820 if (error) 2821 return error; 2822 } 2823 2824 /* XXX UcLo_initNumUwordUsed */ 2825 2826 qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */ 2827 2828 error = qat_aefw_uof_init_ae(sc, ae); 2829 if (error) 2830 return error; 2831 } 2832 2833 return 0; 2834 } 2835 2836 int 2837 qat_aefw_load(struct qat_softc *sc) 2838 { 2839 int error; 2840 2841 error = qat_aefw_load_mof(sc); 2842 if (error) 2843 return error; 2844 2845 error = qat_aefw_load_mmp(sc); 2846 if (error) 2847 return error; 2848 2849 error = qat_aefw_mof_parse(sc); 2850 if (error) { 2851 aprint_error_dev(sc->sc_dev, "couldn't parse mof: %d\n", error); 2852 return error; 2853 } 
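	/*
	 * Devices with firmware authentication take the signed SUOF
	 * path: each image is parsed, authenticated and loaded through
	 * the FCU.  Other devices parse the plain UOF and write the
	 * microcode to the AEs directly.
	 */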
2854 2855 if (sc->sc_hw.qhw_fw_auth) { 2856 error = qat_aefw_suof_parse(sc); 2857 if (error) { 2858 aprint_error_dev(sc->sc_dev, "couldn't parse suof: %d\n", 2859 error); 2860 return error; 2861 } 2862 2863 error = qat_aefw_suof_write(sc); 2864 if (error) { 2865 aprint_error_dev(sc->sc_dev, 2866 "could not write firmware: %d\n", error); 2867 return error; 2868 } 2869 2870 } else { 2871 error = qat_aefw_uof_parse(sc); 2872 if (error) { 2873 aprint_error_dev(sc->sc_dev, "couldn't parse uof: %d\n", 2874 error); 2875 return error; 2876 } 2877 2878 error = qat_aefw_uof_init(sc); 2879 if (error) { 2880 aprint_error_dev(sc->sc_dev, 2881 "couldn't init for aefw: %d\n", error); 2882 return error; 2883 } 2884 2885 error = qat_aefw_uof_write(sc); 2886 if (error) { 2887 aprint_error_dev(sc->sc_dev, 2888 "Could not write firmware: %d\n", error); 2889 return error; 2890 } 2891 } 2892 2893 return 0; 2894 } 2895 2896 int 2897 qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask) 2898 { 2899 uint32_t fcu; 2900 int retry = 0; 2901 2902 if (sc->sc_hw.qhw_fw_auth) { 2903 qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START); 2904 do { 2905 delay(FW_AUTH_WAIT_PERIOD * 1000); 2906 fcu = qat_cap_global_read_4(sc, FCU_STATUS); 2907 if (fcu & FCU_STATUS_DONE) 2908 return 0; 2909 } while (retry++ < FW_AUTH_MAX_RETRY); 2910 2911 aprint_error_dev(sc->sc_dev, 2912 "firmware start timeout: status %08x\n", fcu); 2913 return EINVAL; 2914 } else { 2915 qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX, 2916 CTX_WAKEUP_EVENTS_INDIRECT, 2917 CTX_WAKEUP_EVENTS_INDIRECT_SLEEP); 2918 qat_ae_enable_ctx(sc, ae, ctx_mask); 2919 } 2920 2921 return 0; 2922 } 2923 2924 int 2925 qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim) 2926 { 2927 struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; 2928 struct qat_ae_batch_init_list *qabi_list; 2929 struct uof_mem_val_attr *memattr; 2930 size_t *curinit; 2931 u_long ael; 2932 int i; 2933 const char *sym; 2934 char *ep; 2935 2936 memattr = (struct uof_mem_val_attr *)(uim + 1); 2937 2938 switch (uim->uim_region) { 2939 case LMEM_REGION: 2940 if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) { 2941 aprint_error_dev(sc->sc_dev, 2942 "Invalid lmem addr or bytes\n"); 2943 return ENOBUFS; 2944 } 2945 if (uim->uim_scope != UOF_SCOPE_LOCAL) 2946 return EINVAL; 2947 sym = qat_aefw_uof_string(sc, uim->uim_sym_name); 2948 ael = strtoul(sym, &ep, 10); 2949 if (ep == sym || ael > MAX_AE) 2950 return EINVAL; 2951 if ((sc->sc_ae_mask & (1 << ael)) == 0) 2952 return 0; /* ae is fused out */ 2953 2954 curinit = &qafu->qafu_num_lm_init[ael]; 2955 qabi_list = &qafu->qafu_lm_init[ael]; 2956 2957 for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) { 2958 struct qat_ae_batch_init *qabi; 2959 2960 qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init)); 2961 if (*curinit == 0) 2962 SIMPLEQ_INIT(qabi_list); 2963 SIMPLEQ_INSERT_TAIL(qabi_list, qabi, qabi_next); 2964 2965 qabi->qabi_ae = (u_int)ael; 2966 qabi->qabi_addr = 2967 uim->uim_addr + memattr->umva_byte_offset; 2968 qabi->qabi_value = &memattr->umva_value; 2969 qabi->qabi_size = 4; 2970 qafu->qafu_num_lm_init_inst[ael] += 2971 qat_ae_get_inst_num(qabi->qabi_size); 2972 (*curinit)++; 2973 if (*curinit >= MAX_LMEM_REG) { 2974 aprint_error_dev(sc->sc_dev, 2975 "Invalid lmem val attr\n"); 2976 return ENOBUFS; 2977 } 2978 } 2979 break; 2980 case SRAM_REGION: 2981 case DRAM_REGION: 2982 case DRAM1_REGION: 2983 case SCRATCH_REGION: 2984 case UMEM_REGION: 2985 /* XXX */ 2986 /* fallthrough */ 2987 default: 2988 
aprint_error_dev(sc->sc_dev, 2989 "unsupported memory region to init: %d\n", 2990 uim->uim_region); 2991 return ENOTSUP; 2992 } 2993 2994 return 0; 2995 } 2996 2997 void 2998 qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae) 2999 { 3000 struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; 3001 struct qat_ae_batch_init *qabi; 3002 3003 while ((qabi = SIMPLEQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) { 3004 SIMPLEQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next); 3005 qat_free_mem(qabi); 3006 } 3007 3008 qafu->qafu_num_lm_init[ae] = 0; 3009 qafu->qafu_num_lm_init_inst[ae] = 0; 3010 } 3011 3012 int 3013 qat_aefw_init_ustore(struct qat_softc *sc) 3014 { 3015 uint64_t *fill; 3016 uint32_t dont_init; 3017 int a, i, p; 3018 int error = 0; 3019 int usz, end, start; 3020 u_char ae, nae; 3021 3022 fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t)); 3023 3024 for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) { 3025 struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a]; 3026 struct uof_image *ui = qui->qui_image; 3027 3028 for (i = 0; i < MAX_USTORE; i++) 3029 memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t)); 3030 /* 3031 * Compute do_not_init value as a value that will not be equal 3032 * to fill data when cast to an int 3033 */ 3034 dont_init = 0; 3035 if (dont_init == (uint32_t)fill[0]) 3036 dont_init = 0xffffffff; 3037 3038 for (p = 0; p < ui->ui_num_pages; p++) { 3039 struct qat_uof_page *qup = &qui->qui_pages[p]; 3040 if (!qup->qup_def_page) 3041 continue; 3042 3043 for (i = qup->qup_beg_paddr; 3044 i < qup->qup_beg_paddr + qup->qup_num_micro_words; 3045 i++ ) { 3046 fill[i] = (uint64_t)dont_init; 3047 } 3048 } 3049 3050 for (ae = 0; ae < sc->sc_ae_num; ae++) { 3051 KASSERT(ae < UOF_MAX_NUM_OF_AE); 3052 if ((ui->ui_ae_assigned & (1 << ae)) == 0) 3053 continue; 3054 3055 if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) { 3056 qat_ae_get_shared_ustore_ae(ae, &nae); 3057 if (ui->ui_ae_assigned & (1 << ae)) 3058 continue; 3059 } 3060 usz = QAT_AE(sc, ae).qae_effect_ustore_size; 3061 3062 /* initialize the areas not going to be overwritten */ 3063 end = -1; 3064 do { 3065 /* find next uword that needs to be initialized */ 3066 for (start = end + 1; start < usz; start++) { 3067 if ((uint32_t)fill[start] != dont_init) 3068 break; 3069 } 3070 /* see if there are no more such uwords */ 3071 if (start >= usz) 3072 break; 3073 for (end = start + 1; end < usz; end++) { 3074 if ((uint32_t)fill[end] == dont_init) 3075 break; 3076 } 3077 if (QAT_AE(sc, ae).qae_shareable_ustore) { 3078 error = ENOTSUP; /* XXX */ 3079 goto out; 3080 } else { 3081 error = qat_ae_ucode_write(sc, ae, 3082 start, end - start, &fill[start]); 3083 if (error) { 3084 goto out; 3085 } 3086 } 3087 3088 } while (end < usz); 3089 } 3090 } 3091 3092 out: 3093 qat_free_mem(fill); 3094 return error; 3095 } 3096 3097 int 3098 qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask, 3099 enum aereg_type regtype, u_short regaddr, u_int value) 3100 { 3101 int error = 0; 3102 u_char ctx; 3103 3104 switch (regtype) { 3105 case AEREG_GPA_REL: 3106 case AEREG_GPB_REL: 3107 case AEREG_SR_REL: 3108 case AEREG_SR_RD_REL: 3109 case AEREG_SR_WR_REL: 3110 case AEREG_DR_REL: 3111 case AEREG_DR_RD_REL: 3112 case AEREG_DR_WR_REL: 3113 case AEREG_NEIGH_REL: 3114 /* init for all valid ctx */ 3115 for (ctx = 0; ctx < MAX_AE_CTX; ctx++) { 3116 if ((ctx_mask & (1 << ctx)) == 0) 3117 continue; 3118 error = qat_aereg_rel_data_write(sc, ae, ctx, regtype, 3119 regaddr, value); 3120 } 3121 break; 3122 case AEREG_GPA_ABS: 3123 case 
AEREG_GPB_ABS: 3124 case AEREG_SR_ABS: 3125 case AEREG_SR_RD_ABS: 3126 case AEREG_SR_WR_ABS: 3127 case AEREG_DR_ABS: 3128 case AEREG_DR_RD_ABS: 3129 case AEREG_DR_WR_ABS: 3130 error = qat_aereg_abs_data_write(sc, ae, regtype, 3131 regaddr, value); 3132 break; 3133 default: 3134 error = EINVAL; 3135 break; 3136 } 3137 3138 return error; 3139 } 3140 3141 int 3142 qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae, 3143 struct qat_uof_image *qui) 3144 { 3145 u_int i, expres; 3146 u_char ctx_mask; 3147 3148 for (i = 0; i < qui->qui_num_init_reg_sym; i++) { 3149 struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i]; 3150 3151 if (uirs->uirs_value_type == EXPR_VAL) { 3152 /* XXX */ 3153 aprint_error_dev(sc->sc_dev, 3154 "does not support initializing EXPR_VAL\n"); 3155 return ENOTSUP; 3156 } else { 3157 expres = uirs->uirs_value; 3158 } 3159 3160 switch (uirs->uirs_init_type) { 3161 case INIT_REG: 3162 if (__SHIFTOUT(qui->qui_image->ui_ae_mode, 3163 AE_MODE_CTX_MODE) == MAX_AE_CTX) { 3164 ctx_mask = 0xff; /* 8-ctx mode */ 3165 } else { 3166 ctx_mask = 0x55; /* 4-ctx mode */ 3167 } 3168 qat_aefw_init_reg(sc, ae, ctx_mask, 3169 (enum aereg_type)uirs->uirs_reg_type, 3170 (u_short)uirs->uirs_addr_offset, expres); 3171 break; 3172 case INIT_REG_CTX: 3173 if (__SHIFTOUT(qui->qui_image->ui_ae_mode, 3174 AE_MODE_CTX_MODE) == MAX_AE_CTX) { 3175 ctx_mask = 0xff; /* 8-ctx mode */ 3176 } else { 3177 ctx_mask = 0x55; /* 4-ctx mode */ 3178 } 3179 if (((1 << uirs->uirs_ctx) & ctx_mask) == 0) 3180 return EINVAL; 3181 qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx, 3182 (enum aereg_type)uirs->uirs_reg_type, 3183 (u_short)uirs->uirs_addr_offset, expres); 3184 break; 3185 case INIT_EXPR: 3186 case INIT_EXPR_ENDIAN_SWAP: 3187 default: 3188 aprint_error_dev(sc->sc_dev, 3189 "does not support initializing init_type %d\n", 3190 uirs->uirs_init_type); 3191 return ENOTSUP; 3192 } 3193 } 3194 3195 return 0; 3196 } 3197 3198 int 3199 qat_aefw_init_memory(struct qat_softc *sc) 3200 { 3201 struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; 3202 size_t uimsz, initmemsz = qafu->qafu_init_mem_size; 3203 struct uof_init_mem *uim; 3204 int error, i; 3205 u_char ae; 3206 3207 uim = qafu->qafu_init_mem; 3208 for (i = 0; i < qafu->qafu_num_init_mem; i++) { 3209 uimsz = sizeof(struct uof_init_mem) + 3210 sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr; 3211 if (uimsz > initmemsz) { 3212 aprint_error_dev(sc->sc_dev, 3213 "invalid uof_init_mem or uof_mem_val_attr size\n"); 3214 return EINVAL; 3215 } 3216 3217 if (uim->uim_num_bytes > 0) { 3218 error = qat_aefw_init_memory_one(sc, uim); 3219 if (error) { 3220 aprint_error_dev(sc->sc_dev, 3221 "Could not init ae memory: %d\n", error); 3222 return error; 3223 } 3224 } 3225 uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz); 3226 initmemsz -= uimsz; 3227 } 3228 3229 /* run Batch put LM API */ 3230 for (ae = 0; ae < MAX_AE; ae++) { 3231 error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae], 3232 qafu->qafu_num_lm_init_inst[ae]); 3233 if (error) 3234 aprint_error_dev(sc->sc_dev, "Could not put lm\n"); 3235 3236 qat_aefw_free_lm_init(sc, ae); 3237 } 3238 3239 error = qat_aefw_init_ustore(sc); 3240 3241 /* XXX run Batch put LM API */ 3242 3243 return error; 3244 } 3245 3246 int 3247 qat_aefw_init_globals(struct qat_softc *sc) 3248 { 3249 struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; 3250 int error, i, p, s; 3251 u_char ae; 3252 3253 /* initialize the memory segments */ 3254 if (qafu->qafu_num_init_mem > 0) { 3255 error = qat_aefw_init_memory(sc); 3256 if (error) 3257 
return error; 3258 } else { 3259 error = qat_aefw_init_ustore(sc); 3260 if (error)
3261 return error; 3262 } 3263 3264 /* XXX bind import variables with ivd values */ 3265
3266 /* XXX bind the uC global variables 3267 * local variables will be bound on-the-fly */
3268 for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
3269 for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) {
3270 struct qat_uof_page *qup = 3271 &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p];
3272 if (qup->qup_num_uw_blocks && 3273 (qup->qup_num_uc_var || qup->qup_num_imp_var)) {
3274 aprint_error_dev(sc->sc_dev, 3275 "does not support uC global variables\n");
3276 return ENOTSUP; 3277 } 3278 } 3279 } 3280
3281 for (ae = 0; ae < sc->sc_ae_num; ae++) {
3282 struct qat_ae *qae = &(QAT_AE(sc, ae)); 3283
3284 for (s = 0; s < qae->qae_num_slices; s++) {
3285 struct qat_ae_slice *qas = &qae->qae_slices[s]; 3286
3287 if (qas->qas_image == NULL) 3288 continue; 3289
3290 error = 3291 qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image);
3292 if (error) 3293 return error; 3294 } 3295 } 3296 3297 return 0; 3298 } 3299
3300 uint64_t
3301 qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup,
3302 u_int addr) 3303 { 3304 uint64_t uinst = 0; 3305 u_int i; 3306
3307 /* find the block */
3308 for (i = 0; i < qup->qup_num_uw_blocks; i++) {
3309 struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i]; 3310
3311 if ((addr >= quub->quub_start_addr) && 3312 (addr <= (quub->quub_start_addr +
3313 (quub->quub_num_words - 1)))) {
3314 /* unpack n bytes and assign them to the 64-bit uword value.
3315 note: the microwords are stored as packed bytes. 3316 */
3317 addr -= quub->quub_start_addr; 3318 addr *= AEV2_PACKED_UWORD_BYTES;
3319 memcpy(&uinst, 3320 (void *)((uintptr_t)quub->quub_micro_words + addr),
3321 AEV2_PACKED_UWORD_BYTES); 3322 uinst = uinst & UWORD_MASK; 3323
3324 return uinst; 3325 } 3326 } 3327 3328 return INVLD_UWORD; 3329 } 3330
3331 int
3332 qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup)
3333 { 3334 struct qat_ae *qae = &(QAT_AE(sc, ae));
3335 uint64_t fill, *ucode_cpybuf;
3336 u_int error, i, upaddr, uraddr, ninst, cpylen; 3337
3338 if (qup->qup_num_uc_var || qup->qup_num_neigh_reg ||
3339 qup->qup_num_imp_var || qup->qup_num_imp_expr) {
3340 aprint_error_dev(sc->sc_dev, 3341 "does not support fixup locals\n");
3342 return ENOTSUP; 3343 } 3344
3345 ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t)); 3346
3347 /* XXX get fill-pattern from an image -- they are all the same */
3348 memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern,
3349 sizeof(uint64_t)); 3350
3351 upaddr = qup->qup_beg_paddr; 3352 uraddr = 0;
3353 ninst = qup->qup_num_micro_words;
3354 while (ninst > 0) { 3355 cpylen = uimin(ninst, UWORD_CPYBUF_SIZE); 3356
3357 /* load the buffer */
3358 for (i = 0; i < cpylen; i++) {
3359 /* keep the code structure below in case there is
3360 * different handling for shared scenarios */
3361 if (!qae->qae_shareable_ustore) {
3362 /* qat_aefw_get_uof_inst() takes an address that
3363 * is relative to the start of the page.
3364 * So we don't need to add in the physical
3365 * offset of the page.
*/ 3366 if (qup->qup_page_region != 0) { 3367 /* XXX */ 3368 aprint_error_dev(sc->sc_dev, 3369 "region != 0 is not supported\n"); 3370 qat_free_mem(ucode_cpybuf); 3371 return ENOTSUP; 3372 } else { 3373 /* for mixing case, it should take 3374 * physical address */ 3375 ucode_cpybuf[i] = qat_aefw_get_uof_inst( 3376 sc, qup, upaddr + i); 3377 if (ucode_cpybuf[i] == INVLD_UWORD) { 3378 /* fill hole in the uof */ 3379 ucode_cpybuf[i] = fill; 3380 } 3381 } 3382 } else { 3383 /* XXX */ 3384 qat_free_mem(ucode_cpybuf); 3385 return ENOTSUP; 3386 } 3387 } 3388 3389 /* copy the buffer to ustore */ 3390 if (!qae->qae_shareable_ustore) { 3391 error = qat_ae_ucode_write(sc, ae, upaddr, cpylen, 3392 ucode_cpybuf); 3393 if (error) 3394 return error; 3395 } else { 3396 /* XXX */ 3397 qat_free_mem(ucode_cpybuf); 3398 return ENOTSUP; 3399 } 3400 upaddr += cpylen; 3401 uraddr += cpylen; 3402 ninst -= cpylen; 3403 } 3404 3405 qat_free_mem(ucode_cpybuf); 3406 3407 return 0; 3408 } 3409 3410 int 3411 qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui) 3412 { 3413 struct uof_image *ui = qui->qui_image; 3414 struct qat_ae_page *qap; 3415 u_int s, p, c; 3416 int error; 3417 u_char ae, ctx_mask; 3418 3419 aprint_verbose_dev(sc->sc_dev, 3420 "aefw writing uof %s\n", 3421 qat_aefw_uof_string(sc, qui->qui_image->ui_name)); 3422 3423 error = qat_aefw_init_globals(sc); 3424 if (error) { 3425 aprint_error_dev(sc->sc_dev, 3426 "Could not initialize globals\n"); 3427 return error; 3428 } 3429 3430 if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX) 3431 ctx_mask = 0xff; /* 8-ctx mode */ 3432 else 3433 ctx_mask = 0x55; /* 4-ctx mode */ 3434 3435 /* load the default page and set assigned CTX PC 3436 * to the entrypoint address */ 3437 for (ae = 0; ae < sc->sc_ae_num; ae++) { 3438 struct qat_ae *qae = &(QAT_AE(sc, ae)); 3439 struct qat_ae_slice *qas; 3440 u_int metadata; 3441 3442 KASSERT(ae < UOF_MAX_NUM_OF_AE); 3443 3444 if ((ui->ui_ae_assigned & (1 << ae)) == 0) 3445 continue; 3446 3447 /* find the slice to which this image is assigned */ 3448 for (s = 0; s < qae->qae_num_slices; s++) { 3449 qas = &qae->qae_slices[s]; 3450 if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask) 3451 break; 3452 } 3453 if (s >= qae->qae_num_slices) 3454 continue; 3455 3456 qas = &qae->qae_slices[s]; 3457 3458 for (p = 0; p < ui->ui_num_pages; p++) { 3459 qap = &qas->qas_pages[p]; 3460 3461 /* Only load pages loaded by default */ 3462 if (!qap->qap_page->qup_def_page) 3463 continue; 3464 3465 error = qat_aefw_do_pagein(sc, ae, qap->qap_page); 3466 if (error) 3467 return error; 3468 } 3469 3470 metadata = qas->qas_image->qui_image->ui_app_metadata; 3471 if (metadata != 0xffffffff) { 3472 aprint_normal_dev(sc->sc_dev, 3473 "loaded firmware: %s\n", 3474 qat_aefw_uof_string(sc, metadata)); 3475 } 3476 3477 /* Assume starting page is page 0 */ 3478 qap = &qas->qas_pages[0]; 3479 for (c = 0; c < MAX_AE_CTX; c++) { 3480 if (ctx_mask & (1 << c)) 3481 qas->qas_cur_pages[c] = qap; 3482 else 3483 qas->qas_cur_pages[c] = NULL; 3484 } 3485 3486 /* set the live context */ 3487 qae->qae_live_ctx_mask = ui->ui_ctx_assigned; 3488 3489 /* set context PC to the image entrypoint address */ 3490 error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned, 3491 ui->ui_entry_address); 3492 if (error) 3493 return error; 3494 } 3495 3496 /* XXX store the checksum for convenience */ 3497 3498 return 0; 3499 } 3500 3501 int 3502 qat_aefw_uof_write(struct qat_softc *sc) 3503 { 3504 int error = 0; 3505 int i; 3506 3507 for (i = 0; i < 
sc->sc_aefw_uof.qafu_num_imgs; i++) { 3508 error = qat_aefw_uof_write_one(sc, 3509 &sc->sc_aefw_uof.qafu_imgs[i]); 3510 if (error) 3511 break; 3512 } 3513 3514 /* XXX UcLo_computeFreeUstore */ 3515 3516 return error; 3517 } 3518
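
/*
 * A rough sketch of how the entry points above are expected to be used by
 * the attach code.  The actual call sites live outside this file, so the
 * ordering and arguments below are illustrative only, not a copy of the
 * real driver code:
 *
 *	if (qat_aefw_load(sc) != 0)		// fetch and parse MOF/MMP,
 *		goto fail;			// write the UOF/SUOF images
 *	for (ae = 0; ae < sc->sc_ae_num; ae++) {
 *		if ((sc->sc_ae_mask & (1 << ae)) == 0)
 *			continue;		// AE fused out
 *		qat_aefw_start(sc, ae, AE_ALL_CTX);
 *	}
 *
 * Note that in the qhw_fw_auth case qat_aefw_start() does not use its ae
 * and ctx_mask arguments and simply issues the FCU start command.
 */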