/*	$NetBSD: qat_ae.c,v 1.2 2021/12/05 07:28:20 msaitoh Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.2 2021/12/05 07:28:20 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/firmload.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "qatreg.h"
#include "qatvar.h"
#include "qat_aevar.h"

int		qat_ae_write_4(struct qat_softc *, u_char, bus_size_t,
		    uint32_t);
int		qat_ae_read_4(struct qat_softc *, u_char, bus_size_t,
		    uint32_t *);
void		qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t,
		    bus_size_t, uint32_t);
int		qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t,
		    bus_size_t, uint32_t *);

u_short		qat_aereg_get_10bit_addr(enum aereg_type, u_short);
int		qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t *);
int		qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, uint32_t);
int		qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short,
		    u_short *, u_char *);
int		qat_aereg_abs_data_write(struct qat_softc *, u_char,
		    enum aereg_type, u_short, uint32_t);

void		qat_ae_enable_ctx(struct qat_softc *, u_char, u_int);
void		qat_ae_disable_ctx(struct qat_softc *, u_char, u_int);
void		qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char);
void		qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char);
void		qat_ae_write_lm_mode(struct qat_softc *, u_char,
		    enum aereg_type, u_char);
void		qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char,
		    u_char);
void		qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char);
int		qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int,
		    u_int);

enum qat_ae_status
		qat_ae_get_status(struct qat_softc *, u_char);
int		qat_ae_is_active(struct qat_softc *, u_char);
int		qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int);

int		qat_ae_clear_reset(struct qat_softc *);
int		qat_ae_check(struct qat_softc *);
int		qat_ae_reset_timestamp(struct qat_softc *);
void		qat_ae_clear_xfer(struct qat_softc *);
int		qat_ae_clear_gprs(struct qat_softc *);

void		qat_ae_get_shared_ustore_ae(u_char, u_char *);
u_int		qat_ae_ucode_parity64(uint64_t);
uint64_t	qat_ae_ucode_set_ecc(uint64_t);
int		qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int,
		    const uint64_t *);
int		qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int,
		    uint64_t *);
u_int		qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *);
int		qat_ae_exec_ucode(struct qat_softc *, u_char, u_char,
		    uint64_t *, u_int, int, u_int, u_int *);
int		qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char,
		    int *, uint64_t *, u_int,
		    u_int *, u_int *, u_int *, u_int *, u_int *);
int		qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char,
		    u_int, u_int, u_int, u_int, u_int);
int		qat_ae_get_inst_num(int);
int		qat_ae_batch_put_lm(struct qat_softc *, u_char,
		    struct qat_ae_batch_init_list *, size_t);
int		qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int);

u_int		qat_aefw_csum(char *, int);
const char *	qat_aefw_uof_string(struct qat_softc *, size_t);
struct uof_chunk_hdr *
		qat_aefw_uof_find_chunk(struct qat_softc *, const char *,
		    struct uof_chunk_hdr *);

int		qat_aefw_load_mof(struct qat_softc *);
int		qat_aefw_load_mmp(struct qat_softc *);

int		qat_aefw_mof_find_uof0(struct qat_softc *,
		    struct mof_uof_hdr *, struct mof_uof_chunk_hdr *,
		    u_int, size_t, const char *,
		    size_t *, void **);
int		qat_aefw_mof_find_uof(struct qat_softc *);
int		qat_aefw_mof_parse(struct qat_softc *);

int		qat_aefw_uof_parse_image(struct qat_softc *,
		    struct qat_uof_image *, struct uof_chunk_hdr *uch);
int		qat_aefw_uof_parse_images(struct qat_softc *);
int		qat_aefw_uof_parse(struct qat_softc *);

int		qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t,
		    struct qat_dmamem *);
int		qat_aefw_auth(struct qat_softc *, struct qat_dmamem *);
int		qat_aefw_suof_load(struct qat_softc *sc,
		    struct qat_dmamem *dma);
int		qat_aefw_suof_parse_image(struct qat_softc *,
		    struct qat_suof_image *, struct suof_chunk_hdr *);
int		qat_aefw_suof_parse(struct qat_softc *);
int		qat_aefw_suof_write(struct qat_softc *);

int		qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *,
		    struct qat_uof_image *);
int		qat_aefw_uof_init_ae(struct qat_softc *, u_char);
int		qat_aefw_uof_init(struct qat_softc *);

int		qat_aefw_init_memory_one(struct qat_softc *,
		    struct uof_init_mem *);
void		qat_aefw_free_lm_init(struct qat_softc *, u_char);
int		qat_aefw_init_ustore(struct qat_softc *);
int		qat_aefw_init_reg(struct qat_softc *, u_char, u_char,
		    enum aereg_type, u_short, u_int);
int		qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char,
		    struct qat_uof_image *);
int		qat_aefw_init_memory(struct qat_softc *);
int		qat_aefw_init_globals(struct qat_softc *);
uint64_t	qat_aefw_get_uof_inst(struct qat_softc *,
		    struct qat_uof_page *, u_int);
int		qat_aefw_do_pagein(struct qat_softc *, u_char,
		    struct qat_uof_page *);
int		qat_aefw_uof_write_one(struct qat_softc *, struct qat_uof_image *);
int		qat_aefw_uof_write(struct qat_softc *);

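/*
 * AE local CSR access helpers.  A local CSR access can fail transiently
 * while the AE is busy, so each access is retried until LOCAL_CSR_STATUS
 * reads back clear, giving up after TIMEOUT_AE_CSR attempts.
 */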
int
qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t value)
{
	int times = TIMEOUT_AE_CSR;

	do {
		qat_ae_local_write_4(sc, ae, offset, value);
		if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
		    LOCAL_CSR_STATUS_STATUS) == 0)
			return 0;
	} while (times--);

	aprint_error_dev(sc->sc_dev,
	    "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
	return EFAULT;
}

int
qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t *value)
{
	int times = TIMEOUT_AE_CSR;
	uint32_t v;

	do {
		v = qat_ae_local_read_4(sc, ae, offset);
		if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
		    LOCAL_CSR_STATUS_STATUS) == 0) {
			*value = v;
			return 0;
		}
	} while (times--);

	aprint_error_dev(sc->sc_dev,
	    "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
	return EFAULT;
}

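/*
 * Per-context CSRs are reached indirectly: CSR_CTX_POINTER selects the
 * context and the following CSR access hits that context's copy.  The
 * write variant below walks every context set in ctx_mask and restores
 * the original context pointer afterwards; the read variant swaps the
 * pointer only if the requested context is not already selected.
 */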
void
qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask,
    bus_size_t offset, uint32_t value)
{
	int ctx;
	uint32_t ctxptr;

	KASSERT(offset == CTX_FUTURE_COUNT_INDIRECT ||
	    offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
	    offset == CTX_STS_INDIRECT ||
	    offset == CTX_WAKEUP_EVENTS_INDIRECT ||
	    offset == CTX_SIG_EVENTS_INDIRECT ||
	    offset == LM_ADDR_0_INDIRECT ||
	    offset == LM_ADDR_1_INDIRECT ||
	    offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
	    offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);

	qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
	for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
		if ((ctx_mask & (1 << ctx)) == 0)
			continue;
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
		qat_ae_write_4(sc, ae, offset, value);
	}
	qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
}

int
qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx,
    bus_size_t offset, uint32_t *value)
{
	int error;
	uint32_t ctxptr;

	KASSERT(offset == CTX_FUTURE_COUNT_INDIRECT ||
	    offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
	    offset == CTX_STS_INDIRECT ||
	    offset == CTX_WAKEUP_EVENTS_INDIRECT ||
	    offset == CTX_SIG_EVENTS_INDIRECT ||
	    offset == LM_ADDR_0_INDIRECT ||
	    offset == LM_ADDR_1_INDIRECT ||
	    offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
	    offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);

	/* save the ctx ptr */
	qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
	if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
	    (ctx & CSR_CTX_POINTER_CONTEXT))
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);

	error = qat_ae_read_4(sc, ae, offset, value);

	/* restore ctx ptr */
	if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
	    (ctx & CSR_CTX_POINTER_CONTEXT))
		qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);

	return error;
}

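/*
 * Register operands are encoded as 10-bit ustore addresses.  The map
 * below mirrors the switch in qat_aereg_get_10bit_addr(): absolute GPRs
 * live at 0x80, relative GPRs at 0x00, SR transfer registers at 0x180
 * (0x140 indexed), DR transfer registers at 0x1c0 (0x100 indexed),
 * neighbour registers at 0x280 (0x241 indexed), the two LM index
 * registers at 0x200/0x220, and 0x300 encodes an immediate byte with no
 * destination register.
 */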
u_short
qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg)
{
	u_short addr;

	switch (regtype) {
	case AEREG_GPA_ABS:
	case AEREG_GPB_ABS:
		addr = (reg & 0x7f) | 0x80;
		break;
	case AEREG_GPA_REL:
	case AEREG_GPB_REL:
		addr = reg & 0x1f;
		break;
	case AEREG_SR_RD_REL:
	case AEREG_SR_WR_REL:
	case AEREG_SR_REL:
		addr = 0x180 | (reg & 0x1f);
		break;
	case AEREG_SR_INDX:
		addr = 0x140 | ((reg & 0x3) << 1);
		break;
	case AEREG_DR_RD_REL:
	case AEREG_DR_WR_REL:
	case AEREG_DR_REL:
		addr = 0x1c0 | (reg & 0x1f);
		break;
	case AEREG_DR_INDX:
		addr = 0x100 | ((reg & 0x3) << 1);
		break;
	case AEREG_NEIGH_INDX:
		addr = 0x241 | ((reg & 0x3) << 1);
		break;
	case AEREG_NEIGH_REL:
		addr = 0x280 | (reg & 0x1f);
		break;
	case AEREG_LMEM0:
		addr = 0x200;
		break;
	case AEREG_LMEM1:
		addr = 0x220;
		break;
	case AEREG_NO_DEST:
		addr = 0x300 | (reg & 0xff);
		break;
	default:
		addr = AEREG_BAD_REGADDR;
		break;
	}
	return (addr);
}

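/*
 * Writing a GPR or LM index register is done by running a short ucode
 * snippet on the AE itself: the two immed instructions below are patched
 * with the 10-bit destination address and the 16-bit halves of the
 * value.  Writing 0xdeadbeef, for instance, turns them into
 * immed_w1[reg, 0xdead] and immed_w0[reg, 0xbeef].
 */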
int
qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{
	uint16_t srchi, srclo, destaddr, data16hi, data16lo;
	uint64_t inst[] = {
		0x0F440000000ull,	/* immed_w1[reg, val_hi16] */
		0x0F040000000ull,	/* immed_w0[reg, val_lo16] */
		0x0F0000C0300ull,	/* nop */
		0x0E000010000ull	/* ctx_arb[kill] */
	};
	const int ninst = __arraycount(inst);
	const int imm_w1 = 0, imm_w0 = 1;
	unsigned int ctxen;
	uint16_t mask;

	/*
	 * This logic only works for GPRs and LM index registers,
	 * not NN or XFER registers!
	 */
	KASSERT(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);

	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) {
		/* determine the context mode */
		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
			/* 4-ctx mode */
			if (ctx & 0x1)
				return EINVAL;
			mask = 0x1f;
		} else {
			/* 8-ctx mode */
			mask = 0x0f;
		}
		if (relreg & ~mask)
			return EINVAL;
	}
	if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
	    AEREG_BAD_REGADDR) {
		return EINVAL;
	}

	data16lo = 0xffff & value;
	data16hi = 0xffff & (value >> 16);
	srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
		(uint16_t)(0xff & data16hi));
	srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
		(uint16_t)(0xff & data16lo));

	switch (regtype) {
	case AEREG_GPA_REL:	/* A rel source */
		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
		    ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff);
		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
		    ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff);
		break;
	default:
		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
		    ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff);
		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
		    ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff);
		break;
	}

	return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL);
}

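/*
 * Reading a relative data register also goes through the AE: an
 * "alu[--, --, B, reg]" pass-through instruction is planted at ustore
 * address 0 with the ECS bit set and executed in the target context,
 * and the result is sampled from the ALU_OUT CSR.  The saved ustore
 * word, active context, CTX_ARB_CNTL, CTX_ENABLES and AE_MISC_CONTROL
 * are all restored on the way out.
 */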
int
qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t *value)
{
	uint64_t inst, savucode;
	uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi;
	u_int uaddr, ustore_addr;
	int error;
	u_short mask, regaddr;
	u_char nae;

	KASSERT(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
	    regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL ||
	    regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL ||
	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);

	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) ||
	    (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) ||
	    (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL))
	{
		/* determine the context mode */
		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
			/* 4-ctx mode */
			if (ctx & 0x1)
				return EINVAL;
			mask = 0x1f;
		} else {
			/* 8-ctx mode */
			mask = 0x0f;
		}
		if (relreg & ~mask)
			return EINVAL;
	}
	if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
	    AEREG_BAD_REGADDR) {
		return EINVAL;
	}

	/* instruction -- alu[--, --, B, reg] */
	switch (regtype) {
	case AEREG_GPA_REL:
		/* A rel source */
		inst = 0xA070000000ull | (regaddr & 0x3ff);
		break;
	default:
		inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10));
		break;
	}

	/*
	 * Back up the shared control store bit, and force the AE to
	 * non-shared mode before executing the ucode snippet.
	 */
	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
	if (misc & AE_MISC_CONTROL_SHARE_CS) {
		qat_ae_get_shared_ustore_ae(ae, &nae);
		if (((1 << nae) & sc->sc_ae_mask) && qat_ae_is_active(sc, nae))
			return EBUSY;
	}

	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);

	/* read current context */
	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	/*
	 * Prevent clearing the W1C bits: the breakpoint bit,
	 * ECC error bit, and parity error bit.
	 */
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;

	/* change the context */
	if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO))
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
		    ctx & ACTIVE_CTX_STATUS_ACNO);
	/* save a ustore location */
	if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) {
		/* restore AE_MISC_CONTROL csr */
		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);

		/* restore the context */
		if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
			qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
			    savctx & ACTIVE_CTX_STATUS_ACNO);
		}
		qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);

		return (error);
	}

	/* turn off ustore parity */
	qat_ae_write_4(sc, ae, CTX_ENABLES,
	    ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE));

	/* save ustore-addr csr */
	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);

	/* write the ALU instruction to ustore, enable ecs bit */
	uaddr = 0 | USTORE_ADDRESS_ECS;

	/* set the uaddress */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
	inst = qat_ae_ucode_set_ecc(inst);

	ulo = (uint32_t)(inst & 0xffffffff);
	uhi = (uint32_t)(inst >> 32);

	qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);

	/* this will auto increment the address */
	qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);

	/* set the uaddress */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);

	/* delay for at least 8 cycles */
	qat_ae_wait_num_cycles(sc, ae, 0x8, 0);

	/*
	 * Read the ALU output -- the instruction should have been executed
	 * prior to clearing the ECS in putUwords.
	 */
	qat_ae_read_4(sc, ae, ALU_OUT, value);

	/* restore ustore-addr csr */
	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);

	/* restore the ustore */
	error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode);

	/* restore the context */
	if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
		    savctx & ACTIVE_CTX_STATUS_ACNO);
	}

	qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);

	/* restore AE_MISC_CONTROL csr */
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);

	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);

	return error;
}

int
qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{
	bus_size_t addr;
	int error;
	uint32_t ctxen;
	u_short mask;
	u_short dr_offset;

	KASSERT(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL ||
	    regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL);

	QAT_YIELD();

	error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	if (error)
		return error;

	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		if (ctx & 0x1) {
			aprint_error_dev(sc->sc_dev,
			    "bad ctx argument in 4-ctx mode, ctx=0x%x\n", ctx);
			return EINVAL;
		}
		mask = 0x1f;
		dr_offset = 0x20;
	} else {
		mask = 0x0f;
		dr_offset = 0x10;
	}

	if (relreg & ~mask)
		return EINVAL;

	addr = relreg + (ctx << 0x5);

	switch (regtype) {
	case AEREG_SR_REL:
	case AEREG_SR_RD_REL:
		qat_ae_xfer_write_4(sc, ae, addr, value);
		break;
	case AEREG_DR_REL:
	case AEREG_DR_RD_REL:
		qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value);
		break;
	default:
		error = EINVAL;
	}

	return error;
}

int
qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{

	panic("notyet");

	return 0;
}

int
qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx,
    enum aereg_type regtype, u_short relreg, uint32_t value)
{

	panic("notyet");

	return 0;
}

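/*
 * Absolute register numbers encode the owning context in their upper
 * bits.  In 8-ctx mode, absreg 0x23 maps to relative register 3 of
 * context 2; in 4-ctx mode the low bit of the context number is masked
 * off, so only the even contexts 0, 2, 4 and 6 can be addressed.
 */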
int
qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae,
	u_short absreg, u_short *relreg, u_char *ctx)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		/* 4-ctx mode */
		*relreg = absreg & 0x1f;
		*ctx = (absreg >> 0x4) & 0x6;
	} else {
		/* 8-ctx mode */
		*relreg = absreg & 0x0f;
		*ctx = (absreg >> 0x4) & 0x7;
	}

	return 0;
}

int
qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae,
	enum aereg_type regtype, u_short absreg, uint32_t value)
{
	int error;
	u_short relreg;
	u_char ctx;

	qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx);

	switch (regtype) {
	case AEREG_GPA_ABS:
		KASSERT(absreg < MAX_GPR_REG);
		error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL,
		    relreg, value);
		break;
	case AEREG_GPB_ABS:
		KASSERT(absreg < MAX_GPR_REG);
		error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL,
		    relreg, value);
		break;
	case AEREG_DR_RD_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL,
		    relreg, value);
		break;
	case AEREG_SR_RD_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL,
		    relreg, value);
		break;
	case AEREG_DR_WR_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL,
		    relreg, value);
		break;
	case AEREG_SR_WR_ABS:
		KASSERT(absreg < MAX_XFER_REG);
		error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL,
		    relreg, value);
		break;
	case AEREG_NEIGH_ABS:
		KASSERT(absreg < MAX_NN_REG);
		if (absreg >= MAX_NN_REG)
			return EINVAL;
		error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL,
		    relreg, value);
		break;
	default:
		panic("Invalid Register Type");
	}

	return error;
}

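/*
 * In 4-ctx mode only the even-numbered contexts exist, so the enable
 * mask is filtered through 0x55 before being merged into CTX_ENABLES.
 * The W1C status bits are masked out of the read-modify-write so they
 * are not cleared as a side effect.
 */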
void
qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
		ctx_mask &= 0x55;
	} else {
		ctx_mask &= 0xff;
	}

	ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE);
	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
}

void
qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
{
	uint32_t ctxen;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
	ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE));
	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
}

void
qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (mode == 4)
		nval = val | CTX_ENABLES_INUSE_CONTEXTS;
	else
		nval = val & ~CTX_ENABLES_INUSE_CONTEXTS;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}

void
qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	if (mode)
		nval = val | CTX_ENABLES_NN_MODE;
	else
		nval = val & ~CTX_ENABLES_NN_MODE;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}

void
qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae,
	enum aereg_type lm, u_char mode)
{
	uint32_t val, nval;
	uint32_t bit;

	qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	val &= CTX_ENABLES_IGNORE_W1C_MASK;

	switch (lm) {
	case AEREG_LMEM0:
		bit = CTX_ENABLES_LMADDR_0_GLOBAL;
		break;
	case AEREG_LMEM1:
		bit = CTX_ENABLES_LMADDR_1_GLOBAL;
		break;
	default:
		panic("invalid lmem reg type");
		break;
	}

	if (mode)
		nval = val | bit;
	else
		nval = val & ~bit;

	if (val != nval)
		qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
}

void
qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode)
{
	uint32_t val, nval;

	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);

	if (mode == 1)
		nval = val | AE_MISC_CONTROL_SHARE_CS;
	else
		nval = val & ~AE_MISC_CONTROL_SHARE_CS;

	if (val != nval)
		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval);
}

void
qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode)
{
	u_char nae;

	qat_ae_get_shared_ustore_ae(ae, &nae);

	qat_ae_write_shared_cs_mode0(sc, ae, mode);

	if ((sc->sc_ae_mask & (1 << nae))) {
		qat_ae_write_shared_cs_mode0(sc, nae, mode);
	}
}

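/*
 * The reload window size maps onto the two-bit CS_RELOAD field:
 * disabled, QAT_2K, QAT_4K and QAT_8K encode as 0 through 3.  When a
 * reload window is configured, the ustore DRAM address is remembered
 * per-AE so the image can be reloaded from it later.
 */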
int
qat_ae_set_reload_ustore(struct qat_softc *sc, u_char ae,
	u_int reload_size, int shared_mode, u_int ustore_dram_addr)
{
	uint32_t val, cs_reload;

	switch (reload_size) {
	case 0:
		cs_reload = 0x0;
		break;
	case QAT_2K:
		cs_reload = 0x1;
		break;
	case QAT_4K:
		cs_reload = 0x2;
		break;
	case QAT_8K:
		cs_reload = 0x3;
		break;
	default:
		return EINVAL;
	}

	if (cs_reload)
		QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr;

	QAT_AE(sc, ae).qae_reload_size = reload_size;

	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
	val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD |
	    AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS);
	val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) |
	    __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD);
	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);

	return 0;
}

enum qat_ae_status
qat_ae_get_status(struct qat_softc *sc, u_char ae)
{
	int error;
	uint32_t val = 0;

	error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
	if (error || val & CTX_ENABLES_ENABLE)
		return QAT_AE_ENABLED;

	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
	if (val & ACTIVE_CTX_STATUS_ABO)
		return QAT_AE_ACTIVE;

	return QAT_AE_DISABLED;
}

int
qat_ae_is_active(struct qat_softc *sc, u_char ae)
{
	uint32_t val;

	if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
		return 1;

	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
	if (val & ACTIVE_CTX_STATUS_ABO)
		return 1;
	else
		return 0;
}

/* returns 1 if actually waited for specified number of cycles */
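/*
 * PROFILE_COUNT is a free-running 16-bit cycle counter, so a negative
 * delta means the counter wrapped and 0x10000 is added back to the
 * elapsed count before comparing it against the requested cycles.
 */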
int
qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check)
{
	uint32_t cnt, actx;
	int pcnt, ccnt, elapsed, times;

	qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
	pcnt = cnt & 0xffff;

	times = TIMEOUT_AE_CHECK;
	do {
		qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
		ccnt = cnt & 0xffff;

		elapsed = ccnt - pcnt;
		if (elapsed == 0) {
			times--;
			aprint_debug_dev(sc->sc_dev,
			    "qat_ae_wait_num_cycles elapsed 0 times %d\n",
			    times);
		}
		if (times <= 0) {
			aprint_error_dev(sc->sc_dev,
			    "qat_ae_wait_num_cycles timeout\n");
			return -1;
		}

		if (elapsed < 0)
			elapsed += 0x10000;

		if (elapsed >= CYCLES_FROM_READY2EXE && check) {
			if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS,
			    &actx) == 0) {
				if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
					return 0;
			}
		}
	} while (cycles > elapsed);

	if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) {
		if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
			return 0;
	}

	return 1;
}

int
qat_ae_init(struct qat_softc *sc)
{
	int error;
	uint32_t mask, val = 0;
	u_char ae;

	/* XXX adf_initSysMemInfo */

	/* XXX Disable clock gating for some chip if debug mode */

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		struct qat_ae *qae = &sc->sc_ae[ae];
		if (!(mask & 1))
			continue;

		qae->qae_ustore_size = USTORE_SIZE;

		qae->qae_free_addr = 0;
		qae->qae_free_size = USTORE_SIZE;
		qae->qae_live_ctx_mask = AE_ALL_CTX;
		qae->qae_ustore_dram_addr = 0;
		qae->qae_reload_size = 0;
	}

	/* XXX Enable attention interrupt */

	error = qat_ae_clear_reset(sc);
	if (error)
		return error;

	qat_ae_clear_xfer(sc);

	if (!sc->sc_hw.qhw_fw_auth) {
		error = qat_ae_clear_gprs(sc);
		if (error)
			return error;
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val);
		val |= 0x1;
		qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val);
	}

	error = qat_ae_clear_reset(sc);
	if (error)
		return error;

	/* XXX XXX XXX Clean MMP memory if mem scrub is supported */
	/* halMem_ScrubMMPMemory */

	return 0;
}

int
qat_ae_start(struct qat_softc *sc)
{
	int error;
	u_char ae;

	for (ae = 0; ae < sc->sc_ae_num; ae++) {
		if ((sc->sc_ae_mask & (1 << ae)) == 0)
			continue;

		error = qat_aefw_start(sc, ae, 0xff);
		if (error)
			return error;

		aprint_verbose_dev(sc->sc_dev, "Started AE %d\n", ae);
	}

	return 0;
}

int
qat_ae_cluster_intr(void *arg)
{
	/* XXX */
	printf("qat_ae_cluster_intr\n");

	return 1;
}

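/*
 * Bring the AEs out of reset: clear the AE and accelerator reset bits
 * until they read back clear, enable their clocks, verify the AEs
 * respond via qat_ae_check(), and then force the context CSRs that
 * power up in an undefined state to known initial values.
 */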
int
qat_ae_clear_reset(struct qat_softc *sc)
{
	int error;
	uint32_t times, reset, clock, reg, mask;
	u_char ae;

	reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
	reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK));
	reset &= ~(__SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK));
	times = TIMEOUT_AE_RESET;
	do {
		qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset);
		if ((times--) == 0) {
			aprint_error_dev(sc->sc_dev, "couldn't reset AEs\n");
			return EBUSY;
		}
		reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
	} while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) |
	    __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK))
	    & reg);

	/* Enable clock for AE and QAT */
	clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN);
	clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK);
	clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK);
	qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock);

	error = qat_ae_check(sc);
	if (error)
		return error;

	/*
	 * Set undefined power-up/reset states to reasonable default values...
	 * just to make sure we're starting from a known point
	 */
	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		/* init the ctx_enable */
		qat_ae_write_4(sc, ae, CTX_ENABLES,
		    CTX_ENABLES_INIT);

		/* initialize the PCs */
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_STS_INDIRECT,
		    UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);

		/* init the ctx_arb */
		qat_ae_write_4(sc, ae, CTX_ARB_CNTL,
		    CTX_ARB_CNTL_INIT);

		/* enable cc */
		qat_ae_write_4(sc, ae, CC_ENABLE,
		    CC_ENABLE_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_WAKEUP_EVENTS_INDIRECT,
		    CTX_WAKEUP_EVENTS_INDIRECT_INIT);
		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
		    CTX_SIG_EVENTS_INDIRECT,
		    CTX_SIG_EVENTS_INDIRECT_INIT);
	}

	if ((sc->sc_ae_mask != 0) &&
	    sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) {
		/* XXX XXX XXX init eSram only when this is boot time */
	}

	if ((sc->sc_ae_mask != 0) &&
	    sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) {
		/* XXX XXX XXX wait shram to complete initialization */
	}

	qat_ae_reset_timestamp(sc);

	return 0;
}

int
qat_ae_check(struct qat_softc *sc)
{
	int error, times, ae;
	uint32_t cnt, pcnt, mask;

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		times = TIMEOUT_AE_CHECK;
		error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't access AE %d CSR\n", ae);
			return error;
		}
		pcnt = cnt & 0xffff;

		while (1) {
			error = qat_ae_read_4(sc, ae,
			    PROFILE_COUNT, &cnt);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "couldn't access AE %d CSR\n", ae);
				return error;
			}
			cnt &= 0xffff;
			if (cnt == pcnt)
				times--;
			else
				break;
			if (times <= 0) {
				aprint_error_dev(sc->sc_dev,
				    "AE %d CSR is useless\n", ae);
				return EFAULT;
			}
		}
	}

	return 0;
}

int
qat_ae_reset_timestamp(struct qat_softc *sc)
{
	uint32_t misc, mask;
	u_char ae;

	/* stop the timestamp timers */
	misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC);
	if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) {
		qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
		    misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN));
	}

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0);
		qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0);
	}

	/* start timestamp timers */
	qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
	    misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN);

	return 0;
}

   1187  1.1   hikaru void
   1188  1.1   hikaru qat_ae_clear_xfer(struct qat_softc *sc)
   1189  1.1   hikaru {
   1190  1.1   hikaru 	u_int mask, reg;
   1191  1.1   hikaru 	u_char ae;
   1192  1.1   hikaru 
   1193  1.1   hikaru 	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
   1194  1.1   hikaru 		if (!(mask & 1))
   1195  1.1   hikaru 			continue;
   1196  1.1   hikaru 
   1197  1.1   hikaru 		for (reg = 0; reg < MAX_GPR_REG; reg++) {
   1198  1.1   hikaru 			qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS,
   1199  1.1   hikaru 			    reg, 0);
   1200  1.1   hikaru 			qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS,
   1201  1.1   hikaru 			    reg, 0);
   1202  1.1   hikaru 		}
   1203  1.1   hikaru 	}
   1204  1.1   hikaru }
   1205  1.1   hikaru 
   1206  1.1   hikaru int
   1207  1.1   hikaru qat_ae_clear_gprs(struct qat_softc *sc)
   1208  1.1   hikaru {
   1209  1.1   hikaru 	uint32_t val;
   1210  1.1   hikaru 	uint32_t saved_ctx = 0;
   1211  1.1   hikaru 	int times = TIMEOUT_AE_CHECK, rv;
   1212  1.1   hikaru 	u_char ae;
   1213  1.1   hikaru 	u_int mask;
   1214  1.1   hikaru 
   1215  1.1   hikaru 	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
   1216  1.1   hikaru 		if (!(mask & 1))
   1217  1.1   hikaru 			continue;
   1218  1.1   hikaru 
    1219  1.1   hikaru 		/* turn off the shared control store bit */
    1220  1.1   hikaru 		qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
   1221  1.1   hikaru 		val &= ~AE_MISC_CONTROL_SHARE_CS;
   1222  1.1   hikaru 		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
   1223  1.1   hikaru 
   1224  1.1   hikaru 		/* turn off ucode parity */
   1225  1.1   hikaru 		/* make sure nn_mode is set to self */
   1226  1.1   hikaru 		qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
   1227  1.1   hikaru 		val &= CTX_ENABLES_IGNORE_W1C_MASK;
   1228  1.1   hikaru 		val |= CTX_ENABLES_NN_MODE;
   1229  1.1   hikaru 		val &= ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE;
   1230  1.1   hikaru 		qat_ae_write_4(sc, ae, CTX_ENABLES, val);
   1231  1.1   hikaru 
   1232  1.1   hikaru 		/* copy instructions to ustore */
   1233  1.1   hikaru 		qat_ae_ucode_write(sc, ae, 0, __arraycount(ae_clear_gprs_inst),
   1234  1.1   hikaru 		    ae_clear_gprs_inst);
   1235  1.1   hikaru 
   1236  1.1   hikaru 		/* set PC */
   1237  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT,
   1238  1.1   hikaru 		    UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
   1239  1.1   hikaru 
   1240  1.1   hikaru 		/* save current context */
   1241  1.1   hikaru 		qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx);
   1242  1.1   hikaru 		/* change the active context */
   1243  1.1   hikaru 		/* start the context from ctx 0 */
   1244  1.1   hikaru 		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0);
   1245  1.1   hikaru 
   1246  1.1   hikaru 		/* wakeup-event voluntary */
   1247  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
   1248  1.1   hikaru 		    CTX_WAKEUP_EVENTS_INDIRECT,
   1249  1.1   hikaru 		    CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
   1250  1.1   hikaru 		/* clean signals */
   1251  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
   1252  1.1   hikaru 		    CTX_SIG_EVENTS_INDIRECT, 0);
   1253  1.1   hikaru 		qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
   1254  1.1   hikaru 
   1255  1.1   hikaru 		qat_ae_enable_ctx(sc, ae, AE_ALL_CTX);
   1256  1.1   hikaru 	}
   1257  1.1   hikaru 
   1258  1.1   hikaru 	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
   1259  1.1   hikaru 		if (!(mask & 1))
   1260  1.1   hikaru 			continue;
    1261  1.1   hikaru 		/* wait for AE to finish */
                       		times = TIMEOUT_AE_CHECK;
    1262  1.1   hikaru 		do {
    1263  1.1   hikaru 			rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1);
    1264  1.1   hikaru 		} while (rv && times-- > 0);
    1265  1.1   hikaru 		if (rv) {
    1266  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
    1267  1.1   hikaru 			    "qat_ae_clear_gprs timeout\n");
    1268  1.1   hikaru 			return ETIMEDOUT;
    1269  1.1   hikaru 		}
   1270  1.1   hikaru 		qat_ae_disable_ctx(sc, ae, AE_ALL_CTX);
   1271  1.1   hikaru 		/* change the active context */
   1272  1.1   hikaru 		qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
   1273  1.1   hikaru 		    saved_ctx & ACTIVE_CTX_STATUS_ACNO);
   1274  1.1   hikaru 		/* init the ctx_enable */
   1275  1.1   hikaru 		qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT);
   1276  1.1   hikaru 		/* initialize the PCs */
   1277  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
   1278  1.1   hikaru 		    CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
   1279  1.1   hikaru 		/* init the ctx_arb */
   1280  1.1   hikaru 		qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT);
   1281  1.1   hikaru 		/* enable cc */
   1282  1.1   hikaru 		qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT);
   1283  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
   1284  1.1   hikaru 		    CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT);
   1285  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT,
   1286  1.1   hikaru 		    CTX_SIG_EVENTS_INDIRECT_INIT);
   1287  1.1   hikaru 	}
   1288  1.1   hikaru 
   1289  1.1   hikaru 	return 0;
   1290  1.1   hikaru }
   1291  1.1   hikaru 
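                       /*
                        * AEs are paired for shared ustore: toggling the low bit of the AE
                        * number yields the neighbour that shares the control store, so
                        * AE 2n pairs with AE 2n+1 (e.g. ae 0 <-> 1, ae 5 <-> 4).
                        */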
   1292  1.1   hikaru void
   1293  1.1   hikaru qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae)
   1294  1.1   hikaru {
   1295  1.1   hikaru 	if (ae & 0x1)
   1296  1.1   hikaru 		*nae = ae - 1;
   1297  1.1   hikaru 	else
   1298  1.1   hikaru 		*nae = ae + 1;
   1299  1.1   hikaru }
   1300  1.1   hikaru 
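                       /*
                        * Parity of a 64-bit word by XOR folding: each shift-and-XOR halves
                        * the span, so after six steps bit 0 holds the XOR of all 64 bits.
                        */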
   1301  1.1   hikaru u_int
   1302  1.1   hikaru qat_ae_ucode_parity64(uint64_t ucode)
   1303  1.1   hikaru {
   1304  1.1   hikaru 
   1305  1.1   hikaru 	ucode ^= ucode >> 1;
   1306  1.1   hikaru 	ucode ^= ucode >> 2;
   1307  1.1   hikaru 	ucode ^= ucode >> 4;
   1308  1.1   hikaru 	ucode ^= ucode >> 8;
   1309  1.1   hikaru 	ucode ^= ucode >> 16;
   1310  1.1   hikaru 	ucode ^= ucode >> 32;
   1311  1.1   hikaru 
   1312  1.1   hikaru 	return ((u_int)(ucode & 1));
   1313  1.1   hikaru }
   1314  1.1   hikaru 
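                       /*
                        * Compute the seven ustore ECC check bits of a microword.  Each
                        * check bit is the parity of the microword ANDed with a fixed
                        * generator mask (apparently a Hamming-style code over the low
                        * instruction bits; the mask values come from the hardware
                        * definition).
                        */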
   1315  1.1   hikaru uint64_t
   1316  1.1   hikaru qat_ae_ucode_set_ecc(uint64_t ucode)
   1317  1.1   hikaru {
   1318  1.1   hikaru 	static const uint64_t
   1319  1.1   hikaru 		bit0mask=0xff800007fffULL, bit1mask=0x1f801ff801fULL,
   1320  1.1   hikaru 		bit2mask=0xe387e0781e1ULL, bit3mask=0x7cb8e388e22ULL,
   1321  1.1   hikaru 		bit4mask=0xaf5b2c93244ULL, bit5mask=0xf56d5525488ULL,
   1322  1.1   hikaru 		bit6mask=0xdaf69a46910ULL;
   1323  1.1   hikaru 
   1324  1.1   hikaru 	/* clear the ecc bits */
   1325  1.1   hikaru 	ucode &= ~(0x7fULL << USTORE_ECC_BIT_0);
   1326  1.1   hikaru 
   1327  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) <<
   1328  1.1   hikaru 	    USTORE_ECC_BIT_0;
   1329  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) <<
   1330  1.1   hikaru 	    USTORE_ECC_BIT_1;
   1331  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) <<
   1332  1.1   hikaru 	    USTORE_ECC_BIT_2;
   1333  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) <<
   1334  1.1   hikaru 	    USTORE_ECC_BIT_3;
   1335  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) <<
   1336  1.1   hikaru 	    USTORE_ECC_BIT_4;
   1337  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) <<
   1338  1.1   hikaru 	    USTORE_ECC_BIT_5;
   1339  1.1   hikaru 	ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) <<
   1340  1.1   hikaru 	    USTORE_ECC_BIT_6;
   1341  1.1   hikaru 
   1342  1.1   hikaru 	return (ucode);
   1343  1.1   hikaru }
   1344  1.1   hikaru 
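                       /*
                        * Write ninst 64-bit microwords to the AE's ustore starting at
                        * uaddr.  The address is written once with the ECS bit set (CSR
                        * access to the control store); each word then goes out as two
                        * 32-bit halves, and the USTORE_DATA_UPPER write advances the
                        * address automatically.
                        */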
   1345  1.1   hikaru int
   1346  1.1   hikaru qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
   1347  1.1   hikaru 	const uint64_t *ucode)
   1348  1.1   hikaru {
   1349  1.1   hikaru 	uint64_t tmp;
   1350  1.1   hikaru 	uint32_t ustore_addr, ulo, uhi;
   1351  1.1   hikaru 	int i;
   1352  1.1   hikaru 
   1353  1.1   hikaru 	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
   1354  1.1   hikaru 	uaddr |= USTORE_ADDRESS_ECS;
   1355  1.1   hikaru 
   1356  1.1   hikaru 	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
   1357  1.1   hikaru 	for (i = 0; i < ninst; i++) {
   1358  1.1   hikaru 		tmp = qat_ae_ucode_set_ecc(ucode[i]);
   1359  1.1   hikaru 		ulo = (uint32_t)(tmp & 0xffffffff);
   1360  1.1   hikaru 		uhi = (uint32_t)(tmp >> 32);
   1361  1.1   hikaru 
   1362  1.1   hikaru 		qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
    1363  1.1   hikaru 		/* this will auto-increment the address */
   1364  1.1   hikaru 		qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
   1365  1.1   hikaru 
   1366  1.1   hikaru 		QAT_YIELD();
   1367  1.1   hikaru 	}
   1368  1.1   hikaru 	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
   1369  1.1   hikaru 
   1370  1.1   hikaru 	return 0;
   1371  1.1   hikaru }
   1372  1.1   hikaru 
   1373  1.1   hikaru int
   1374  1.1   hikaru qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
   1375  1.1   hikaru     uint64_t *ucode)
   1376  1.1   hikaru {
   1377  1.1   hikaru 	uint32_t misc, ustore_addr, ulo, uhi;
   1378  1.1   hikaru 	u_int ii;
   1379  1.1   hikaru 	u_char nae;
   1380  1.1   hikaru 
   1381  1.1   hikaru 	if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
   1382  1.1   hikaru 		return EBUSY;
   1383  1.1   hikaru 
    1384  1.1   hikaru 	/* determine whether the neighbour AE runs in shared control
    1385  1.1   hikaru 	 * store mode */
   1386  1.1   hikaru 	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
   1387  1.1   hikaru 	if (misc & AE_MISC_CONTROL_SHARE_CS) {
   1388  1.1   hikaru 		qat_ae_get_shared_ustore_ae(ae, &nae);
   1389  1.1   hikaru 		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
   1390  1.1   hikaru 			return EBUSY;
   1391  1.1   hikaru 	}
   1392  1.1   hikaru 
   1393  1.1   hikaru 	/* if reloadable, then get it all from dram-ustore */
   1394  1.1   hikaru 	if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD))
   1395  1.1   hikaru 		panic("notyet"); /* XXX getReloadUwords */
   1396  1.1   hikaru 
    1397  1.1   hikaru 	/* disable SHARE_CS bit to work around a silicon bug */
   1398  1.1   hikaru 	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb);
   1399  1.1   hikaru 
   1400  1.1   hikaru 	KASSERT(uaddr + ninst <= USTORE_SIZE);
   1401  1.1   hikaru 
   1402  1.1   hikaru 	/* save ustore-addr csr */
   1403  1.1   hikaru 	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
   1404  1.1   hikaru 
   1405  1.1   hikaru 	uaddr |= USTORE_ADDRESS_ECS;	/* enable ecs bit */
   1406  1.1   hikaru 	for (ii = 0; ii < ninst; ii++) {
   1407  1.1   hikaru 		qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
   1408  1.1   hikaru 
   1409  1.1   hikaru 		uaddr++;
   1410  1.1   hikaru 		qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo);
   1411  1.1   hikaru 		qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi);
   1412  1.1   hikaru 		ucode[ii] = uhi;
   1413  1.1   hikaru 		ucode[ii] = (ucode[ii] << 32) | ulo;
   1414  1.1   hikaru 	}
   1415  1.1   hikaru 
    1416  1.1   hikaru 	/* restore SHARE_CS bit to work around a silicon bug */
   1417  1.1   hikaru 	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
   1418  1.1   hikaru 	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
   1419  1.1   hikaru 
   1420  1.1   hikaru 	return 0;
   1421  1.1   hikaru }
   1422  1.1   hikaru 
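                       /*
                        * Append a snippet to ucode[] (starting at index ninst) that stores
                        * size bytes of value[] at local-memory address addr.  A template
                        * for 1/2/3/4-byte stores is copied in and its immediate fields are
                        * patched with addr and the data; each further full word costs
                        * three more patched instructions, and a trailing partial word is
                        * handled by the recursive call at the end.  Returns the number of
                        * instructions appended.
                        */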
   1423  1.1   hikaru u_int
   1424  1.1   hikaru qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr,
   1425  1.1   hikaru     u_int *value)
   1426  1.1   hikaru {
   1427  1.1   hikaru 	const uint64_t *inst_arr;
   1428  1.1   hikaru 	u_int ninst0, curvalue;
   1429  1.1   hikaru 	int ii, vali, fixup, usize = 0;
   1430  1.1   hikaru 
   1431  1.1   hikaru 	if (size == 0)
   1432  1.1   hikaru 		return 0;
   1433  1.1   hikaru 
   1434  1.1   hikaru 	ninst0 = ninst;
   1435  1.1   hikaru 	vali = 0;
   1436  1.1   hikaru 	curvalue = value[vali++];
   1437  1.1   hikaru 
   1438  1.1   hikaru 	switch (size) {
   1439  1.1   hikaru 	case 0x1:
   1440  1.1   hikaru 		inst_arr = ae_inst_1b;
   1441  1.1   hikaru 		usize = __arraycount(ae_inst_1b);
   1442  1.1   hikaru 		break;
   1443  1.1   hikaru 	case 0x2:
   1444  1.1   hikaru 		inst_arr = ae_inst_2b;
   1445  1.1   hikaru 		usize = __arraycount(ae_inst_2b);
   1446  1.1   hikaru 		break;
   1447  1.1   hikaru 	case 0x3:
   1448  1.1   hikaru 		inst_arr = ae_inst_3b;
   1449  1.1   hikaru 		usize = __arraycount(ae_inst_3b);
   1450  1.1   hikaru 		break;
   1451  1.1   hikaru 	default:
   1452  1.1   hikaru 		inst_arr = ae_inst_4b;
   1453  1.1   hikaru 		usize = __arraycount(ae_inst_4b);
   1454  1.1   hikaru 		break;
   1455  1.1   hikaru 	}
   1456  1.1   hikaru 
   1457  1.1   hikaru 	fixup = ninst;
   1458  1.1   hikaru 	for (ii = 0; ii < usize; ii++)
   1459  1.1   hikaru 		ucode[ninst++] = inst_arr[ii];
   1460  1.1   hikaru 
   1461  1.1   hikaru 	INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr));
   1462  1.1   hikaru 	fixup++;
   1463  1.1   hikaru 	INSERT_IMMED_GPRA_CONST(ucode[fixup], 0);
   1464  1.1   hikaru 	fixup++;
   1465  1.1   hikaru 	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
   1466  1.1   hikaru 	fixup++;
   1467  1.1   hikaru 	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
   1468  1.1   hikaru 	/* XXX fixup++ ? */
   1469  1.1   hikaru 
   1470  1.1   hikaru 	if (size <= 0x4)
   1471  1.1   hikaru 		return (ninst - ninst0);
   1472  1.1   hikaru 
   1473  1.1   hikaru 	size -= sizeof(u_int);
   1474  1.1   hikaru 	while (size >= sizeof(u_int)) {
   1475  1.1   hikaru 		curvalue = value[vali++];
   1476  1.1   hikaru 		fixup = ninst;
   1477  1.1   hikaru 		ucode[ninst++] = ae_inst_4b[0x2];
   1478  1.1   hikaru 		ucode[ninst++] = ae_inst_4b[0x3];
   1479  1.1   hikaru 		ucode[ninst++] = ae_inst_4b[0x8];
   1480  1.1   hikaru 		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
   1481  1.1   hikaru 		fixup++;
   1482  1.1   hikaru 		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
   1483  1.1   hikaru 		/* XXX fixup++ ? */
   1484  1.1   hikaru 
   1485  1.1   hikaru 		addr += sizeof(u_int);
   1486  1.1   hikaru 		size -= sizeof(u_int);
   1487  1.1   hikaru 	}
    1488  1.2  msaitoh 	/* call this function recursively when the remaining size is less than 4 */
   1489  1.1   hikaru 	ninst +=
   1490  1.1   hikaru 	    qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali);
   1491  1.1   hikaru 
   1492  1.1   hikaru 	return (ninst - ninst0);
   1493  1.1   hikaru }
   1494  1.1   hikaru 
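                       /*
                        * Execute an arbitrary microcode snippet on one context of an AE.
                        * The context's LM addresses, wakeup/signal events, PC, CC, active
                        * context, and the ustore words about to be overwritten are all
                        * saved; the snippet is loaded at uPC 0 and the context is run
                        * until it goes to sleep or max_cycles expires; then everything is
                        * restored.  The AE must be idle on entry.
                        */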
   1495  1.1   hikaru int
   1496  1.1   hikaru qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx,
   1497  1.1   hikaru     uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles,
   1498  1.1   hikaru     u_int *endpc)
   1499  1.1   hikaru {
   1500  1.1   hikaru 	int error = 0, share_cs = 0;
   1501  1.1   hikaru 	uint64_t savucode[MAX_EXEC_INST];
   1502  1.1   hikaru 	uint32_t indr_lm_addr_0, indr_lm_addr_1;
   1503  1.1   hikaru 	uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1;
   1504  1.1   hikaru 	uint32_t indr_future_cnt_sig;
   1505  1.1   hikaru 	uint32_t indr_sig, active_sig;
   1506  1.1   hikaru 	uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl;
   1507  1.1   hikaru 	uint32_t misc, nmisc, ctxen;
   1508  1.1   hikaru 	u_char nae;
   1509  1.1   hikaru 
   1510  1.1   hikaru 	KASSERT(ninst <= USTORE_SIZE);
   1511  1.1   hikaru 
   1512  1.1   hikaru 	if (qat_ae_is_active(sc, ae))
   1513  1.1   hikaru 		return EBUSY;
   1514  1.1   hikaru 
   1515  1.1   hikaru #if 0
   1516  1.1   hikaru 	printf("%s: ae %d ctx %d ninst %d code 0x%016llx 0x%016llx\n",
   1517  1.1   hikaru 	    __func__, ae, ctx, ninst, ucode[0], ucode[ninst-1]);
   1518  1.1   hikaru #endif
   1519  1.1   hikaru 
   1520  1.1   hikaru 	/* save current LM addr */
   1521  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0);
   1522  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1);
   1523  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
   1524  1.1   hikaru 	    &indr_lm_addr_byte_0);
   1525  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
   1526  1.1   hikaru 	    &indr_lm_addr_byte_1);
   1527  1.1   hikaru 
    1528  1.1   hikaru 	/* back up the shared control store bit, and force the AE into
    1529  1.1   hikaru 	   non-shared mode before executing the ucode snippet */
   1530  1.1   hikaru 	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
   1531  1.1   hikaru 	if (misc & AE_MISC_CONTROL_SHARE_CS) {
   1532  1.1   hikaru 		share_cs = 1;
   1533  1.1   hikaru 		qat_ae_get_shared_ustore_ae(ae, &nae);
   1534  1.1   hikaru 		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
   1535  1.1   hikaru 			return EBUSY;
   1536  1.1   hikaru 	}
   1537  1.1   hikaru 	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
   1538  1.1   hikaru 	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
   1539  1.1   hikaru 
   1540  1.1   hikaru 	/* save current states: */
   1541  1.1   hikaru 	if (ninst <= MAX_EXEC_INST) {
   1542  1.1   hikaru 		error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode);
   1543  1.1   hikaru 		if (error) {
   1544  1.1   hikaru 			qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
   1545  1.1   hikaru 			return error;
   1546  1.1   hikaru 		}
   1547  1.1   hikaru 	}
   1548  1.1   hikaru 
   1549  1.1   hikaru 	/* save wakeup-events */
   1550  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT,
   1551  1.1   hikaru 	    &wakeup_ev);
   1552  1.1   hikaru 	/* save PC */
   1553  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc);
   1554  1.1   hikaru 	savpc &= UPC_MASK;
   1555  1.1   hikaru 
   1556  1.1   hikaru 	/* save ctx enables */
   1557  1.1   hikaru 	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
   1558  1.1   hikaru 	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
   1559  1.1   hikaru 	/* save conditional-code */
   1560  1.1   hikaru 	qat_ae_read_4(sc, ae, CC_ENABLE, &savcc);
   1561  1.1   hikaru 	/* save current context */
   1562  1.1   hikaru 	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
   1563  1.1   hikaru 	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
   1564  1.1   hikaru 
   1565  1.1   hikaru 	/* save indirect csrs */
   1566  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
   1567  1.1   hikaru 	    &indr_future_cnt_sig);
   1568  1.1   hikaru 	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig);
   1569  1.1   hikaru 	qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig);
   1570  1.1   hikaru 
   1571  1.1   hikaru 	/* turn off ucode parity */
   1572  1.1   hikaru 	qat_ae_write_4(sc, ae, CTX_ENABLES,
   1573  1.1   hikaru 	    ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE);
   1574  1.1   hikaru 
   1575  1.1   hikaru 	/* copy instructions to ustore */
   1576  1.1   hikaru 	qat_ae_ucode_write(sc, ae, 0, ninst, ucode);
   1577  1.1   hikaru 	/* set PC */
   1578  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0);
   1579  1.1   hikaru 	/* change the active context */
   1580  1.1   hikaru 	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
   1581  1.1   hikaru 	    ctx & ACTIVE_CTX_STATUS_ACNO);
   1582  1.1   hikaru 
   1583  1.1   hikaru 	if (cond_code_off) {
    1584  1.1   hikaru 		/* disable conditional-code */
   1585  1.1   hikaru 		qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff);
   1586  1.1   hikaru 	}
   1587  1.1   hikaru 
   1588  1.1   hikaru 	/* wakeup-event voluntary */
   1589  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx,
   1590  1.1   hikaru 	    CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
   1591  1.1   hikaru 
   1592  1.1   hikaru 	/* clean signals */
   1593  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0);
   1594  1.1   hikaru 	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
   1595  1.1   hikaru 
   1596  1.1   hikaru 	/* enable context */
   1597  1.1   hikaru 	qat_ae_enable_ctx(sc, ae, 1 << ctx);
   1598  1.1   hikaru 
   1599  1.1   hikaru 	/* wait for it to finish */
   1600  1.1   hikaru 	if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) != 0)
   1601  1.1   hikaru 		error = ETIMEDOUT;
   1602  1.1   hikaru 
   1603  1.1   hikaru 	/* see if we need to get the current PC */
   1604  1.1   hikaru 	if (endpc != NULL) {
   1605  1.1   hikaru 		uint32_t ctx_status;
   1606  1.1   hikaru 
   1607  1.1   hikaru 		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
   1608  1.1   hikaru 		    &ctx_status);
   1609  1.1   hikaru 		*endpc = ctx_status & UPC_MASK;
   1610  1.1   hikaru 	}
   1611  1.1   hikaru #if 0
   1612  1.1   hikaru 	{
   1613  1.1   hikaru 		uint32_t ctx_status;
   1614  1.1   hikaru 
   1615  1.1   hikaru 		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
   1616  1.1   hikaru 		    &ctx_status);
   1617  1.1   hikaru 		printf("%s: endpc 0x%08x\n", __func__,
   1618  1.1   hikaru 		    ctx_status & UPC_MASK);
   1619  1.1   hikaru 	}
   1620  1.1   hikaru #endif
   1621  1.1   hikaru 
    1622  1.1   hikaru 	/* restore previous states: */
   1623  1.1   hikaru 	/* disable context */
   1624  1.1   hikaru 	qat_ae_disable_ctx(sc, ae, 1 << ctx);
   1625  1.1   hikaru 	if (ninst <= MAX_EXEC_INST) {
   1626  1.1   hikaru 		/* instructions */
   1627  1.1   hikaru 		qat_ae_ucode_write(sc, ae, 0, ninst, savucode);
   1628  1.1   hikaru 	}
   1629  1.1   hikaru 	/* wakeup-events */
   1630  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT,
   1631  1.1   hikaru 	    wakeup_ev);
   1632  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc);
   1633  1.1   hikaru 
    1634  1.1   hikaru 	/* only restore the shared control store bit; other bits may
    1635  1.1   hikaru 	   have been changed by the AE code snippet */
   1636  1.1   hikaru 	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
   1637  1.1   hikaru 	if (share_cs)
   1638  1.1   hikaru 		nmisc = misc | AE_MISC_CONTROL_SHARE_CS;
   1639  1.1   hikaru 	else
   1640  1.1   hikaru 		nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
   1641  1.1   hikaru 	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
   1642  1.1   hikaru 	/* conditional-code */
   1643  1.1   hikaru 	qat_ae_write_4(sc, ae, CC_ENABLE, savcc);
   1644  1.1   hikaru 	/* change the active context */
   1645  1.1   hikaru 	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
   1646  1.1   hikaru 	    savctx & ACTIVE_CTX_STATUS_ACNO);
    1647  1.1   hikaru 	/* restore the next ctx to run */
   1648  1.1   hikaru 	qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
   1649  1.1   hikaru 	/* restore current LM addr */
   1650  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT,
   1651  1.1   hikaru 	    indr_lm_addr_0);
   1652  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT,
   1653  1.1   hikaru 	    indr_lm_addr_1);
   1654  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
   1655  1.1   hikaru 	    indr_lm_addr_byte_0);
   1656  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
   1657  1.1   hikaru 	    indr_lm_addr_byte_1);
   1658  1.1   hikaru 
   1659  1.1   hikaru 	/* restore indirect csrs */
   1660  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
   1661  1.1   hikaru 	    indr_future_cnt_sig);
   1662  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT,
   1663  1.1   hikaru 	    indr_sig);
   1664  1.1   hikaru 	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig);
   1665  1.1   hikaru 
   1666  1.1   hikaru 	/* ctx-enables */
   1667  1.1   hikaru 	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
   1668  1.1   hikaru 
   1669  1.1   hikaru 	return error;
   1670  1.1   hikaru }
   1671  1.1   hikaru 
   1672  1.1   hikaru int
   1673  1.1   hikaru qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx,
   1674  1.1   hikaru     int *first_exec, uint64_t *ucode, u_int ninst,
   1675  1.1   hikaru     u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1)
   1676  1.1   hikaru {
   1677  1.1   hikaru 
   1678  1.1   hikaru 	if (*first_exec) {
   1679  1.1   hikaru 		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
   1680  1.1   hikaru 		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
   1681  1.1   hikaru 		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
   1682  1.1   hikaru 		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
   1683  1.1   hikaru 		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
   1684  1.1   hikaru 		*first_exec = 0;
   1685  1.1   hikaru 	}
   1686  1.1   hikaru 
   1687  1.1   hikaru 	return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL);
   1688  1.1   hikaru }
   1689  1.1   hikaru 
   1690  1.1   hikaru int
   1691  1.1   hikaru qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx,
   1692  1.1   hikaru     u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1)
   1693  1.1   hikaru {
   1694  1.1   hikaru 	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
   1695  1.1   hikaru 	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
   1696  1.1   hikaru 	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
   1697  1.1   hikaru 	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
   1698  1.1   hikaru 	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
   1699  1.1   hikaru 
   1700  1.1   hikaru 	return 0;
   1701  1.1   hikaru }
   1702  1.1   hikaru 
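                       /*
                        * Number of instructions qat_ae_concat_ucode() will emit for an
                        * lmsize-byte init: the first full word costs the whole 4-byte
                        * template, each further word three instructions, and a trailing
                        * partial word adds __arraycount(ae_inst_1b) (the 1-3 byte
                        * templates presumably emit the same number of instructions).
                        * E.g. lmsize = 6 yields
                        * __arraycount(ae_inst_1b) + __arraycount(ae_inst_4b).
                        */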
   1703  1.1   hikaru int
   1704  1.1   hikaru qat_ae_get_inst_num(int lmsize)
   1705  1.1   hikaru {
   1706  1.1   hikaru 	int ninst, left;
   1707  1.1   hikaru 
   1708  1.1   hikaru 	if (lmsize == 0)
   1709  1.1   hikaru 		return 0;
   1710  1.1   hikaru 
   1711  1.1   hikaru 	left = lmsize % sizeof(u_int);
   1712  1.1   hikaru 
   1713  1.1   hikaru 	if (left) {
   1714  1.1   hikaru 		ninst = __arraycount(ae_inst_1b) +
   1715  1.1   hikaru 		    qat_ae_get_inst_num(lmsize - left);
   1716  1.1   hikaru 	} else {
    1717  1.1   hikaru 		/* 3 more instructions are needed for each further word */
   1718  1.1   hikaru 		ninst = (lmsize - sizeof(u_int)) * 3 / 4 +
   1719  1.1   hikaru 		    __arraycount(ae_inst_4b);
   1720  1.1   hikaru 	}
   1721  1.1   hikaru 
   1722  1.1   hikaru 	return (ninst);
   1723  1.1   hikaru }
   1724  1.1   hikaru 
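                       /*
                        * Flush a list of batched local-memory initializations through AE
                        * ae: store snippets are concatenated into one code page and,
                        * whenever the page would overflow (and once at the end), the page
                        * is terminated with a ctx_arb[kill] word and run on context 0 via
                        * qat_ae_exec_ucode_init_lm().  The GPRs the snippets clobber are
                        * saved on the first execution and restored afterwards.
                        */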
   1725  1.1   hikaru int
   1726  1.1   hikaru qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae,
   1727  1.1   hikaru     struct qat_ae_batch_init_list *qabi_list, size_t nqabi)
   1728  1.1   hikaru {
   1729  1.1   hikaru 	struct qat_ae_batch_init *qabi;
   1730  1.1   hikaru 	size_t alloc_ninst, ninst;
   1731  1.1   hikaru 	uint64_t *ucode;
   1732  1.1   hikaru 	u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1;
   1733  1.1   hikaru 	int insnsz, error = 0, execed = 0, first_exec = 1;
   1734  1.1   hikaru 
   1735  1.1   hikaru 	if (SIMPLEQ_FIRST(qabi_list) == NULL)
   1736  1.1   hikaru 		return 0;
   1737  1.1   hikaru 
   1738  1.1   hikaru 	alloc_ninst = uimin(USTORE_SIZE, nqabi);
   1739  1.1   hikaru 	ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst);
   1740  1.1   hikaru 
   1741  1.1   hikaru 	ninst = 0;
   1742  1.1   hikaru 	SIMPLEQ_FOREACH(qabi, qabi_list, qabi_next) {
   1743  1.1   hikaru 		insnsz = qat_ae_get_inst_num(qabi->qabi_size);
   1744  1.1   hikaru 		if (insnsz + ninst > alloc_ninst) {
   1745  1.1   hikaru 			aprint_debug_dev(sc->sc_dev,
    1746  1.1   hikaru 			    "code page is full, calling execution unit\n");
   1747  1.1   hikaru 			/* add ctx_arb[kill] */
   1748  1.1   hikaru 			ucode[ninst++] = 0x0E000010000ull;
   1749  1.1   hikaru 			execed = 1;
   1750  1.1   hikaru 
   1751  1.1   hikaru 			error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
   1752  1.1   hikaru 			    &first_exec, ucode, ninst,
   1753  1.1   hikaru 			    &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
   1754  1.1   hikaru 			if (error) {
   1755  1.1   hikaru 				qat_ae_restore_init_lm_gprs(sc, ae, 0,
   1756  1.1   hikaru 				    gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
   1757  1.1   hikaru 				qat_free_mem(ucode);
   1758  1.1   hikaru 				return error;
   1759  1.1   hikaru 			}
   1760  1.1   hikaru 			/* run microExec to execute the microcode */
   1761  1.1   hikaru 			ninst = 0;
   1762  1.1   hikaru 		}
   1763  1.1   hikaru 		ninst += qat_ae_concat_ucode(ucode, ninst,
   1764  1.1   hikaru 		    qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value);
   1765  1.1   hikaru 	}
   1766  1.1   hikaru 
   1767  1.1   hikaru 	if (ninst > 0) {
   1768  1.1   hikaru 		ucode[ninst++] = 0x0E000010000ull;
   1769  1.1   hikaru 		execed = 1;
   1770  1.1   hikaru 
   1771  1.1   hikaru 		error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
   1772  1.1   hikaru 		    &first_exec, ucode, ninst,
   1773  1.1   hikaru 		    &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
   1774  1.1   hikaru 	}
   1775  1.1   hikaru 	if (execed) {
   1776  1.1   hikaru 		qat_ae_restore_init_lm_gprs(sc, ae, 0,
   1777  1.1   hikaru 		    gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
   1778  1.1   hikaru 	}
   1779  1.1   hikaru 
   1780  1.1   hikaru 	qat_free_mem(ucode);
   1781  1.1   hikaru 
   1782  1.1   hikaru 	return error;
   1783  1.1   hikaru }
   1784  1.1   hikaru 
   1785  1.1   hikaru int
   1786  1.1   hikaru qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc)
   1787  1.1   hikaru {
   1788  1.1   hikaru 
   1789  1.1   hikaru 	if (qat_ae_is_active(sc, ae))
   1790  1.1   hikaru 		return EBUSY;
   1791  1.1   hikaru 
   1792  1.1   hikaru 	qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT,
   1793  1.1   hikaru 	    UPC_MASK & upc);
   1794  1.1   hikaru 	return 0;
   1795  1.1   hikaru }
   1796  1.1   hikaru 
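                       /*
                        * Clock one input byte through an MSB-first CRC of CRC_WIDTH bits
                        * with polynomial CRC_POLY: the byte is XORed into the top of the
                        * shift register, which is then shifted eight times.
                        */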
   1797  1.1   hikaru static inline u_int
   1798  1.1   hikaru qat_aefw_csum_calc(u_int reg, int ch)
   1799  1.1   hikaru {
   1800  1.1   hikaru 	int i;
   1801  1.1   hikaru 	u_int topbit = CRC_BITMASK(CRC_WIDTH - 1);
   1802  1.1   hikaru 	u_int inbyte = (u_int)((reg >> 0x18) ^ ch);
   1803  1.1   hikaru 
   1804  1.1   hikaru 	reg ^= inbyte << (CRC_WIDTH - 0x8);
   1805  1.1   hikaru 	for (i = 0; i < 0x8; i++) {
   1806  1.1   hikaru 		if (reg & topbit)
   1807  1.1   hikaru 			reg = (reg << 1) ^ CRC_POLY;
   1808  1.1   hikaru 		else
   1809  1.1   hikaru 			reg <<= 1;
   1810  1.1   hikaru 	}
   1811  1.1   hikaru 
   1812  1.1   hikaru 	return (reg & CRC_WIDTHMASK(CRC_WIDTH));
   1813  1.1   hikaru }
   1814  1.1   hikaru 
   1815  1.1   hikaru u_int
   1816  1.1   hikaru qat_aefw_csum(char *buf, int size)
   1817  1.1   hikaru {
   1818  1.1   hikaru 	u_int csum = 0;
   1819  1.1   hikaru 
   1820  1.1   hikaru 	while (size--) {
   1821  1.1   hikaru 		csum = qat_aefw_csum_calc(csum, *buf++);
   1822  1.1   hikaru 	}
   1823  1.1   hikaru 
   1824  1.1   hikaru 	return csum;
   1825  1.1   hikaru }
   1826  1.1   hikaru 
   1827  1.1   hikaru const char *
   1828  1.1   hikaru qat_aefw_uof_string(struct qat_softc *sc, size_t offset)
   1829  1.1   hikaru {
   1830  1.1   hikaru 	if (offset >= sc->sc_aefw_uof.qafu_str_tab_size)
   1831  1.1   hikaru 		return NULL;
   1832  1.1   hikaru 	if (sc->sc_aefw_uof.qafu_str_tab == NULL)
   1833  1.1   hikaru 		return NULL;
   1834  1.1   hikaru 
   1835  1.1   hikaru 	return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset);
   1836  1.1   hikaru }
   1837  1.1   hikaru 
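                       /*
                        * Walk the chunk headers following the UOF object header and return
                        * the first chunk past cur whose id matches; pass cur = NULL to
                        * start from the beginning.  A chunk extending beyond the mapped
                        * UOF aborts the search.
                        */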
   1838  1.1   hikaru struct uof_chunk_hdr *
   1839  1.1   hikaru qat_aefw_uof_find_chunk(struct qat_softc *sc,
   1840  1.1   hikaru 	const char *id, struct uof_chunk_hdr *cur)
   1841  1.1   hikaru {
   1842  1.1   hikaru 	struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr;
   1843  1.1   hikaru 	struct uof_chunk_hdr *uch;
   1844  1.1   hikaru 	int i;
   1845  1.1   hikaru 
   1846  1.1   hikaru 	uch = (struct uof_chunk_hdr *)(uoh + 1);
   1847  1.1   hikaru 	for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) {
   1848  1.1   hikaru 		if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size)
   1849  1.1   hikaru 			return NULL;
   1850  1.1   hikaru 
   1851  1.1   hikaru 		if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN))
   1852  1.1   hikaru 			return uch;
   1853  1.1   hikaru 	}
   1854  1.1   hikaru 
   1855  1.1   hikaru 	return NULL;
   1856  1.1   hikaru }
   1857  1.1   hikaru 
   1858  1.1   hikaru int
   1859  1.1   hikaru qat_aefw_load_mof(struct qat_softc *sc)
   1860  1.1   hikaru {
   1861  1.1   hikaru 	int error = 0;
   1862  1.1   hikaru 	firmware_handle_t fh = NULL;
   1863  1.1   hikaru 	off_t fwsize;
   1864  1.1   hikaru 
   1865  1.1   hikaru 	/* load MOF firmware */
   1866  1.1   hikaru 	error = firmware_open("qat", sc->sc_hw.qhw_mof_fwname, &fh);
   1867  1.1   hikaru 	if (error) {
   1868  1.1   hikaru 		aprint_error_dev(sc->sc_dev, "couldn't load mof firmware %s\n",
   1869  1.1   hikaru 		    sc->sc_hw.qhw_mof_fwname);
   1870  1.1   hikaru 		goto fail;
   1871  1.1   hikaru 	}
   1872  1.1   hikaru 
   1873  1.1   hikaru 	fwsize = firmware_get_size(fh);
   1874  1.1   hikaru 	if (fwsize == 0 || fwsize > SIZE_MAX) {
   1875  1.1   hikaru 		error = EINVAL;
   1876  1.1   hikaru 		goto fail;
   1877  1.1   hikaru 	}
   1878  1.1   hikaru 	sc->sc_fw_mof_size = fwsize;
   1879  1.1   hikaru 	sc->sc_fw_mof = firmware_malloc(sc->sc_fw_mof_size);
   1880  1.1   hikaru 
   1881  1.1   hikaru 	error = firmware_read(fh, 0, sc->sc_fw_mof, sc->sc_fw_mof_size);
   1882  1.1   hikaru 	if (error)
   1883  1.1   hikaru 		goto fail;
   1884  1.1   hikaru 
   1885  1.1   hikaru out:
   1886  1.1   hikaru 	if (fh != NULL)
   1887  1.1   hikaru 		firmware_close(fh);
   1888  1.1   hikaru 	return error;
   1889  1.1   hikaru fail:
   1890  1.1   hikaru 	if (sc->sc_fw_mof != NULL) {
   1891  1.1   hikaru 		firmware_free(sc->sc_fw_mof, sc->sc_fw_mof_size);
   1892  1.1   hikaru 		sc->sc_fw_mof = NULL;
   1893  1.1   hikaru 	}
   1894  1.1   hikaru 	goto out;
   1895  1.1   hikaru }
   1896  1.1   hikaru 
   1897  1.1   hikaru int
   1898  1.1   hikaru qat_aefw_load_mmp(struct qat_softc *sc)
   1899  1.1   hikaru {
   1900  1.1   hikaru 	int error = 0;
   1901  1.1   hikaru 	firmware_handle_t fh = NULL;
   1902  1.1   hikaru 	off_t fwsize;
   1903  1.1   hikaru 
   1904  1.1   hikaru 	error = firmware_open("qat", sc->sc_hw.qhw_mmp_fwname, &fh);
   1905  1.1   hikaru 	if (error) {
   1906  1.1   hikaru 		aprint_error_dev(sc->sc_dev, "couldn't load mmp firmware %s\n",
   1907  1.1   hikaru 		    sc->sc_hw.qhw_mmp_fwname);
   1908  1.1   hikaru 		goto fail;
   1909  1.1   hikaru 	}
   1910  1.1   hikaru 
   1911  1.1   hikaru 	fwsize = firmware_get_size(fh);
   1912  1.1   hikaru 	if (fwsize == 0 || fwsize > SIZE_MAX) {
   1913  1.1   hikaru 		error = EINVAL;
   1914  1.1   hikaru 		goto fail;
   1915  1.1   hikaru 	}
   1916  1.1   hikaru 	sc->sc_fw_mmp_size = fwsize;
   1917  1.1   hikaru 	sc->sc_fw_mmp = firmware_malloc(sc->sc_fw_mmp_size);
   1918  1.1   hikaru 
   1919  1.1   hikaru 	error = firmware_read(fh, 0, sc->sc_fw_mmp, sc->sc_fw_mmp_size);
   1920  1.1   hikaru 	if (error)
   1921  1.1   hikaru 		goto fail;
   1922  1.1   hikaru 
   1923  1.1   hikaru out:
   1924  1.1   hikaru 	if (fh != NULL)
   1925  1.1   hikaru 		firmware_close(fh);
   1926  1.1   hikaru 	return error;
   1927  1.1   hikaru fail:
   1928  1.1   hikaru 	if (sc->sc_fw_mmp != NULL) {
   1929  1.1   hikaru 		firmware_free(sc->sc_fw_mmp, sc->sc_fw_mmp_size);
   1930  1.1   hikaru 		sc->sc_fw_mmp = NULL;
   1931  1.1   hikaru 	}
   1932  1.1   hikaru 	goto out;
   1933  1.1   hikaru }
   1934  1.1   hikaru 
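                       /*
                        * Scan nchunk MOF chunk headers at head for the chunk whose
                        * symbol-table name equals sc_fw_uof_name, bounds-checking each
                        * chunk against the containing object of the given size; on a
                        * match, *fwptr/*fwsize are pointed at the embedded (S)UOF image.
                        */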
   1935  1.1   hikaru int
   1936  1.1   hikaru qat_aefw_mof_find_uof0(struct qat_softc *sc,
   1937  1.1   hikaru 	struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head,
   1938  1.1   hikaru 	u_int nchunk, size_t size, const char *id,
   1939  1.1   hikaru 	size_t *fwsize, void **fwptr)
   1940  1.1   hikaru {
   1941  1.1   hikaru 	int i;
   1942  1.1   hikaru 	char *uof_name;
   1943  1.1   hikaru 
   1944  1.1   hikaru 	for (i = 0; i < nchunk; i++) {
   1945  1.1   hikaru 		struct mof_uof_chunk_hdr *much = &head[i];
   1946  1.1   hikaru 
   1947  1.1   hikaru 		if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN))
   1948  1.1   hikaru 			return EINVAL;
   1949  1.1   hikaru 
   1950  1.1   hikaru 		if (much->much_offset + much->much_size > size)
   1951  1.1   hikaru 			return EINVAL;
   1952  1.1   hikaru 
   1953  1.1   hikaru 		if (sc->sc_mof.qmf_sym_size <= much->much_name)
   1954  1.1   hikaru 			return EINVAL;
   1955  1.1   hikaru 
   1956  1.1   hikaru 		uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym +
   1957  1.1   hikaru 		    much->much_name);
   1958  1.1   hikaru 
   1959  1.1   hikaru 		if (!strcmp(uof_name, sc->sc_fw_uof_name)) {
   1960  1.1   hikaru 			*fwptr = (void *)((uintptr_t)muh +
   1961  1.1   hikaru 			    (uintptr_t)much->much_offset);
   1962  1.1   hikaru 			*fwsize = (size_t)much->much_size;
   1963  1.1   hikaru 			aprint_verbose_dev(sc->sc_dev,
    1964  1.1   hikaru 			    "%s obj %s at %p size 0x%zx\n",
   1965  1.1   hikaru 			    id, uof_name, *fwptr, *fwsize);
   1966  1.1   hikaru 			return 0;
   1967  1.1   hikaru 		}
   1968  1.1   hikaru 	}
   1969  1.1   hikaru 
   1970  1.1   hikaru 	return ENOENT;
   1971  1.1   hikaru }
   1972  1.1   hikaru 
   1973  1.1   hikaru int
   1974  1.1   hikaru qat_aefw_mof_find_uof(struct qat_softc *sc)
   1975  1.1   hikaru {
   1976  1.1   hikaru 	struct mof_uof_hdr *uof_hdr, *suof_hdr;
   1977  1.1   hikaru 	u_int nuof_chunks = 0, nsuof_chunks = 0;
   1978  1.1   hikaru 	int error;
   1979  1.1   hikaru 
   1980  1.1   hikaru 	uof_hdr = sc->sc_mof.qmf_uof_objs;
   1981  1.1   hikaru 	suof_hdr = sc->sc_mof.qmf_suof_objs;
   1982  1.1   hikaru 
   1983  1.1   hikaru 	if (uof_hdr != NULL) {
   1984  1.1   hikaru 		if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) {
   1985  1.1   hikaru 			return EINVAL;
   1986  1.1   hikaru 		}
   1987  1.1   hikaru 		nuof_chunks = uof_hdr->muh_num_chunks;
   1988  1.1   hikaru 	}
   1989  1.1   hikaru 	if (suof_hdr != NULL) {
   1990  1.1   hikaru 		if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks)
   1991  1.1   hikaru 			return EINVAL;
   1992  1.1   hikaru 		nsuof_chunks = suof_hdr->muh_num_chunks;
   1993  1.1   hikaru 	}
   1994  1.1   hikaru 
   1995  1.1   hikaru 	if (nuof_chunks + nsuof_chunks == 0)
   1996  1.1   hikaru 		return EINVAL;
   1997  1.1   hikaru 
   1998  1.1   hikaru 	if (uof_hdr != NULL) {
   1999  1.1   hikaru 		error = qat_aefw_mof_find_uof0(sc, uof_hdr,
   2000  1.1   hikaru 		    (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks,
   2001  1.1   hikaru 		    sc->sc_mof.qmf_uof_objs_size, UOF_IMAG,
   2002  1.1   hikaru 		    &sc->sc_fw_uof_size, &sc->sc_fw_uof);
   2003  1.1   hikaru 		if (error && error != ENOENT)
   2004  1.1   hikaru 			return error;
   2005  1.1   hikaru 	}
   2006  1.1   hikaru 
   2007  1.1   hikaru 	if (suof_hdr != NULL) {
   2008  1.1   hikaru 		error = qat_aefw_mof_find_uof0(sc, suof_hdr,
   2009  1.1   hikaru 		    (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks,
   2010  1.1   hikaru 		    sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG,
   2011  1.1   hikaru 		    &sc->sc_fw_suof_size, &sc->sc_fw_suof);
   2012  1.1   hikaru 		if (error && error != ENOENT)
   2013  1.1   hikaru 			return error;
   2014  1.1   hikaru 	}
   2015  1.1   hikaru 
   2016  1.1   hikaru 	if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL)
   2017  1.1   hikaru 		return ENOENT;
   2018  1.1   hikaru 
   2019  1.1   hikaru 	return 0;
   2020  1.1   hikaru }
   2021  1.1   hikaru 
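                       /*
                        * Validate the MOF container and record its chunks: the symbol
                        * table (a u_int length followed by NUL-terminated names) and the
                        * embedded UOF/SUOF object headers.  The checksum covers the file
                        * from mfh_min_ver onward.  The firmware images are then located
                        * via qat_aefw_mof_find_uof().
                        */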
   2022  1.1   hikaru int
   2023  1.1   hikaru qat_aefw_mof_parse(struct qat_softc *sc)
   2024  1.1   hikaru {
   2025  1.1   hikaru 	struct mof_file_hdr *mfh;
   2026  1.1   hikaru 	struct mof_file_chunk_hdr *mfch;
   2027  1.1   hikaru 	size_t size;
   2028  1.1   hikaru 	u_int csum;
   2029  1.1   hikaru 	int error, i;
   2030  1.1   hikaru 
   2031  1.1   hikaru 	size = sc->sc_fw_mof_size;
   2032  1.1   hikaru 
   2033  1.1   hikaru 	if (size < sizeof(struct mof_file_hdr))
   2034  1.1   hikaru 		return EINVAL;
   2035  1.1   hikaru 	size -= sizeof(struct mof_file_hdr);
   2036  1.1   hikaru 
   2037  1.1   hikaru 	mfh = sc->sc_fw_mof;
   2038  1.1   hikaru 
   2039  1.1   hikaru 	if (mfh->mfh_fid != MOF_FID)
   2040  1.1   hikaru 		return EINVAL;
   2041  1.1   hikaru 
   2042  1.1   hikaru 	csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof +
   2043  1.1   hikaru 	    offsetof(struct mof_file_hdr, mfh_min_ver)),
   2044  1.1   hikaru 	    sc->sc_fw_mof_size -
   2045  1.1   hikaru 	    offsetof(struct mof_file_hdr, mfh_min_ver));
   2046  1.1   hikaru 	if (mfh->mfh_csum != csum)
   2047  1.1   hikaru 		return EINVAL;
   2048  1.1   hikaru 
   2049  1.1   hikaru 	if (mfh->mfh_min_ver != MOF_MIN_VER ||
   2050  1.1   hikaru 	    mfh->mfh_maj_ver != MOF_MAJ_VER)
   2051  1.1   hikaru 		return EINVAL;
   2052  1.1   hikaru 
   2053  1.1   hikaru 	if (mfh->mfh_max_chunks < mfh->mfh_num_chunks)
   2054  1.1   hikaru 		return EINVAL;
   2055  1.1   hikaru 
   2056  1.1   hikaru 	if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks)
   2057  1.1   hikaru 		return EINVAL;
   2058  1.1   hikaru 	mfch = (struct mof_file_chunk_hdr *)(mfh + 1);
   2059  1.1   hikaru 
   2060  1.1   hikaru 	for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) {
   2061  1.1   hikaru 		if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size)
   2062  1.1   hikaru 			return EINVAL;
   2063  1.1   hikaru 
   2064  1.1   hikaru 		if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) {
   2065  1.1   hikaru 			if (sc->sc_mof.qmf_sym != NULL)
   2066  1.1   hikaru 				return EINVAL;
   2067  1.1   hikaru 
   2068  1.1   hikaru 			sc->sc_mof.qmf_sym =
   2069  1.1   hikaru 			    (void *)((uintptr_t)sc->sc_fw_mof +
   2070  1.1   hikaru 			    (uintptr_t)mfch->mfch_offset + sizeof(u_int));
   2071  1.1   hikaru 			sc->sc_mof.qmf_sym_size =
   2072  1.1   hikaru 			    *(u_int *)((uintptr_t)sc->sc_fw_mof +
   2073  1.1   hikaru 			    (uintptr_t)mfch->mfch_offset);
   2074  1.1   hikaru 
   2075  1.1   hikaru 			if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0)
   2076  1.1   hikaru 				return EINVAL;
   2077  1.1   hikaru 			if (mfch->mfch_size != sc->sc_mof.qmf_sym_size +
   2078  1.1   hikaru 			    sizeof(u_int) || mfch->mfch_size == 0)
   2079  1.1   hikaru 				return EINVAL;
   2080  1.1   hikaru 			if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym +
   2081  1.1   hikaru 			    sc->sc_mof.qmf_sym_size - 1) != '\0')
   2082  1.1   hikaru 				return EINVAL;
   2083  1.1   hikaru 
   2084  1.1   hikaru 		} else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) {
   2085  1.1   hikaru 			if (sc->sc_mof.qmf_uof_objs != NULL)
   2086  1.1   hikaru 				return EINVAL;
   2087  1.1   hikaru 
   2088  1.1   hikaru 			sc->sc_mof.qmf_uof_objs =
   2089  1.1   hikaru 			    (void *)((uintptr_t)sc->sc_fw_mof +
   2090  1.1   hikaru 			    (uintptr_t)mfch->mfch_offset);
   2091  1.1   hikaru 			sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size;
   2092  1.1   hikaru 
   2093  1.1   hikaru 		} else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) {
   2094  1.1   hikaru 			if (sc->sc_mof.qmf_suof_objs != NULL)
   2095  1.1   hikaru 				return EINVAL;
   2096  1.1   hikaru 
   2097  1.1   hikaru 			sc->sc_mof.qmf_suof_objs =
   2098  1.1   hikaru 			    (void *)((uintptr_t)sc->sc_fw_mof +
   2099  1.1   hikaru 			    (uintptr_t)mfch->mfch_offset);
   2100  1.1   hikaru 			sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size;
   2101  1.1   hikaru 		}
   2102  1.1   hikaru 	}
   2103  1.1   hikaru 
   2104  1.1   hikaru 	if (sc->sc_mof.qmf_sym == NULL ||
   2105  1.1   hikaru 	    (sc->sc_mof.qmf_uof_objs == NULL &&
   2106  1.1   hikaru 	    sc->sc_mof.qmf_suof_objs == NULL))
   2107  1.1   hikaru 		return EINVAL;
   2108  1.1   hikaru 
   2109  1.1   hikaru 	error = qat_aefw_mof_find_uof(sc);
   2110  1.1   hikaru 	if (error)
   2111  1.1   hikaru 		return error;
   2112  1.1   hikaru 	return 0;
   2113  1.1   hikaru }
   2114  1.1   hikaru 
   2115  1.1   hikaru int
   2116  1.1   hikaru qat_aefw_uof_parse_image(struct qat_softc *sc,
   2117  1.1   hikaru 	struct qat_uof_image *qui, struct uof_chunk_hdr *uch)
   2118  1.1   hikaru {
   2119  1.1   hikaru 	struct uof_image *image;
   2120  1.1   hikaru 	struct uof_code_page *page;
   2121  1.1   hikaru 	uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
   2122  1.1   hikaru 	size_t lim = uch->uch_offset + uch->uch_size, size;
   2123  1.1   hikaru 	int i, p;
   2124  1.1   hikaru 
   2125  1.1   hikaru 	size = uch->uch_size;
   2126  1.1   hikaru 	if (size < sizeof(struct uof_image))
   2127  1.1   hikaru 		return EINVAL;
   2128  1.1   hikaru 	size -= sizeof(struct uof_image);
   2129  1.1   hikaru 
   2130  1.1   hikaru 	qui->qui_image = image =
   2131  1.1   hikaru 	    (struct uof_image *)(base + uch->uch_offset);
   2132  1.1   hikaru 
   2133  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
   2134  1.1   hikaru 	    "uof_image name %s\n",
   2135  1.1   hikaru 	    qat_aefw_uof_string(sc, image->ui_name));
   2136  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
   2137  1.1   hikaru 	    "uof_image ae_assign 0x%08x ctx_assign 0x%08x cpu_type 0x%08x\n",
   2138  1.1   hikaru 	    image->ui_ae_assigned, image->ui_ctx_assigned, image->ui_cpu_type);
   2139  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
   2140  1.1   hikaru 	    "uof_image max_ver 0x%08x min_ver 0x%08x ae_mode 0x%08x\n",
   2141  1.1   hikaru 	    image->ui_max_ver, image->ui_min_ver, image->ui_ae_mode);
   2142  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
   2143  1.1   hikaru 	    "uof_image pages 0x%08x page regions 0x%08x\n",
   2144  1.1   hikaru 	    image->ui_num_pages, image->ui_num_page_regions);
   2145  1.1   hikaru 
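                       /*
                        * A struct uof_obj_table is a u_int entry count immediately
                        * followed by the entries.  ASSIGN_OBJ_TAB bounds-checks the table
                        * at offset off from base against lim and sets *np to the count
                        * and *typep to the first entry (NULL when the table is empty).
                        */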
   2146  1.1   hikaru #define ASSIGN_OBJ_TAB(np, typep, type, base, off, lim)			\
   2147  1.1   hikaru do {									\
   2148  1.1   hikaru 	u_int nent;							\
   2149  1.1   hikaru 	nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\
    2150  1.1   hikaru 	if ((lim) < (off) + sizeof(struct uof_obj_table) +		\
   2151  1.1   hikaru 	    sizeof(type) * nent)					\
   2152  1.1   hikaru 		return EINVAL;						\
   2153  1.1   hikaru 	*(np) = nent;							\
   2154  1.1   hikaru 	if (nent > 0)							\
   2155  1.1   hikaru 		*(typep) = (type)((struct uof_obj_table *)		\
   2156  1.1   hikaru 		    ((base) + (off)) + 1);				\
   2157  1.1   hikaru 	else								\
   2158  1.1   hikaru 		*(typep) = NULL;					\
   2159  1.1   hikaru } while (0)
   2160  1.1   hikaru 
   2161  1.1   hikaru 	ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg,
   2162  1.1   hikaru 	    struct uof_ae_reg *, base, image->ui_reg_tab, lim);
   2163  1.1   hikaru 	ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym,
   2164  1.1   hikaru 	    struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim);
   2165  1.1   hikaru 	ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak,
   2166  1.1   hikaru 	    struct qui_sbreak *, base, image->ui_sbreak_tab, lim);
   2167  1.1   hikaru 
   2168  1.1   hikaru 	if (size < sizeof(struct uof_code_page) * image->ui_num_pages)
   2169  1.1   hikaru 		return EINVAL;
   2170  1.1   hikaru 	if (__arraycount(qui->qui_pages) < image->ui_num_pages)
   2171  1.1   hikaru 		return EINVAL;
   2172  1.1   hikaru 
   2173  1.1   hikaru 	page = (struct uof_code_page *)(image + 1);
   2174  1.1   hikaru 
   2175  1.1   hikaru 	for (p = 0; p < image->ui_num_pages; p++, page++) {
   2176  1.1   hikaru 		struct qat_uof_page *qup = &qui->qui_pages[p];
   2177  1.1   hikaru 		struct uof_code_area *uca;
   2178  1.1   hikaru 
   2179  1.1   hikaru 		qup->qup_page_num = page->ucp_page_num;
   2180  1.1   hikaru 		qup->qup_def_page = page->ucp_def_page;
   2181  1.1   hikaru 		qup->qup_page_region = page->ucp_page_region;
   2182  1.1   hikaru 		qup->qup_beg_vaddr = page->ucp_beg_vaddr;
   2183  1.1   hikaru 		qup->qup_beg_paddr = page->ucp_beg_paddr;
   2184  1.1   hikaru 
   2185  1.1   hikaru 		ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var,
   2186  1.1   hikaru 		    struct uof_uword_fixup *, base,
   2187  1.1   hikaru 		    page->ucp_uc_var_tab, lim);
   2188  1.1   hikaru 		ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var,
   2189  1.1   hikaru 		    struct uof_import_var *, base,
   2190  1.1   hikaru 		    page->ucp_imp_var_tab, lim);
   2191  1.1   hikaru 		ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr,
   2192  1.1   hikaru 		    struct uof_uword_fixup *, base,
   2193  1.1   hikaru 		    page->ucp_imp_expr_tab, lim);
   2194  1.1   hikaru 		ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg,
   2195  1.1   hikaru 		    struct uof_uword_fixup *, base,
   2196  1.1   hikaru 		    page->ucp_neigh_reg_tab, lim);
   2197  1.1   hikaru 
   2198  1.1   hikaru 		if (lim < page->ucp_code_area + sizeof(struct uof_code_area))
   2199  1.1   hikaru 			return EINVAL;
   2200  1.1   hikaru 
   2201  1.1   hikaru 		uca = (struct uof_code_area *)(base + page->ucp_code_area);
   2202  1.1   hikaru 		qup->qup_num_micro_words = uca->uca_num_micro_words;
   2203  1.1   hikaru 
   2204  1.1   hikaru 		ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks,
   2205  1.1   hikaru 		    struct qat_uof_uword_block *, base,
   2206  1.1   hikaru 		    uca->uca_uword_block_tab, lim);
   2207  1.1   hikaru 
   2208  1.1   hikaru 		for (i = 0; i < qup->qup_num_uw_blocks; i++) {
   2209  1.1   hikaru 			u_int uwordoff = ((struct uof_uword_block *)(
   2210  1.1   hikaru 			    &qup->qup_uw_blocks[i]))->uub_uword_offset;
   2211  1.1   hikaru 
   2212  1.1   hikaru 			if (lim < uwordoff)
   2213  1.1   hikaru 				return EINVAL;
   2214  1.1   hikaru 
   2215  1.1   hikaru 			qup->qup_uw_blocks[i].quub_micro_words =
   2216  1.1   hikaru 			    (base + uwordoff);
   2217  1.1   hikaru 		}
   2218  1.1   hikaru 	}
   2219  1.1   hikaru 
   2220  1.1   hikaru #undef ASSIGN_OBJ_TAB
   2221  1.1   hikaru 
   2222  1.1   hikaru 	return 0;
   2223  1.1   hikaru }
   2224  1.1   hikaru 
   2225  1.1   hikaru int
   2226  1.1   hikaru qat_aefw_uof_parse_images(struct qat_softc *sc)
   2227  1.1   hikaru {
   2228  1.1   hikaru 	struct uof_chunk_hdr *uch = NULL;
   2229  1.1   hikaru 	u_int assigned_ae;
   2230  1.1   hikaru 	int i, error;
   2231  1.1   hikaru 
   2232  1.1   hikaru 	for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) {
   2233  1.1   hikaru 		uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch);
   2234  1.1   hikaru 		if (uch == NULL)
   2235  1.1   hikaru 			break;
   2236  1.1   hikaru 
   2237  1.1   hikaru 		if (i >= __arraycount(sc->sc_aefw_uof.qafu_imgs))
   2238  1.1   hikaru 			return ENOENT;
   2239  1.1   hikaru 
   2240  1.1   hikaru 		error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch);
   2241  1.1   hikaru 		if (error)
   2242  1.1   hikaru 			return error;
   2243  1.1   hikaru 
   2244  1.1   hikaru 		sc->sc_aefw_uof.qafu_num_imgs++;
   2245  1.1   hikaru 	}
   2246  1.1   hikaru 
   2247  1.1   hikaru 	assigned_ae = 0;
   2248  1.1   hikaru 	for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
   2249  1.1   hikaru 		assigned_ae |= sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned;
   2250  1.1   hikaru 	}
   2251  1.1   hikaru 
   2252  1.1   hikaru 	return 0;
   2253  1.1   hikaru }
   2254  1.1   hikaru 
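                       /*
                        * Parse the UOF container: the file header's chunks are searched
                        * for the checksummed UOF_OBJS object, whose own chunks carry the
                        * string table (UOF_STRT), the ustore memory-init table (UOF_IMEM),
                        * the variable memory segment (UOF_MSEG) and the AE images
                        * (UOF_IMAG).  CPU type and revision range are checked against the
                        * chip first.
                        */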
   2255  1.1   hikaru int
   2256  1.1   hikaru qat_aefw_uof_parse(struct qat_softc *sc)
   2257  1.1   hikaru {
   2258  1.1   hikaru 	struct uof_file_hdr *ufh;
   2259  1.1   hikaru 	struct uof_file_chunk_hdr *ufch;
   2260  1.1   hikaru 	struct uof_obj_hdr *uoh;
   2261  1.1   hikaru 	struct uof_chunk_hdr *uch;
   2262  1.1   hikaru 	void *uof = NULL;
   2263  1.1   hikaru 	size_t size, uof_size, hdr_size;
   2264  1.1   hikaru 	uintptr_t base;
   2265  1.1   hikaru 	u_int csum;
   2266  1.1   hikaru 	int i;
   2267  1.1   hikaru 
   2268  1.1   hikaru 	size = sc->sc_fw_uof_size;
   2269  1.1   hikaru 	if (size < MIN_UOF_SIZE)
   2270  1.1   hikaru 		return EINVAL;
   2271  1.1   hikaru 	size -= sizeof(struct uof_file_hdr);
   2272  1.1   hikaru 
   2273  1.1   hikaru 	ufh = sc->sc_fw_uof;
   2274  1.1   hikaru 
   2275  1.1   hikaru 	if (ufh->ufh_id != UOF_FID)
   2276  1.1   hikaru 		return EINVAL;
   2277  1.1   hikaru 	if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER)
   2278  1.1   hikaru 		return EINVAL;
   2279  1.1   hikaru 
   2280  1.1   hikaru 	if (ufh->ufh_max_chunks < ufh->ufh_num_chunks)
   2281  1.1   hikaru 		return EINVAL;
   2282  1.1   hikaru 	if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks)
   2283  1.1   hikaru 		return EINVAL;
   2284  1.1   hikaru 	ufch = (struct uof_file_chunk_hdr *)(ufh + 1);
   2285  1.1   hikaru 
   2286  1.1   hikaru 	uof_size = 0;
   2287  1.1   hikaru 	for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) {
   2288  1.1   hikaru 		if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size)
   2289  1.1   hikaru 			return EINVAL;
   2290  1.1   hikaru 
   2291  1.1   hikaru 		if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) {
   2292  1.1   hikaru 			if (uof != NULL)
   2293  1.1   hikaru 				return EINVAL;
   2294  1.1   hikaru 
   2295  1.1   hikaru 			uof =
   2296  1.1   hikaru 			    (void *)((uintptr_t)sc->sc_fw_uof +
   2297  1.1   hikaru 			    ufch->ufch_offset);
   2298  1.1   hikaru 			uof_size = ufch->ufch_size;
   2299  1.1   hikaru 
   2300  1.1   hikaru 			csum = qat_aefw_csum(uof, uof_size);
   2301  1.1   hikaru 			if (csum != ufch->ufch_csum)
   2302  1.1   hikaru 				return EINVAL;
   2303  1.1   hikaru 
   2304  1.1   hikaru 			aprint_verbose_dev(sc->sc_dev,
    2305  1.1   hikaru 			    "uof at %p size 0x%zx\n",
   2306  1.1   hikaru 			    uof, uof_size);
   2307  1.1   hikaru 		}
   2308  1.1   hikaru 	}
   2309  1.1   hikaru 
   2310  1.1   hikaru 	if (uof == NULL)
   2311  1.1   hikaru 		return ENOENT;
   2312  1.1   hikaru 
   2313  1.1   hikaru 	size = uof_size;
   2314  1.1   hikaru 	if (size < sizeof(struct uof_obj_hdr))
   2315  1.1   hikaru 		return EINVAL;
   2316  1.1   hikaru 	size -= sizeof(struct uof_obj_hdr);
   2317  1.1   hikaru 
   2318  1.1   hikaru 	uoh = uof;
   2319  1.1   hikaru 
   2320  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
   2321  1.1   hikaru 	    "uof cpu_type 0x%08x min_cpu_ver 0x%04x max_cpu_ver 0x%04x\n",
   2322  1.1   hikaru 	    uoh->uoh_cpu_type, uoh->uoh_min_cpu_ver, uoh->uoh_max_cpu_ver);
   2323  1.1   hikaru 
   2324  1.1   hikaru 	if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks)
   2325  1.1   hikaru 		return EINVAL;
   2326  1.1   hikaru 
   2327  1.1   hikaru 	/* Check if the UOF objects are compatible with the chip */
   2328  1.1   hikaru 	if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0)
   2329  1.1   hikaru 		return ENOTSUP;
   2330  1.1   hikaru 
   2331  1.1   hikaru 	if (uoh->uoh_min_cpu_ver > sc->sc_rev ||
   2332  1.1   hikaru 	    uoh->uoh_max_cpu_ver < sc->sc_rev)
   2333  1.1   hikaru 		return ENOTSUP;
   2334  1.1   hikaru 
   2335  1.1   hikaru 	sc->sc_aefw_uof.qafu_size = uof_size;
   2336  1.1   hikaru 	sc->sc_aefw_uof.qafu_obj_hdr = uoh;
   2337  1.1   hikaru 
   2338  1.1   hikaru 	base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
   2339  1.1   hikaru 
   2340  1.1   hikaru 	/* map uof string-table */
   2341  1.1   hikaru 	uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL);
   2342  1.1   hikaru 	if (uch != NULL) {
   2343  1.1   hikaru 		hdr_size = offsetof(struct uof_str_tab, ust_strings);
   2344  1.1   hikaru 		sc->sc_aefw_uof.qafu_str_tab =
   2345  1.1   hikaru 		    (void *)(base + uch->uch_offset + hdr_size);
   2346  1.1   hikaru 		sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size;
   2347  1.1   hikaru 	}
   2348  1.1   hikaru 
   2349  1.1   hikaru 	/* get ustore mem inits table -- should be only one */
   2350  1.1   hikaru 	uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL);
   2351  1.1   hikaru 	if (uch != NULL) {
   2352  1.1   hikaru 		if (uch->uch_size < sizeof(struct uof_obj_table))
   2353  1.1   hikaru 			return EINVAL;
    2354  1.1   hikaru 		sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)
    2355  1.1   hikaru 		    (base + uch->uch_offset))->uot_nentries;
   2356  1.1   hikaru 		if (sc->sc_aefw_uof.qafu_num_init_mem) {
   2357  1.1   hikaru 			sc->sc_aefw_uof.qafu_init_mem =
   2358  1.1   hikaru 			    (struct uof_init_mem *)(base + uch->uch_offset +
   2359  1.1   hikaru 			    sizeof(struct uof_obj_table));
   2360  1.1   hikaru 			sc->sc_aefw_uof.qafu_init_mem_size =
   2361  1.1   hikaru 			    uch->uch_size - sizeof(struct uof_obj_table);
   2362  1.1   hikaru 		}
   2363  1.1   hikaru 	}
   2364  1.1   hikaru 
   2365  1.1   hikaru 	uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL);
   2366  1.1   hikaru 	if (uch != NULL) {
   2367  1.1   hikaru 		if (uch->uch_size < sizeof(struct uof_obj_table) +
   2368  1.1   hikaru 		    sizeof(struct uof_var_mem_seg))
   2369  1.1   hikaru 			return EINVAL;
   2370  1.1   hikaru 		sc->sc_aefw_uof.qafu_var_mem_seg =
   2371  1.1   hikaru 		    (struct uof_var_mem_seg *)(base + uch->uch_offset +
   2372  1.1   hikaru 		    sizeof(struct uof_obj_table));
   2373  1.1   hikaru 	}
   2374  1.1   hikaru 
   2375  1.1   hikaru 	return qat_aefw_uof_parse_images(sc);
   2376  1.1   hikaru }
   2377  1.1   hikaru 
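/*
 * qat_aefw_suof_parse_image:
 *
 *	Locate the parts of one signed image (SIMG) chunk: the CSS
 *	header, the firmware signing key (modulus, padding, exponent),
 *	the signature, and the image body, and record the AE mask and
 *	firmware type from the image's AE-mode block.  The image is
 *	rejected if its device type or device version range does not
 *	match this chip.
 */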
   2378  1.1   hikaru int
   2379  1.1   hikaru qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi,
   2380  1.1   hikaru     struct suof_chunk_hdr *sch)
   2381  1.1   hikaru {
   2382  1.1   hikaru 	struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
   2383  1.1   hikaru 	struct simg_ae_mode *ae_mode;
   2384  1.1   hikaru 	u_int maj_ver;
   2385  1.1   hikaru 
   2386  1.1   hikaru 	qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset +
   2387  1.1   hikaru 	    sizeof(struct suof_obj_hdr);
   2388  1.1   hikaru 	qsi->qsi_simg_len =
   2389  1.1   hikaru 	    ((struct suof_obj_hdr *)
   2390  1.1   hikaru 	    (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length;
   2391  1.1   hikaru 
   2392  1.1   hikaru 	qsi->qsi_css_header = qsi->qsi_simg_buf;
   2393  1.1   hikaru 	qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr);
   2394  1.1   hikaru 	qsi->qsi_css_signature = qsi->qsi_css_key +
   2395  1.1   hikaru 	    CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
   2396  1.1   hikaru 	qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN;
   2397  1.1   hikaru 
   2398  1.1   hikaru 	ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg;
   2399  1.1   hikaru 	qsi->qsi_ae_mask = ae_mode->sam_ae_mask;
   2400  1.1   hikaru 	qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name;
   2401  1.1   hikaru 	qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data;
   2402  1.1   hikaru 	qsi->qsi_fw_type = ae_mode->sam_fw_type;
   2403  1.1   hikaru 
   2404  1.1   hikaru 	if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type)
   2405  1.1   hikaru 		return EINVAL;
   2406  1.1   hikaru 
   2407  1.1   hikaru 	maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff;
   2408  1.1   hikaru 	if ((maj_ver > ae_mode->sam_devmax_ver) ||
   2409  1.1   hikaru 	    (maj_ver < ae_mode->sam_devmin_ver)) {
   2410  1.1   hikaru 		return EINVAL;
   2411  1.1   hikaru 	}
   2412  1.1   hikaru 
   2413  1.1   hikaru 	return 0;
   2414  1.1   hikaru }
   2415  1.1   hikaru 
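/*
 * qat_aefw_suof_parse:
 *
 *	Validate the SUOF (signed UOF) container: check the file id,
 *	firmware type, version and checksum, take the symbol string
 *	table from the first chunk, and parse each remaining chunk as
 *	a signed image.  The image that targets AE0 is swapped to the
 *	end of the image array, presumably so that AE0 is authenticated
 *	and loaded last; the reason is not recorded here.
 */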
   2416  1.1   hikaru int
   2417  1.1   hikaru qat_aefw_suof_parse(struct qat_softc *sc)
   2418  1.1   hikaru {
   2419  1.1   hikaru 	struct suof_file_hdr *sfh;
   2420  1.1   hikaru 	struct suof_chunk_hdr *sch;
   2421  1.1   hikaru 	struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
   2422  1.1   hikaru 	struct qat_suof_image *qsi;
   2423  1.1   hikaru 	size_t size;
   2424  1.1   hikaru 	u_int csum;
   2425  1.1   hikaru 	int ae0_img = MAX_AE;
   2426  1.1   hikaru 	int i, error;
   2427  1.1   hikaru 
   2428  1.1   hikaru 	size = sc->sc_fw_suof_size;
   2429  1.1   hikaru 	if (size < sizeof(struct suof_file_hdr))
   2430  1.1   hikaru 		return EINVAL;
   2431  1.1   hikaru 
   2432  1.1   hikaru 	sfh = sc->sc_fw_suof;
   2433  1.1   hikaru 
   2434  1.1   hikaru 	if (sfh->sfh_file_id != SUOF_FID)
   2435  1.1   hikaru 		return EINVAL;
   2436  1.1   hikaru 	if (sfh->sfh_fw_type != 0)
   2437  1.1   hikaru 		return EINVAL;
   2438  1.1   hikaru 	if (sfh->sfh_num_chunks <= 1)
   2439  1.1   hikaru 		return EINVAL;
   2440  1.1   hikaru 	if (sfh->sfh_min_ver != SUOF_MIN_VER ||
   2441  1.1   hikaru 	    sfh->sfh_maj_ver != SUOF_MAJ_VER)
   2442  1.1   hikaru 		return EINVAL;
   2443  1.1   hikaru 
   2444  1.1   hikaru 	csum = qat_aefw_csum((char *)&sfh->sfh_min_ver,
   2445  1.1   hikaru 	    size - offsetof(struct suof_file_hdr, sfh_min_ver));
   2446  1.1   hikaru 	if (csum != sfh->sfh_check_sum)
   2447  1.1   hikaru 		return EINVAL;
   2448  1.1   hikaru 
   2449  1.1   hikaru 	size -= sizeof(struct suof_file_hdr);
   2450  1.1   hikaru 
   2451  1.1   hikaru 	qafs->qafs_file_id = SUOF_FID;
   2452  1.1   hikaru 	qafs->qafs_suof_buf = sc->sc_fw_suof;
   2453  1.1   hikaru 	qafs->qafs_suof_size = sc->sc_fw_suof_size;
   2454  1.1   hikaru 	qafs->qafs_check_sum = sfh->sfh_check_sum;
   2455  1.1   hikaru 	qafs->qafs_min_ver = sfh->sfh_min_ver;
   2456  1.1   hikaru 	qafs->qafs_maj_ver = sfh->sfh_maj_ver;
   2457  1.1   hikaru 	qafs->qafs_fw_type = sfh->sfh_fw_type;
   2458  1.1   hikaru 
   2459  1.1   hikaru 	if (size < sizeof(struct suof_chunk_hdr))
   2460  1.1   hikaru 		return EINVAL;
   2461  1.1   hikaru 	sch = (struct suof_chunk_hdr *)(sfh + 1);
   2462  1.1   hikaru 	size -= sizeof(struct suof_chunk_hdr);
   2463  1.1   hikaru 
   2464  1.1   hikaru 	if (size < sizeof(struct suof_str_tab))
   2465  1.1   hikaru 		return EINVAL;
   2466  1.1   hikaru 	size -= offsetof(struct suof_str_tab, sst_strings);
   2467  1.1   hikaru 
   2468  1.1   hikaru 	qafs->qafs_sym_size = ((struct suof_str_tab *)
   2469  1.1   hikaru 	    (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length;
   2470  1.1   hikaru 	if (size < qafs->qafs_sym_size)
   2471  1.1   hikaru 		return EINVAL;
   2472  1.1   hikaru 	qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset +
   2473  1.1   hikaru 	    offsetof(struct suof_str_tab, sst_strings);
   2474  1.1   hikaru 
   2475  1.1   hikaru 	qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1;
   2476  1.1   hikaru 	if (qafs->qafs_num_simgs == 0)
   2477  1.1   hikaru 		return EINVAL;
   2478  1.1   hikaru 
   2479  1.1   hikaru 	qsi = qat_alloc_mem(
   2480  1.1   hikaru 	    sizeof(struct qat_suof_image) * qafs->qafs_num_simgs);
   2481  1.1   hikaru 	qafs->qafs_simg = qsi;
   2482  1.1   hikaru 
   2483  1.1   hikaru 	for (i = 0; i < qafs->qafs_num_simgs; i++) {
   2484  1.1   hikaru 		error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]);
   2485  1.1   hikaru 		if (error)
   2486  1.1   hikaru 			return error;
   2487  1.1   hikaru 		if ((qsi[i].qsi_ae_mask & 0x1) != 0)
   2488  1.1   hikaru 			ae0_img = i;
   2489  1.1   hikaru 	}
   2490  1.1   hikaru 
	/* an image must target AE0, or the swap below would index out of range */
	if (ae0_img == MAX_AE)
		return EINVAL;

    2491  1.1   hikaru 	if (ae0_img != qafs->qafs_num_simgs - 1) {
   2492  1.1   hikaru 		struct qat_suof_image last_qsi;
   2493  1.1   hikaru 
   2494  1.1   hikaru 		memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1],
   2495  1.1   hikaru 		    sizeof(struct qat_suof_image));
   2496  1.1   hikaru 		memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img],
   2497  1.1   hikaru 		    sizeof(struct qat_suof_image));
   2498  1.1   hikaru 		memcpy(&qsi[ae0_img], &last_qsi,
   2499  1.1   hikaru 		    sizeof(struct qat_suof_image));
   2500  1.1   hikaru 	}
   2501  1.1   hikaru 
   2502  1.1   hikaru 	return 0;
   2503  1.1   hikaru }
   2504  1.1   hikaru 
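/*
 * qat_aefw_alloc_auth_dmamem:
 *
 *	Stage a signed image for authentication: allocate one DMA-able
 *	region holding an auth_chunk followed by copies of the CSS
 *	header, the public key (modulus, zero padding, exponent), the
 *	signature and the image body, and fill the embedded
 *	fw_auth_desc with the bus address of each part.  For AE
 *	firmware the descriptor also points at the AE-mode data, the
 *	init sequence and the instructions within the image body.
 */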
   2505  1.1   hikaru int
   2506  1.1   hikaru qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size,
   2507  1.1   hikaru     struct qat_dmamem *dma)
   2508  1.1   hikaru {
   2509  1.1   hikaru 	struct css_hdr *css = (struct css_hdr *)image;
   2510  1.1   hikaru 	struct auth_chunk *auth_chunk;
   2511  1.1   hikaru 	struct fw_auth_desc *auth_desc;
   2512  1.1   hikaru 	size_t mapsize, simg_offset = sizeof(struct auth_chunk);
    2513  1.1   hikaru 	bus_addr_t bus_addr;
   2514  1.1   hikaru 	uintptr_t virt_addr;
   2515  1.1   hikaru 	int error;
   2516  1.1   hikaru 
   2517  1.1   hikaru 	if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN)
   2518  1.1   hikaru 		return EINVAL;
   2519  1.1   hikaru 
   2520  1.1   hikaru 	mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ?
   2521  1.1   hikaru 	    CSS_AE_SIMG_LEN + simg_offset :
   2522  1.1   hikaru 	    size + CSS_FWSK_PAD_LEN + simg_offset;
   2523  1.1   hikaru 	error = qat_alloc_dmamem(sc, dma, mapsize, PAGE_SIZE);
   2524  1.1   hikaru 	if (error)
   2525  1.1   hikaru 		return error;
   2526  1.1   hikaru 
   2527  1.1   hikaru 	memset(dma->qdm_dma_vaddr, 0, mapsize);
   2528  1.1   hikaru 
   2529  1.1   hikaru 	auth_chunk = dma->qdm_dma_vaddr;
   2530  1.1   hikaru 	auth_chunk->ac_chunk_size = mapsize;
   2531  1.1   hikaru 	auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_map->dm_segs[0].ds_addr;
   2532  1.1   hikaru 
   2533  1.1   hikaru 	virt_addr = (uintptr_t)dma->qdm_dma_vaddr;
   2534  1.1   hikaru 	virt_addr += simg_offset;
   2535  1.1   hikaru 	bus_addr = auth_chunk->ac_chunk_bus_addr;
   2536  1.1   hikaru 	bus_addr += simg_offset;
   2537  1.1   hikaru 
   2538  1.1   hikaru 	auth_desc = &auth_chunk->ac_fw_auth_desc;
   2539  1.1   hikaru 	auth_desc->fad_css_hdr_high = bus_addr >> 32;
   2540  1.1   hikaru 	auth_desc->fad_css_hdr_low = bus_addr;
   2541  1.1   hikaru 
   2542  1.1   hikaru 	memcpy((void *)virt_addr, image, sizeof(struct css_hdr));
   2543  1.1   hikaru 	/* pub key */
   2544  1.1   hikaru 	virt_addr += sizeof(struct css_hdr);
   2545  1.1   hikaru 	bus_addr += sizeof(struct css_hdr);
   2546  1.1   hikaru 	image += sizeof(struct css_hdr);
   2547  1.1   hikaru 
   2548  1.1   hikaru 	auth_desc->fad_fwsk_pub_high = bus_addr >> 32;
   2549  1.1   hikaru 	auth_desc->fad_fwsk_pub_low = bus_addr;
   2550  1.1   hikaru 
   2551  1.1   hikaru 	memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN);
   2552  1.1   hikaru 	memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN);
   2553  1.1   hikaru 	memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN),
   2554  1.1   hikaru 	    image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t));
   2555  1.1   hikaru 
   2556  1.1   hikaru 	virt_addr += CSS_FWSK_PUB_LEN;
   2557  1.1   hikaru 	bus_addr += CSS_FWSK_PUB_LEN;
   2558  1.1   hikaru 	image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
   2559  1.1   hikaru 
   2560  1.1   hikaru 	auth_desc->fad_signature_high = bus_addr >> 32;
   2561  1.1   hikaru 	auth_desc->fad_signature_low = bus_addr;
   2562  1.1   hikaru 
   2563  1.1   hikaru 	memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN);
   2564  1.1   hikaru #ifdef QAT_DUMP
   2565  1.1   hikaru 	qat_dump_raw(QAT_DUMP_AEFW, "aefw signature", image, CSS_SIGNATURE_LEN);
   2566  1.1   hikaru #endif
   2567  1.1   hikaru 
   2568  1.1   hikaru 	virt_addr += CSS_SIGNATURE_LEN;
   2569  1.1   hikaru 	bus_addr += CSS_SIGNATURE_LEN;
   2570  1.1   hikaru 	image += CSS_SIGNATURE_LEN;
   2571  1.1   hikaru 
   2572  1.1   hikaru 	auth_desc->fad_img_high = bus_addr >> 32;
   2573  1.1   hikaru 	auth_desc->fad_img_low = bus_addr;
   2574  1.1   hikaru 	auth_desc->fad_img_len = size - AE_IMG_OFFSET;
   2575  1.1   hikaru 
   2576  1.1   hikaru 	memcpy((void *)virt_addr, image, auth_desc->fad_img_len);
   2577  1.1   hikaru 
   2578  1.1   hikaru 	if (css->css_fw_type == CSS_AE_FIRMWARE) {
   2579  1.1   hikaru 		auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high;
   2580  1.1   hikaru 		auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low;
   2581  1.1   hikaru 
   2582  1.1   hikaru 		bus_addr += sizeof(struct simg_ae_mode);
   2583  1.1   hikaru 
   2584  1.1   hikaru 		auth_desc->fad_img_ae_init_data_high = bus_addr >> 32;
   2585  1.1   hikaru 		auth_desc->fad_img_ae_init_data_low = bus_addr;
   2586  1.1   hikaru 
   2587  1.1   hikaru 		bus_addr += SIMG_AE_INIT_SEQ_LEN;
   2588  1.1   hikaru 
   2589  1.1   hikaru 		auth_desc->fad_img_ae_insts_high = bus_addr >> 32;
   2590  1.1   hikaru 		auth_desc->fad_img_ae_insts_low = bus_addr;
   2591  1.1   hikaru 	} else {
   2592  1.1   hikaru 		auth_desc->fad_img_ae_insts_high = auth_desc->fad_img_high;
   2593  1.1   hikaru 		auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low;
   2594  1.1   hikaru 	}
   2595  1.1   hikaru 
   2596  1.1   hikaru 	bus_dmamap_sync(sc->sc_dmat, dma->qdm_dma_map, 0,
   2597  1.1   hikaru 	    dma->qdm_dma_map->dm_mapsize,
   2598  1.1   hikaru 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
   2599  1.1   hikaru 
   2600  1.1   hikaru 	return 0;
   2601  1.1   hikaru }
   2602  1.1   hikaru 
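/*
 * qat_aefw_auth:
 *
 *	Pass the staged authentication descriptor to the FCU (taken
 *	here to mean the firmware control unit; the name is inferred
 *	from the register prefix) and poll FCU_STATUS until
 *	verification completes or fails, giving up after
 *	FW_AUTH_MAX_RETRY polls.
 */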
   2603  1.1   hikaru int
   2604  1.1   hikaru qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma)
   2605  1.1   hikaru {
   2606  1.1   hikaru 	bus_addr_t addr = dma->qdm_dma_map->dm_segs[0].ds_addr;
   2607  1.1   hikaru 	uint32_t fcu, sts;
   2608  1.1   hikaru 	int retry = 0;
   2609  1.1   hikaru 
   2610  1.1   hikaru 	qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, addr >> 32);
   2611  1.1   hikaru 	qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr);
   2612  1.1   hikaru 	qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH);
   2613  1.1   hikaru 
   2614  1.1   hikaru 	do {
   2615  1.1   hikaru 		delay(FW_AUTH_WAIT_PERIOD * 1000);
   2616  1.1   hikaru 		fcu = qat_cap_global_read_4(sc, FCU_STATUS);
   2617  1.1   hikaru 		sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
   2618  1.1   hikaru 		if (sts == FCU_STATUS_STS_VERI_FAIL)
   2619  1.1   hikaru 			goto fail;
   2620  1.1   hikaru 		if (fcu & FCU_STATUS_AUTHFWLD &&
   2621  1.1   hikaru 		    sts == FCU_STATUS_STS_VERI_DONE) {
   2622  1.1   hikaru 			return 0;
   2623  1.1   hikaru 		}
   2624  1.1   hikaru 	} while (retry++ < FW_AUTH_MAX_RETRY);
   2625  1.1   hikaru 
   2626  1.1   hikaru fail:
   2627  1.1   hikaru 	aprint_error_dev(sc->sc_dev,
   2628  1.1   hikaru 	   "firmware authentication error: status 0x%08x retry %d\n",
   2629  1.1   hikaru 	   fcu, retry);
   2630  1.1   hikaru 	return EINVAL;
   2631  1.1   hikaru }
   2632  1.1   hikaru 
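/*
 * qat_aefw_suof_load:
 *
 *	Ask the FCU to load an authenticated image into each AE that is
 *	both present and named in the image's AE mask, polling
 *	FCU_STATUS until the "loaded" bit for that AE is set.  An AE
 *	that is still active is an error.
 */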
   2633  1.1   hikaru int
   2634  1.1   hikaru qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma)
   2635  1.1   hikaru {
   2636  1.1   hikaru 	struct simg_ae_mode *ae_mode;
   2637  1.1   hikaru 	uint32_t fcu, sts, loaded;
   2638  1.1   hikaru 	u_int mask;
   2639  1.1   hikaru 	u_char ae;
   2640  1.1   hikaru 	int retry = 0;
   2641  1.1   hikaru 
   2642  1.1   hikaru 	ae_mode = (struct simg_ae_mode *)((uintptr_t)dma->qdm_dma_vaddr +
   2643  1.1   hikaru 	    sizeof(struct auth_chunk) + sizeof(struct css_hdr) +
   2644  1.1   hikaru 	    CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN);
   2645  1.1   hikaru 
   2646  1.1   hikaru 	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
   2647  1.1   hikaru 		if (!(mask & 1))
   2648  1.1   hikaru 			continue;
   2649  1.1   hikaru 		if (!((ae_mode->sam_ae_mask >> ae) & 0x1))
   2650  1.1   hikaru 			continue;
   2651  1.1   hikaru 		if (qat_ae_is_active(sc, ae)) {
   2652  1.1   hikaru 			aprint_error_dev(sc->sc_dev, "AE %d is active\n", ae);
   2653  1.1   hikaru 			return EINVAL;
   2654  1.1   hikaru 		}
    2655  1.1   hikaru 		qat_cap_global_write_4(sc, FCU_CTRL,
    2656  1.1   hikaru 		    FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE));
		retry = 0;	/* give every AE the full retry budget */
    2657  1.1   hikaru 		do {
   2658  1.1   hikaru 			delay(FW_AUTH_WAIT_PERIOD * 1000);
   2659  1.1   hikaru 			fcu = qat_cap_global_read_4(sc, FCU_STATUS);
   2660  1.1   hikaru 			sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
   2661  1.1   hikaru 			loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE);
   2662  1.1   hikaru 			if (sts == FCU_STATUS_STS_LOAD_DONE &&
   2663  1.1   hikaru 			    (loaded & (1 << ae))) {
   2664  1.1   hikaru 				break;
   2665  1.1   hikaru 			}
   2666  1.1   hikaru 		} while (retry++ < FW_AUTH_MAX_RETRY);
   2667  1.1   hikaru 
   2668  1.1   hikaru 		if (retry > FW_AUTH_MAX_RETRY) {
   2669  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   2670  1.1   hikaru 			    "firmware load timeout: status %08x\n", fcu);
   2671  1.1   hikaru 			return EINVAL;
   2672  1.1   hikaru 		}
   2673  1.1   hikaru 	}
   2674  1.1   hikaru 
   2675  1.1   hikaru 	return 0;
   2676  1.1   hikaru }
   2677  1.1   hikaru 
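/*
 * qat_aefw_suof_write:
 *
 *	For each signed image in turn: stage it in DMA memory,
 *	authenticate it, load it, and release the staging memory
 *	(also on failure).
 */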
   2678  1.1   hikaru int
   2679  1.1   hikaru qat_aefw_suof_write(struct qat_softc *sc)
   2680  1.1   hikaru {
   2681  1.1   hikaru 	struct qat_suof_image *qsi = NULL;
   2682  1.1   hikaru 	int i, error = 0;
   2683  1.1   hikaru 
   2684  1.1   hikaru 	for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) {
   2685  1.1   hikaru 		qsi = &sc->sc_aefw_suof.qafs_simg[i];
   2686  1.1   hikaru 		error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf,
   2687  1.1   hikaru 		    qsi->qsi_simg_len, &qsi->qsi_dma);
   2688  1.1   hikaru 		if (error)
   2689  1.1   hikaru 			return error;
   2690  1.1   hikaru 		error = qat_aefw_auth(sc, &qsi->qsi_dma);
   2691  1.1   hikaru 		if (error)
   2692  1.1   hikaru 			goto fail;
   2693  1.1   hikaru 		error = qat_aefw_suof_load(sc, &qsi->qsi_dma);
   2694  1.1   hikaru 		if (error)
   2695  1.1   hikaru 			goto fail;
   2696  1.1   hikaru 
   2697  1.1   hikaru 		qat_free_dmamem(sc, &qsi->qsi_dma);
   2698  1.1   hikaru 	}
   2699  1.1   hikaru 
   2700  1.1   hikaru 	return 0;
   2701  1.1   hikaru fail:
   2702  1.1   hikaru 	if (qsi != NULL)
   2703  1.1   hikaru 		qat_free_dmamem(sc, &qsi->qsi_dma);
   2704  1.1   hikaru 	return error;
   2705  1.1   hikaru }
   2706  1.1   hikaru 
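/*
 * qat_aefw_uof_assign_image:
 *
 *	Bind a UOF image to the next free slice of an AE and wire each
 *	of the image's pages to its page region.  Images that want a
 *	shared or reloadable ustore are rejected as unsupported.
 */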
   2707  1.1   hikaru int
   2708  1.1   hikaru qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae,
   2709  1.1   hikaru 	struct qat_uof_image *qui)
   2710  1.1   hikaru {
   2711  1.1   hikaru 	struct qat_ae_slice *slice;
   2712  1.1   hikaru 	int i, npages, nregions;
   2713  1.1   hikaru 
   2714  1.1   hikaru 	if (qae->qae_num_slices >= __arraycount(qae->qae_slices))
   2715  1.1   hikaru 		return ENOENT;
   2716  1.1   hikaru 
   2717  1.1   hikaru 	if (qui->qui_image->ui_ae_mode &
   2718  1.1   hikaru 	    (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) {
   2719  1.1   hikaru 		/* XXX */
   2720  1.1   hikaru 		aprint_error_dev(sc->sc_dev,
   2721  1.1   hikaru 		    "shared ae mode is not supported yet\n");
   2722  1.1   hikaru 		return ENOTSUP;
   2723  1.1   hikaru 	}
   2724  1.1   hikaru 
   2725  1.1   hikaru 	qae->qae_shareable_ustore = 0; /* XXX */
   2726  1.1   hikaru 	qae->qae_effect_ustore_size = USTORE_SIZE;
   2727  1.1   hikaru 
   2728  1.1   hikaru 	slice = &qae->qae_slices[qae->qae_num_slices];
   2729  1.1   hikaru 
   2730  1.1   hikaru 	slice->qas_image = qui;
   2731  1.1   hikaru 	slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned;
   2732  1.1   hikaru 
   2733  1.1   hikaru 	nregions = qui->qui_image->ui_num_page_regions;
   2734  1.1   hikaru 	npages = qui->qui_image->ui_num_pages;
   2735  1.1   hikaru 
   2736  1.1   hikaru 	if (nregions > __arraycount(slice->qas_regions))
   2737  1.1   hikaru 		return ENOENT;
   2738  1.1   hikaru 	if (npages > __arraycount(slice->qas_pages))
   2739  1.1   hikaru 		return ENOENT;
   2740  1.1   hikaru 
   2741  1.1   hikaru 	for (i = 0; i < nregions; i++) {
   2742  1.1   hikaru 		SIMPLEQ_INIT(&slice->qas_regions[i].qar_waiting_pages);
   2743  1.1   hikaru 	}
   2744  1.1   hikaru 	for (i = 0; i < npages; i++) {
   2745  1.1   hikaru 		struct qat_ae_page *page = &slice->qas_pages[i];
   2746  1.1   hikaru 		int region;
   2747  1.1   hikaru 
   2748  1.1   hikaru 		page->qap_page = &qui->qui_pages[i];
   2749  1.1   hikaru 		region = page->qap_page->qup_page_region;
   2750  1.1   hikaru 		if (region >= nregions)
   2751  1.1   hikaru 			return EINVAL;
   2752  1.1   hikaru 
   2753  1.1   hikaru 		page->qap_region = &slice->qas_regions[region];
   2754  1.1   hikaru 		aprint_verbose_dev(sc->sc_dev,
   2755  1.1   hikaru 		    "ae %p slice %d page %d assign region %d\n",
   2756  1.1   hikaru 		    qae, qae->qae_num_slices, i, region);
   2757  1.1   hikaru 	}
   2758  1.1   hikaru 
   2759  1.1   hikaru 	qae->qae_num_slices++;
   2760  1.1   hikaru 
   2761  1.1   hikaru 	return 0;
   2762  1.1   hikaru }
   2763  1.1   hikaru 
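/*
 * qat_aefw_uof_init_ae:
 *
 *	Program the per-AE execution modes (context mode, next-neighbor
 *	mode, the two local-memory modes, shared control store and
 *	reloadable ustore) from the ui_ae_mode word of every image
 *	assigned to one of the AE's slices.
 */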
   2764  1.1   hikaru int
   2765  1.1   hikaru qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae)
   2766  1.1   hikaru {
   2767  1.1   hikaru 	struct uof_image *image;
   2768  1.1   hikaru 	struct qat_ae *qae = &(QAT_AE(sc, ae));
   2769  1.1   hikaru 	int s;
   2770  1.1   hikaru 	u_char nn_mode;
   2771  1.1   hikaru 
   2772  1.1   hikaru 	for (s = 0; s < qae->qae_num_slices; s++) {
   2773  1.1   hikaru 		if (qae->qae_slices[s].qas_image == NULL)
   2774  1.1   hikaru 			continue;
   2775  1.1   hikaru 
   2776  1.1   hikaru 		image = qae->qae_slices[s].qas_image->qui_image;
   2777  1.1   hikaru 		qat_ae_write_ctx_mode(sc, ae,
   2778  1.1   hikaru 		    __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE));
   2779  1.1   hikaru 
   2780  1.1   hikaru 		nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE);
   2781  1.1   hikaru 		if (nn_mode != AE_MODE_NN_MODE_DONTCARE)
   2782  1.1   hikaru 			qat_ae_write_nn_mode(sc, ae, nn_mode);
   2783  1.1   hikaru 
   2784  1.1   hikaru 		qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0,
   2785  1.1   hikaru 		    __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0));
   2786  1.1   hikaru 		qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1,
   2787  1.1   hikaru 		    __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1));
   2788  1.1   hikaru 
   2789  1.1   hikaru 		qat_ae_write_shared_cs_mode(sc, ae,
   2790  1.1   hikaru 		    __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE));
   2791  1.1   hikaru 		qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size,
   2792  1.1   hikaru 		    __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED),
   2793  1.1   hikaru 		    qae->qae_reloc_ustore_dram);
   2794  1.1   hikaru 	}
   2795  1.1   hikaru 
   2796  1.1   hikaru 	return 0;
   2797  1.1   hikaru }
   2798  1.1   hikaru 
   2799  1.1   hikaru int
   2800  1.1   hikaru qat_aefw_uof_init(struct qat_softc *sc)
   2801  1.1   hikaru {
   2802  1.1   hikaru 	int ae, i, error;
   2803  1.1   hikaru 	uint32_t mask;
   2804  1.1   hikaru 
   2805  1.1   hikaru 	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
   2806  1.1   hikaru 		struct qat_ae *qae;
   2807  1.1   hikaru 
   2808  1.1   hikaru 		if (!(mask & 1))
   2809  1.1   hikaru 			continue;
   2810  1.1   hikaru 
   2811  1.1   hikaru 		qae = &(QAT_AE(sc, ae));
   2812  1.1   hikaru 
   2813  1.1   hikaru 		for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
   2814  1.1   hikaru 			if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned &
   2815  1.1   hikaru 			    (1 << ae)) == 0)
   2816  1.1   hikaru 				continue;
   2817  1.1   hikaru 
   2818  1.1   hikaru 			error = qat_aefw_uof_assign_image(sc, qae,
   2819  1.1   hikaru 			    &sc->sc_aefw_uof.qafu_imgs[i]);
   2820  1.1   hikaru 			if (error)
   2821  1.1   hikaru 				return error;
   2822  1.1   hikaru 		}
   2823  1.1   hikaru 
   2824  1.1   hikaru 		/* XXX UcLo_initNumUwordUsed */
   2825  1.1   hikaru 
   2826  1.1   hikaru 		qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */
   2827  1.1   hikaru 
   2828  1.1   hikaru 		error = qat_aefw_uof_init_ae(sc, ae);
   2829  1.1   hikaru 		if (error)
   2830  1.1   hikaru 			return error;
   2831  1.1   hikaru 	}
   2832  1.1   hikaru 
   2833  1.1   hikaru 	return 0;
   2834  1.1   hikaru }
   2835  1.1   hikaru 
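/*
 * qat_aefw_load:
 *
 *	Top-level firmware load: fetch the MOF and MMP firmware files
 *	and parse the MOF, then take the authenticated SUOF path
 *	(parse and write) on parts with firmware authentication, or
 *	the plain UOF path (parse, init, write) otherwise.
 */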
   2836  1.1   hikaru int
   2837  1.1   hikaru qat_aefw_load(struct qat_softc *sc)
   2838  1.1   hikaru {
   2839  1.1   hikaru 	int error;
   2840  1.1   hikaru 
   2841  1.1   hikaru 	error = qat_aefw_load_mof(sc);
   2842  1.1   hikaru 	if (error)
   2843  1.1   hikaru 		return error;
   2844  1.1   hikaru 
   2845  1.1   hikaru 	error = qat_aefw_load_mmp(sc);
   2846  1.1   hikaru 	if (error)
   2847  1.1   hikaru 		return error;
   2848  1.1   hikaru 
   2849  1.1   hikaru 	error = qat_aefw_mof_parse(sc);
   2850  1.1   hikaru 	if (error) {
   2851  1.1   hikaru 		aprint_error_dev(sc->sc_dev, "couldn't parse mof: %d\n", error);
   2852  1.1   hikaru 		return error;
   2853  1.1   hikaru 	}
   2854  1.1   hikaru 
   2855  1.1   hikaru 	if (sc->sc_hw.qhw_fw_auth) {
   2856  1.1   hikaru 		error = qat_aefw_suof_parse(sc);
   2857  1.1   hikaru 		if (error) {
   2858  1.1   hikaru 			aprint_error_dev(sc->sc_dev, "couldn't parse suof: %d\n",
   2859  1.1   hikaru 			    error);
   2860  1.1   hikaru 			return error;
   2861  1.1   hikaru 		}
   2862  1.1   hikaru 
   2863  1.1   hikaru 		error = qat_aefw_suof_write(sc);
   2864  1.1   hikaru 		if (error) {
   2865  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   2866  1.1   hikaru 			    "could not write firmware: %d\n", error);
   2867  1.1   hikaru 			return error;
   2868  1.1   hikaru 		}
   2869  1.1   hikaru 
   2870  1.1   hikaru 	} else {
   2871  1.1   hikaru 		error = qat_aefw_uof_parse(sc);
   2872  1.1   hikaru 		if (error) {
   2873  1.1   hikaru 			aprint_error_dev(sc->sc_dev, "couldn't parse uof: %d\n",
   2874  1.1   hikaru 			    error);
   2875  1.1   hikaru 			return error;
   2876  1.1   hikaru 		}
   2877  1.1   hikaru 
   2878  1.1   hikaru 		error = qat_aefw_uof_init(sc);
   2879  1.1   hikaru 		if (error) {
   2880  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   2881  1.1   hikaru 			    "couldn't init for aefw: %d\n", error);
   2882  1.1   hikaru 			return error;
   2883  1.1   hikaru 		}
   2884  1.1   hikaru 
   2885  1.1   hikaru 		error = qat_aefw_uof_write(sc);
   2886  1.1   hikaru 		if (error) {
   2887  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   2888  1.1   hikaru 			    "Could not write firmware: %d\n", error);
   2889  1.1   hikaru 			return error;
   2890  1.1   hikaru 		}
   2891  1.1   hikaru 	}
   2892  1.1   hikaru 
   2893  1.1   hikaru 	return 0;
   2894  1.1   hikaru }
   2895  1.1   hikaru 
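/*
 * qat_aefw_start:
 *
 *	Start firmware execution.  With firmware authentication this is
 *	a single FCU start command covering all AEs; otherwise the
 *	contexts outside ctx_mask are put to sleep and the contexts in
 *	ctx_mask are enabled by hand.
 */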
   2896  1.1   hikaru int
   2897  1.1   hikaru qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask)
   2898  1.1   hikaru {
   2899  1.1   hikaru 	uint32_t fcu;
   2900  1.1   hikaru 	int retry = 0;
   2901  1.1   hikaru 
   2902  1.1   hikaru 	if (sc->sc_hw.qhw_fw_auth) {
   2903  1.1   hikaru 		qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START);
   2904  1.1   hikaru 		do {
   2905  1.1   hikaru 			delay(FW_AUTH_WAIT_PERIOD * 1000);
   2906  1.1   hikaru 			fcu = qat_cap_global_read_4(sc, FCU_STATUS);
   2907  1.1   hikaru 			if (fcu & FCU_STATUS_DONE)
   2908  1.1   hikaru 				return 0;
   2909  1.1   hikaru 		} while (retry++ < FW_AUTH_MAX_RETRY);
   2910  1.1   hikaru 
   2911  1.1   hikaru 		aprint_error_dev(sc->sc_dev,
   2912  1.1   hikaru 		    "firmware start timeout: status %08x\n", fcu);
   2913  1.1   hikaru 		return EINVAL;
   2914  1.1   hikaru 	} else {
   2915  1.1   hikaru 		qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX,
   2916  1.1   hikaru 		    CTX_WAKEUP_EVENTS_INDIRECT,
   2917  1.1   hikaru 		    CTX_WAKEUP_EVENTS_INDIRECT_SLEEP);
   2918  1.1   hikaru 		qat_ae_enable_ctx(sc, ae, ctx_mask);
   2919  1.1   hikaru 	}
   2920  1.1   hikaru 
   2921  1.1   hikaru 	return 0;
   2922  1.1   hikaru }
   2923  1.1   hikaru 
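/*
 * qat_aefw_init_memory_one:
 *
 *	Queue the initialization records of one uof_init_mem entry.
 *	Only the local-memory (LMEM) region is handled: the target AE
 *	number is parsed from the symbol name, and one batch-init entry
 *	per value attribute is queued to be flushed to the AE by the
 *	batch put-LM pass.  The other regions are not supported.
 */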
   2924  1.1   hikaru int
   2925  1.1   hikaru qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim)
   2926  1.1   hikaru {
   2927  1.1   hikaru 	struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
   2928  1.1   hikaru 	struct qat_ae_batch_init_list *qabi_list;
   2929  1.1   hikaru 	struct uof_mem_val_attr *memattr;
   2930  1.1   hikaru 	size_t *curinit;
   2931  1.1   hikaru 	u_long ael;
   2932  1.1   hikaru 	int i;
   2933  1.1   hikaru 	const char *sym;
   2934  1.1   hikaru 	char *ep;
   2935  1.1   hikaru 
   2936  1.1   hikaru 	memattr = (struct uof_mem_val_attr *)(uim + 1);
   2937  1.1   hikaru 
   2938  1.1   hikaru 	switch (uim->uim_region) {
   2939  1.1   hikaru 	case LMEM_REGION:
   2940  1.1   hikaru 		if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) {
   2941  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   2942  1.1   hikaru 			    "Invalid lmem addr or bytes\n");
   2943  1.1   hikaru 			return ENOBUFS;
   2944  1.1   hikaru 		}
   2945  1.1   hikaru 		if (uim->uim_scope != UOF_SCOPE_LOCAL)
   2946  1.1   hikaru 			return EINVAL;
   2947  1.1   hikaru 		sym = qat_aefw_uof_string(sc, uim->uim_sym_name);
   2948  1.1   hikaru 		ael = strtoul(sym, &ep, 10);
    2949  1.1   hikaru 		if (ep == sym || ael >= MAX_AE)
   2950  1.1   hikaru 			return EINVAL;
   2951  1.1   hikaru 		if ((sc->sc_ae_mask & (1 << ael)) == 0)
   2952  1.1   hikaru 			return 0; /* ae is fused out */
   2953  1.1   hikaru 
   2954  1.1   hikaru 		curinit = &qafu->qafu_num_lm_init[ael];
   2955  1.1   hikaru 		qabi_list = &qafu->qafu_lm_init[ael];
   2956  1.1   hikaru 
   2957  1.1   hikaru 		for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) {
   2958  1.1   hikaru 			struct qat_ae_batch_init *qabi;
   2959  1.1   hikaru 
   2960  1.1   hikaru 			qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init));
   2961  1.1   hikaru 			if (*curinit == 0)
   2962  1.1   hikaru 				SIMPLEQ_INIT(qabi_list);
   2963  1.1   hikaru 			SIMPLEQ_INSERT_TAIL(qabi_list, qabi, qabi_next);
   2964  1.1   hikaru 
   2965  1.1   hikaru 			qabi->qabi_ae = (u_int)ael;
   2966  1.1   hikaru 			qabi->qabi_addr =
   2967  1.1   hikaru 			    uim->uim_addr + memattr->umva_byte_offset;
   2968  1.1   hikaru 			qabi->qabi_value = &memattr->umva_value;
   2969  1.1   hikaru 			qabi->qabi_size = 4;
   2970  1.1   hikaru 			qafu->qafu_num_lm_init_inst[ael] +=
   2971  1.1   hikaru 			    qat_ae_get_inst_num(qabi->qabi_size);
   2972  1.1   hikaru 			(*curinit)++;
   2973  1.1   hikaru 			if (*curinit >= MAX_LMEM_REG) {
   2974  1.1   hikaru 				aprint_error_dev(sc->sc_dev,
   2975  1.1   hikaru 				    "Invalid lmem val attr\n");
   2976  1.1   hikaru 				return ENOBUFS;
   2977  1.1   hikaru 			}
   2978  1.1   hikaru 		}
   2979  1.1   hikaru 		break;
   2980  1.1   hikaru 	case SRAM_REGION:
   2981  1.1   hikaru 	case DRAM_REGION:
   2982  1.1   hikaru 	case DRAM1_REGION:
   2983  1.1   hikaru 	case SCRATCH_REGION:
   2984  1.1   hikaru 	case UMEM_REGION:
   2985  1.1   hikaru 		/* XXX */
   2986  1.1   hikaru 		/* fallthrough */
   2987  1.1   hikaru 	default:
   2988  1.1   hikaru 		aprint_error_dev(sc->sc_dev,
   2989  1.1   hikaru 		    "unsupported memory region to init: %d\n",
   2990  1.1   hikaru 		    uim->uim_region);
   2991  1.1   hikaru 		return ENOTSUP;
   2992  1.1   hikaru 	}
   2993  1.1   hikaru 
   2994  1.1   hikaru 	return 0;
   2995  1.1   hikaru }
   2996  1.1   hikaru 
   2997  1.1   hikaru void
   2998  1.1   hikaru qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae)
   2999  1.1   hikaru {
   3000  1.1   hikaru 	struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
   3001  1.1   hikaru 	struct qat_ae_batch_init *qabi;
   3002  1.1   hikaru 
   3003  1.1   hikaru 	while ((qabi = SIMPLEQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) {
   3004  1.1   hikaru 		SIMPLEQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next);
   3005  1.1   hikaru 		qat_free_mem(qabi);
   3006  1.1   hikaru 	}
   3007  1.1   hikaru 
   3008  1.1   hikaru 	qafu->qafu_num_lm_init[ae] = 0;
   3009  1.1   hikaru 	qafu->qafu_num_lm_init_inst[ae] = 0;
   3010  1.1   hikaru }
   3011  1.1   hikaru 
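/*
 * qat_aefw_init_ustore:
 *
 *	Initialize the parts of each assigned AE's micro-store that no
 *	default page will overwrite.  A scratch array is seeded with
 *	the image's fill pattern, the words backed by default pages are
 *	marked with a "do not init" value, and the remaining runs are
 *	then written out to the AE.
 */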
   3012  1.1   hikaru int
   3013  1.1   hikaru qat_aefw_init_ustore(struct qat_softc *sc)
   3014  1.1   hikaru {
   3015  1.1   hikaru 	uint64_t *fill;
   3016  1.1   hikaru 	uint32_t dont_init;
   3017  1.1   hikaru 	int a, i, p;
   3018  1.1   hikaru 	int error = 0;
   3019  1.1   hikaru 	int usz, end, start;
   3020  1.1   hikaru 	u_char ae, nae;
   3021  1.1   hikaru 
   3022  1.1   hikaru 	fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t));
   3023  1.1   hikaru 
   3024  1.1   hikaru 	for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) {
   3025  1.1   hikaru 		struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a];
   3026  1.1   hikaru 		struct uof_image *ui = qui->qui_image;
   3027  1.1   hikaru 
   3028  1.1   hikaru 		for (i = 0; i < MAX_USTORE; i++)
   3029  1.1   hikaru 			memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t));
   3030  1.1   hikaru 		/*
   3031  1.1   hikaru 		 * Compute do_not_init value as a value that will not be equal
   3032  1.1   hikaru 		 * to fill data when cast to an int
   3033  1.1   hikaru 		 */
   3034  1.1   hikaru 		dont_init = 0;
   3035  1.1   hikaru 		if (dont_init == (uint32_t)fill[0])
   3036  1.1   hikaru 			dont_init = 0xffffffff;
   3037  1.1   hikaru 
   3038  1.1   hikaru 		for (p = 0; p < ui->ui_num_pages; p++) {
   3039  1.1   hikaru 			struct qat_uof_page *qup = &qui->qui_pages[p];
   3040  1.1   hikaru 			if (!qup->qup_def_page)
   3041  1.1   hikaru 				continue;
   3042  1.1   hikaru 
   3043  1.1   hikaru 			for (i = qup->qup_beg_paddr;
   3044  1.1   hikaru 			    i < qup->qup_beg_paddr + qup->qup_num_micro_words;
    3045  1.1   hikaru 			    i++) {
   3046  1.1   hikaru 				fill[i] = (uint64_t)dont_init;
   3047  1.1   hikaru 			}
   3048  1.1   hikaru 		}
   3049  1.1   hikaru 
   3050  1.1   hikaru 		for (ae = 0; ae < sc->sc_ae_num; ae++) {
   3051  1.1   hikaru 			KASSERT(ae < UOF_MAX_NUM_OF_AE);
   3052  1.1   hikaru 			if ((ui->ui_ae_assigned & (1 << ae)) == 0)
   3053  1.1   hikaru 				continue;
   3054  1.1   hikaru 
   3055  1.1   hikaru 			if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) {
   3056  1.1   hikaru 				qat_ae_get_shared_ustore_ae(ae, &nae);
    3057  1.1   hikaru 				if (ui->ui_ae_assigned & (1 << nae))
   3058  1.1   hikaru 					continue;
   3059  1.1   hikaru 			}
   3060  1.1   hikaru 			usz = QAT_AE(sc, ae).qae_effect_ustore_size;
   3061  1.1   hikaru 
   3062  1.1   hikaru 			/* initialize the areas not going to be overwritten */
   3063  1.1   hikaru 			end = -1;
   3064  1.1   hikaru 			do {
   3065  1.1   hikaru 				/* find next uword that needs to be initialized */
   3066  1.1   hikaru 				for (start = end + 1; start < usz; start++) {
   3067  1.1   hikaru 					if ((uint32_t)fill[start] != dont_init)
   3068  1.1   hikaru 						break;
   3069  1.1   hikaru 				}
   3070  1.1   hikaru 				/* see if there are no more such uwords */
   3071  1.1   hikaru 				if (start >= usz)
   3072  1.1   hikaru 					break;
   3073  1.1   hikaru 				for (end = start + 1; end < usz; end++) {
   3074  1.1   hikaru 					if ((uint32_t)fill[end] == dont_init)
   3075  1.1   hikaru 						break;
   3076  1.1   hikaru 				}
   3077  1.1   hikaru 				if (QAT_AE(sc, ae).qae_shareable_ustore) {
   3078  1.1   hikaru 					error = ENOTSUP; /* XXX */
   3079  1.1   hikaru 					goto out;
   3080  1.1   hikaru 				} else {
   3081  1.1   hikaru 					error = qat_ae_ucode_write(sc, ae,
   3082  1.1   hikaru 					    start, end - start, &fill[start]);
   3083  1.1   hikaru 					if (error) {
   3084  1.1   hikaru 						goto out;
   3085  1.1   hikaru 					}
   3086  1.1   hikaru 				}
   3087  1.1   hikaru 
   3088  1.1   hikaru 			} while (end < usz);
   3089  1.1   hikaru 		}
   3090  1.1   hikaru 	}
   3091  1.1   hikaru 
   3092  1.1   hikaru out:
   3093  1.1   hikaru 	qat_free_mem(fill);
   3094  1.1   hikaru 	return error;
   3095  1.1   hikaru }
   3096  1.1   hikaru 
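/*
 * qat_aefw_init_reg:
 *
 *	Write an initial value into one AE register: relative register
 *	types are written once per context selected by ctx_mask,
 *	absolute register types once per AE.
 */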
   3097  1.1   hikaru int
   3098  1.1   hikaru qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask,
   3099  1.1   hikaru     enum aereg_type regtype, u_short regaddr, u_int value)
   3100  1.1   hikaru {
   3101  1.1   hikaru 	int error = 0;
   3102  1.1   hikaru 	u_char ctx;
   3103  1.1   hikaru 
   3104  1.1   hikaru 	switch (regtype) {
   3105  1.1   hikaru 	case AEREG_GPA_REL:
   3106  1.1   hikaru 	case AEREG_GPB_REL:
   3107  1.1   hikaru 	case AEREG_SR_REL:
   3108  1.1   hikaru 	case AEREG_SR_RD_REL:
   3109  1.1   hikaru 	case AEREG_SR_WR_REL:
   3110  1.1   hikaru 	case AEREG_DR_REL:
   3111  1.1   hikaru 	case AEREG_DR_RD_REL:
   3112  1.1   hikaru 	case AEREG_DR_WR_REL:
   3113  1.1   hikaru 	case AEREG_NEIGH_REL:
   3114  1.1   hikaru 		/* init for all valid ctx */
   3115  1.1   hikaru 		for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
   3116  1.1   hikaru 			if ((ctx_mask & (1 << ctx)) == 0)
   3117  1.1   hikaru 				continue;
    3118  1.1   hikaru 			error = qat_aereg_rel_data_write(sc, ae, ctx, regtype,
    3119  1.1   hikaru 			    regaddr, value);
			if (error)
				break;	/* report the first failure */
   3120  1.1   hikaru 		}
   3121  1.1   hikaru 		break;
   3122  1.1   hikaru 	case AEREG_GPA_ABS:
   3123  1.1   hikaru 	case AEREG_GPB_ABS:
   3124  1.1   hikaru 	case AEREG_SR_ABS:
   3125  1.1   hikaru 	case AEREG_SR_RD_ABS:
   3126  1.1   hikaru 	case AEREG_SR_WR_ABS:
   3127  1.1   hikaru 	case AEREG_DR_ABS:
   3128  1.1   hikaru 	case AEREG_DR_RD_ABS:
   3129  1.1   hikaru 	case AEREG_DR_WR_ABS:
   3130  1.1   hikaru 		error = qat_aereg_abs_data_write(sc, ae, regtype,
   3131  1.1   hikaru 		    regaddr, value);
   3132  1.1   hikaru 		break;
   3133  1.1   hikaru 	default:
   3134  1.1   hikaru 		error = EINVAL;
   3135  1.1   hikaru 		break;
   3136  1.1   hikaru 	}
   3137  1.1   hikaru 
   3138  1.1   hikaru 	return error;
   3139  1.1   hikaru }
   3140  1.1   hikaru 
   3141  1.1   hikaru int
   3142  1.1   hikaru qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae,
   3143  1.1   hikaru     struct qat_uof_image *qui)
   3144  1.1   hikaru {
    3145  1.1   hikaru 	u_int i, expres;
    3146  1.1   hikaru 	u_char ctx_mask;
	int error;
   3147  1.1   hikaru 
   3148  1.1   hikaru 	for (i = 0; i < qui->qui_num_init_reg_sym; i++) {
   3149  1.1   hikaru 		struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i];
   3150  1.1   hikaru 
   3151  1.1   hikaru 		if (uirs->uirs_value_type == EXPR_VAL) {
   3152  1.1   hikaru 			/* XXX */
   3153  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   3154  1.1   hikaru 			    "does not support initializing EXPR_VAL\n");
   3155  1.1   hikaru 			return ENOTSUP;
   3156  1.1   hikaru 		} else {
   3157  1.1   hikaru 			expres = uirs->uirs_value;
   3158  1.1   hikaru 		}
   3159  1.1   hikaru 
   3160  1.1   hikaru 		switch (uirs->uirs_init_type) {
   3161  1.1   hikaru 		case INIT_REG:
   3162  1.1   hikaru 			if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
   3163  1.1   hikaru 			    AE_MODE_CTX_MODE) == MAX_AE_CTX) {
   3164  1.1   hikaru 				ctx_mask = 0xff; /* 8-ctx mode */
   3165  1.1   hikaru 			} else {
   3166  1.1   hikaru 				ctx_mask = 0x55; /* 4-ctx mode */
   3167  1.1   hikaru 			}
    3168  1.1   hikaru 			error = qat_aefw_init_reg(sc, ae, ctx_mask,
    3169  1.1   hikaru 			    (enum aereg_type)uirs->uirs_reg_type,
    3170  1.1   hikaru 			    (u_short)uirs->uirs_addr_offset, expres);
			if (error)
				return error;
   3171  1.1   hikaru 			break;
   3172  1.1   hikaru 		case INIT_REG_CTX:
   3173  1.1   hikaru 			if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
   3174  1.1   hikaru 			    AE_MODE_CTX_MODE) == MAX_AE_CTX) {
   3175  1.1   hikaru 				ctx_mask = 0xff; /* 8-ctx mode */
   3176  1.1   hikaru 			} else {
   3177  1.1   hikaru 				ctx_mask = 0x55; /* 4-ctx mode */
   3178  1.1   hikaru 			}
   3179  1.1   hikaru 			if (((1 << uirs->uirs_ctx) & ctx_mask) == 0)
   3180  1.1   hikaru 				return EINVAL;
    3181  1.1   hikaru 			error = qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx,
    3182  1.1   hikaru 			    (enum aereg_type)uirs->uirs_reg_type,
    3183  1.1   hikaru 			    (u_short)uirs->uirs_addr_offset, expres);
			if (error)
				return error;
   3184  1.1   hikaru 			break;
   3185  1.1   hikaru 		case INIT_EXPR:
   3186  1.1   hikaru 		case INIT_EXPR_ENDIAN_SWAP:
   3187  1.1   hikaru 		default:
   3188  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   3189  1.1   hikaru 			    "does not support initializing init_type %d\n",
   3190  1.1   hikaru 			    uirs->uirs_init_type);
   3191  1.1   hikaru 			return ENOTSUP;
   3192  1.1   hikaru 		}
   3193  1.1   hikaru 	}
   3194  1.1   hikaru 
   3195  1.1   hikaru 	return 0;
   3196  1.1   hikaru }
   3197  1.1   hikaru 
   3198  1.1   hikaru int
   3199  1.1   hikaru qat_aefw_init_memory(struct qat_softc *sc)
   3200  1.1   hikaru {
   3201  1.1   hikaru 	struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
   3202  1.1   hikaru 	size_t uimsz, initmemsz = qafu->qafu_init_mem_size;
   3203  1.1   hikaru 	struct uof_init_mem *uim;
   3204  1.1   hikaru 	int error, i;
   3205  1.1   hikaru 	u_char ae;
   3206  1.1   hikaru 
   3207  1.1   hikaru 	uim = qafu->qafu_init_mem;
   3208  1.1   hikaru 	for (i = 0; i < qafu->qafu_num_init_mem; i++) {
   3209  1.1   hikaru 		uimsz = sizeof(struct uof_init_mem) +
   3210  1.1   hikaru 		    sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr;
   3211  1.1   hikaru 		if (uimsz > initmemsz) {
   3212  1.1   hikaru 			aprint_error_dev(sc->sc_dev,
   3213  1.1   hikaru 			    "invalid uof_init_mem or uof_mem_val_attr size\n");
   3214  1.1   hikaru 			return EINVAL;
   3215  1.1   hikaru 		}
   3216  1.1   hikaru 
   3217  1.1   hikaru 		if (uim->uim_num_bytes > 0) {
   3218  1.1   hikaru 			error = qat_aefw_init_memory_one(sc, uim);
   3219  1.1   hikaru 			if (error) {
   3220  1.1   hikaru 				aprint_error_dev(sc->sc_dev,
   3221  1.1   hikaru 				    "Could not init ae memory: %d\n", error);
   3222  1.1   hikaru 				return error;
   3223  1.1   hikaru 			}
   3224  1.1   hikaru 		}
   3225  1.1   hikaru 		uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz);
   3226  1.1   hikaru 		initmemsz -= uimsz;
   3227  1.1   hikaru 	}
   3228  1.1   hikaru 
   3229  1.1   hikaru 	/* run Batch put LM API */
   3230  1.1   hikaru 	for (ae = 0; ae < MAX_AE; ae++) {
   3231  1.1   hikaru 		error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae],
   3232  1.1   hikaru 		    qafu->qafu_num_lm_init_inst[ae]);
   3233  1.1   hikaru 		if (error)
   3234  1.1   hikaru 			aprint_error_dev(sc->sc_dev, "Could not put lm\n");
   3235  1.1   hikaru 
   3236  1.1   hikaru 		qat_aefw_free_lm_init(sc, ae);
   3237  1.1   hikaru 	}
   3238  1.1   hikaru 
   3239  1.1   hikaru 	error = qat_aefw_init_ustore(sc);
   3240  1.1   hikaru 
   3241  1.1   hikaru 	/* XXX run Batch put LM API */
   3242  1.1   hikaru 
   3243  1.1   hikaru 	return error;
   3244  1.1   hikaru }
   3245  1.1   hikaru 
   3246  1.1   hikaru int
   3247  1.1   hikaru qat_aefw_init_globals(struct qat_softc *sc)
   3248  1.1   hikaru {
   3249  1.1   hikaru 	struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
   3250  1.1   hikaru 	int error, i, p, s;
   3251  1.1   hikaru 	u_char ae;
   3252  1.1   hikaru 
   3253  1.1   hikaru 	/* initialize the memory segments */
   3254  1.1   hikaru 	if (qafu->qafu_num_init_mem > 0) {
   3255  1.1   hikaru 		error = qat_aefw_init_memory(sc);
   3256  1.1   hikaru 		if (error)
   3257  1.1   hikaru 			return error;
   3258  1.1   hikaru 	} else {
   3259  1.1   hikaru 		error = qat_aefw_init_ustore(sc);
   3260  1.1   hikaru 		if (error)
   3261  1.1   hikaru 			return error;
   3262  1.1   hikaru 	}
   3263  1.1   hikaru 
   3264  1.1   hikaru 	/* XXX bind import variables with ivd values */
   3265  1.1   hikaru 
    3266  1.1   hikaru 	/* XXX bind the uC global variables;
    3267  1.1   hikaru 	 * local variables will be done on-the-fly */
   3268  1.1   hikaru 	for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
   3269  1.1   hikaru 		for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) {
   3270  1.1   hikaru 			struct qat_uof_page *qup =
   3271  1.1   hikaru 			    &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p];
   3272  1.1   hikaru 			if (qup->qup_num_uw_blocks &&
   3273  1.1   hikaru 			    (qup->qup_num_uc_var || qup->qup_num_imp_var)) {
   3274  1.1   hikaru 				aprint_error_dev(sc->sc_dev,
    3275  1.1   hikaru 				    "uC global variables are not supported\n");
   3276  1.1   hikaru 				return ENOTSUP;
   3277  1.1   hikaru 			}
   3278  1.1   hikaru 		}
   3279  1.1   hikaru 	}
   3280  1.1   hikaru 
   3281  1.1   hikaru 	for (ae = 0; ae < sc->sc_ae_num; ae++) {
   3282  1.1   hikaru 		struct qat_ae *qae = &(QAT_AE(sc, ae));
   3283  1.1   hikaru 
   3284  1.1   hikaru 		for (s = 0; s < qae->qae_num_slices; s++) {
   3285  1.1   hikaru 			struct qat_ae_slice *qas = &qae->qae_slices[s];
   3286  1.1   hikaru 
   3287  1.1   hikaru 			if (qas->qas_image == NULL)
   3288  1.1   hikaru 				continue;
   3289  1.1   hikaru 
   3290  1.1   hikaru 			error =
   3291  1.1   hikaru 			    qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image);
   3292  1.1   hikaru 			if (error)
   3293  1.1   hikaru 				return error;
   3294  1.1   hikaru 		}
   3295  1.1   hikaru 	}
   3296  1.1   hikaru 
   3297  1.1   hikaru 	return 0;
   3298  1.1   hikaru }
   3299  1.1   hikaru 
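/*
 * qat_aefw_get_uof_inst:
 *
 *	Return the microword at a page-relative address by searching
 *	the page's uword blocks, or INVLD_UWORD if no block backs the
 *	address (a hole the caller fills with the fill pattern).
 */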
   3300  1.1   hikaru uint64_t
   3301  1.1   hikaru qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup,
   3302  1.1   hikaru     u_int addr)
   3303  1.1   hikaru {
   3304  1.1   hikaru 	uint64_t uinst = 0;
   3305  1.1   hikaru 	u_int i;
   3306  1.1   hikaru 
   3307  1.1   hikaru 	/* find the block */
   3308  1.1   hikaru 	for (i = 0; i < qup->qup_num_uw_blocks; i++) {
   3309  1.1   hikaru 		struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i];
   3310  1.1   hikaru 
   3311  1.1   hikaru 		if ((addr >= quub->quub_start_addr) &&
   3312  1.1   hikaru 		    (addr <= (quub->quub_start_addr +
   3313  1.1   hikaru 		    (quub->quub_num_words - 1)))) {
    3314  1.1   hikaru 			/* unpack n bytes and assign them to the 64-bit
    3315  1.1   hikaru 			 * uword value; note that the microwords are
    3316  1.1   hikaru 			 * stored as packed bytes */
   3317  1.1   hikaru 			addr -= quub->quub_start_addr;
   3318  1.1   hikaru 			addr *= AEV2_PACKED_UWORD_BYTES;
   3319  1.1   hikaru 			memcpy(&uinst,
   3320  1.1   hikaru 			    (void *)((uintptr_t)quub->quub_micro_words + addr),
   3321  1.1   hikaru 			    AEV2_PACKED_UWORD_BYTES);
   3322  1.1   hikaru 			uinst = uinst & UWORD_MASK;
   3323  1.1   hikaru 
   3324  1.1   hikaru 			return uinst;
   3325  1.1   hikaru 		}
   3326  1.1   hikaru 	}
   3327  1.1   hikaru 
   3328  1.1   hikaru 	return INVLD_UWORD;
   3329  1.1   hikaru }
   3330  1.1   hikaru 
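/*
 * qat_aefw_do_pagein:
 *
 *	Copy one UOF page into an AE's micro-store, going through a
 *	bounded copy buffer UWORD_CPYBUF_SIZE microwords at a time and
 *	plugging holes in the image with the fill pattern.  Pages that
 *	need local fixups, non-zero page regions and shareable ustore
 *	are not supported.
 */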
   3331  1.1   hikaru int
   3332  1.1   hikaru qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup)
   3333  1.1   hikaru {
   3334  1.1   hikaru 	struct qat_ae *qae = &(QAT_AE(sc, ae));
   3335  1.1   hikaru 	uint64_t fill, *ucode_cpybuf;
    3336  1.1   hikaru 	u_int i, upaddr, uraddr, ninst, cpylen;
	int error;
   3337  1.1   hikaru 
   3338  1.1   hikaru 	if (qup->qup_num_uc_var || qup->qup_num_neigh_reg ||
   3339  1.1   hikaru 	    qup->qup_num_imp_var || qup->qup_num_imp_expr) {
   3340  1.1   hikaru 		aprint_error_dev(sc->sc_dev,
   3341  1.1   hikaru 		    "does not support fixup locals\n");
   3342  1.1   hikaru 		return ENOTSUP;
   3343  1.1   hikaru 	}
   3344  1.1   hikaru 
   3345  1.1   hikaru 	ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t));
   3346  1.1   hikaru 
   3347  1.1   hikaru 	/* XXX get fill-pattern from an image -- they are all the same */
   3348  1.1   hikaru 	memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern,
   3349  1.1   hikaru 	    sizeof(uint64_t));
   3350  1.1   hikaru 
   3351  1.1   hikaru 	upaddr = qup->qup_beg_paddr;
   3352  1.1   hikaru 	uraddr = 0;
   3353  1.1   hikaru 	ninst = qup->qup_num_micro_words;
   3354  1.1   hikaru 	while (ninst > 0) {
   3355  1.1   hikaru 		cpylen = uimin(ninst, UWORD_CPYBUF_SIZE);
   3356  1.1   hikaru 
   3357  1.1   hikaru 		/* load the buffer */
   3358  1.1   hikaru 		for (i = 0; i < cpylen; i++) {
    3359  1.1   hikaru 			/* keep the code structure below in case shared
    3360  1.1   hikaru 			 * scenarios need different handling */
   3361  1.1   hikaru 			if (!qae->qae_shareable_ustore) {
   3362  1.1   hikaru 				/* qat_aefw_get_uof_inst() takes an address that
   3363  1.1   hikaru 				 * is relative to the start of the page.
   3364  1.1   hikaru 				 * So we don't need to add in the physical
   3365  1.1   hikaru 				 * offset of the page. */
   3366  1.1   hikaru 				if (qup->qup_page_region != 0) {
   3367  1.1   hikaru 					/* XXX */
   3368  1.1   hikaru 					aprint_error_dev(sc->sc_dev,
   3369  1.1   hikaru 					    "region != 0 is not supported\n");
   3370  1.1   hikaru 					qat_free_mem(ucode_cpybuf);
   3371  1.1   hikaru 					return ENOTSUP;
   3372  1.1   hikaru 				} else {
   3373  1.1   hikaru 					/* for mixing case, it should take
   3374  1.1   hikaru 					 * physical address */
   3375  1.1   hikaru 					ucode_cpybuf[i] = qat_aefw_get_uof_inst(
   3376  1.1   hikaru 					    sc, qup, upaddr + i);
   3377  1.1   hikaru 					if (ucode_cpybuf[i] == INVLD_UWORD) {
   3378  1.1   hikaru 					    /* fill hole in the uof */
   3379  1.1   hikaru 					    ucode_cpybuf[i] = fill;
   3380  1.1   hikaru 					}
   3381  1.1   hikaru 				}
   3382  1.1   hikaru 			} else {
   3383  1.1   hikaru 				/* XXX */
   3384  1.1   hikaru 				qat_free_mem(ucode_cpybuf);
   3385  1.1   hikaru 				return ENOTSUP;
   3386  1.1   hikaru 			}
   3387  1.1   hikaru 		}
   3388  1.1   hikaru 
   3389  1.1   hikaru 		/* copy the buffer to ustore */
   3390  1.1   hikaru 		if (!qae->qae_shareable_ustore) {
    3391  1.1   hikaru 			error = qat_ae_ucode_write(sc, ae, upaddr, cpylen,
    3392  1.1   hikaru 			    ucode_cpybuf);
    3393  1.1   hikaru 			if (error) {
				qat_free_mem(ucode_cpybuf);	/* do not leak the copy buffer */
    3394  1.1   hikaru 				return error;
			}
   3395  1.1   hikaru 		} else {
   3396  1.1   hikaru 			/* XXX */
   3397  1.1   hikaru 			qat_free_mem(ucode_cpybuf);
   3398  1.1   hikaru 			return ENOTSUP;
   3399  1.1   hikaru 		}
   3400  1.1   hikaru 		upaddr += cpylen;
   3401  1.1   hikaru 		uraddr += cpylen;
   3402  1.1   hikaru 		ninst -= cpylen;
   3403  1.1   hikaru 	}
   3404  1.1   hikaru 
   3405  1.1   hikaru 	qat_free_mem(ucode_cpybuf);
   3406  1.1   hikaru 
   3407  1.1   hikaru 	return 0;
   3408  1.1   hikaru }
   3409  1.1   hikaru 
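/*
 * qat_aefw_uof_write_one:
 *
 *	Write one UOF image to every AE it is assigned to: initialize
 *	the globals, page in each default page, point all assigned
 *	contexts at page 0, and set their PCs to the image entry
 *	address.
 */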
   3410  1.1   hikaru int
   3411  1.1   hikaru qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui)
   3412  1.1   hikaru {
   3413  1.1   hikaru 	struct uof_image *ui = qui->qui_image;
   3414  1.1   hikaru 	struct qat_ae_page *qap;
   3415  1.1   hikaru 	u_int s, p, c;
   3416  1.1   hikaru 	int error;
   3417  1.1   hikaru 	u_char ae, ctx_mask;
   3418  1.1   hikaru 
    3419  1.1   hikaru 	aprint_verbose_dev(sc->sc_dev,
    3420  1.1   hikaru 	    "aefw writing uof %s\n",
    3421  1.1   hikaru 	    qat_aefw_uof_string(sc, qui->qui_image->ui_name));
   3422  1.1   hikaru 
   3423  1.1   hikaru 	error = qat_aefw_init_globals(sc);
   3424  1.1   hikaru 	if (error) {
   3425  1.1   hikaru 		aprint_error_dev(sc->sc_dev,
   3426  1.1   hikaru 		    "Could not initialize globals\n");
   3427  1.1   hikaru 		return error;
   3428  1.1   hikaru 	}
   3429  1.1   hikaru 
   3430  1.1   hikaru 	if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX)
   3431  1.1   hikaru 		ctx_mask = 0xff; /* 8-ctx mode */
   3432  1.1   hikaru 	else
   3433  1.1   hikaru 		ctx_mask = 0x55; /* 4-ctx mode */
   3434  1.1   hikaru 
   3435  1.1   hikaru 	/* load the default page and set assigned CTX PC
   3436  1.1   hikaru 	 * to the entrypoint address */
   3437  1.1   hikaru 	for (ae = 0; ae < sc->sc_ae_num; ae++) {
   3438  1.1   hikaru 		struct qat_ae *qae = &(QAT_AE(sc, ae));
   3439  1.1   hikaru 		struct qat_ae_slice *qas;
   3440  1.1   hikaru 		u_int metadata;
   3441  1.1   hikaru 
   3442  1.1   hikaru 		KASSERT(ae < UOF_MAX_NUM_OF_AE);
   3443  1.1   hikaru 
   3444  1.1   hikaru 		if ((ui->ui_ae_assigned & (1 << ae)) == 0)
   3445  1.1   hikaru 			continue;
   3446  1.1   hikaru 
   3447  1.1   hikaru 		/* find the slice to which this image is assigned */
   3448  1.1   hikaru 		for (s = 0; s < qae->qae_num_slices; s++) {
   3449  1.1   hikaru 			qas = &qae->qae_slices[s];
   3450  1.1   hikaru 			if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask)
   3451  1.1   hikaru 				break;
   3452  1.1   hikaru 		}
   3453  1.1   hikaru 		if (s >= qae->qae_num_slices)
   3454  1.1   hikaru 			continue;
   3455  1.1   hikaru 
   3456  1.1   hikaru 		qas = &qae->qae_slices[s];
   3457  1.1   hikaru 
   3458  1.1   hikaru 		for (p = 0; p < ui->ui_num_pages; p++) {
   3459  1.1   hikaru 			qap = &qas->qas_pages[p];
   3460  1.1   hikaru 
   3461  1.1   hikaru 			/* Only load pages loaded by default */
   3462  1.1   hikaru 			if (!qap->qap_page->qup_def_page)
   3463  1.1   hikaru 				continue;
   3464  1.1   hikaru 
   3465  1.1   hikaru 			error = qat_aefw_do_pagein(sc, ae, qap->qap_page);
   3466  1.1   hikaru 			if (error)
   3467  1.1   hikaru 				return error;
   3468  1.1   hikaru 		}
   3469  1.1   hikaru 
   3470  1.1   hikaru 		metadata = qas->qas_image->qui_image->ui_app_metadata;
   3471  1.1   hikaru 		if (metadata != 0xffffffff) {
   3472  1.1   hikaru 			aprint_normal_dev(sc->sc_dev,
   3473  1.1   hikaru 			    "loaded firmware: %s\n",
   3474  1.1   hikaru 			    qat_aefw_uof_string(sc, metadata));
   3475  1.1   hikaru 		}
   3476  1.1   hikaru 
   3477  1.1   hikaru 		/* Assume starting page is page 0 */
   3478  1.1   hikaru 		qap = &qas->qas_pages[0];
   3479  1.1   hikaru 		for (c = 0; c < MAX_AE_CTX; c++) {
   3480  1.1   hikaru 			if (ctx_mask & (1 << c))
   3481  1.1   hikaru 				qas->qas_cur_pages[c] = qap;
   3482  1.1   hikaru 			else
   3483  1.1   hikaru 				qas->qas_cur_pages[c] = NULL;
   3484  1.1   hikaru 		}
   3485  1.1   hikaru 
   3486  1.1   hikaru 		/* set the live context */
   3487  1.1   hikaru 		qae->qae_live_ctx_mask = ui->ui_ctx_assigned;
   3488  1.1   hikaru 
   3489  1.1   hikaru 		/* set context PC to the image entrypoint address */
   3490  1.1   hikaru 		error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned,
   3491  1.1   hikaru 		    ui->ui_entry_address);
   3492  1.1   hikaru 		if (error)
   3493  1.1   hikaru 			return error;
   3494  1.1   hikaru 	}
   3495  1.1   hikaru 
   3496  1.1   hikaru 	/* XXX store the checksum for convenience */
   3497  1.1   hikaru 
   3498  1.1   hikaru 	return 0;
   3499  1.1   hikaru }
   3500  1.1   hikaru 
   3501  1.1   hikaru int
   3502  1.1   hikaru qat_aefw_uof_write(struct qat_softc *sc)
   3503  1.1   hikaru {
   3504  1.1   hikaru 	int error = 0;
   3505  1.1   hikaru 	int i;
   3506  1.1   hikaru 
   3507  1.1   hikaru 	for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
   3508  1.1   hikaru 		error = qat_aefw_uof_write_one(sc,
   3509  1.1   hikaru 		    &sc->sc_aefw_uof.qafu_imgs[i]);
   3510  1.1   hikaru 		if (error)
   3511  1.1   hikaru 			break;
   3512  1.1   hikaru 	}
   3513  1.1   hikaru 
   3514  1.1   hikaru 	/* XXX UcLo_computeFreeUstore */
   3515  1.1   hikaru 
   3516  1.1   hikaru 	return error;
   3517  1.1   hikaru }
   3518