/*	$NetBSD: if_mcx.c,v 1.21 2021/09/26 15:01:55 jmcneill Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.101 2021/06/02 19:16:11 patrick Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg (at) openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.21 2021/09/26 15:01:55 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/pcq.h>
#include <sys/cpu.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_vlanvar.h>
#include <net/toeplitz.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* TODO: Port kstat key/value stuff to evcnt/sysmon */
#define	NKSTAT		0

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define	MCX_MPSAFE	1 */
/* #define	CALLOUT_FLAGS	CALLOUT_MPSAFE */
/* #else */
#define	CALLOUT_FLAGS	0
/* #endif */

#define	MCX_TXQ_NUM		2048

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define  MCX_FW_VER_MAJOR(_v)			((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)			((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define  MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define  MCX_CMDIF(_v)				((_v) >> 16)
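/*
 * The firmware version registers pack three numbers into two dwords:
 * major/minor in MCX_FW_VER, subminor and the command interface
 * revision in MCX_CMDIF_FW_SUBVER.  For example, a MCX_FW_VER read of
 * 0x001b000e decodes as major 14 (0x000e) and minor 27 (0x001b), which
 * combines with the subminor to print as "14.27.xxxx".
 */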

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE			7
#define MCX_LOG_CQ_SIZE			12
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

#define MCX_MAX_QUEUES			16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER		\
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
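/*
 * Worked example: MCX_LOG_CQ_SIZE is 12, so the counter above is
 * ((1 << 11) * 9) / 10 = 1843; the device coalesces completion events
 * until either the moderation period expires or 1843 completions have
 * accumulated, whichever comes first.
 */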

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT		\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS		\
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
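/*
 * A sketch of the arithmetic, assuming the 64-byte SQ entry and
 * 16-byte segment this driver uses: each slot then carries
 * MCX_SQ_SEGS_PER_SLOT = 64 / 16 = 4 segments, so a packet spanning
 * MCX_SQ_ENTRY_MAX_SLOTS slots can use 1 + (4 - 1) * 4 = 13 DMA
 * segments, the first slot losing space to the ctrl/eth headers.
 */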

#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS		\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE		18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE		MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE		0
#define MCX_CQ_DOORBELL_STRIDE		64

#define MCX_WQ_DOORBELL_BASE		(MCX_PAGE_SIZE / 2)
#define MCX_WQ_DOORBELL_STRIDE		64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);
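/*
 * Together these place all CQ doorbells in the first half of the
 * doorbell page and all WQ doorbells in the second half; the
 * CTASSERTs above guarantee MCX_MAX_QUEUES of each fit without
 * overlapping.
 */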

#define MCX_WQ_DOORBELL_MASK		0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018
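/*
 * A rough sketch of the init-time handshake implied by these
 * registers: the driver reads MCX_CMDQ_ADDR_LO to learn the command
 * queue entry stride and size, writes the physical address of its
 * command queue memory back through MCX_CMDQ_ADDR_HI/LO, and then
 * posts commands by setting the per-slot bit in MCX_CMDQ_DOORBELL.
 */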

#define MCX_STATE			0x01fc
#define MCX_STATE_MASK				(1U << 31)
#define MCX_STATE_INITIALIZING			(1 << 31)
#define MCX_STATE_READY				(0 << 31)
#define MCX_STATE_INTERFACE_MASK		(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER		(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED		(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP			0x900a /* mgmt temp */
#define MCX_REG_MCIA			0x9014
#define MCX_REG_MCAM			0x907f

#define MCX_ETHER_CAP_SGMII		0
#define MCX_ETHER_CAP_1000_KX		1
#define MCX_ETHER_CAP_10G_CX4		2
#define MCX_ETHER_CAP_10G_KX4		3
#define MCX_ETHER_CAP_10G_KR		4
#define MCX_ETHER_CAP_20G_KR2		5
#define MCX_ETHER_CAP_40G_CR4		6
#define MCX_ETHER_CAP_40G_KR4		7
#define MCX_ETHER_CAP_56G_R4		8
#define MCX_ETHER_CAP_10G_CR		12
#define MCX_ETHER_CAP_10G_SR		13
#define MCX_ETHER_CAP_10G_LR		14
#define MCX_ETHER_CAP_40G_SR4		15
#define MCX_ETHER_CAP_40G_LR4		16
#define MCX_ETHER_CAP_50G_SR2		18
#define MCX_ETHER_CAP_100G_CR4		20
#define MCX_ETHER_CAP_100G_SR4		21
#define MCX_ETHER_CAP_100G_KR4		22
#define MCX_ETHER_CAP_100G_LR4		23
#define MCX_ETHER_CAP_100_TX		24
#define MCX_ETHER_CAP_1000_T		25
#define MCX_ETHER_CAP_10G_T		26
#define MCX_ETHER_CAP_25G_CR		27
#define MCX_ETHER_CAP_25G_KR		28
#define MCX_ETHER_CAP_25G_SR		29
#define MCX_ETHER_CAP_50G_CR2		30
#define MCX_ETHER_CAP_50G_KR2		31

#define MCX_MAX_CQE			32

#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT 0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

#define MCX_CMDQ_INLINE_DATASIZE	16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
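/*
 * A command is issued by filling out one of these entries in the
 * command queue: inputs up to MCX_CMDQ_INLINE_DATASIZE bytes go in
 * cq_input_data and anything larger is chained through cq_input_ptr
 * mailboxes; the driver then flips ownership to MCX_CQ_STATUS_OWN_HW,
 * rings the command doorbell, and waits for the device to hand the
 * entry back with MCX_CQ_STATUS_OWN_SW and one of the status codes
 * above.
 */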

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
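/*
 * Mailboxes are chained: each 1KB-aligned mailbox carries 512 bytes
 * of payload, its position in the chain in mb_block_number, and the
 * physical address of the next mailbox in mb_next_ptr (zero for the
 * last one).  The token is expected to match the token of the command
 * entry that references the chain.
 */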
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t			ppcnt_swid;
	uint8_t			ppcnt_local_port;
	uint8_t			ppcnt_pnat;
	uint8_t			ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023		0x00
#define MCX_REG_PPCNT_GRP_RFC2863		0x01
#define MCX_REG_PPCNT_GRP_RFC2819		0x02
#define MCX_REG_PPCNT_GRP_RFC3635		0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
#define MCX_REG_PPCNT_GRP_PER_TC		0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11

	uint8_t			ppcnt_clr;
	uint8_t			ppcnt_reserved1[2];
	uint8_t			ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR			(1 << 7)

	uint8_t			ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);
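/*
 * ppcnt_counter_set is an opaque 248-byte area whose layout depends
 * on ppcnt_grp; the enums below name the 64-bit counters for the
 * groups this driver reads, and the CTASSERTs pin each group's
 * expected size.
 */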

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t			_reserved1[1];
	uint8_t			mcam_feature_group;
	uint8_t			_reserved2[1];
	uint8_t			mcam_access_reg_group;
	uint8_t			_reserved3[4];
	uint8_t			mcam_access_reg_cap_mask[16];
	uint8_t			_reserved4[16];
	uint8_t			mcam_feature_cap_mask[16];
	uint8_t			_reserved5[16];
} __packed __aligned(4);

#define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6

struct mcx_reg_mtcap {
	uint8_t			_reserved1[3];
	uint8_t			mtcap_sensor_count;
	uint8_t			_reserved2[4];

	uint64_t		mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t			_reserved1[2];
	uint16_t		mtmp_sensor_index;

	uint8_t			_reserved2[2];
	uint16_t		mtmp_temperature;

	uint16_t		mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE		(1 << 15)
#define MCX_REG_MTMP_MTR		(1 << 14)
	uint16_t		mtmp_max_temperature;

	uint16_t		mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE		(0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE	(1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE	(2 << 14)
	uint16_t		mtmp_temperature_threshold_hi;

	uint8_t			_reserved3[2];
	uint16_t		mtmp_temperature_threshold_lo;

	uint8_t			_reserved4[4];

	uint8_t			mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);
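/*
 * mtmp_temperature and the thresholds appear to be signed values in
 * units of 0.125 degrees Celsius, so e.g. a reading of 336 (0x0150)
 * would correspond to 42C.  This matches how other mlx5 drivers scale
 * MTMP; it is an assumption here rather than something this file
 * spells out.
 */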

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
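/*
 * ISSI negotiation: QUERY_ISSI returns the device's current level
 * inline and a bitmask of supported levels in the mailbox above; the
 * driver checks that the level it implements (MCX_ISSI) is in the
 * mask and installs it with SET_ISSI below.
 */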

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	int32_t			cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
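/*
 * Page handoff protocol: cmd_num_pages in the QUERY_PAGES reply is
 * signed.  A positive count asks the driver to donate that many
 * MCX_PAGE_SIZE pages via MANAGE_PAGES (ALLOC_SUCCESS, with the page
 * addresses in mailboxes); a negative count means the device has
 * pages to give back (HCA_RETURN_PAGES).
 */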

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
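/*
 * 0x1000 bytes of capabilities split across 512-byte mailboxes makes
 * MCX_HCA_CAP_NMAILBOXES 8; the capability structures below are read
 * out of (and written back through) such a chain.
 */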

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
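/*
 * Each use of __reserved__ below expands to a uniquely named field
 * (__reserved0, __reserved1, ...) via __COUNTER__ where available,
 * falling back to __LINE__, so reserved padding never needs to be
 * numbered by hand.
 */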

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
#define MCX_CAP_DEVICE_MCAM_REG		0x40
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH	0x01
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t 		flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f

	uint8_t			flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
					0x1f
	uint8_t			log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
					0x1f

	uint8_t			log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN		0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
					0x1f
	uint8_t			log_max_mq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
					0x1f

	uint8_t			log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
					0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
					0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
					0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
					0x1f
	uint8_t			log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
					0x1f
	uint8_t			log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
					0x1f
	uint8_t			log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
					0x1f

	uint8_t			__reserved__[4];

	uint32_t		create_qp_start_hint; /* 24 bits */

	uint8_t			log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX	0x1f
	uint8_t			log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM	0x1f
	uint16_t		max_num_eqs;

	uint8_t			log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE	0x1f
	uint8_t			__reserved__[1];
	uint16_t		log_uar_page_sz;

	uint8_t			__reserved__[8];

	uint32_t		device_frequency_mhz;
	uint32_t		device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
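/*
 * The offset CTASSERTs above pin this layout against the PRM, so a
 * miscounted __reserved__ field shows up as a build failure rather
 * than as silently misread capabilities.
 */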

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_STATE_SHIFT		8
#define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED		0x9
#define MCX_EQ_CTX_STATE_FIRED		0xa
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);
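/*
 * The consumer counter above is what the driver writes back through
 * the MCX_UAR_EQ_DOORBELL/MCX_UAR_EQ_DOORBELL_ARM registers, together
 * with the EQ number, to acknowledge events and re-arm the queue.
 */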

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
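/*
 * Event entries start out as MCX_EQ_ENTRY_OWNER_INIT and the producer
 * flips eq_owner on each pass through the ring, so the consumer
 * decides whether an entry is valid by comparing the owner bit
 * against the pass parity derived from its consumer counter rather
 * than by a separate index.
 */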

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ	2
#define MCX_TIR_CTX_HASH_SHIFT		28
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6	(1 << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t			cmd_reserved0[20];
	uint16_t		cmd_reserved1;
	uint16_t		cmd_rqt_max_size;
	uint16_t		cmd_reserved2;
	uint16_t		cmd_rqt_actual_size;
	uint8_t			cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t			cmd_reserved0[16];
	struct mcx_rqt_ctx	cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);
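/*
 * cq_period_max_count packs the moderation settings: the period lives
 * above MCX_CQ_CTX_PERIOD_SHIFT and the maximum CQE count in the low
 * half, which is where MCX_CQ_MOD_PERIOD and MCX_CQ_MOD_COUNTER from
 * above end up.
 */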
   1564 
   1565 struct mcx_cmd_create_cq_in {
   1566 	uint16_t		cmd_opcode;
   1567 	uint8_t			cmd_reserved0[4];
   1568 	uint16_t		cmd_op_mod;
   1569 	uint8_t			cmd_reserved1[8];
   1570 } __packed __aligned(4);
   1571 
   1572 struct mcx_cmd_create_cq_mb_in {
   1573 	struct mcx_cq_ctx	cmd_cq_ctx;
   1574 	uint8_t			cmd_reserved1[192];
   1575 } __packed __aligned(4);
   1576 
   1577 struct mcx_cmd_create_cq_out {
   1578 	uint8_t			cmd_status;
   1579 	uint8_t			cmd_reserved0[3];
   1580 	uint32_t		cmd_syndrome;
   1581 	uint32_t		cmd_cqn;
   1582 	uint8_t			cmd_reserved1[4];
   1583 } __packed __aligned(4);
   1584 
   1585 struct mcx_cmd_destroy_cq_in {
   1586 	uint16_t		cmd_opcode;
   1587 	uint8_t			cmd_reserved0[4];
   1588 	uint16_t		cmd_op_mod;
   1589 	uint32_t		cmd_cqn;
   1590 	uint8_t			cmd_reserved1[4];
   1591 } __packed __aligned(4);
   1592 
   1593 struct mcx_cmd_destroy_cq_out {
   1594 	uint8_t			cmd_status;
   1595 	uint8_t			cmd_reserved0[3];
   1596 	uint32_t		cmd_syndrome;
   1597 	uint8_t			cmd_reserved1[8];
   1598 } __packed __aligned(4);
   1599 
   1600 struct mcx_cmd_query_cq_in {
   1601 	uint16_t		cmd_opcode;
   1602 	uint8_t			cmd_reserved0[4];
   1603 	uint16_t		cmd_op_mod;
   1604 	uint32_t		cmd_cqn;
   1605 	uint8_t			cmd_reserved1[4];
   1606 } __packed __aligned(4);
   1607 
   1608 struct mcx_cmd_query_cq_out {
   1609 	uint8_t			cmd_status;
   1610 	uint8_t			cmd_reserved0[3];
   1611 	uint32_t		cmd_syndrome;
   1612 	uint8_t			cmd_reserved1[8];
   1613 } __packed __aligned(4);
   1614 
   1615 struct mcx_cq_entry {
    1616 	uint32_t		cq_reserved0;
   1617 	uint32_t		cq_lro;
   1618 	uint32_t		cq_lro_ack_seq_num;
   1619 	uint32_t		cq_rx_hash;
   1620 	uint8_t			cq_rx_hash_type;
   1621 	uint8_t			cq_ml_path;
    1622 	uint16_t		cq_reserved1;
   1623 	uint32_t		cq_checksum;
    1624 	uint32_t		cq_reserved2;
   1625 	uint32_t		cq_flags;
   1626 #define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
   1627 #define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
   1628 #define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
   1629 #define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
   1630 #define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)
   1631 
   1632 	uint32_t		cq_lro_srqn;
    1633 	uint32_t		cq_reserved3[2];
   1634 	uint32_t		cq_byte_cnt;
   1635 	uint64_t		cq_timestamp;
   1636 	uint8_t			cq_rx_drops;
   1637 	uint8_t			cq_flow_tag[3];
   1638 	uint16_t		cq_wqe_count;
   1639 	uint8_t			cq_signature;
   1640 	uint8_t			cq_opcode_owner;
   1641 #define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
   1642 #define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
   1643 #define MCX_CQ_ENTRY_FORMAT_SHIFT		2
   1644 #define MCX_CQ_ENTRY_OPCODE_SHIFT		4
   1645 
   1646 #define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
   1647 #define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
   1648 #define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
   1649 #define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3
   1650 
   1651 #define MCX_CQ_ENTRY_OPCODE_REQ			0
   1652 #define MCX_CQ_ENTRY_OPCODE_SEND		2
   1653 #define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
   1654 #define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
   1655 #define MCX_CQ_ENTRY_OPCODE_INVALID		15
   1656 
   1657 } __packed __aligned(4);
   1658 
   1659 CTASSERT(sizeof(struct mcx_cq_entry) == 64);
   1660 
   1661 struct mcx_cq_doorbell {
   1662 	uint32_t		 db_update_ci;
   1663 	uint32_t		 db_arm_ci;
   1664 #define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
   1665 #define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
   1666 #define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
   1667 } __packed __aligned(8);
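
         /*
          * Illustrative sketch (not from the driver): arming a completion
          * queue writes the consumer index into db_update_ci, and the same
          * index together with a 2-bit arm sequence number into db_arm_ci,
          * roughly:
          *
          *	db->db_update_ci = htobe32(ci & MCX_CQ_DOORBELL_ARM_CI_MASK);
          *	db->db_arm_ci = htobe32(
          *	    ((sn & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
          *	    (ci & MCX_CQ_DOORBELL_ARM_CI_MASK));
          *
          * "db", "sn" and "ci" are assumed locals; mcx_arm_cq() below is
          * the authoritative version.
          */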
   1668 
   1669 struct mcx_wq_ctx {
   1670 	uint8_t			 wq_type;
   1671 #define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
   1672 #define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
   1673 	uint8_t			 wq_reserved0[5];
   1674 	uint16_t		 wq_lwm;
   1675 	uint32_t		 wq_pd;
   1676 	uint32_t		 wq_uar_page;
   1677 	uint64_t		 wq_doorbell;
   1678 	uint32_t		 wq_hw_counter;
   1679 	uint32_t		 wq_sw_counter;
   1680 	uint16_t		 wq_log_stride;
   1681 	uint8_t			 wq_log_page_sz;
   1682 	uint8_t			 wq_log_size;
   1683 	uint8_t			 wq_reserved1[156];
   1684 } __packed __aligned(4);
   1685 
   1686 CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
   1687 
   1688 struct mcx_sq_ctx {
   1689 	uint32_t		sq_flags;
   1690 #define MCX_SQ_CTX_RLKEY			(1U << 31)
   1691 #define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
   1692 #define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
   1693 #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
   1694 #define MCX_SQ_CTX_STATE_SHIFT			20
   1695 #define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
   1696 #define MCX_SQ_CTX_STATE_RST			0
   1697 #define MCX_SQ_CTX_STATE_RDY			1
   1698 #define MCX_SQ_CTX_STATE_ERR			3
   1699 	uint32_t		sq_user_index;
   1700 	uint32_t		sq_cqn;
   1701 	uint32_t		sq_reserved1[5];
   1702 	uint32_t		sq_tis_lst_sz;
   1703 #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
   1704 	uint32_t		sq_reserved2[2];
   1705 	uint32_t		sq_tis_num;
   1706 	struct mcx_wq_ctx	sq_wq;
   1707 } __packed __aligned(4);
   1708 
   1709 struct mcx_sq_entry_seg {
   1710 	uint32_t		sqs_byte_count;
   1711 	uint32_t		sqs_lkey;
   1712 	uint64_t		sqs_addr;
   1713 } __packed __aligned(4);
   1714 
   1715 struct mcx_sq_entry {
   1716 	/* control segment */
   1717 	uint32_t		sqe_opcode_index;
   1718 #define MCX_SQE_WQE_INDEX_SHIFT			8
   1719 #define MCX_SQE_WQE_OPCODE_NOP			0x00
   1720 #define MCX_SQE_WQE_OPCODE_SEND			0x0a
   1721 	uint32_t		sqe_ds_sq_num;
   1722 #define MCX_SQE_SQ_NUM_SHIFT			8
   1723 	uint32_t		sqe_signature;
   1724 #define MCX_SQE_SIGNATURE_SHIFT			24
   1725 #define MCX_SQE_SOLICITED_EVENT			0x02
   1726 #define MCX_SQE_CE_CQE_ON_ERR			0x00
   1727 #define MCX_SQE_CE_CQE_FIRST_ERR		0x04
   1728 #define MCX_SQE_CE_CQE_ALWAYS			0x08
   1729 #define MCX_SQE_CE_CQE_SOLICIT			0x0C
   1730 #define MCX_SQE_FM_NO_FENCE			0x00
   1731 #define MCX_SQE_FM_SMALL_FENCE			0x40
   1732 	uint32_t		sqe_mkey;
   1733 
   1734 	/* ethernet segment */
   1735 	uint32_t		sqe_reserved1;
   1736 	uint32_t		sqe_mss_csum;
    1737 #define MCX_SQE_L4_CSUM				(1U << 31)
   1738 #define MCX_SQE_L3_CSUM				(1 << 30)
   1739 	uint32_t		sqe_reserved2;
   1740 	uint16_t		sqe_inline_header_size;
   1741 	uint16_t		sqe_inline_headers[9];
   1742 
   1743 	/* data segment */
   1744 	struct mcx_sq_entry_seg sqe_segs[1];
   1745 } __packed __aligned(64);
   1746 
   1747 CTASSERT(sizeof(struct mcx_sq_entry) == 64);
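
         /*
          * Sketch (assumed locals, byte order per the driver's htobe32
          * use): the control segment of a SEND entry is built roughly as
          *
          *	sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
          *	    ((prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
          *	sqe->sqe_ds_sq_num = htobe32((sqn << MCX_SQE_SQ_NUM_SHIFT) |
          *	    nds);
          *
          * where "prod" is the producer counter, "sqn" the send queue
          * number and "nds" the number of 16-byte segments in the WQE;
          * mcx_start() is the authoritative version.
          */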
   1748 
   1749 struct mcx_cmd_create_sq_in {
   1750 	uint16_t		cmd_opcode;
   1751 	uint8_t			cmd_reserved0[4];
   1752 	uint16_t		cmd_op_mod;
   1753 	uint8_t			cmd_reserved1[8];
   1754 } __packed __aligned(4);
   1755 
   1756 struct mcx_cmd_create_sq_out {
   1757 	uint8_t			cmd_status;
   1758 	uint8_t			cmd_reserved0[3];
   1759 	uint32_t		cmd_syndrome;
   1760 	uint32_t		cmd_sqn;
   1761 	uint8_t			cmd_reserved1[4];
   1762 } __packed __aligned(4);
   1763 
   1764 struct mcx_cmd_modify_sq_in {
   1765 	uint16_t		cmd_opcode;
   1766 	uint8_t			cmd_reserved0[4];
   1767 	uint16_t		cmd_op_mod;
   1768 	uint32_t		cmd_sq_state;
   1769 	uint8_t			cmd_reserved1[4];
   1770 } __packed __aligned(4);
   1771 
   1772 struct mcx_cmd_modify_sq_mb_in {
   1773 	uint32_t		cmd_modify_hi;
   1774 	uint32_t		cmd_modify_lo;
   1775 	uint8_t			cmd_reserved0[8];
   1776 	struct mcx_sq_ctx	cmd_sq_ctx;
   1777 } __packed __aligned(4);
   1778 
   1779 struct mcx_cmd_modify_sq_out {
   1780 	uint8_t			cmd_status;
   1781 	uint8_t			cmd_reserved0[3];
   1782 	uint32_t		cmd_syndrome;
   1783 	uint8_t			cmd_reserved1[8];
   1784 } __packed __aligned(4);
   1785 
   1786 struct mcx_cmd_destroy_sq_in {
   1787 	uint16_t		cmd_opcode;
   1788 	uint8_t			cmd_reserved0[4];
   1789 	uint16_t		cmd_op_mod;
   1790 	uint32_t		cmd_sqn;
   1791 	uint8_t			cmd_reserved1[4];
   1792 } __packed __aligned(4);
   1793 
   1794 struct mcx_cmd_destroy_sq_out {
   1795 	uint8_t			cmd_status;
   1796 	uint8_t			cmd_reserved0[3];
   1797 	uint32_t		cmd_syndrome;
   1798 	uint8_t			cmd_reserved1[8];
   1799 } __packed __aligned(4);
   1800 
   1801 
   1802 struct mcx_rq_ctx {
   1803 	uint32_t		rq_flags;
   1804 #define MCX_RQ_CTX_RLKEY			(1U << 31)
   1805 #define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
   1806 #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
   1807 #define MCX_RQ_CTX_STATE_SHIFT			20
   1808 #define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
   1809 #define MCX_RQ_CTX_STATE_RST			0
   1810 #define MCX_RQ_CTX_STATE_RDY			1
   1811 #define MCX_RQ_CTX_STATE_ERR			3
   1812 #define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
   1813 	uint32_t		rq_user_index;
   1814 	uint32_t		rq_cqn;
   1815 	uint32_t		rq_reserved1;
   1816 	uint32_t		rq_rmpn;
   1817 	uint32_t		rq_reserved2[7];
   1818 	struct mcx_wq_ctx	rq_wq;
   1819 } __packed __aligned(4);
   1820 
   1821 struct mcx_rq_entry {
   1822 	uint32_t		rqe_byte_count;
   1823 	uint32_t		rqe_lkey;
   1824 	uint64_t		rqe_addr;
   1825 } __packed __aligned(16);
   1826 
   1827 struct mcx_cmd_create_rq_in {
   1828 	uint16_t		cmd_opcode;
   1829 	uint8_t			cmd_reserved0[4];
   1830 	uint16_t		cmd_op_mod;
   1831 	uint8_t			cmd_reserved1[8];
   1832 } __packed __aligned(4);
   1833 
   1834 struct mcx_cmd_create_rq_out {
   1835 	uint8_t			cmd_status;
   1836 	uint8_t			cmd_reserved0[3];
   1837 	uint32_t		cmd_syndrome;
   1838 	uint32_t		cmd_rqn;
   1839 	uint8_t			cmd_reserved1[4];
   1840 } __packed __aligned(4);
   1841 
   1842 struct mcx_cmd_modify_rq_in {
   1843 	uint16_t		cmd_opcode;
   1844 	uint8_t			cmd_reserved0[4];
   1845 	uint16_t		cmd_op_mod;
   1846 	uint32_t		cmd_rq_state;
   1847 	uint8_t			cmd_reserved1[4];
   1848 } __packed __aligned(4);
   1849 
   1850 struct mcx_cmd_modify_rq_mb_in {
   1851 	uint32_t		cmd_modify_hi;
   1852 	uint32_t		cmd_modify_lo;
   1853 	uint8_t			cmd_reserved0[8];
   1854 	struct mcx_rq_ctx	cmd_rq_ctx;
   1855 } __packed __aligned(4);
   1856 
   1857 struct mcx_cmd_modify_rq_out {
   1858 	uint8_t			cmd_status;
   1859 	uint8_t			cmd_reserved0[3];
   1860 	uint32_t		cmd_syndrome;
   1861 	uint8_t			cmd_reserved1[8];
   1862 } __packed __aligned(4);
   1863 
   1864 struct mcx_cmd_destroy_rq_in {
   1865 	uint16_t		cmd_opcode;
   1866 	uint8_t			cmd_reserved0[4];
   1867 	uint16_t		cmd_op_mod;
   1868 	uint32_t		cmd_rqn;
   1869 	uint8_t			cmd_reserved1[4];
   1870 } __packed __aligned(4);
   1871 
   1872 struct mcx_cmd_destroy_rq_out {
   1873 	uint8_t			cmd_status;
   1874 	uint8_t			cmd_reserved0[3];
   1875 	uint32_t		cmd_syndrome;
   1876 	uint8_t			cmd_reserved1[8];
   1877 } __packed __aligned(4);
   1878 
   1879 struct mcx_cmd_create_flow_table_in {
   1880 	uint16_t		cmd_opcode;
   1881 	uint8_t			cmd_reserved0[4];
   1882 	uint16_t		cmd_op_mod;
   1883 	uint8_t			cmd_reserved1[8];
   1884 } __packed __aligned(4);
   1885 
   1886 struct mcx_flow_table_ctx {
   1887 	uint8_t			ft_miss_action;
   1888 	uint8_t			ft_level;
   1889 	uint8_t			ft_reserved0;
   1890 	uint8_t			ft_log_size;
   1891 	uint32_t		ft_table_miss_id;
   1892 	uint8_t			ft_reserved1[28];
   1893 } __packed __aligned(4);
   1894 
   1895 struct mcx_cmd_create_flow_table_mb_in {
   1896 	uint8_t			cmd_table_type;
   1897 	uint8_t			cmd_reserved0[7];
   1898 	struct mcx_flow_table_ctx cmd_ctx;
   1899 } __packed __aligned(4);
   1900 
   1901 struct mcx_cmd_create_flow_table_out {
   1902 	uint8_t			cmd_status;
   1903 	uint8_t			cmd_reserved0[3];
   1904 	uint32_t		cmd_syndrome;
   1905 	uint32_t		cmd_table_id;
   1906 	uint8_t			cmd_reserved1[4];
   1907 } __packed __aligned(4);
   1908 
   1909 struct mcx_cmd_destroy_flow_table_in {
   1910 	uint16_t		cmd_opcode;
   1911 	uint8_t			cmd_reserved0[4];
   1912 	uint16_t		cmd_op_mod;
   1913 	uint8_t			cmd_reserved1[8];
   1914 } __packed __aligned(4);
   1915 
   1916 struct mcx_cmd_destroy_flow_table_mb_in {
   1917 	uint8_t			cmd_table_type;
   1918 	uint8_t			cmd_reserved0[3];
   1919 	uint32_t		cmd_table_id;
   1920 	uint8_t			cmd_reserved1[40];
   1921 } __packed __aligned(4);
   1922 
   1923 struct mcx_cmd_destroy_flow_table_out {
   1924 	uint8_t			cmd_status;
   1925 	uint8_t			cmd_reserved0[3];
   1926 	uint32_t		cmd_syndrome;
   1927 	uint8_t			cmd_reserved1[8];
   1928 } __packed __aligned(4);
   1929 
   1930 struct mcx_cmd_set_flow_table_root_in {
   1931 	uint16_t		cmd_opcode;
   1932 	uint8_t			cmd_reserved0[4];
   1933 	uint16_t		cmd_op_mod;
   1934 	uint8_t			cmd_reserved1[8];
   1935 } __packed __aligned(4);
   1936 
   1937 struct mcx_cmd_set_flow_table_root_mb_in {
   1938 	uint8_t			cmd_table_type;
   1939 	uint8_t			cmd_reserved0[3];
   1940 	uint32_t		cmd_table_id;
   1941 	uint8_t			cmd_reserved1[56];
   1942 } __packed __aligned(4);
   1943 
   1944 struct mcx_cmd_set_flow_table_root_out {
   1945 	uint8_t			cmd_status;
   1946 	uint8_t			cmd_reserved0[3];
   1947 	uint32_t		cmd_syndrome;
   1948 	uint8_t			cmd_reserved1[8];
   1949 } __packed __aligned(4);
   1950 
   1951 struct mcx_flow_match {
   1952 	/* outer headers */
   1953 	uint8_t			mc_src_mac[6];
   1954 	uint16_t		mc_ethertype;
   1955 	uint8_t			mc_dest_mac[6];
   1956 	uint16_t		mc_first_vlan;
   1957 	uint8_t			mc_ip_proto;
   1958 	uint8_t			mc_ip_dscp_ecn;
   1959 	uint8_t			mc_vlan_flags;
   1960 #define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
   1961 	uint8_t			mc_tcp_flags;
   1962 	uint16_t		mc_tcp_sport;
   1963 	uint16_t		mc_tcp_dport;
   1964 	uint32_t		mc_reserved0;
   1965 	uint16_t		mc_udp_sport;
   1966 	uint16_t		mc_udp_dport;
   1967 	uint8_t			mc_src_ip[16];
   1968 	uint8_t			mc_dest_ip[16];
   1969 
   1970 	/* misc parameters */
   1971 	uint8_t			mc_reserved1[8];
   1972 	uint16_t		mc_second_vlan;
   1973 	uint8_t			mc_reserved2[2];
   1974 	uint8_t			mc_second_vlan_flags;
   1975 	uint8_t			mc_reserved3[15];
   1976 	uint32_t		mc_outer_ipv6_flow_label;
   1977 	uint8_t			mc_reserved4[32];
   1978 
   1979 	uint8_t			mc_reserved[384];
   1980 } __packed __aligned(4);
   1981 
   1982 CTASSERT(sizeof(struct mcx_flow_match) == 512);
   1983 
   1984 struct mcx_cmd_create_flow_group_in {
   1985 	uint16_t		cmd_opcode;
   1986 	uint8_t			cmd_reserved0[4];
   1987 	uint16_t		cmd_op_mod;
   1988 	uint8_t			cmd_reserved1[8];
   1989 } __packed __aligned(4);
   1990 
   1991 struct mcx_cmd_create_flow_group_mb_in {
   1992 	uint8_t			cmd_table_type;
   1993 	uint8_t			cmd_reserved0[3];
   1994 	uint32_t		cmd_table_id;
   1995 	uint8_t			cmd_reserved1[4];
   1996 	uint32_t		cmd_start_flow_index;
   1997 	uint8_t			cmd_reserved2[4];
   1998 	uint32_t		cmd_end_flow_index;
   1999 	uint8_t			cmd_reserved3[23];
   2000 	uint8_t			cmd_match_criteria_enable;
   2001 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
   2002 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
   2003 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
   2004 	struct mcx_flow_match	cmd_match_criteria;
   2005 	uint8_t			cmd_reserved4[448];
   2006 } __packed __aligned(4);
   2007 
   2008 struct mcx_cmd_create_flow_group_out {
   2009 	uint8_t			cmd_status;
   2010 	uint8_t			cmd_reserved0[3];
   2011 	uint32_t		cmd_syndrome;
   2012 	uint32_t		cmd_group_id;
   2013 	uint8_t			cmd_reserved1[4];
   2014 } __packed __aligned(4);
   2015 
   2016 struct mcx_flow_ctx {
   2017 	uint8_t			fc_reserved0[4];
   2018 	uint32_t		fc_group_id;
   2019 	uint32_t		fc_flow_tag;
   2020 	uint32_t		fc_action;
   2021 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
   2022 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
   2023 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
   2024 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
   2025 	uint32_t		fc_dest_list_size;
   2026 	uint32_t		fc_counter_list_size;
   2027 	uint8_t			fc_reserved1[40];
   2028 	struct mcx_flow_match	fc_match_value;
   2029 	uint8_t			fc_reserved2[192];
   2030 } __packed __aligned(4);
   2031 
   2032 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
   2033 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
   2034 
   2035 struct mcx_cmd_destroy_flow_group_in {
   2036 	uint16_t		cmd_opcode;
   2037 	uint8_t			cmd_reserved0[4];
   2038 	uint16_t		cmd_op_mod;
   2039 	uint8_t			cmd_reserved1[8];
   2040 } __packed __aligned(4);
   2041 
   2042 struct mcx_cmd_destroy_flow_group_mb_in {
   2043 	uint8_t			cmd_table_type;
   2044 	uint8_t			cmd_reserved0[3];
   2045 	uint32_t		cmd_table_id;
   2046 	uint32_t		cmd_group_id;
   2047 	uint8_t			cmd_reserved1[36];
   2048 } __packed __aligned(4);
   2049 
   2050 struct mcx_cmd_destroy_flow_group_out {
   2051 	uint8_t			cmd_status;
   2052 	uint8_t			cmd_reserved0[3];
   2053 	uint32_t		cmd_syndrome;
   2054 	uint8_t			cmd_reserved1[8];
   2055 } __packed __aligned(4);
   2056 
   2057 struct mcx_cmd_set_flow_table_entry_in {
   2058 	uint16_t		cmd_opcode;
   2059 	uint8_t			cmd_reserved0[4];
   2060 	uint16_t		cmd_op_mod;
   2061 	uint8_t			cmd_reserved1[8];
   2062 } __packed __aligned(4);
   2063 
   2064 struct mcx_cmd_set_flow_table_entry_mb_in {
   2065 	uint8_t			cmd_table_type;
   2066 	uint8_t			cmd_reserved0[3];
   2067 	uint32_t		cmd_table_id;
   2068 	uint32_t		cmd_modify_enable_mask;
   2069 	uint8_t			cmd_reserved1[4];
   2070 	uint32_t		cmd_flow_index;
   2071 	uint8_t			cmd_reserved2[28];
   2072 	struct mcx_flow_ctx	cmd_flow_ctx;
   2073 } __packed __aligned(4);
   2074 
   2075 struct mcx_cmd_set_flow_table_entry_out {
   2076 	uint8_t			cmd_status;
   2077 	uint8_t			cmd_reserved0[3];
   2078 	uint32_t		cmd_syndrome;
   2079 	uint8_t			cmd_reserved1[8];
   2080 } __packed __aligned(4);
   2081 
   2082 struct mcx_cmd_query_flow_table_entry_in {
   2083 	uint16_t		cmd_opcode;
   2084 	uint8_t			cmd_reserved0[4];
   2085 	uint16_t		cmd_op_mod;
   2086 	uint8_t			cmd_reserved1[8];
   2087 } __packed __aligned(4);
   2088 
   2089 struct mcx_cmd_query_flow_table_entry_mb_in {
   2090 	uint8_t			cmd_table_type;
   2091 	uint8_t			cmd_reserved0[3];
   2092 	uint32_t		cmd_table_id;
   2093 	uint8_t			cmd_reserved1[8];
   2094 	uint32_t		cmd_flow_index;
   2095 	uint8_t			cmd_reserved2[28];
   2096 } __packed __aligned(4);
   2097 
   2098 struct mcx_cmd_query_flow_table_entry_out {
   2099 	uint8_t			cmd_status;
   2100 	uint8_t			cmd_reserved0[3];
   2101 	uint32_t		cmd_syndrome;
   2102 	uint8_t			cmd_reserved1[8];
   2103 } __packed __aligned(4);
   2104 
   2105 struct mcx_cmd_query_flow_table_entry_mb_out {
   2106 	uint8_t			cmd_reserved0[48];
   2107 	struct mcx_flow_ctx	cmd_flow_ctx;
   2108 } __packed __aligned(4);
   2109 
   2110 struct mcx_cmd_delete_flow_table_entry_in {
   2111 	uint16_t		cmd_opcode;
   2112 	uint8_t			cmd_reserved0[4];
   2113 	uint16_t		cmd_op_mod;
   2114 	uint8_t			cmd_reserved1[8];
   2115 } __packed __aligned(4);
   2116 
   2117 struct mcx_cmd_delete_flow_table_entry_mb_in {
   2118 	uint8_t			cmd_table_type;
   2119 	uint8_t			cmd_reserved0[3];
   2120 	uint32_t		cmd_table_id;
   2121 	uint8_t			cmd_reserved1[8];
   2122 	uint32_t		cmd_flow_index;
   2123 	uint8_t			cmd_reserved2[28];
   2124 } __packed __aligned(4);
   2125 
   2126 struct mcx_cmd_delete_flow_table_entry_out {
   2127 	uint8_t			cmd_status;
   2128 	uint8_t			cmd_reserved0[3];
   2129 	uint32_t		cmd_syndrome;
   2130 	uint8_t			cmd_reserved1[8];
   2131 } __packed __aligned(4);
   2132 
   2133 struct mcx_cmd_query_flow_group_in {
   2134 	uint16_t		cmd_opcode;
   2135 	uint8_t			cmd_reserved0[4];
   2136 	uint16_t		cmd_op_mod;
   2137 	uint8_t			cmd_reserved1[8];
   2138 } __packed __aligned(4);
   2139 
   2140 struct mcx_cmd_query_flow_group_mb_in {
   2141 	uint8_t			cmd_table_type;
   2142 	uint8_t			cmd_reserved0[3];
   2143 	uint32_t		cmd_table_id;
   2144 	uint32_t		cmd_group_id;
   2145 	uint8_t			cmd_reserved1[36];
   2146 } __packed __aligned(4);
   2147 
   2148 struct mcx_cmd_query_flow_group_out {
   2149 	uint8_t			cmd_status;
   2150 	uint8_t			cmd_reserved0[3];
   2151 	uint32_t		cmd_syndrome;
   2152 	uint8_t			cmd_reserved1[8];
   2153 } __packed __aligned(4);
   2154 
   2155 struct mcx_cmd_query_flow_group_mb_out {
   2156 	uint8_t			cmd_reserved0[12];
   2157 	uint32_t		cmd_start_flow_index;
   2158 	uint8_t			cmd_reserved1[4];
   2159 	uint32_t		cmd_end_flow_index;
   2160 	uint8_t			cmd_reserved2[20];
   2161 	uint32_t		cmd_match_criteria_enable;
   2162 	uint8_t			cmd_match_criteria[512];
   2163 	uint8_t			cmd_reserved4[448];
   2164 } __packed __aligned(4);
   2165 
   2166 struct mcx_cmd_query_flow_table_in {
   2167 	uint16_t		cmd_opcode;
   2168 	uint8_t			cmd_reserved0[4];
   2169 	uint16_t		cmd_op_mod;
   2170 	uint8_t			cmd_reserved1[8];
   2171 } __packed __aligned(4);
   2172 
   2173 struct mcx_cmd_query_flow_table_mb_in {
   2174 	uint8_t			cmd_table_type;
   2175 	uint8_t			cmd_reserved0[3];
   2176 	uint32_t		cmd_table_id;
   2177 	uint8_t			cmd_reserved1[40];
   2178 } __packed __aligned(4);
   2179 
   2180 struct mcx_cmd_query_flow_table_out {
   2181 	uint8_t			cmd_status;
   2182 	uint8_t			cmd_reserved0[3];
   2183 	uint32_t		cmd_syndrome;
   2184 	uint8_t			cmd_reserved1[8];
   2185 } __packed __aligned(4);
   2186 
   2187 struct mcx_cmd_query_flow_table_mb_out {
   2188 	uint8_t			cmd_reserved0[4];
   2189 	struct mcx_flow_table_ctx cmd_ctx;
   2190 } __packed __aligned(4);
   2191 
   2192 struct mcx_cmd_alloc_flow_counter_in {
   2193 	uint16_t		cmd_opcode;
   2194 	uint8_t			cmd_reserved0[4];
   2195 	uint16_t		cmd_op_mod;
   2196 	uint8_t			cmd_reserved1[8];
   2197 } __packed __aligned(4);
   2198 
   2199 struct mcx_cmd_query_rq_in {
   2200 	uint16_t		cmd_opcode;
   2201 	uint8_t			cmd_reserved0[4];
   2202 	uint16_t		cmd_op_mod;
   2203 	uint32_t		cmd_rqn;
   2204 	uint8_t			cmd_reserved1[4];
   2205 } __packed __aligned(4);
   2206 
   2207 struct mcx_cmd_query_rq_out {
   2208 	uint8_t			cmd_status;
   2209 	uint8_t			cmd_reserved0[3];
   2210 	uint32_t		cmd_syndrome;
   2211 	uint8_t			cmd_reserved1[8];
   2212 } __packed __aligned(4);
   2213 
   2214 struct mcx_cmd_query_rq_mb_out {
   2215 	uint8_t			cmd_reserved0[16];
   2216 	struct mcx_rq_ctx	cmd_ctx;
    2217 } __packed __aligned(4);
   2218 
   2219 struct mcx_cmd_query_sq_in {
   2220 	uint16_t		cmd_opcode;
   2221 	uint8_t			cmd_reserved0[4];
   2222 	uint16_t		cmd_op_mod;
   2223 	uint32_t		cmd_sqn;
   2224 	uint8_t			cmd_reserved1[4];
   2225 } __packed __aligned(4);
   2226 
   2227 struct mcx_cmd_query_sq_out {
   2228 	uint8_t			cmd_status;
   2229 	uint8_t			cmd_reserved0[3];
   2230 	uint32_t		cmd_syndrome;
   2231 	uint8_t			cmd_reserved1[8];
   2232 } __packed __aligned(4);
   2233 
   2234 struct mcx_cmd_query_sq_mb_out {
   2235 	uint8_t			cmd_reserved0[16];
   2236 	struct mcx_sq_ctx	cmd_ctx;
    2237 } __packed __aligned(4);
   2238 
   2239 struct mcx_cmd_alloc_flow_counter_out {
   2240 	uint8_t			cmd_status;
   2241 	uint8_t			cmd_reserved0[3];
   2242 	uint32_t		cmd_syndrome;
   2243 	uint8_t			cmd_reserved1[2];
   2244 	uint16_t		cmd_flow_counter_id;
   2245 	uint8_t			cmd_reserved2[4];
   2246 } __packed __aligned(4);
   2247 
   2248 struct mcx_wq_doorbell {
   2249 	uint32_t		 db_recv_counter;
   2250 	uint32_t		 db_send_counter;
   2251 } __packed __aligned(8);
   2252 
   2253 struct mcx_dmamem {
   2254 	bus_dmamap_t		 mxm_map;
   2255 	bus_dma_segment_t	 mxm_seg;
   2256 	int			 mxm_nsegs;
   2257 	size_t			 mxm_size;
   2258 	void			*mxm_kva;
   2259 };
   2260 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
   2261 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
   2262 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
   2263 #define MCX_DMA_OFF(_mxm, _off)	((void *)((char *)(_mxm)->mxm_kva + (_off)))
   2264 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
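
         /*
          * Usage example: memory obtained from mcx_dmamem_alloc() is
          * accessed by the CPU through MCX_DMA_KVA()/MCX_DMA_OFF() and
          * handed to the device by bus address, as in
          *
          *	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
          *
          * in mcx_attach() below.
          */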
   2265 
   2266 struct mcx_hwmem {
   2267 	bus_dmamap_t		 mhm_map;
   2268 	bus_dma_segment_t	*mhm_segs;
   2269 	unsigned int		 mhm_seg_count;
   2270 	unsigned int		 mhm_npages;
   2271 };
   2272 
   2273 struct mcx_slot {
   2274 	bus_dmamap_t		 ms_map;
   2275 	struct mbuf		*ms_m;
   2276 };
   2277 
   2278 struct mcx_eq {
   2279 	int			 eq_n;
   2280 	uint32_t		 eq_cons;
   2281 	struct mcx_dmamem	 eq_mem;
   2282 };
   2283 
   2284 struct mcx_cq {
   2285 	int			 cq_n;
   2286 	struct mcx_dmamem	 cq_mem;
   2287 	bus_addr_t		 cq_doorbell;
   2288 	uint32_t		 cq_cons;
   2289 	uint32_t		 cq_count;
   2290 };
   2291 
   2292 struct mcx_calibration {
   2293 	uint64_t		 c_timestamp;	/* previous mcx chip time */
   2294 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
   2295 	uint64_t		 c_tbase;	/* mcx chip time */
   2296 	uint64_t		 c_ubase;	/* kernel nanouptime */
   2297 	uint64_t		 c_ratio;
   2298 };
   2299 
   2300 #define MCX_CALIBRATE_FIRST    2
   2301 #define MCX_CALIBRATE_NORMAL   32
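
         /*
          * Conceptual sketch (scaling/shift factors elided): with two
          * samples of (chip timestamp, kernel nanouptime), c_ratio
          * approximates delta(uptime) / delta(timestamp), so a device
          * timestamp t converts to kernel time as roughly
          *
          *	c_ubase + (t - c_tbase) * c_ratio
          */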
   2302 
   2303 struct mcx_rxring {
   2304 	u_int			 rxr_total;
   2305 	u_int			 rxr_inuse;
   2306 };
   2307 
   2308 MBUFQ_HEAD(mcx_mbufq);
   2309 
   2310 struct mcx_rx {
   2311 	struct mcx_softc	*rx_softc;
   2312 
   2313 	int			 rx_rqn;
   2314 	struct mcx_dmamem	 rx_rq_mem;
   2315 	struct mcx_slot		*rx_slots;
   2316 	bus_addr_t		 rx_doorbell;
   2317 
   2318 	uint32_t		 rx_prod;
   2319 	callout_t		 rx_refill;
   2320 	struct mcx_rxring	 rx_rxr;
   2321 } __aligned(64);
   2322 
   2323 struct mcx_tx {
   2324 	struct mcx_softc	*tx_softc;
   2325 	kmutex_t		 tx_lock;
   2326 	pcq_t			*tx_pcq;
   2327 	void			*tx_softint;
   2328 
   2329 	int			 tx_uar;
   2330 	int			 tx_sqn;
   2331 	struct mcx_dmamem	 tx_sq_mem;
   2332 	struct mcx_slot		*tx_slots;
   2333 	bus_addr_t		 tx_doorbell;
   2334 	int			 tx_bf_offset;
   2335 
   2336 	uint32_t		 tx_cons;
   2337 	uint32_t		 tx_prod;
   2338 } __aligned(64);
   2339 
   2340 struct mcx_queues {
   2341 	void			*q_ihc;
   2342 	struct mcx_softc	*q_sc;
   2343 	int			 q_uar;
   2344 	int			 q_index;
   2345 	struct mcx_rx		 q_rx;
   2346 	struct mcx_tx		 q_tx;
   2347 	struct mcx_cq		 q_cq;
   2348 	struct mcx_eq		 q_eq;
   2349 #if NKSTAT > 0
   2350 	struct kstat		*q_kstat;
   2351 #endif
   2352 };
   2353 
   2354 struct mcx_flow_group {
   2355 	int			 g_id;
   2356 	int			 g_table;
   2357 	int			 g_start;
   2358 	int			 g_size;
   2359 };
   2360 
   2361 #define MCX_FLOW_GROUP_PROMISC		0
   2362 #define MCX_FLOW_GROUP_ALLMULTI		1
   2363 #define MCX_FLOW_GROUP_MAC		2
   2364 #define MCX_FLOW_GROUP_RSS_L4		3
   2365 #define MCX_FLOW_GROUP_RSS_L3		4
   2366 #define MCX_FLOW_GROUP_RSS_NONE		5
   2367 #define	MCX_NUM_FLOW_GROUPS		6
   2368 
    2369 #define MCX_HASH_SEL_L3		(MCX_TIR_CTX_HASH_SEL_SRC_IP | \
    2370 				MCX_TIR_CTX_HASH_SEL_DST_IP)
    2371 #define MCX_HASH_SEL_L4		(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
    2372 				MCX_TIR_CTX_HASH_SEL_DPORT)
    2373 
    2374 #define MCX_RSS_HASH_SEL_V4_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
    2375 				MCX_TIR_CTX_HASH_SEL_IPV4)
    2376 #define MCX_RSS_HASH_SEL_V6_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
    2377 				MCX_TIR_CTX_HASH_SEL_IPV6)
    2378 #define MCX_RSS_HASH_SEL_V4_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
    2379 				MCX_TIR_CTX_HASH_SEL_IPV4)
    2380 #define MCX_RSS_HASH_SEL_V6_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
    2381 				MCX_TIR_CTX_HASH_SEL_IPV6)
    2382 #define MCX_RSS_HASH_SEL_V4	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4)
    2383 #define MCX_RSS_HASH_SEL_V6	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6)
   2384 
   2385 /*
   2386  * There are a few different pieces involved in configuring RSS.
   2387  * A Receive Queue Table (RQT) is the indirection table that maps packets to
   2388  * different rx queues based on a hash value.  We only create one, because
   2389  * we want to scatter any traffic we can apply RSS to across all our rx
   2390  * queues.  Anything else will only be delivered to the first rx queue,
   2391  * which doesn't require an RQT.
   2392  *
   2393  * A Transport Interface Receive (TIR) delivers packets to either a single rx
   2394  * queue or an RQT, and in the latter case, specifies the set of fields
   2395  * hashed, the hash function, and the hash key.  We need one of these for each
   2396  * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
   2397  * and one for non-RSS traffic.
   2398  *
   2399  * Flow tables hold flow table entries in sequence.  The first entry that
   2400  * matches a packet is applied, sending the packet to either another flow
   2401  * table or a TIR.  We use one flow table to select packets based on
   2402  * destination MAC address, and a second to apply RSS.  The entries in the
   2403  * first table send matching packets to the second, and the entries in the
   2404  * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
   2405  *
   2406  * The flow table entry that delivers packets to an RSS TIR must include match
   2407  * criteria that ensure packets delivered to the TIR include all the fields
   2408  * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
   2409  * only accept v4 TCP packets.  Accordingly, we need flow table entries for
   2410  * each TIR.
   2411  *
    2412  * All of this is a lot more flexible than we need, and we can describe
    2413  * most of what we need with a simple array.
    2414  *
    2415  * An RSS config creates a TIR with hashing enabled on a set of fields,
    2416  * pointing to either the first rx queue or the RQT containing all the
    2417  * rx queues, and a flow table entry that matches on an ethertype and
    2418  * optionally an ip proto, and delivers matching packets to the TIR.
   2419  */
   2420 static struct mcx_rss_rule {
   2421 	int			hash_sel;
   2422 	int			flow_group;
   2423 	int			ethertype;
   2424 	int			ip_proto;
   2425 } mcx_rss_config[] = {
   2426 	/* udp and tcp for v4/v6 */
   2427 	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
   2428 	  ETHERTYPE_IP, IPPROTO_TCP },
   2429 	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
   2430 	  ETHERTYPE_IPV6, IPPROTO_TCP },
   2431 	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
   2432 	  ETHERTYPE_IP, IPPROTO_UDP },
   2433 	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
   2434 	  ETHERTYPE_IPV6, IPPROTO_UDP },
   2435 
   2436 	/* other v4/v6 */
   2437 	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
   2438 	  ETHERTYPE_IP, 0 },
   2439 	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
   2440 	  ETHERTYPE_IPV6, 0 },
   2441 
   2442 	/* non v4/v6 */
   2443 	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
   2444 };
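
         /*
          * Illustrative sketch (not the actual init code; error handling
          * elided, and the exact argument conventions are assumptions):
          * the table above can be walked to create one TIR per rule and
          * one RSS flow table entry delivering to it, roughly:
          *
          *	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
          *		struct mcx_rss_rule *rule = &mcx_rss_config[i];
          *		if (rule->hash_sel != 0)
          *			mcx_create_tir_indirect(sc, sc->sc_rqt,
          *			    rule->hash_sel, &sc->sc_tir[i]);
          *		else
          *			mcx_create_tir_direct(sc,
          *			    &sc->sc_queues[0].q_rx, &sc->sc_tir[i]);
          *		mcx_set_flow_table_entry_proto(sc, rule->flow_group,
          *		    i, rule->ethertype, rule->ip_proto,
          *		    sc->sc_tir[i] | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
          *	}
          */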
   2445 
   2446 struct mcx_softc {
   2447 	device_t		 sc_dev;
   2448 	struct ethercom		 sc_ec;
   2449 	struct ifmedia		 sc_media;
   2450 	uint64_t		 sc_media_status;
   2451 	uint64_t		 sc_media_active;
   2452 	kmutex_t		 sc_media_mutex;
   2453 
   2454 	pci_chipset_tag_t	 sc_pc;
   2455 	pci_intr_handle_t	*sc_intrs;
   2456 	void			*sc_ihc;
   2457 	pcitag_t		 sc_tag;
   2458 
   2459 	bus_dma_tag_t		 sc_dmat;
   2460 	bus_space_tag_t		 sc_memt;
   2461 	bus_space_handle_t	 sc_memh;
   2462 	bus_size_t		 sc_mems;
   2463 
   2464 	struct mcx_dmamem	 sc_cmdq_mem;
   2465 	unsigned int		 sc_cmdq_mask;
   2466 	unsigned int		 sc_cmdq_size;
   2467 
   2468 	unsigned int		 sc_cmdq_token;
   2469 
   2470 	struct mcx_hwmem	 sc_boot_pages;
   2471 	struct mcx_hwmem	 sc_init_pages;
   2472 	struct mcx_hwmem	 sc_regular_pages;
   2473 
   2474 	int			 sc_uar;
   2475 	int			 sc_pd;
   2476 	int			 sc_tdomain;
   2477 	uint32_t		 sc_lkey;
   2478 	int			 sc_tis;
   2479 	int			 sc_tir[__arraycount(mcx_rss_config)];
   2480 	int			 sc_rqt;
   2481 
   2482 	struct mcx_dmamem	 sc_doorbell_mem;
   2483 
   2484 	struct mcx_eq		 sc_admin_eq;
   2485 	struct mcx_eq		 sc_queue_eq;
   2486 
   2487 	int			 sc_hardmtu;
   2488 	int			 sc_rxbufsz;
   2489 
   2490 	int			 sc_bf_size;
   2491 	int			 sc_max_rqt_size;
   2492 
   2493 	struct workqueue	*sc_workq;
   2494 	struct work		 sc_port_change;
   2495 
   2496 	int			 sc_mac_flow_table_id;
   2497 	int			 sc_rss_flow_table_id;
   2498 	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
   2499 	int			 sc_promisc_flow_enabled;
   2500 	int			 sc_allmulti_flow_enabled;
   2501 	int			 sc_mcast_flow_base;
   2502 	int			 sc_extra_mcast;
   2503 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
   2504 
   2505 	struct mcx_calibration	 sc_calibration[2];
   2506 	unsigned int		 sc_calibration_gen;
   2507 	callout_t		 sc_calibrate;
   2508 	uint32_t		 sc_mhz;
   2509 	uint32_t		 sc_khz;
   2510 
   2511 	struct mcx_queues	*sc_queues;
   2512 	unsigned int		 sc_nqueues;
   2513 
   2514 	int			 sc_mcam_reg;
   2515 
   2516 #if NKSTAT > 0
   2517 	struct kstat		*sc_kstat_ieee8023;
   2518 	struct kstat		*sc_kstat_rfc2863;
   2519 	struct kstat		*sc_kstat_rfc2819;
   2520 	struct kstat		*sc_kstat_rfc3635;
   2521 	unsigned int		 sc_kstat_mtmp_count;
   2522 	struct kstat		**sc_kstat_mtmp;
   2523 #endif
   2524 
   2525 	struct timecounter	 sc_timecounter;
   2526 };
   2527 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
   2528 
   2529 static int	mcx_match(device_t, cfdata_t, void *);
   2530 static void	mcx_attach(device_t, device_t, void *);
   2531 
   2532 static void *	mcx_establish_intr(struct mcx_softc *, int, kcpuset_t *,
   2533 		    int (*)(void *), void *, const char *);
   2534 
   2535 static void	mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
   2536 static u_int	mcx_rxr_get(struct mcx_rxring *, u_int);
   2537 static void	mcx_rxr_put(struct mcx_rxring *, u_int);
   2538 static u_int	mcx_rxr_inuse(struct mcx_rxring *);
   2539 
   2540 #if NKSTAT > 0
   2541 static void	mcx_kstat_attach(struct mcx_softc *);
   2542 #endif
   2543 
   2544 static void	mcx_timecounter_attach(struct mcx_softc *);
   2545 
   2546 static int	mcx_version(struct mcx_softc *);
   2547 static int	mcx_init_wait(struct mcx_softc *);
   2548 static int	mcx_enable_hca(struct mcx_softc *);
   2549 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
   2550 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
   2551 		    int);
   2552 static int	mcx_issi(struct mcx_softc *);
   2553 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
   2554 static int	mcx_hca_max_caps(struct mcx_softc *);
   2555 static int	mcx_hca_set_caps(struct mcx_softc *);
   2556 static int	mcx_init_hca(struct mcx_softc *);
   2557 static int	mcx_set_driver_version(struct mcx_softc *);
   2558 static int	mcx_iff(struct mcx_softc *);
   2559 static int	mcx_alloc_uar(struct mcx_softc *, int *);
   2560 static int	mcx_alloc_pd(struct mcx_softc *);
   2561 static int	mcx_alloc_tdomain(struct mcx_softc *);
   2562 static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
   2563 		    uint64_t, int);
   2564 static int	mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
   2565 static int	mcx_query_special_contexts(struct mcx_softc *);
   2566 static int	mcx_set_port_mtu(struct mcx_softc *, int);
   2567 static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
   2568 		    int);
   2569 static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
   2570 static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
   2571 		    int);
   2572 static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
   2573 static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
   2574 static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
   2575 static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
   2576 static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
   2577 static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
   2578 		    int *);
   2579 static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
   2580 		    int *);
   2581 static int	mcx_destroy_tir(struct mcx_softc *, int);
   2582 static int	mcx_create_tis(struct mcx_softc *, int *);
   2583 static int	mcx_destroy_tis(struct mcx_softc *, int);
   2584 static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
   2585 static int	mcx_destroy_rqt(struct mcx_softc *, int);
   2586 static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
   2587 static int	mcx_set_flow_table_root(struct mcx_softc *, int);
   2588 static int	mcx_destroy_flow_table(struct mcx_softc *, int);
   2589 static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
   2590 		    int, int, struct mcx_flow_match *);
   2591 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
   2592 static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
   2593 		    const uint8_t *, uint32_t);
   2594 static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
   2595 		    int, int, uint32_t);
   2596 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
   2597 
   2598 #if NKSTAT > 0
   2599 static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
   2600 static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
   2601 static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
   2602 static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
   2603 #endif
   2604 
   2605 #if 0
   2606 static int	mcx_dump_flow_table(struct mcx_softc *, int);
   2607 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
   2608 static int	mcx_dump_flow_group(struct mcx_softc *, int);
   2609 #endif
   2610 
   2611 
   2612 /*
   2613 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
   2614 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
   2615 */
   2616 static void	mcx_refill(void *);
   2617 static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
   2618 		    struct mcx_cq_entry *, struct mcx_mbufq *,
   2619 		    const struct mcx_calibration *);
   2620 static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
   2621 		    struct mcx_cq_entry *);
   2622 static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
   2623 		    struct mcx_cq *);
   2624 
   2625 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
   2626 static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
   2627 static int	mcx_admin_intr(void *);
   2628 static int	mcx_cq_intr(void *);
   2629 
   2630 static int	mcx_init(struct ifnet *);
   2631 static void	mcx_stop(struct ifnet *, int);
   2632 static int	mcx_ioctl(struct ifnet *, u_long, void *);
   2633 static void	mcx_start(struct ifnet *);
   2634 static int	mcx_transmit(struct ifnet *, struct mbuf *);
   2635 static void	mcx_deferred_transmit(void *);
   2636 static void	mcx_watchdog(struct ifnet *);
   2637 static void	mcx_media_add_types(struct mcx_softc *);
   2638 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
   2639 static int	mcx_media_change(struct ifnet *);
   2640 #if 0
   2641 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
   2642 #endif
   2643 static void	mcx_port_change(struct work *, void *);
   2644 
   2645 static void	mcx_calibrate_first(struct mcx_softc *);
   2646 static void	mcx_calibrate(void *);
   2647 
   2648 static inline uint32_t
   2649 		mcx_rd(struct mcx_softc *, bus_size_t);
   2650 static inline void
   2651 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
   2652 static inline void
   2653 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
   2654 
   2655 static uint64_t	mcx_timer(struct mcx_softc *);
   2656 
   2657 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
   2658 		    bus_size_t, u_int align);
   2659 static void	mcx_dmamem_zero(struct mcx_dmamem *);
   2660 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
   2661 
   2662 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
   2663 		    unsigned int);
   2664 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
   2665 
   2666 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
   2667 
   2668 static const struct {
   2669 	pci_vendor_id_t		vendor;
   2670 	pci_product_id_t	product;
   2671 } mcx_devices[] = {
   2672 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
   2673 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
   2674 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
   2675 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
   2676 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
   2677 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
   2678 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
   2679 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
   2680 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
   2681 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892  },
   2682 };
   2683 
   2684 struct mcx_eth_proto_capability {
   2685 	uint64_t	cap_media;
   2686 	uint64_t	cap_baudrate;
   2687 };
   2688 
   2689 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
   2690 	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
   2691 	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
   2692 	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
   2693 	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
   2694 	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
   2695 	[MCX_ETHER_CAP_20G_KR2]		= { IFM_20G_KR2,	IF_Gbps(20) },
   2696 	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
   2697 	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
   2698 	[MCX_ETHER_CAP_56G_R4]		= { IFM_56G_R4,		IF_Gbps(56) },
   2699 	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_CR1,	IF_Gbps(10) },
   2700 	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
   2701 	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
   2702 	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
   2703 	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
   2704 	[MCX_ETHER_CAP_50G_SR2]		= { IFM_50G_SR2,	IF_Gbps(50) },
   2705 	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
   2706 	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
   2707 	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
   2708 	[MCX_ETHER_CAP_100G_LR4]	= { IFM_100G_LR4,	IF_Gbps(100) },
   2709 	[MCX_ETHER_CAP_100_TX]		= { IFM_100_TX,		IF_Mbps(100) },
   2710 	[MCX_ETHER_CAP_1000_T]		= { IFM_1000_T,		IF_Gbps(1) },
   2711 	[MCX_ETHER_CAP_10G_T]		= { IFM_10G_T,		IF_Gbps(10) },
   2712 	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
   2713 	[MCX_ETHER_CAP_25G_KR]		= { IFM_25G_KR,		IF_Gbps(25) },
   2714 	[MCX_ETHER_CAP_25G_SR]		= { IFM_25G_SR,		IF_Gbps(25) },
   2715 	[MCX_ETHER_CAP_50G_CR2]		= { IFM_50G_CR2,	IF_Gbps(50) },
   2716 	[MCX_ETHER_CAP_50G_KR2]		= { IFM_50G_KR2,	IF_Gbps(50) },
   2717 };
   2718 
   2719 static int
   2720 mcx_get_id(uint32_t val)
   2721 {
   2722 	return be32toh(val) & 0x00ffffff;
   2723 }
   2724 
   2725 static int
   2726 mcx_match(device_t parent, cfdata_t cf, void *aux)
   2727 {
   2728 	struct pci_attach_args *pa = aux;
   2729 	int n;
   2730 
   2731 	for (n = 0; n < __arraycount(mcx_devices); n++) {
   2732 		if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
   2733 		    PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
   2734 			return 1;
   2735 	}
   2736 
   2737 	return 0;
   2738 }
   2739 
    2740 static void
   2741 mcx_attach(device_t parent, device_t self, void *aux)
   2742 {
   2743 	struct mcx_softc *sc = device_private(self);
   2744 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2745 	struct pci_attach_args *pa = aux;
   2746 	uint8_t enaddr[ETHER_ADDR_LEN];
   2747 	int counts[PCI_INTR_TYPE_SIZE];
   2748 	char intrxname[32];
   2749 	pcireg_t memtype;
   2750 	uint32_t r;
   2751 	unsigned int cq_stride;
   2752 	unsigned int cq_size;
   2753 	int i, msix;
   2754 	kcpuset_t *affinity;
   2755 
   2756 	sc->sc_dev = self;
   2757 	sc->sc_pc = pa->pa_pc;
   2758 	sc->sc_tag = pa->pa_tag;
   2759 	if (pci_dma64_available(pa))
   2760 		sc->sc_dmat = pa->pa_dmat64;
   2761 	else
   2762 		sc->sc_dmat = pa->pa_dmat;
   2763 
   2764 	/* Map the PCI memory space */
   2765 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
   2766 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
   2767 #ifdef __NetBSD__
   2768 	    0,
   2769 #else
   2770 	    BUS_SPACE_MAP_PREFETCHABLE,
   2771 #endif
   2772 	    &sc->sc_memt, &sc->sc_memh,
   2773 	    NULL, &sc->sc_mems)) {
   2774 		aprint_error(": unable to map register memory\n");
   2775 		return;
   2776 	}
   2777 
   2778 	pci_aprint_devinfo(pa, "Ethernet controller");
   2779 
   2780 	mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
   2781 
   2782 	if (mcx_version(sc) != 0) {
   2783 		/* error printed by mcx_version */
   2784 		goto unmap;
   2785 	}
   2786 
   2787 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
   2788 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
   2789 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
   2790 	if (cq_size > MCX_MAX_CQE) {
   2791 		aprint_error_dev(self,
   2792 		    "command queue size overflow %u\n", cq_size);
   2793 		goto unmap;
   2794 	}
   2795 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
   2796 		aprint_error_dev(self,
   2797 		    "command queue entry size underflow %u\n", cq_stride);
   2798 		goto unmap;
   2799 	}
   2800 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
   2801 		aprint_error_dev(self, "command queue page overflow\n");
   2802 		goto unmap;
   2803 	}
   2804 
   2805 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
   2806 	    MCX_PAGE_SIZE) != 0) {
   2807 		aprint_error_dev(self, "unable to allocate doorbell memory\n");
   2808 		goto unmap;
   2809 	}
   2810 
   2811 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
   2812 	    MCX_PAGE_SIZE) != 0) {
   2813 		aprint_error_dev(self, "unable to allocate command queue\n");
   2814 		goto dbfree;
   2815 	}
   2816 
   2817 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
   2818 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
   2819 	    BUS_SPACE_BARRIER_WRITE);
   2820 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
   2821 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
   2822 	    BUS_SPACE_BARRIER_WRITE);
   2823 
   2824 	if (mcx_init_wait(sc) != 0) {
   2825 		aprint_error_dev(self, "timeout waiting for init\n");
   2826 		goto cqfree;
   2827 	}
   2828 
   2829 	sc->sc_cmdq_mask = cq_size - 1;
   2830 	sc->sc_cmdq_size = cq_stride;
   2831 
   2832 	if (mcx_enable_hca(sc) != 0) {
   2833 		/* error printed by mcx_enable_hca */
   2834 		goto cqfree;
   2835 	}
   2836 
   2837 	if (mcx_issi(sc) != 0) {
   2838 		/* error printed by mcx_issi */
   2839 		goto teardown;
   2840 	}
   2841 
   2842 	if (mcx_pages(sc, &sc->sc_boot_pages,
   2843 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
   2844 		/* error printed by mcx_pages */
   2845 		goto teardown;
   2846 	}
   2847 
   2848 	if (mcx_hca_max_caps(sc) != 0) {
   2849 		/* error printed by mcx_hca_max_caps */
   2850 		goto teardown;
   2851 	}
   2852 
   2853 	if (mcx_hca_set_caps(sc) != 0) {
   2854 		/* error printed by mcx_hca_set_caps */
   2855 		goto teardown;
   2856 	}
   2857 
   2858 	if (mcx_pages(sc, &sc->sc_init_pages,
   2859 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
   2860 		/* error printed by mcx_pages */
   2861 		goto teardown;
   2862 	}
   2863 
   2864 	if (mcx_init_hca(sc) != 0) {
   2865 		/* error printed by mcx_init_hca */
   2866 		goto teardown;
   2867 	}
   2868 
   2869 	if (mcx_pages(sc, &sc->sc_regular_pages,
   2870 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
   2871 		/* error printed by mcx_pages */
   2872 		goto teardown;
   2873 	}
   2874 
   2875 	/* apparently not necessary? */
   2876 	if (mcx_set_driver_version(sc) != 0) {
   2877 		/* error printed by mcx_set_driver_version */
   2878 		goto teardown;
   2879 	}
   2880 
   2881 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
   2882 		/* error printed by mcx_iff? */
   2883 		goto teardown;
   2884 	}
   2885 
   2886 	if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
   2887 		/* error printed by mcx_alloc_uar */
   2888 		goto teardown;
   2889 	}
   2890 
   2891 	if (mcx_alloc_pd(sc) != 0) {
   2892 		/* error printed by mcx_alloc_pd */
   2893 		goto teardown;
   2894 	}
   2895 
   2896 	if (mcx_alloc_tdomain(sc) != 0) {
   2897 		/* error printed by mcx_alloc_tdomain */
   2898 		goto teardown;
   2899 	}
   2900 
   2901 	/*
   2902 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
   2903 	 * mellanox support tells me legacy interrupts are not supported,
   2904 	 * so we're stuck with just msi-x.
   2905 	 */
   2906 	counts[PCI_INTR_TYPE_MSIX] = -1;
   2907 	counts[PCI_INTR_TYPE_MSI] = 0;
   2908 	counts[PCI_INTR_TYPE_INTX] = 0;
   2909 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
   2910 		aprint_error_dev(self, "unable to allocate interrupt\n");
   2911 		goto teardown;
   2912 	}
   2913 	if (counts[PCI_INTR_TYPE_MSIX] < 2) {
   2914 		aprint_error_dev(self, "not enough MSI-X vectors\n");
   2915 		goto teardown;
   2916 	}
   2917 	KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
   2918 	snprintf(intrxname, sizeof(intrxname), "%s adminq", DEVNAME(sc));
   2919 	sc->sc_ihc = mcx_establish_intr(sc, 0, NULL, mcx_admin_intr, sc,
   2920 	    intrxname);
   2921 	if (sc->sc_ihc == NULL) {
   2922 		aprint_error_dev(self, "couldn't establish adminq interrupt\n");
   2923 		goto teardown;
   2924 	}
   2925 
   2926 	if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
   2927 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
   2928 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
   2929 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
   2930 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
   2931 		/* error printed by mcx_create_eq */
   2932 		goto teardown;
   2933 	}
   2934 
   2935 	if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
   2936 		/* error printed by mcx_query_nic_vport_context */
   2937 		goto teardown;
   2938 	}
   2939 
   2940 	if (mcx_query_special_contexts(sc) != 0) {
   2941 		/* error printed by mcx_query_special_contexts */
   2942 		goto teardown;
   2943 	}
   2944 
   2945 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
   2946 		/* error printed by mcx_set_port_mtu */
   2947 		goto teardown;
   2948 	}
   2949 
   2950 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2951 	    ether_sprintf(enaddr));
   2952 
   2953 	msix = counts[PCI_INTR_TYPE_MSIX];
   2954 	msix--; /* admin ops took one */
   2955 
   2956 	sc->sc_nqueues = uimin(MCX_MAX_QUEUES, msix);
   2957 	sc->sc_nqueues = uimin(sc->sc_nqueues, ncpu);
   2958 	sc->sc_queues = kmem_zalloc(sc->sc_nqueues * sizeof(*sc->sc_queues),
   2959 	    KM_SLEEP);
   2960 
   2961 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   2962 	ifp->if_softc = sc;
   2963 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
   2964 #ifdef MCX_MPSAFE
   2965 	ifp->if_extflags = IFEF_MPSAFE;
   2966 #endif
   2967 	ifp->if_init = mcx_init;
   2968 	ifp->if_stop = mcx_stop;
   2969 	ifp->if_ioctl = mcx_ioctl;
   2970 	ifp->if_start = mcx_start;
   2971 	if (sc->sc_nqueues > 1) {
   2972 		ifp->if_transmit = mcx_transmit;
   2973 	}
   2974 	ifp->if_watchdog = mcx_watchdog;
   2975 	ifp->if_mtu = sc->sc_hardmtu;
   2976 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
   2977 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx |
   2978 	    IFCAP_CSUM_UDPv6_Rx | IFCAP_CSUM_UDPv6_Tx |
   2979 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
   2980 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_TCPv6_Tx;
   2981 	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
   2982 	IFQ_SET_READY(&ifp->if_snd);
   2983 
   2984 	sc->sc_ec.ec_capabilities = ETHERCAP_JUMBO_MTU |
   2985 	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2986 	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2987 
   2988 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
   2989 	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
   2990 	    mcx_media_status, &sc->sc_media_mutex);
   2991 	mcx_media_add_types(sc);
   2992 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2993 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
   2994 
   2995 	if_attach(ifp);
   2996 	if_deferred_start_init(ifp, NULL);
   2997 
   2998 	ether_ifattach(ifp, enaddr);
   2999 
   3000 	kcpuset_create(&affinity, false);
   3001 	kcpuset_set(affinity, 0);
   3002 
   3003 	for (i = 0; i < sc->sc_nqueues; i++) {
   3004 		struct mcx_queues *q = &sc->sc_queues[i];
   3005 		struct mcx_rx *rx = &q->q_rx;
   3006 		struct mcx_tx *tx = &q->q_tx;
   3007 		int vec;
   3008 
   3009 		vec = i + 1;
   3010 		q->q_sc = sc;
   3011 		q->q_index = i;
   3012 
   3013 		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
   3014 			aprint_error_dev(self, "unable to alloc uar %d\n", i);
   3015 			goto teardown;
   3016 		}
   3017 
   3018 		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
   3019 			aprint_error_dev(self,
   3020 			    "unable to create event queue %d\n", i);
   3021 			goto teardown;
   3022 		}
   3023 
   3024 		rx->rx_softc = sc;
   3025 		callout_init(&rx->rx_refill, CALLOUT_FLAGS);
   3026 		callout_setfunc(&rx->rx_refill, mcx_refill, rx);
   3027 
   3028 		tx->tx_softc = sc;
   3029 		mutex_init(&tx->tx_lock, MUTEX_DEFAULT, IPL_NET);
   3030 		tx->tx_pcq = pcq_create(MCX_TXQ_NUM, KM_SLEEP);
   3031 		tx->tx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
   3032 		    mcx_deferred_transmit, tx);
   3033 
   3034 		snprintf(intrxname, sizeof(intrxname), "%s queue %d",
   3035 		    DEVNAME(sc), i);
   3036 		q->q_ihc = mcx_establish_intr(sc, vec, affinity, mcx_cq_intr,
   3037 		    q, intrxname);
   3038 	}
   3039 
   3040 	callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
   3041 	callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
   3042 
   3043 	if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
   3044 	    PRI_NONE, IPL_NET, 0) != 0) {
   3045 		aprint_error_dev(self, "couldn't create port change workq\n");
   3046 		goto teardown;
   3047 	}
   3048 
   3049 	mcx_port_change(&sc->sc_port_change, sc);
   3050 
   3051 	sc->sc_mac_flow_table_id = -1;
   3052 	sc->sc_rss_flow_table_id = -1;
   3053 	sc->sc_rqt = -1;
   3054 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
   3055 		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
   3056 		mfg->g_id = -1;
   3057 		mfg->g_table = -1;
   3058 		mfg->g_size = 0;
   3059 		mfg->g_start = 0;
   3060 	}
   3061 	sc->sc_extra_mcast = 0;
   3062 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
   3063 
   3064 #if NKSTAT > 0
   3065 	mcx_kstat_attach(sc);
   3066 #endif
   3067 	mcx_timecounter_attach(sc);
   3068 	return;
   3069 
   3070 teardown:
   3071 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
   3072 	/* error printed by mcx_teardown_hca, and we're already unwinding */
   3073 cqfree:
   3074 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
   3075 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
   3076 	    BUS_SPACE_BARRIER_WRITE);
   3077 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
   3078 	    MCX_CMDQ_INTERFACE_DISABLED);
   3079 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
   3080 	    BUS_SPACE_BARRIER_WRITE);
   3081 
   3082 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
   3083 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
   3084 	    BUS_SPACE_BARRIER_WRITE);
   3085 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
   3086 
   3087 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
   3088 dbfree:
   3089 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
   3090 unmap:
   3091 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
   3092 	sc->sc_mems = 0;
   3093 }
   3094 
   3095 static void *
   3096 mcx_establish_intr(struct mcx_softc *sc, int index, kcpuset_t *affinity,
   3097     int (*func)(void *), void *arg, const char *xname)
   3098 {
   3099 	char intrbuf[PCI_INTRSTR_LEN];
   3100 	const char *intrstr;
   3101 	void *ih;
   3102 
   3103 	pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[index], PCI_INTR_MPSAFE,
   3104 	    true);
   3105 
   3106 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[index], intrbuf,
   3107 	    sizeof(intrbuf));
   3108 	ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[index], IPL_NET,
   3109 	    func, arg, xname);
   3110 	if (ih == NULL) {
   3111 		aprint_error_dev(sc->sc_dev,
   3112 		    "unable to establish interrupt%s%s\n",
   3113 		    intrstr ? " at " : "",
   3114 		    intrstr ? intrstr : "");
   3115 		return NULL;
   3116 	}
   3117 
   3118 	if (affinity != NULL && index > 0) {
   3119 		/* Round-robin affinity */
   3120 		kcpuset_zero(affinity);
   3121 		kcpuset_set(affinity, (index - 1) % ncpu);
   3122 		interrupt_distribute(ih, affinity, NULL);
   3123 	}
   3124 
   3125 	return ih;
   3126 }
   3127 
   3128 static void
   3129 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
   3130 {
   3131 	rxr->rxr_total = hwm;
   3132 	rxr->rxr_inuse = 0;
   3133 }
   3134 
   3135 static u_int
   3136 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
   3137 {
   3138 	const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
   3139 
   3140 	rxr->rxr_inuse += taken;
   3141 
   3142 	return taken;
   3143 }
   3144 
   3145 static void
   3146 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
   3147 {
   3148 	rxr->rxr_inuse -= n;
   3149 }
   3150 
   3151 static u_int
   3152 mcx_rxr_inuse(struct mcx_rxring *rxr)
   3153 {
   3154 	return rxr->rxr_inuse;
   3155 }
   3156 
   3157 static int
   3158 mcx_version(struct mcx_softc *sc)
   3159 {
   3160 	uint32_t fw0, fw1;
   3161 	uint16_t cmdif;
   3162 
   3163 	fw0 = mcx_rd(sc, MCX_FW_VER);
   3164 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
   3165 
   3166 	aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
   3167 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
   3168 
   3169 	cmdif = MCX_CMDIF(fw1);
   3170 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
   3171 		aprint_error_dev(sc->sc_dev,
   3172 		    "unsupported command interface %u\n", cmdif);
   3173 		return (-1);
   3174 	}
   3175 
   3176 	return (0);
   3177 }
   3178 
   3179 static int
   3180 mcx_init_wait(struct mcx_softc *sc)
   3181 {
   3182 	unsigned int i;
   3183 	uint32_t r;
   3184 
   3185 	for (i = 0; i < 2000; i++) {
   3186 		r = mcx_rd(sc, MCX_STATE);
   3187 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
   3188 			return (0);
   3189 
   3190 		delay(1000);
   3191 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
   3192 		    BUS_SPACE_BARRIER_READ);
   3193 	}
   3194 
   3195 	return (-1);
   3196 }
   3197 
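         /*
          * Poll a command queue entry until the firmware hands ownership
          * back to software, syncing the DMA region before each check.
          * Polls in 1ms steps for up to "msec" milliseconds.
          */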
   3198 static uint8_t
   3199 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
   3200     unsigned int msec)
   3201 {
   3202 	unsigned int i;
   3203 
   3204 	for (i = 0; i < msec; i++) {
   3205 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
   3206 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
   3207 
   3208 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
   3209 		    MCX_CQ_STATUS_OWN_SW)
   3210 			return (0);
   3211 
   3212 		delay(1000);
   3213 	}
   3214 
   3215 	return (ETIMEDOUT);
   3216 }
   3217 
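         /*
          * Command interface signatures: XOR the 32-bit words of a
          * structure together, then fold the result down to a single
          * byte.  The mcx_mix_* helpers mix in individual fields when
          * only a few of them are populated.
          */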
   3218 static uint32_t
   3219 mcx_mix_u64(uint32_t xor, uint64_t u64)
   3220 {
   3221 	xor ^= u64 >> 32;
   3222 	xor ^= u64;
   3223 
   3224 	return (xor);
   3225 }
   3226 
   3227 static uint32_t
   3228 mcx_mix_u32(uint32_t xor, uint32_t u32)
   3229 {
   3230 	xor ^= u32;
   3231 
   3232 	return (xor);
   3233 }
   3234 
   3235 static uint32_t
   3236 mcx_mix_u8(uint32_t xor, uint8_t u8)
   3237 {
   3238 	xor ^= u8;
   3239 
   3240 	return (xor);
   3241 }
   3242 
   3243 static uint8_t
   3244 mcx_mix_done(uint32_t xor)
   3245 {
   3246 	xor ^= xor >> 16;
   3247 	xor ^= xor >> 8;
   3248 
   3249 	return (xor);
   3250 }
   3251 
   3252 static uint8_t
   3253 mcx_xor(const void *buf, size_t len)
   3254 {
   3255 	const uint32_t *dwords = buf;
   3256 	uint32_t xor = 0xff;
   3257 	size_t i;
   3258 
   3259 	len /= sizeof(*dwords);
   3260 
   3261 	for (i = 0; i < len; i++)
   3262 		xor ^= dwords[i];
   3263 
   3264 	return (mcx_mix_done(xor));
   3265 }
   3266 
   3267 static uint8_t
   3268 mcx_cmdq_token(struct mcx_softc *sc)
   3269 {
   3270 	uint8_t token;
   3271 
   3272 	do {
   3273 		token = ++sc->sc_cmdq_token;
   3274 	} while (token == 0);
   3275 
   3276 	return (token);
   3277 }
   3278 
   3279 static void
   3280 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
   3281     uint32_t ilen, uint32_t olen, uint8_t token)
   3282 {
   3283 	memset(cqe, 0, sc->sc_cmdq_size);
   3284 
   3285 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
   3286 	be32enc(&cqe->cq_input_length, ilen);
   3287 	be32enc(&cqe->cq_output_length, olen);
   3288 	cqe->cq_token = token;
   3289 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
   3290 }
   3291 
   3292 static void
   3293 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
   3294 {
   3295 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
   3296 }
   3297 
   3298 static int
   3299 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
   3300 {
   3301 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
   3302 	return (0);
   3303 }
   3304 
   3305 static void *
   3306 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
   3307 {
   3308 	return (&cqe->cq_input_data);
   3309 }
   3310 
   3311 static void *
   3312 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
   3313 {
   3314 	return (&cqe->cq_output_data);
   3315 }
   3316 
   3317 static void
   3318 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
   3319     unsigned int slot)
   3320 {
   3321 	mcx_cmdq_sign(cqe);
   3322 
   3323 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
   3324 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
   3325 
   3326 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
   3327 	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
   3328 	    BUS_SPACE_BARRIER_WRITE);
   3329 }
   3330 
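         /*
          * Most commands follow the same shape: build the cqe in the
          * command queue, post it to a slot, poll for software
          * ownership, verify the signature, then check the status byte
          * in the output data.
          */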
   3331 static int
   3332 mcx_enable_hca(struct mcx_softc *sc)
   3333 {
   3334 	struct mcx_cmdq_entry *cqe;
   3335 	struct mcx_cmd_enable_hca_in *in;
   3336 	struct mcx_cmd_enable_hca_out *out;
   3337 	int error;
   3338 	uint8_t status;
   3339 
   3340 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3341 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   3342 
   3343 	in = mcx_cmdq_in(cqe);
   3344 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
   3345 	in->cmd_op_mod = htobe16(0);
   3346 	in->cmd_function_id = htobe16(0);
   3347 
   3348 	mcx_cmdq_post(sc, cqe, 0);
   3349 
   3350 	error = mcx_cmdq_poll(sc, cqe, 1000);
   3351 	if (error != 0) {
   3352 		printf(", hca enable timeout\n");
   3353 		return (-1);
   3354 	}
   3355 	if (mcx_cmdq_verify(cqe) != 0) {
   3356 		printf(", hca enable command corrupt\n");
   3357 		return (-1);
   3358 	}
   3359 
   3360 	status = cqe->cq_output_data[0];
   3361 	if (status != MCX_CQ_STATUS_OK) {
   3362 		printf(", hca enable failed (%x)\n", status);
   3363 		return (-1);
   3364 	}
   3365 
   3366 	return (0);
   3367 }
   3368 
   3369 static int
   3370 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
   3371 {
   3372 	struct mcx_cmdq_entry *cqe;
   3373 	struct mcx_cmd_teardown_hca_in *in;
   3374 	struct mcx_cmd_teardown_hca_out *out;
   3375 	int error;
   3376 	uint8_t status;
   3377 
   3378 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3379 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   3380 
   3381 	in = mcx_cmdq_in(cqe);
   3382 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
   3383 	in->cmd_op_mod = htobe16(0);
   3384 	in->cmd_profile = profile;
   3385 
   3386 	mcx_cmdq_post(sc, cqe, 0);
   3387 
   3388 	error = mcx_cmdq_poll(sc, cqe, 1000);
   3389 	if (error != 0) {
   3390 		printf(", hca teardown timeout\n");
   3391 		return (-1);
   3392 	}
   3393 	if (mcx_cmdq_verify(cqe) != 0) {
   3394 		printf(", hca teardown command corrupt\n");
   3395 		return (-1);
   3396 	}
   3397 
   3398 	status = cqe->cq_output_data[0];
   3399 	if (status != MCX_CQ_STATUS_OK) {
   3400 		printf(", hca teardown failed (%x)\n", status);
   3401 		return (-1);
   3402 	}
   3403 
   3404 	return (0);
   3405 }
   3406 
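         /*
          * Allocate a chain of command mailboxes, writing the DMA
          * address of each one into the next-pointer field of the cqe
          * or mailbox that precedes it.
          */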
   3407 static int
   3408 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
   3409     unsigned int nmb, uint64_t *ptr, uint8_t token)
   3410 {
   3411 	uint8_t *kva;
   3412 	uint64_t dva;
   3413 	int i;
   3414 	int error;
   3415 
   3416 	error = mcx_dmamem_alloc(sc, mxm,
   3417 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
   3418 	if (error != 0)
   3419 		return (error);
   3420 
   3421 	mcx_dmamem_zero(mxm);
   3422 
   3423 	dva = MCX_DMA_DVA(mxm);
   3424 	kva = MCX_DMA_KVA(mxm);
   3425 	for (i = 0; i < nmb; i++) {
   3426 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
   3427 
   3428 		/* patch the cqe or mbox pointing at this one */
   3429 		be64enc(ptr, dva);
   3430 
   3431 		/* fill in this mbox */
   3432 		be32enc(&mbox->mb_block_number, i);
   3433 		mbox->mb_token = token;
   3434 
   3435 		/* move to the next one */
   3436 		ptr = &mbox->mb_next_ptr;
   3437 
   3438 		dva += MCX_CMDQ_MAILBOX_SIZE;
   3439 		kva += MCX_CMDQ_MAILBOX_SIZE;
   3440 	}
   3441 
   3442 	return (0);
   3443 }
   3444 
   3445 static uint32_t
   3446 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
   3447 {
   3448 	uint32_t xor = 0xff;
   3449 
   3450 	/* only 3 fields get set, so mix them directly */
   3451 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
   3452 	xor = mcx_mix_u32(xor, mb->mb_block_number);
   3453 	xor = mcx_mix_u8(xor, mb->mb_token);
   3454 
   3455 	return (mcx_mix_done(xor));
   3456 }
   3457 
   3458 static void
   3459 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
   3460 {
   3461 	uint8_t *kva;
   3462 	int i;
   3463 
   3464 	kva = MCX_DMA_KVA(mxm);
   3465 
   3466 	for (i = 0; i < nmb; i++) {
   3467 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
   3468 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
   3469 		mb->mb_ctrl_signature = sig;
   3470 		mb->mb_signature = sig ^
   3471 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
   3472 
   3473 		kva += MCX_CMDQ_MAILBOX_SIZE;
   3474 	}
   3475 }
   3476 
   3477 static void
   3478 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
   3479 {
   3480 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
   3481 	    0, MCX_DMA_LEN(mxm), ops);
   3482 }
   3483 
   3484 static struct mcx_cmdq_mailbox *
   3485 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
   3486 {
   3487 	uint8_t *kva;
   3488 
   3489 	kva = MCX_DMA_KVA(mxm);
   3490 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
   3491 
   3492 	return ((struct mcx_cmdq_mailbox *)kva);
   3493 }
   3494 
   3495 static inline void *
   3496 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
   3497 {
   3498 	return (&mb->mb_data);
   3499 }
   3500 
   3501 static void
   3502 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
   3503     void *b, size_t len)
   3504 {
   3505 	uint8_t *buf = b;
   3506 	struct mcx_cmdq_mailbox *mb;
   3507 	int i;
   3508 
   3509 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
   3510 	for (i = 0; i < nmb; i++) {
   3511 
   3512 		memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
   3513 
   3514 		if (sizeof(mb->mb_data) >= len)
   3515 			break;
   3516 
   3517 		buf += sizeof(mb->mb_data);
   3518 		len -= sizeof(mb->mb_data);
   3519 		mb++;
   3520 	}
   3521 }
   3522 
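         /*
          * Scatter the physical addresses of "buf" into the mailbox
          * data areas, starting "offset" bytes into the first mailbox
          * and spilling into subsequent mailboxes as each data area
          * fills.
          */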
   3523 static void
   3524 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
   3525     struct mcx_dmamem *buf)
   3526 {
   3527 	uint64_t *pas;
   3528 	int mbox, mbox_pages, i;
   3529 
   3530 	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
   3531 	offset %= MCX_CMDQ_MAILBOX_DATASIZE;
   3532 
   3533 	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
   3534 	pas += (offset / sizeof(*pas));
   3535 	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
   3536 	for (i = 0; i < npages; i++) {
   3537 		if (i == mbox_pages) {
   3538 			mbox++;
   3539 			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
   3540 			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
   3541 		}
   3542 		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
   3543 		pas++;
   3544 	}
   3545 }
   3546 
   3547 static void
   3548 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
   3549 {
   3550 	uint8_t *buf = b;
   3551 	struct mcx_cmdq_mailbox *mb;
   3552 	int i;
   3553 
   3554 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
   3555 	for (i = 0; i < nmb; i++) {
   3556 		memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
   3557 
   3558 		if (sizeof(mb->mb_data) >= len)
   3559 			break;
   3560 
   3561 		buf += sizeof(mb->mb_data);
   3562 		len -= sizeof(mb->mb_data);
   3563 		mb++;
   3564 	}
   3565 }
   3566 
   3567 static void
   3568 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
   3569 {
   3570 	mcx_dmamem_free(sc, mxm);
   3571 }
   3572 
   3573 #if 0
   3574 static void
   3575 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
   3576 {
   3577 	unsigned int i;
   3578 
   3579 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
   3580 	    be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
   3581 
   3582 	printf(", idata ");
   3583 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
   3584 		printf("%02x", cqe->cq_input_data[i]);
   3585 
   3586 	printf(", odata ");
   3587 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
   3588 		printf("%02x", cqe->cq_output_data[i]);
   3589 
   3590 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
   3591 	    be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
   3592 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
   3593 }
   3594 
   3595 static void
   3596 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
   3597 {
   3598 	int i, j;
   3599 	uint8_t *d;
   3600 
   3601 	for (i = 0; i < num; i++) {
   3602 		struct mcx_cmdq_mailbox *mbox;
   3603 		mbox = mcx_cq_mbox(mboxes, i);
   3604 
   3605 		d = mcx_cq_mbox_data(mbox);
   3606 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
   3607 			if (j != 0 && (j % 16 == 0))
   3608 				printf("\n");
   3609 			printf("%.2x ", d[j]);
   3610 		}
   3611 	}
   3612 }
   3613 #endif
   3614 
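         /*
          * Read or write a hardware register via ACCESS_REG.  The same
          * mailbox chain is used for input and output: the register
          * contents are copied in, the command is run, and the result
          * is copied back out over "data".
          */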
   3615 static int
   3616 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
   3617     int len)
   3618 {
   3619 	struct mcx_dmamem mxm;
   3620 	struct mcx_cmdq_entry *cqe;
   3621 	struct mcx_cmd_access_reg_in *in;
   3622 	struct mcx_cmd_access_reg_out *out;
   3623 	uint8_t token = mcx_cmdq_token(sc);
   3624 	int error, nmb;
   3625 
   3626 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3627 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
   3628 	    token);
   3629 
   3630 	in = mcx_cmdq_in(cqe);
   3631 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
   3632 	in->cmd_op_mod = htobe16(op);
   3633 	in->cmd_register_id = htobe16(reg);
   3634 
   3635 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
   3636 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
   3637 	    &cqe->cq_output_ptr, token) != 0) {
   3638 		printf(", unable to allocate access reg mailboxen\n");
   3639 		return (-1);
   3640 	}
   3641 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   3642 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
   3643 	mcx_cmdq_mboxes_sign(&mxm, nmb);
   3644 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
   3645 
   3646 	mcx_cmdq_post(sc, cqe, 0);
   3647 	error = mcx_cmdq_poll(sc, cqe, 1000);
   3648 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
   3649 
   3650 	if (error != 0) {
   3651 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
   3652 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
   3653 		goto free;
   3654 	}
   3655 	error = mcx_cmdq_verify(cqe);
   3656 	if (error != 0) {
   3657 		printf("%s: access reg (%s %x) reply corrupt\n",
   3658 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
   3659 		    reg);
   3660 		goto free;
   3661 	}
   3662 
   3663 	out = mcx_cmdq_out(cqe);
   3664 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   3665 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
   3666 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
   3667 		    reg, out->cmd_status, be32toh(out->cmd_syndrome));
   3668 		error = -1;
   3669 		goto free;
   3670 	}
   3671 
   3672 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
   3673 free:
   3674 	mcx_dmamem_free(sc, &mxm);
   3675 
   3676 	return (error);
   3677 }
   3678 
   3679 static int
   3680 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
   3681     unsigned int slot)
   3682 {
   3683 	struct mcx_cmd_set_issi_in *in;
   3684 	struct mcx_cmd_set_issi_out *out;
   3685 	uint8_t status;
   3686 
   3687 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   3688 
   3689 	in = mcx_cmdq_in(cqe);
   3690 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
   3691 	in->cmd_op_mod = htobe16(0);
   3692 	in->cmd_current_issi = htobe16(MCX_ISSI);
   3693 
   3694 	mcx_cmdq_post(sc, cqe, slot);
   3695 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
   3696 		return (-1);
   3697 	if (mcx_cmdq_verify(cqe) != 0)
   3698 		return (-1);
   3699 
   3700 	status = cqe->cq_output_data[0];
   3701 	if (status != MCX_CQ_STATUS_OK)
   3702 		return (-1);
   3703 
   3704 	return (0);
   3705 }
   3706 
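         /*
          * ISSI (interface step sequence ID) negotiation: query what
          * the firmware supports and, if ISSI 1 is available, switch to
          * it.  Firmware that rejects the query opcode only speaks
          * ISSI 0.
          */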
   3707 static int
   3708 mcx_issi(struct mcx_softc *sc)
   3709 {
   3710 	struct mcx_dmamem mxm;
   3711 	struct mcx_cmdq_entry *cqe;
   3712 	struct mcx_cmd_query_issi_in *in;
   3713 	struct mcx_cmd_query_issi_il_out *out;
   3714 	struct mcx_cmd_query_issi_mb_out *mb;
   3715 	uint8_t token = mcx_cmdq_token(sc);
   3716 	uint8_t status;
   3717 	int error;
   3718 
   3719 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3720 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
   3721 
   3722 	in = mcx_cmdq_in(cqe);
   3723 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
   3724 	in->cmd_op_mod = htobe16(0);
   3725 
   3726 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
   3727 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   3728 	    &cqe->cq_output_ptr, token) != 0) {
   3729 		printf(", unable to allocate query issi mailbox\n");
   3730 		return (-1);
   3731 	}
   3732 	mcx_cmdq_mboxes_sign(&mxm, 1);
   3733 
   3734 	mcx_cmdq_post(sc, cqe, 0);
   3735 	error = mcx_cmdq_poll(sc, cqe, 1000);
   3736 	if (error != 0) {
   3737 		printf(", query issi timeout\n");
   3738 		goto free;
   3739 	}
   3740 	error = mcx_cmdq_verify(cqe);
   3741 	if (error != 0) {
   3742 		printf(", query issi reply corrupt\n");
   3743 		goto free;
   3744 	}
   3745 
   3746 	status = cqe->cq_output_data[0];
   3747 	switch (status) {
   3748 	case MCX_CQ_STATUS_OK:
   3749 		break;
   3750 	case MCX_CQ_STATUS_BAD_OPCODE:
   3751 		/* use ISSI 0 */
   3752 		goto free;
   3753 	default:
   3754 		printf(", query issi failed (%x)\n", status);
   3755 		error = -1;
   3756 		goto free;
   3757 	}
   3758 
   3759 	out = mcx_cmdq_out(cqe);
   3760 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
   3761 		/* use ISSI 1 */
   3762 		goto free;
   3763 	}
   3764 
   3765 	/* don't need to read cqe anymore, can be used for SET ISSI */
   3766 
   3767 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   3768 	CTASSERT(MCX_ISSI < NBBY);
    3769 	/* supported_issi is a big-endian bitmask; bit MCX_ISSI is in byte 79 */
   3770 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
   3771 		/* use ISSI 0 */
   3772 		goto free;
   3773 	}
   3774 
   3775 	if (mcx_set_issi(sc, cqe, 0) != 0) {
   3776 		/* ignore the error, just use ISSI 0 */
   3777 	} else {
   3778 		/* use ISSI 1 */
   3779 	}
   3780 
   3781 free:
   3782 	mcx_cq_mboxes_free(sc, &mxm);
   3783 	return (error);
   3784 }
   3785 
   3786 static int
   3787 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
   3788     int32_t *npages, uint16_t *func_id)
   3789 {
   3790 	struct mcx_cmdq_entry *cqe;
   3791 	struct mcx_cmd_query_pages_in *in;
   3792 	struct mcx_cmd_query_pages_out *out;
   3793 
   3794 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3795 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   3796 
   3797 	in = mcx_cmdq_in(cqe);
   3798 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
   3799 	in->cmd_op_mod = type;
   3800 
   3801 	mcx_cmdq_post(sc, cqe, 0);
   3802 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
   3803 		printf(", query pages timeout\n");
   3804 		return (-1);
   3805 	}
   3806 	if (mcx_cmdq_verify(cqe) != 0) {
   3807 		printf(", query pages reply corrupt\n");
   3808 		return (-1);
   3809 	}
   3810 
   3811 	out = mcx_cmdq_out(cqe);
   3812 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   3813 		printf(", query pages failed (%x)\n", out->cmd_status);
   3814 		return (-1);
   3815 	}
   3816 
   3817 	*func_id = out->cmd_func_id;
   3818 	*npages = be32dec(&out->cmd_num_pages);
   3819 
   3820 	return (0);
   3821 }
   3822 
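         /*
          * Small iterator over a loaded bus_dmamap, used to walk
          * hardware memory in fixed MCX_PAGE_SIZE steps across segment
          * boundaries.
          */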
   3823 struct bus_dma_iter {
   3824 	bus_dmamap_t		i_map;
   3825 	bus_size_t		i_offset;
   3826 	unsigned int		i_index;
   3827 };
   3828 
   3829 static void
   3830 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
   3831 {
   3832 	i->i_map = map;
   3833 	i->i_offset = 0;
   3834 	i->i_index = 0;
   3835 }
   3836 
   3837 static bus_addr_t
   3838 bus_dma_iter_addr(struct bus_dma_iter *i)
   3839 {
   3840 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
   3841 }
   3842 
   3843 static void
   3844 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
   3845 {
   3846 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
   3847 	bus_size_t diff;
   3848 
   3849 	do {
   3850 		diff = seg->ds_len - i->i_offset;
   3851 		if (size < diff)
   3852 			break;
   3853 
   3854 		size -= diff;
   3855 
   3856 		seg++;
   3857 
   3858 		i->i_offset = 0;
   3859 		i->i_index++;
   3860 	} while (size > 0);
   3861 
   3862 	i->i_offset += size;
   3863 }
   3864 
   3865 static int
   3866 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
   3867 {
   3868 	struct mcx_dmamem mxm;
   3869 	struct mcx_cmdq_entry *cqe;
   3870 	struct mcx_cmd_manage_pages_in *in;
   3871 	struct mcx_cmd_manage_pages_out *out;
   3872 	unsigned int paslen, nmb, i, j, npages;
   3873 	struct bus_dma_iter iter;
   3874 	uint64_t *pas;
   3875 	uint8_t status;
   3876 	uint8_t token = mcx_cmdq_token(sc);
   3877 	int error;
   3878 
   3879 	npages = mhm->mhm_npages;
   3880 
   3881 	paslen = sizeof(*pas) * npages;
   3882 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
   3883 
   3884 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3885 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
   3886 
   3887 	in = mcx_cmdq_in(cqe);
   3888 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
   3889 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
   3890 	in->cmd_func_id = func_id;
   3891 	be32enc(&in->cmd_input_num_entries, npages);
   3892 
   3893 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
   3894 	    &cqe->cq_input_ptr, token) != 0) {
   3895 		printf(", unable to allocate manage pages mailboxen\n");
   3896 		return (-1);
   3897 	}
   3898 
   3899 	bus_dma_iter_init(&iter, mhm->mhm_map);
   3900 	for (i = 0; i < nmb; i++) {
   3901 		unsigned int lim;
   3902 
   3903 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
   3904 		lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
   3905 
   3906 		for (j = 0; j < lim; j++) {
   3907 			be64enc(&pas[j], bus_dma_iter_addr(&iter));
   3908 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
   3909 		}
   3910 
   3911 		npages -= lim;
   3912 	}
   3913 
   3914 	mcx_cmdq_mboxes_sign(&mxm, nmb);
   3915 
   3916 	mcx_cmdq_post(sc, cqe, 0);
   3917 	error = mcx_cmdq_poll(sc, cqe, 1000);
   3918 	if (error != 0) {
   3919 		printf(", manage pages timeout\n");
   3920 		goto free;
   3921 	}
   3922 	error = mcx_cmdq_verify(cqe);
   3923 	if (error != 0) {
   3924 		printf(", manage pages reply corrupt\n");
   3925 		goto free;
   3926 	}
   3927 
   3928 	status = cqe->cq_output_data[0];
   3929 	if (status != MCX_CQ_STATUS_OK) {
   3930 		printf(", manage pages failed (%x)\n", status);
   3931 		error = -1;
   3932 		goto free;
   3933 	}
   3934 
   3935 free:
   3936 	mcx_dmamem_free(sc, &mxm);
   3937 
   3938 	return (error);
   3939 }
   3940 
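         /*
          * Firmware page handshake: ask the device how many pages it
          * wants for this stage (QUERY_PAGES), allocate that many, then
          * hand them over with MANAGE_PAGES.  A count below 1 means
          * there is nothing to hand over.
          */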
   3941 static int
   3942 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
   3943 {
   3944 	int32_t npages;
   3945 	uint16_t func_id;
   3946 
   3947 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
   3948 		/* error printed by mcx_query_pages */
   3949 		return (-1);
   3950 	}
   3951 
   3952 	if (npages < 1)
   3953 		return (0);
   3954 
   3955 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
   3956 		printf(", unable to allocate hwmem\n");
   3957 		return (-1);
   3958 	}
   3959 
   3960 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
   3961 		printf(", unable to add hwmem\n");
   3962 		goto free;
   3963 	}
   3964 
   3965 	return (0);
   3966 
   3967 free:
   3968 	mcx_hwmem_free(sc, mhm);
   3969 
   3970 	return (-1);
   3971 }
   3972 
   3973 static int
   3974 mcx_hca_max_caps(struct mcx_softc *sc)
   3975 {
   3976 	struct mcx_dmamem mxm;
   3977 	struct mcx_cmdq_entry *cqe;
   3978 	struct mcx_cmd_query_hca_cap_in *in;
   3979 	struct mcx_cmd_query_hca_cap_out *out;
   3980 	struct mcx_cmdq_mailbox *mb;
   3981 	struct mcx_cap_device *hca;
   3982 	uint8_t status;
   3983 	uint8_t token = mcx_cmdq_token(sc);
   3984 	int error;
   3985 
   3986 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   3987 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
   3988 	    token);
   3989 
   3990 	in = mcx_cmdq_in(cqe);
   3991 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
   3992 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
   3993 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
   3994 
   3995 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
   3996 	    &cqe->cq_output_ptr, token) != 0) {
   3997 		printf(", unable to allocate query hca caps mailboxen\n");
   3998 		return (-1);
   3999 	}
   4000 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
   4001 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
   4002 
   4003 	mcx_cmdq_post(sc, cqe, 0);
   4004 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4005 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
   4006 
   4007 	if (error != 0) {
   4008 		printf(", query hca caps timeout\n");
   4009 		goto free;
   4010 	}
   4011 	error = mcx_cmdq_verify(cqe);
   4012 	if (error != 0) {
   4013 		printf(", query hca caps reply corrupt\n");
   4014 		goto free;
   4015 	}
   4016 
   4017 	status = cqe->cq_output_data[0];
   4018 	if (status != MCX_CQ_STATUS_OK) {
   4019 		printf(", query hca caps failed (%x)\n", status);
   4020 		error = -1;
   4021 		goto free;
   4022 	}
   4023 
   4024 	mb = mcx_cq_mbox(&mxm, 0);
   4025 	hca = mcx_cq_mbox_data(mb);
   4026 
   4027 	if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
   4028 	    != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
   4029 		printf(", not in ethernet mode\n");
   4030 		error = -1;
   4031 		goto free;
   4032 	}
   4033 	if (hca->log_pg_sz > PAGE_SHIFT) {
   4034 		printf(", minimum system page shift %u is too large\n",
   4035 		    hca->log_pg_sz);
   4036 		error = -1;
   4037 		goto free;
   4038 	}
   4039 	/*
   4040 	 * blueflame register is split into two buffers, and we must alternate
   4041 	 * between the two of them.
   4042 	 */
   4043 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
   4044 	sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
   4045 
   4046 	if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
   4047 		sc->sc_mcam_reg = 1;
   4048 
   4049 	sc->sc_mhz = be32dec(&hca->device_frequency_mhz);
   4050 	sc->sc_khz = be32dec(&hca->device_frequency_khz);
   4051 
   4052 free:
   4053 	mcx_dmamem_free(sc, &mxm);
   4054 
   4055 	return (error);
   4056 }
   4057 
   4058 static int
   4059 mcx_hca_set_caps(struct mcx_softc *sc)
   4060 {
   4061 	struct mcx_dmamem mxm;
   4062 	struct mcx_cmdq_entry *cqe;
   4063 	struct mcx_cmd_query_hca_cap_in *in;
   4064 	struct mcx_cmd_query_hca_cap_out *out;
   4065 	struct mcx_cmdq_mailbox *mb;
   4066 	struct mcx_cap_device *hca;
   4067 	uint8_t status;
   4068 	uint8_t token = mcx_cmdq_token(sc);
   4069 	int error;
   4070 
   4071 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4072 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
   4073 	    token);
   4074 
   4075 	in = mcx_cmdq_in(cqe);
   4076 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
   4077 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
   4078 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
   4079 
   4080 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
   4081 	    &cqe->cq_output_ptr, token) != 0) {
   4082 		printf(", unable to allocate manage pages mailboxen\n");
   4083 		return (-1);
   4084 	}
   4085 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
   4086 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
   4087 
   4088 	mcx_cmdq_post(sc, cqe, 0);
   4089 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4090 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
   4091 
   4092 	if (error != 0) {
   4093 		printf(", query hca caps timeout\n");
   4094 		goto free;
   4095 	}
   4096 	error = mcx_cmdq_verify(cqe);
   4097 	if (error != 0) {
   4098 		printf(", query hca caps reply corrupt\n");
   4099 		goto free;
   4100 	}
   4101 
   4102 	status = cqe->cq_output_data[0];
   4103 	if (status != MCX_CQ_STATUS_OK) {
   4104 		printf(", query hca caps failed (%x)\n", status);
   4105 		error = -1;
   4106 		goto free;
   4107 	}
   4108 
   4109 	mb = mcx_cq_mbox(&mxm, 0);
   4110 	hca = mcx_cq_mbox_data(mb);
   4111 
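         	/*
         	 * XXX this writes log_pg_sz only into our local copy of
         	 * the queried caps; no SET_HCA_CAP command is posted
         	 * afterwards to push it back to the firmware.
         	 */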
   4112 	hca->log_pg_sz = PAGE_SHIFT;
   4113 
   4114 free:
   4115 	mcx_dmamem_free(sc, &mxm);
   4116 
   4117 	return (error);
   4118 }
   4119 
   4120 
   4121 static int
   4122 mcx_init_hca(struct mcx_softc *sc)
   4123 {
   4124 	struct mcx_cmdq_entry *cqe;
   4125 	struct mcx_cmd_init_hca_in *in;
   4126 	struct mcx_cmd_init_hca_out *out;
   4127 	int error;
   4128 	uint8_t status;
   4129 
   4130 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4131 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   4132 
   4133 	in = mcx_cmdq_in(cqe);
   4134 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
   4135 	in->cmd_op_mod = htobe16(0);
   4136 
   4137 	mcx_cmdq_post(sc, cqe, 0);
   4138 
   4139 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4140 	if (error != 0) {
   4141 		printf(", hca init timeout\n");
   4142 		return (-1);
   4143 	}
   4144 	if (mcx_cmdq_verify(cqe) != 0) {
   4145 		printf(", hca init command corrupt\n");
   4146 		return (-1);
   4147 	}
   4148 
   4149 	status = cqe->cq_output_data[0];
   4150 	if (status != MCX_CQ_STATUS_OK) {
   4151 		printf(", hca init failed (%x)\n", status);
   4152 		return (-1);
   4153 	}
   4154 
   4155 	return (0);
   4156 }
   4157 
   4158 static int
   4159 mcx_set_driver_version(struct mcx_softc *sc)
   4160 {
   4161 	struct mcx_dmamem mxm;
   4162 	struct mcx_cmdq_entry *cqe;
   4163 	struct mcx_cmd_set_driver_version_in *in;
   4164 	struct mcx_cmd_set_driver_version_out *out;
   4165 	int error;
   4166 	int token;
   4167 	uint8_t status;
   4168 
   4169 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4170 	token = mcx_cmdq_token(sc);
   4171 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
   4172 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
   4173 
   4174 	in = mcx_cmdq_in(cqe);
   4175 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
   4176 	in->cmd_op_mod = htobe16(0);
   4177 
   4178 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   4179 	    &cqe->cq_input_ptr, token) != 0) {
   4180 		printf(", unable to allocate set driver version mailboxen\n");
   4181 		return (-1);
   4182 	}
   4183 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
   4184 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
   4185 
   4186 	mcx_cmdq_mboxes_sign(&mxm, 1);
   4187 	mcx_cmdq_post(sc, cqe, 0);
   4188 
   4189 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4190 	if (error != 0) {
   4191 		printf(", set driver version timeout\n");
   4192 		goto free;
   4193 	}
   4194 	if (mcx_cmdq_verify(cqe) != 0) {
   4195 		printf(", set driver version command corrupt\n");
   4196 		goto free;
   4197 	}
   4198 
   4199 	status = cqe->cq_output_data[0];
   4200 	if (status != MCX_CQ_STATUS_OK) {
   4201 		printf(", set driver version failed (%x)\n", status);
   4202 		error = -1;
   4203 		goto free;
   4204 	}
   4205 
   4206 free:
   4207 	mcx_dmamem_free(sc, &mxm);
   4208 
   4209 	return (error);
   4210 }
   4211 
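         /*
          * Apply interface flags: install or remove the promisc and
          * all-multicast flow table entries, then update the NIC vport
          * context with the current MTU.
          */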
   4212 static int
   4213 mcx_iff(struct mcx_softc *sc)
   4214 {
   4215 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   4216 	struct mcx_dmamem mxm;
   4217 	struct mcx_cmdq_entry *cqe;
   4218 	struct mcx_cmd_modify_nic_vport_context_in *in;
   4219 	struct mcx_cmd_modify_nic_vport_context_out *out;
   4220 	struct mcx_nic_vport_ctx *ctx;
   4221 	int error;
   4222 	int token;
   4223 	int insize;
   4224 	uint32_t dest;
   4225 
   4226 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
   4227 	    sc->sc_rss_flow_table_id;
   4228 
   4229 	/* enable or disable the promisc flow */
   4230 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
   4231 		if (sc->sc_promisc_flow_enabled == 0) {
   4232 			mcx_set_flow_table_entry_mac(sc,
   4233 			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
   4234 			sc->sc_promisc_flow_enabled = 1;
   4235 		}
   4236 	} else if (sc->sc_promisc_flow_enabled != 0) {
   4237 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
   4238 		sc->sc_promisc_flow_enabled = 0;
   4239 	}
   4240 
   4241 	/* enable or disable the all-multicast flow */
   4242 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
   4243 		if (sc->sc_allmulti_flow_enabled == 0) {
   4244 			uint8_t mcast[ETHER_ADDR_LEN];
   4245 
   4246 			memset(mcast, 0, sizeof(mcast));
   4247 			mcast[0] = 0x01;
   4248 			mcx_set_flow_table_entry_mac(sc,
   4249 			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
   4250 			sc->sc_allmulti_flow_enabled = 1;
   4251 		}
   4252 	} else if (sc->sc_allmulti_flow_enabled != 0) {
   4253 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
   4254 		sc->sc_allmulti_flow_enabled = 0;
   4255 	}
   4256 
   4257 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
   4258 
   4259 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4260 	token = mcx_cmdq_token(sc);
   4261 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
   4262 
   4263 	in = mcx_cmdq_in(cqe);
   4264 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
   4265 	in->cmd_op_mod = htobe16(0);
   4266 	in->cmd_field_select = htobe32(
   4267 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
   4268 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
   4269 
   4270 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
   4271 		printf(", unable to allocate modify "
   4272 		    "nic vport context mailboxen\n");
   4273 		return (-1);
   4274 	}
   4275 	ctx = (struct mcx_nic_vport_ctx *)
   4276 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
   4277 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
   4278 	/*
    4279 	 * always leave promisc-all enabled on the vport since we
    4280 	 * can't give it a vlan list, and we're already doing multicast
    4281 	 * filtering in the flow table.
   4282 	 */
   4283 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
   4284 
   4285 	mcx_cmdq_mboxes_sign(&mxm, 1);
   4286 	mcx_cmdq_post(sc, cqe, 0);
   4287 
   4288 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4289 	if (error != 0) {
   4290 		printf(", modify nic vport context timeout\n");
   4291 		goto free;
   4292 	}
   4293 	if (mcx_cmdq_verify(cqe) != 0) {
   4294 		printf(", modify nic vport context command corrupt\n");
   4295 		goto free;
   4296 	}
   4297 
   4298 	out = mcx_cmdq_out(cqe);
   4299 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4300 		printf(", modify nic vport context failed (%x, %x)\n",
   4301 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4302 		error = -1;
   4303 		goto free;
   4304 	}
   4305 
   4306 free:
   4307 	mcx_dmamem_free(sc, &mxm);
   4308 
   4309 	return (error);
   4310 }
   4311 
   4312 static int
   4313 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
   4314 {
   4315 	struct mcx_cmdq_entry *cqe;
   4316 	struct mcx_cmd_alloc_uar_in *in;
   4317 	struct mcx_cmd_alloc_uar_out *out;
   4318 	int error;
   4319 
   4320 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4321 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   4322 
   4323 	in = mcx_cmdq_in(cqe);
   4324 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
   4325 	in->cmd_op_mod = htobe16(0);
   4326 
   4327 	mcx_cmdq_post(sc, cqe, 0);
   4328 
   4329 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4330 	if (error != 0) {
   4331 		printf(", alloc uar timeout\n");
   4332 		return (-1);
   4333 	}
   4334 	if (mcx_cmdq_verify(cqe) != 0) {
   4335 		printf(", alloc uar command corrupt\n");
   4336 		return (-1);
   4337 	}
   4338 
   4339 	out = mcx_cmdq_out(cqe);
   4340 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4341 		printf(", alloc uar failed (%x)\n", out->cmd_status);
   4342 		return (-1);
   4343 	}
   4344 
   4345 	*uar = mcx_get_id(out->cmd_uar);
   4346 	return (0);
   4347 }
   4348 
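         /*
          * Create an event queue: allocate the ring, set the initial
          * owner bit on every entry, and pass the page addresses to the
          * firmware following the EQ context in the mailbox data.
          */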
   4349 static int
   4350 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
   4351     uint64_t events, int vector)
   4352 {
   4353 	struct mcx_cmdq_entry *cqe;
   4354 	struct mcx_dmamem mxm;
   4355 	struct mcx_cmd_create_eq_in *in;
   4356 	struct mcx_cmd_create_eq_mb_in *mbin;
   4357 	struct mcx_cmd_create_eq_out *out;
   4358 	struct mcx_eq_entry *eqe;
   4359 	int error;
   4360 	uint64_t *pas;
   4361 	int insize, npages, paslen, i, token;
   4362 
   4363 	eq->eq_cons = 0;
   4364 
   4365 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
   4366 	    MCX_PAGE_SIZE);
   4367 	paslen = npages * sizeof(*pas);
   4368 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
   4369 
   4370 	if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
   4371 	    MCX_PAGE_SIZE) != 0) {
   4372 		printf(", unable to allocate event queue memory\n");
   4373 		return (-1);
   4374 	}
   4375 
   4376 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
   4377 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
   4378 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
   4379 	}
   4380 
   4381 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4382 	token = mcx_cmdq_token(sc);
   4383 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
   4384 
   4385 	in = mcx_cmdq_in(cqe);
   4386 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
   4387 	in->cmd_op_mod = htobe16(0);
   4388 
   4389 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
   4390 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
   4391 	    &cqe->cq_input_ptr, token) != 0) {
   4392 		printf(", unable to allocate create eq mailboxen\n");
   4393 		goto free_eq;
   4394 	}
   4395 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   4396 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
   4397 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
   4398 	mbin->cmd_eq_ctx.eq_intr = vector;
   4399 	mbin->cmd_event_bitmask = htobe64(events);
   4400 
   4401 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   4402 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
   4403 
   4404 	/* physical addresses follow the mailbox in data */
   4405 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
   4406 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
   4407 	mcx_cmdq_post(sc, cqe, 0);
   4408 
   4409 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4410 	if (error != 0) {
   4411 		printf(", create eq timeout\n");
   4412 		goto free_mxm;
   4413 	}
   4414 	if (mcx_cmdq_verify(cqe) != 0) {
   4415 		printf(", create eq command corrupt\n");
   4416 		goto free_mxm;
   4417 	}
   4418 
   4419 	out = mcx_cmdq_out(cqe);
   4420 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4421 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
   4422 		    be32toh(out->cmd_syndrome));
   4423 		goto free_mxm;
   4424 	}
   4425 
   4426 	eq->eq_n = mcx_get_id(out->cmd_eqn);
   4427 
   4428 	mcx_dmamem_free(sc, &mxm);
   4429 
   4430 	mcx_arm_eq(sc, eq, uar);
   4431 
   4432 	return (0);
   4433 
   4434 free_mxm:
   4435 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   4436 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
   4437 	mcx_dmamem_free(sc, &mxm);
   4438 free_eq:
   4439 	mcx_dmamem_free(sc, &eq->eq_mem);
   4440 	return (-1);
   4441 }
   4442 
   4443 static int
   4444 mcx_alloc_pd(struct mcx_softc *sc)
   4445 {
   4446 	struct mcx_cmdq_entry *cqe;
   4447 	struct mcx_cmd_alloc_pd_in *in;
   4448 	struct mcx_cmd_alloc_pd_out *out;
   4449 	int error;
   4450 
   4451 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4452 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   4453 
   4454 	in = mcx_cmdq_in(cqe);
   4455 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
   4456 	in->cmd_op_mod = htobe16(0);
   4457 
   4458 	mcx_cmdq_post(sc, cqe, 0);
   4459 
   4460 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4461 	if (error != 0) {
   4462 		printf(", alloc pd timeout\n");
   4463 		return (-1);
   4464 	}
   4465 	if (mcx_cmdq_verify(cqe) != 0) {
   4466 		printf(", alloc pd command corrupt\n");
   4467 		return (-1);
   4468 	}
   4469 
   4470 	out = mcx_cmdq_out(cqe);
   4471 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4472 		printf(", alloc pd failed (%x)\n", out->cmd_status);
   4473 		return (-1);
   4474 	}
   4475 
   4476 	sc->sc_pd = mcx_get_id(out->cmd_pd);
   4477 	return (0);
   4478 }
   4479 
   4480 static int
   4481 mcx_alloc_tdomain(struct mcx_softc *sc)
   4482 {
   4483 	struct mcx_cmdq_entry *cqe;
   4484 	struct mcx_cmd_alloc_td_in *in;
   4485 	struct mcx_cmd_alloc_td_out *out;
   4486 	int error;
   4487 
   4488 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4489 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   4490 
   4491 	in = mcx_cmdq_in(cqe);
   4492 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
   4493 	in->cmd_op_mod = htobe16(0);
   4494 
   4495 	mcx_cmdq_post(sc, cqe, 0);
   4496 
   4497 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4498 	if (error != 0) {
   4499 		printf(", alloc transport domain timeout\n");
   4500 		return (-1);
   4501 	}
   4502 	if (mcx_cmdq_verify(cqe) != 0) {
   4503 		printf(", alloc transport domain command corrupt\n");
   4504 		return (-1);
   4505 	}
   4506 
   4507 	out = mcx_cmdq_out(cqe);
   4508 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4509 		printf(", alloc transport domain failed (%x)\n",
   4510 		    out->cmd_status);
   4511 		return (-1);
   4512 	}
   4513 
   4514 	sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
   4515 	return (0);
   4516 }
   4517 
   4518 static int
   4519 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
   4520 {
   4521 	struct mcx_dmamem mxm;
   4522 	struct mcx_cmdq_entry *cqe;
   4523 	struct mcx_cmd_query_nic_vport_context_in *in;
   4524 	struct mcx_cmd_query_nic_vport_context_out *out;
   4525 	struct mcx_nic_vport_ctx *ctx;
   4526 	uint8_t *addr;
   4527 	int error, token, i;
   4528 
   4529 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4530 	token = mcx_cmdq_token(sc);
   4531 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
   4532 
   4533 	in = mcx_cmdq_in(cqe);
   4534 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
   4535 	in->cmd_op_mod = htobe16(0);
   4536 	in->cmd_allowed_list_type = 0;
   4537 
   4538 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   4539 	    &cqe->cq_output_ptr, token) != 0) {
   4540 		printf(", unable to allocate "
   4541 		    "query nic vport context mailboxen\n");
   4542 		return (-1);
   4543 	}
   4544 	mcx_cmdq_mboxes_sign(&mxm, 1);
   4545 	mcx_cmdq_post(sc, cqe, 0);
   4546 
   4547 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4548 	if (error != 0) {
   4549 		printf(", query nic vport context timeout\n");
   4550 		goto free;
   4551 	}
   4552 	if (mcx_cmdq_verify(cqe) != 0) {
   4553 		printf(", query nic vport context command corrupt\n");
   4554 		goto free;
   4555 	}
   4556 
   4557 	out = mcx_cmdq_out(cqe);
   4558 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4559 		printf(", query nic vport context failed (%x, %x)\n",
   4560 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4561 		error = -1;
   4562 		goto free;
   4563 	}
   4564 
   4565 	ctx = (struct mcx_nic_vport_ctx *)
   4566 	    mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   4567 	addr = (uint8_t *)&ctx->vp_perm_addr;
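         	/* the 48-bit MAC is right-aligned in the 64-bit vp_perm_addr */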
   4568 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
   4569 		enaddr[i] = addr[i + 2];
   4570 	}
   4571 free:
   4572 	mcx_dmamem_free(sc, &mxm);
   4573 
   4574 	return (error);
   4575 }
   4576 
   4577 static int
   4578 mcx_query_special_contexts(struct mcx_softc *sc)
   4579 {
   4580 	struct mcx_cmdq_entry *cqe;
   4581 	struct mcx_cmd_query_special_ctx_in *in;
   4582 	struct mcx_cmd_query_special_ctx_out *out;
   4583 	int error;
   4584 
   4585 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4586 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   4587 
   4588 	in = mcx_cmdq_in(cqe);
   4589 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
   4590 	in->cmd_op_mod = htobe16(0);
   4591 
   4592 	mcx_cmdq_post(sc, cqe, 0);
   4593 
   4594 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4595 	if (error != 0) {
   4596 		printf(", query special contexts timeout\n");
   4597 		return (-1);
   4598 	}
   4599 	if (mcx_cmdq_verify(cqe) != 0) {
   4600 		printf(", query special contexts command corrupt\n");
   4601 		return (-1);
   4602 	}
   4603 
   4604 	out = mcx_cmdq_out(cqe);
   4605 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4606 		printf(", query special contexts failed (%x)\n",
   4607 		    out->cmd_status);
   4608 		return (-1);
   4609 	}
   4610 
   4611 	sc->sc_lkey = be32toh(out->cmd_resd_lkey);
   4612 	return (0);
   4613 }
   4614 
   4615 static int
   4616 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
   4617 {
   4618 	struct mcx_reg_pmtu pmtu;
   4619 	int error;
   4620 
   4621 	/* read max mtu */
   4622 	memset(&pmtu, 0, sizeof(pmtu));
   4623 	pmtu.rp_local_port = 1;
   4624 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
   4625 	    sizeof(pmtu));
   4626 	if (error != 0) {
   4627 		printf(", unable to get port MTU\n");
   4628 		return error;
   4629 	}
   4630 
   4631 	mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
   4632 	pmtu.rp_admin_mtu = htobe16(mtu);
   4633 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
   4634 	    sizeof(pmtu));
   4635 	if (error != 0) {
   4636 		printf(", unable to set port MTU\n");
   4637 		return error;
   4638 	}
   4639 
   4640 	sc->sc_hardmtu = mtu;
   4641 	sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
   4642 	return 0;
   4643 }
   4644 
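         /*
          * Create a completion queue bound to an event queue.  Each
          * entry's owner flag is preset so software can tell unwritten
          * entries apart on the first pass around the ring, and each CQ
          * gets its own slot in the shared doorbell page.
          */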
   4645 static int
   4646 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
   4647 {
   4648 	struct mcx_cmdq_entry *cmde;
   4649 	struct mcx_cq_entry *cqe;
   4650 	struct mcx_dmamem mxm;
   4651 	struct mcx_cmd_create_cq_in *in;
   4652 	struct mcx_cmd_create_cq_mb_in *mbin;
   4653 	struct mcx_cmd_create_cq_out *out;
   4654 	int error;
   4655 	uint64_t *pas;
   4656 	int insize, npages, paslen, i, token;
   4657 
   4658 	cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
   4659 
   4660 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
   4661 	    MCX_PAGE_SIZE);
   4662 	paslen = npages * sizeof(*pas);
   4663 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
   4664 
   4665 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
   4666 	    MCX_PAGE_SIZE) != 0) {
   4667 		printf("%s: unable to allocate completion queue memory\n",
   4668 		    DEVNAME(sc));
   4669 		return (-1);
   4670 	}
   4671 	cqe = MCX_DMA_KVA(&cq->cq_mem);
   4672 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
   4673 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
   4674 	}
   4675 
   4676 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4677 	token = mcx_cmdq_token(sc);
   4678 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
   4679 
   4680 	in = mcx_cmdq_in(cmde);
   4681 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
   4682 	in->cmd_op_mod = htobe16(0);
   4683 
   4684 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
   4685 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
   4686 	    &cmde->cq_input_ptr, token) != 0) {
   4687 		printf("%s: unable to allocate create cq mailboxen\n",
   4688 		    DEVNAME(sc));
   4689 		goto free_cq;
   4690 	}
   4691 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   4692 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
   4693 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
   4694 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
   4695 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
   4696 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
   4697 	    MCX_CQ_MOD_COUNTER);
   4698 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
   4699 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
   4700 
   4701 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
   4702 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
   4703 
   4704 	/* physical addresses follow the mailbox in data */
   4705 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
   4706 	mcx_cmdq_post(sc, cmde, 0);
   4707 
   4708 	error = mcx_cmdq_poll(sc, cmde, 1000);
   4709 	if (error != 0) {
   4710 		printf("%s: create cq timeout\n", DEVNAME(sc));
   4711 		goto free_mxm;
   4712 	}
   4713 	if (mcx_cmdq_verify(cmde) != 0) {
   4714 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
   4715 		goto free_mxm;
   4716 	}
   4717 
   4718 	out = mcx_cmdq_out(cmde);
   4719 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4720 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
   4721 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4722 		goto free_mxm;
   4723 	}
   4724 
   4725 	cq->cq_n = mcx_get_id(out->cmd_cqn);
   4726 	cq->cq_cons = 0;
   4727 	cq->cq_count = 0;
   4728 
   4729 	mcx_dmamem_free(sc, &mxm);
   4730 
   4731 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   4732 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
   4733 	    BUS_DMASYNC_PREWRITE);
   4734 
   4735 	mcx_arm_cq(sc, cq, uar);
   4736 
   4737 	return (0);
   4738 
   4739 free_mxm:
   4740 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
   4741 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
   4742 	mcx_dmamem_free(sc, &mxm);
   4743 free_cq:
   4744 	mcx_dmamem_free(sc, &cq->cq_mem);
   4745 	return (-1);
   4746 }
   4747 
   4748 static int
   4749 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
   4750 {
   4751 	struct mcx_cmdq_entry *cqe;
   4752 	struct mcx_cmd_destroy_cq_in *in;
   4753 	struct mcx_cmd_destroy_cq_out *out;
   4754 	int error;
   4755 	int token;
   4756 
   4757 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4758 	token = mcx_cmdq_token(sc);
   4759 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   4760 
   4761 	in = mcx_cmdq_in(cqe);
   4762 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
   4763 	in->cmd_op_mod = htobe16(0);
   4764 	in->cmd_cqn = htobe32(cq->cq_n);
   4765 
   4766 	mcx_cmdq_post(sc, cqe, 0);
   4767 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4768 	if (error != 0) {
   4769 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
   4770 		return error;
   4771 	}
   4772 	if (mcx_cmdq_verify(cqe) != 0) {
   4773 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
   4774 		return error;
   4775 	}
   4776 
   4777 	out = mcx_cmdq_out(cqe);
   4778 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4779 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
   4780 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4781 		return -1;
   4782 	}
   4783 
   4784 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   4785 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
   4786 	    BUS_DMASYNC_POSTWRITE);
   4787 
   4788 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
   4789 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
   4790 	mcx_dmamem_free(sc, &cq->cq_mem);
   4791 
   4792 	cq->cq_n = 0;
   4793 	cq->cq_cons = 0;
   4794 	cq->cq_count = 0;
   4795 	return 0;
   4796 }
   4797 
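         /*
          * Create a receive queue on a cyclic work queue.  The RQ
          * context and page addresses sit 0x10 bytes into the mailbox
          * data, and the log2 stride of 4 gives 16-byte receive
          * descriptors.
          */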
   4798 static int
   4799 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
   4800 {
   4801 	struct mcx_cmdq_entry *cqe;
   4802 	struct mcx_dmamem mxm;
   4803 	struct mcx_cmd_create_rq_in *in;
   4804 	struct mcx_cmd_create_rq_out *out;
   4805 	struct mcx_rq_ctx *mbin;
   4806 	int error;
   4807 	uint64_t *pas;
   4808 	uint32_t rq_flags;
   4809 	int insize, npages, paslen, token;
   4810 
   4811 	rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
   4812 	    (db * MCX_WQ_DOORBELL_STRIDE);
   4813 
   4814 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
   4815 	    MCX_PAGE_SIZE);
   4816 	paslen = npages * sizeof(*pas);
   4817 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
   4818 
   4819 	if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
   4820 	    MCX_PAGE_SIZE) != 0) {
   4821 		printf("%s: unable to allocate receive queue memory\n",
   4822 		    DEVNAME(sc));
   4823 		return (-1);
   4824 	}
   4825 
   4826 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4827 	token = mcx_cmdq_token(sc);
   4828 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
   4829 
   4830 	in = mcx_cmdq_in(cqe);
   4831 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
   4832 	in->cmd_op_mod = htobe16(0);
   4833 
   4834 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
   4835 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
   4836 	    &cqe->cq_input_ptr, token) != 0) {
   4837 		printf("%s: unable to allocate create rq mailboxen\n",
   4838 		    DEVNAME(sc));
   4839 		goto free_rq;
   4840 	}
   4841 	mbin = (struct mcx_rq_ctx *)
   4842 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
   4843 	rq_flags = MCX_RQ_CTX_RLKEY;
   4844 	mbin->rq_flags = htobe32(rq_flags);
   4845 	mbin->rq_cqn = htobe32(cqn);
   4846 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
   4847 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
   4848 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
   4849 	    rx->rx_doorbell);
   4850 	mbin->rq_wq.wq_log_stride = htobe16(4);
   4851 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
   4852 
   4853 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
   4854 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
   4855 
   4856 	/* physical addresses follow the mailbox in data */
   4857 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
   4858 	mcx_cmdq_post(sc, cqe, 0);
   4859 
   4860 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4861 	if (error != 0) {
   4862 		printf("%s: create rq timeout\n", DEVNAME(sc));
   4863 		goto free_mxm;
   4864 	}
   4865 	if (mcx_cmdq_verify(cqe) != 0) {
   4866 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
   4867 		goto free_mxm;
   4868 	}
   4869 
   4870 	out = mcx_cmdq_out(cqe);
   4871 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4872 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
   4873 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4874 		goto free_mxm;
   4875 	}
   4876 
   4877 	rx->rx_rqn = mcx_get_id(out->cmd_rqn);
   4878 
   4879 	mcx_dmamem_free(sc, &mxm);
   4880 
   4881 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   4882 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   4883 
   4884 	return (0);
   4885 
   4886 free_mxm:
   4887 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
   4888 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
   4889 	mcx_dmamem_free(sc, &mxm);
   4890 free_rq:
   4891 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
   4892 	return (-1);
   4893 }
   4894 
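         /*
          * Move a receive queue from the RST to the RDY state so the
          * hardware starts using it.
          */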
   4895 static int
   4896 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
   4897 {
   4898 	struct mcx_cmdq_entry *cqe;
   4899 	struct mcx_dmamem mxm;
   4900 	struct mcx_cmd_modify_rq_in *in;
   4901 	struct mcx_cmd_modify_rq_mb_in *mbin;
   4902 	struct mcx_cmd_modify_rq_out *out;
   4903 	int error;
   4904 	int token;
   4905 
   4906 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4907 	token = mcx_cmdq_token(sc);
   4908 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   4909 	    sizeof(*out), token);
   4910 
   4911 	in = mcx_cmdq_in(cqe);
   4912 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
   4913 	in->cmd_op_mod = htobe16(0);
   4914 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
   4915 
   4916 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   4917 	    &cqe->cq_input_ptr, token) != 0) {
   4918 		printf("%s: unable to allocate modify rq mailbox\n",
   4919 		    DEVNAME(sc));
   4920 		return (-1);
   4921 	}
   4922 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   4923 	mbin->cmd_rq_ctx.rq_flags = htobe32(
   4924 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
   4925 
   4926 	mcx_cmdq_mboxes_sign(&mxm, 1);
   4927 	mcx_cmdq_post(sc, cqe, 0);
   4928 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4929 	if (error != 0) {
   4930 		printf("%s: modify rq timeout\n", DEVNAME(sc));
   4931 		goto free;
   4932 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   4937 
   4938 	out = mcx_cmdq_out(cqe);
   4939 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4940 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
   4941 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4942 		error = -1;
   4943 		goto free;
   4944 	}
   4945 
   4946 free:
   4947 	mcx_dmamem_free(sc, &mxm);
   4948 	return (error);
   4949 }
   4950 
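/*
 * Teardown mirrors creation: after DESTROY_RQ succeeds, the doorbell
 * and work queue DMA syncs are completed with POSTWRITE and the queue
 * memory is handed back to the system.
 */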
   4951 static int
   4952 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
   4953 {
   4954 	struct mcx_cmdq_entry *cqe;
   4955 	struct mcx_cmd_destroy_rq_in *in;
   4956 	struct mcx_cmd_destroy_rq_out *out;
   4957 	int error;
   4958 	int token;
   4959 
   4960 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   4961 	token = mcx_cmdq_token(sc);
   4962 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   4963 
   4964 	in = mcx_cmdq_in(cqe);
   4965 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
   4966 	in->cmd_op_mod = htobe16(0);
   4967 	in->cmd_rqn = htobe32(rx->rx_rqn);
   4968 
   4969 	mcx_cmdq_post(sc, cqe, 0);
   4970 	error = mcx_cmdq_poll(sc, cqe, 1000);
   4971 	if (error != 0) {
   4972 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
   4973 		return error;
   4974 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
		return (-1);
	}
   4979 
   4980 	out = mcx_cmdq_out(cqe);
   4981 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   4982 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
   4983 		    out->cmd_status, be32toh(out->cmd_syndrome));
   4984 		return -1;
   4985 	}
   4986 
   4987 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   4988 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
   4989 
   4990 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
   4991 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
   4992 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
   4993 
   4994 	rx->rx_rqn = 0;
   4995 	return 0;
   4996 }
   4997 
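/*
 * A TIR (transport interface receive) decides where inbound packets
 * land.  The direct flavour below leaves the dispatch type at zero so
 * everything is delivered to a single inline RQ; the indirect flavour
 * that follows spreads packets over an RQ table via a Toeplitz hash
 * instead.
 */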
   4998 static int
   4999 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
   5000 {
   5001 	struct mcx_cmdq_entry *cqe;
   5002 	struct mcx_dmamem mxm;
   5003 	struct mcx_cmd_create_tir_in *in;
   5004 	struct mcx_cmd_create_tir_mb_in *mbin;
   5005 	struct mcx_cmd_create_tir_out *out;
   5006 	int error;
   5007 	int token;
   5008 
   5009 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5010 	token = mcx_cmdq_token(sc);
   5011 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5012 	    sizeof(*out), token);
   5013 
   5014 	in = mcx_cmdq_in(cqe);
   5015 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
   5016 	in->cmd_op_mod = htobe16(0);
   5017 
   5018 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5019 	    &cqe->cq_input_ptr, token) != 0) {
   5020 		printf("%s: unable to allocate create tir mailbox\n",
   5021 		    DEVNAME(sc));
   5022 		return (-1);
   5023 	}
   5024 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5025 	/* leave disp_type = 0, so packets get sent to the inline rqn */
   5026 	mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
   5027 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
   5028 
   5029 	mcx_cmdq_post(sc, cqe, 0);
   5030 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5031 	if (error != 0) {
   5032 		printf("%s: create tir timeout\n", DEVNAME(sc));
   5033 		goto free;
   5034 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5039 
   5040 	out = mcx_cmdq_out(cqe);
   5041 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5042 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
   5043 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5044 		error = -1;
   5045 		goto free;
   5046 	}
   5047 
   5048 	*tirn = mcx_get_id(out->cmd_tirn);
   5049 free:
   5050 	mcx_dmamem_free(sc, &mxm);
   5051 	return (error);
   5052 }
   5053 
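/*
 * For RSS, hash_sel picks which outer header fields feed the Toeplitz
 * hash, and the key is taken from the system-wide symmetric Toeplitz
 * state (stoeplitz_to_key) so the result indexes consistently into
 * the RQ table named by rqtn.
 */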
   5054 static int
   5055 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
   5056     int *tirn)
   5057 {
   5058 	struct mcx_cmdq_entry *cqe;
   5059 	struct mcx_dmamem mxm;
   5060 	struct mcx_cmd_create_tir_in *in;
   5061 	struct mcx_cmd_create_tir_mb_in *mbin;
   5062 	struct mcx_cmd_create_tir_out *out;
   5063 	int error;
   5064 	int token;
   5065 
   5066 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5067 	token = mcx_cmdq_token(sc);
   5068 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5069 	    sizeof(*out), token);
   5070 
   5071 	in = mcx_cmdq_in(cqe);
   5072 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
   5073 	in->cmd_op_mod = htobe16(0);
   5074 
   5075 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5076 	    &cqe->cq_input_ptr, token) != 0) {
   5077 		printf("%s: unable to allocate create tir mailbox\n",
   5078 		    DEVNAME(sc));
   5079 		return (-1);
   5080 	}
   5081 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5082 	mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
   5083 	    << MCX_TIR_CTX_DISP_TYPE_SHIFT);
   5084 	mbin->cmd_indir_table = htobe32(rqtn);
   5085 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
   5086 	    MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
   5087 	mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
   5088 	stoeplitz_to_key(&mbin->cmd_rx_hash_key,
   5089 	    sizeof(mbin->cmd_rx_hash_key));
   5090 
   5091 	mcx_cmdq_post(sc, cqe, 0);
   5092 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5093 	if (error != 0) {
   5094 		printf("%s: create tir timeout\n", DEVNAME(sc));
   5095 		goto free;
   5096 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5101 
   5102 	out = mcx_cmdq_out(cqe);
   5103 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5104 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
   5105 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5106 		error = -1;
   5107 		goto free;
   5108 	}
   5109 
   5110 	*tirn = mcx_get_id(out->cmd_tirn);
   5111 free:
   5112 	mcx_dmamem_free(sc, &mxm);
   5113 	return (error);
   5114 }
   5115 
   5116 static int
   5117 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
   5118 {
   5119 	struct mcx_cmdq_entry *cqe;
   5120 	struct mcx_cmd_destroy_tir_in *in;
   5121 	struct mcx_cmd_destroy_tir_out *out;
   5122 	int error;
   5123 	int token;
   5124 
   5125 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5126 	token = mcx_cmdq_token(sc);
   5127 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   5128 
   5129 	in = mcx_cmdq_in(cqe);
   5130 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
   5131 	in->cmd_op_mod = htobe16(0);
   5132 	in->cmd_tirn = htobe32(tirn);
   5133 
   5134 	mcx_cmdq_post(sc, cqe, 0);
   5135 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5136 	if (error != 0) {
   5137 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
   5138 		return error;
   5139 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
		return (-1);
	}
   5144 
   5145 	out = mcx_cmdq_out(cqe);
   5146 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5147 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
   5148 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5149 		return -1;
   5150 	}
   5151 
   5152 	return (0);
   5153 }
   5154 
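/*
 * RQ and SQ doorbells live in pairs within the doorbell page: the
 * receive side appears to use the first 32-bit word of each
 * MCX_WQ_DOORBELL_STRIDE-sized slot, which is why the send doorbell
 * below sits at an offset of 4.
 */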
   5155 static int
   5156 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
   5157     int cqn)
   5158 {
   5159 	struct mcx_cmdq_entry *cqe;
   5160 	struct mcx_dmamem mxm;
   5161 	struct mcx_cmd_create_sq_in *in;
   5162 	struct mcx_sq_ctx *mbin;
   5163 	struct mcx_cmd_create_sq_out *out;
   5164 	int error;
   5165 	uint64_t *pas;
   5166 	int insize, npages, paslen, token;
   5167 
   5168 	tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
   5169 	    (db * MCX_WQ_DOORBELL_STRIDE) + 4;
   5170 
   5171 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
   5172 	    MCX_PAGE_SIZE);
   5173 	paslen = npages * sizeof(*pas);
   5174 	insize = sizeof(struct mcx_sq_ctx) + paslen;
   5175 
   5176 	if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
   5177 	    MCX_PAGE_SIZE) != 0) {
   5178 		printf("%s: unable to allocate send queue memory\n",
   5179 		    DEVNAME(sc));
   5180 		return (-1);
   5181 	}
   5182 
   5183 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5184 	token = mcx_cmdq_token(sc);
   5185 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
   5186 	    token);
   5187 
   5188 	in = mcx_cmdq_in(cqe);
   5189 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
   5190 	in->cmd_op_mod = htobe16(0);
   5191 
   5192 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
   5193 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
   5194 	    &cqe->cq_input_ptr, token) != 0) {
   5195 		printf("%s: unable to allocate create sq mailboxen\n",
   5196 		    DEVNAME(sc));
   5197 		goto free_sq;
   5198 	}
   5199 	mbin = (struct mcx_sq_ctx *)
   5200 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
   5201 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
   5202 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
   5203 	mbin->sq_cqn = htobe32(cqn);
   5204 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
   5205 	mbin->sq_tis_num = htobe32(sc->sc_tis);
   5206 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
   5207 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
   5208 	mbin->sq_wq.wq_uar_page = htobe32(uar);
   5209 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
   5210 	    tx->tx_doorbell);
   5211 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
   5212 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
   5213 
   5214 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
   5215 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
   5216 
   5217 	/* physical addresses follow the mailbox in data */
   5218 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
   5219 	    npages, &tx->tx_sq_mem);
   5220 	mcx_cmdq_post(sc, cqe, 0);
   5221 
   5222 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5223 	if (error != 0) {
   5224 		printf("%s: create sq timeout\n", DEVNAME(sc));
   5225 		goto free_mxm;
   5226 	}
   5227 	if (mcx_cmdq_verify(cqe) != 0) {
   5228 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
   5229 		goto free_mxm;
   5230 	}
   5231 
   5232 	out = mcx_cmdq_out(cqe);
   5233 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5234 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
   5235 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5236 		goto free_mxm;
   5237 	}
   5238 
   5239 	tx->tx_uar = uar;
   5240 	tx->tx_sqn = mcx_get_id(out->cmd_sqn);
   5241 
   5242 	mcx_dmamem_free(sc, &mxm);
   5243 
   5244 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   5245 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   5246 
   5247 	return (0);
   5248 
   5249 free_mxm:
   5250 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
   5251 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
   5252 	mcx_dmamem_free(sc, &mxm);
   5253 free_sq:
   5254 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
   5255 	return (-1);
   5256 }
   5257 
   5258 static int
   5259 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
   5260 {
   5261 	struct mcx_cmdq_entry *cqe;
   5262 	struct mcx_cmd_destroy_sq_in *in;
   5263 	struct mcx_cmd_destroy_sq_out *out;
   5264 	int error;
   5265 	int token;
   5266 
   5267 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5268 	token = mcx_cmdq_token(sc);
   5269 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   5270 
   5271 	in = mcx_cmdq_in(cqe);
   5272 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
   5273 	in->cmd_op_mod = htobe16(0);
   5274 	in->cmd_sqn = htobe32(tx->tx_sqn);
   5275 
   5276 	mcx_cmdq_post(sc, cqe, 0);
   5277 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5278 	if (error != 0) {
   5279 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
   5280 		return error;
   5281 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
		return (-1);
	}
   5286 
   5287 	out = mcx_cmdq_out(cqe);
   5288 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5289 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
   5290 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5291 		return -1;
   5292 	}
   5293 
   5294 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   5295 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
   5296 
   5297 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
   5298 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
   5299 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
   5300 
   5301 	tx->tx_sqn = 0;
   5302 	return 0;
   5303 }
   5304 
   5305 static int
   5306 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
   5307 {
   5308 	struct mcx_cmdq_entry *cqe;
   5309 	struct mcx_dmamem mxm;
   5310 	struct mcx_cmd_modify_sq_in *in;
   5311 	struct mcx_cmd_modify_sq_mb_in *mbin;
   5312 	struct mcx_cmd_modify_sq_out *out;
   5313 	int error;
   5314 	int token;
   5315 
   5316 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5317 	token = mcx_cmdq_token(sc);
   5318 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5319 	    sizeof(*out), token);
   5320 
   5321 	in = mcx_cmdq_in(cqe);
   5322 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
   5323 	in->cmd_op_mod = htobe16(0);
   5324 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
   5325 
   5326 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5327 	    &cqe->cq_input_ptr, token) != 0) {
   5328 		printf("%s: unable to allocate modify sq mailbox\n",
   5329 		    DEVNAME(sc));
   5330 		return (-1);
   5331 	}
   5332 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5333 	mbin->cmd_sq_ctx.sq_flags = htobe32(
   5334 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
   5335 
   5336 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5337 	mcx_cmdq_post(sc, cqe, 0);
   5338 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5339 	if (error != 0) {
   5340 		printf("%s: modify sq timeout\n", DEVNAME(sc));
   5341 		goto free;
   5342 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5347 
   5348 	out = mcx_cmdq_out(cqe);
   5349 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5350 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
   5351 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5352 		error = -1;
   5353 		goto free;
   5354 	}
   5355 
   5356 free:
   5357 	mcx_dmamem_free(sc, &mxm);
   5358 	return (error);
   5359 }
   5360 
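/*
 * A TIS (transport interface send) ties send queues to the transport
 * domain; the TIS number created here is what mcx_create_sq() plugs
 * into sq_tis_num.
 */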
   5361 static int
   5362 mcx_create_tis(struct mcx_softc *sc, int *tis)
   5363 {
   5364 	struct mcx_cmdq_entry *cqe;
   5365 	struct mcx_dmamem mxm;
   5366 	struct mcx_cmd_create_tis_in *in;
   5367 	struct mcx_cmd_create_tis_mb_in *mbin;
   5368 	struct mcx_cmd_create_tis_out *out;
   5369 	int error;
   5370 	int token;
   5371 
   5372 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5373 	token = mcx_cmdq_token(sc);
   5374 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5375 	    sizeof(*out), token);
   5376 
   5377 	in = mcx_cmdq_in(cqe);
   5378 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
   5379 	in->cmd_op_mod = htobe16(0);
   5380 
   5381 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5382 	    &cqe->cq_input_ptr, token) != 0) {
   5383 		printf("%s: unable to allocate create tis mailbox\n",
   5384 		    DEVNAME(sc));
   5385 		return (-1);
   5386 	}
   5387 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5388 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
   5389 
   5390 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5391 	mcx_cmdq_post(sc, cqe, 0);
   5392 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5393 	if (error != 0) {
   5394 		printf("%s: create tis timeout\n", DEVNAME(sc));
   5395 		goto free;
   5396 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tis command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5401 
   5402 	out = mcx_cmdq_out(cqe);
   5403 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5404 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
   5405 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5406 		error = -1;
   5407 		goto free;
   5408 	}
   5409 
   5410 	*tis = mcx_get_id(out->cmd_tisn);
   5411 free:
   5412 	mcx_dmamem_free(sc, &mxm);
   5413 	return (error);
   5414 }
   5415 
   5416 static int
   5417 mcx_destroy_tis(struct mcx_softc *sc, int tis)
   5418 {
   5419 	struct mcx_cmdq_entry *cqe;
   5420 	struct mcx_cmd_destroy_tis_in *in;
   5421 	struct mcx_cmd_destroy_tis_out *out;
   5422 	int error;
   5423 	int token;
   5424 
   5425 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5426 	token = mcx_cmdq_token(sc);
   5427 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   5428 
   5429 	in = mcx_cmdq_in(cqe);
   5430 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
   5431 	in->cmd_op_mod = htobe16(0);
   5432 	in->cmd_tisn = htobe32(tis);
   5433 
   5434 	mcx_cmdq_post(sc, cqe, 0);
   5435 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5436 	if (error != 0) {
   5437 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
   5438 		return error;
   5439 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
		return (-1);
	}
   5444 
   5445 	out = mcx_cmdq_out(cqe);
   5446 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5447 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
   5448 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5449 		return -1;
   5450 	}
   5451 
   5452 	return 0;
   5453 }
   5454 
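/*
 * An RQT is the RSS indirection table: a list of RQ numbers that an
 * indirect TIR hashes into.  The mailbox carries the RQT context
 * followed directly by the big-endian RQ numbers, so filling it in is
 * just:
 *
 *	rqtn = (int *)(rqt_ctx + 1);
 *	for (i = 0; i < size; i++)
 *		rqtn[i] = htobe32(rqns[i]);
 */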
   5455 static int
   5456 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
   5457 {
   5458 	struct mcx_cmdq_entry *cqe;
   5459 	struct mcx_dmamem mxm;
   5460 	struct mcx_cmd_create_rqt_in *in;
   5461 	struct mcx_cmd_create_rqt_mb_in *mbin;
   5462 	struct mcx_cmd_create_rqt_out *out;
   5463 	struct mcx_rqt_ctx *rqt_ctx;
   5464 	int *rqtn;
   5465 	int error;
   5466 	int token;
   5467 	int i;
   5468 
   5469 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5470 	token = mcx_cmdq_token(sc);
   5471 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
   5472 	    (size * sizeof(int)), sizeof(*out), token);
   5473 
   5474 	in = mcx_cmdq_in(cqe);
   5475 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
   5476 	in->cmd_op_mod = htobe16(0);
   5477 
   5478 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5479 	    &cqe->cq_input_ptr, token) != 0) {
   5480 		printf("%s: unable to allocate create rqt mailbox\n",
   5481 		    DEVNAME(sc));
   5482 		return (-1);
   5483 	}
   5484 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5485 	rqt_ctx = &mbin->cmd_rqt;
   5486 	rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
   5487 	rqt_ctx->cmd_rqt_actual_size = htobe16(size);
   5488 
   5489 	/* rqt list follows the rqt context */
   5490 	rqtn = (int *)(rqt_ctx + 1);
   5491 	for (i = 0; i < size; i++) {
   5492 		rqtn[i] = htobe32(rqns[i]);
   5493 	}
   5494 
   5495 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5496 	mcx_cmdq_post(sc, cqe, 0);
   5497 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5498 	if (error != 0) {
   5499 		printf("%s: create rqt timeout\n", DEVNAME(sc));
   5500 		goto free;
   5501 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5506 
   5507 	out = mcx_cmdq_out(cqe);
   5508 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5509 		printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
   5510 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5511 		error = -1;
   5512 		goto free;
   5513 	}
   5514 
	*rqt = mcx_get_id(out->cmd_rqtn);
	/* fall through so the mailboxes are freed on success too */
   5517 free:
   5518 	mcx_dmamem_free(sc, &mxm);
   5519 	return (error);
   5520 }
   5521 
   5522 static int
   5523 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
   5524 {
   5525 	struct mcx_cmdq_entry *cqe;
   5526 	struct mcx_cmd_destroy_rqt_in *in;
   5527 	struct mcx_cmd_destroy_rqt_out *out;
   5528 	int error;
   5529 	int token;
   5530 
   5531 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5532 	token = mcx_cmdq_token(sc);
   5533 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
   5534 
   5535 	in = mcx_cmdq_in(cqe);
   5536 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
   5537 	in->cmd_op_mod = htobe16(0);
   5538 	in->cmd_rqtn = htobe32(rqt);
   5539 
   5540 	mcx_cmdq_post(sc, cqe, 0);
   5541 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5542 	if (error != 0) {
   5543 		printf("%s: destroy rqt timeout\n", DEVNAME(sc));
   5544 		return error;
   5545 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
		return (-1);
	}
   5550 
   5551 	out = mcx_cmdq_out(cqe);
   5552 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5553 		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
   5554 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5555 		return -1;
   5556 	}
   5557 
   5558 	return 0;
   5559 }
   5560 
   5561 #if 0
   5562 static int
   5563 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
   5564 {
   5565 	struct mcx_cmdq_entry *cqe;
   5566 	struct mcx_cmd_alloc_flow_counter_in *in;
   5567 	struct mcx_cmd_alloc_flow_counter_out *out;
   5568 	int error;
   5569 
   5570 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5571 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
   5572 
   5573 	in = mcx_cmdq_in(cqe);
   5574 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
   5575 	in->cmd_op_mod = htobe16(0);
   5576 
   5577 	mcx_cmdq_post(sc, cqe, 0);
   5578 
   5579 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5580 	if (error != 0) {
   5581 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
   5582 		return (-1);
   5583 	}
   5584 	if (mcx_cmdq_verify(cqe) != 0) {
   5585 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
   5586 		return (-1);
   5587 	}
   5588 
   5589 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
   5590 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5591 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
   5592 		    out->cmd_status);
   5593 		return (-1);
   5594 	}
   5595 
	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
	printf("%s: flow counter id %d = %d\n", DEVNAME(sc), i,
	    sc->sc_flow_counter_id[i]);
   5598 
   5599 	return (0);
   5600 }
   5601 #endif
   5602 
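/*
 * Receive flow steering is built from a few pieces: flow tables are
 * created at a given level, one table is nominated as the root of the
 * pipeline (mcx_set_flow_table_root), flow groups carve a table into
 * ranges of entries that share match criteria, and individual entries
 * forward matching packets to a destination, typically a TIR.
 */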
   5603 static int
   5604 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
   5605     int *flow_table_id)
   5606 {
   5607 	struct mcx_cmdq_entry *cqe;
   5608 	struct mcx_dmamem mxm;
   5609 	struct mcx_cmd_create_flow_table_in *in;
   5610 	struct mcx_cmd_create_flow_table_mb_in *mbin;
   5611 	struct mcx_cmd_create_flow_table_out *out;
   5612 	int error;
   5613 	int token;
   5614 
   5615 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5616 	token = mcx_cmdq_token(sc);
   5617 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5618 	    sizeof(*out), token);
   5619 
   5620 	in = mcx_cmdq_in(cqe);
   5621 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
   5622 	in->cmd_op_mod = htobe16(0);
   5623 
   5624 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5625 	    &cqe->cq_input_ptr, token) != 0) {
   5626 		printf("%s: unable to allocate create flow table mailbox\n",
   5627 		    DEVNAME(sc));
   5628 		return (-1);
   5629 	}
   5630 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5631 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5632 	mbin->cmd_ctx.ft_log_size = log_size;
   5633 	mbin->cmd_ctx.ft_level = level;
   5634 
   5635 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5636 	mcx_cmdq_post(sc, cqe, 0);
   5637 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5638 	if (error != 0) {
   5639 		printf("%s: create flow table timeout\n", DEVNAME(sc));
   5640 		goto free;
   5641 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5646 
   5647 	out = mcx_cmdq_out(cqe);
   5648 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5649 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
   5650 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5651 		error = -1;
   5652 		goto free;
   5653 	}
   5654 
   5655 	*flow_table_id = mcx_get_id(out->cmd_table_id);
   5656 free:
   5657 	mcx_dmamem_free(sc, &mxm);
   5658 	return (error);
   5659 }
   5660 
   5661 static int
   5662 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
   5663 {
   5664 	struct mcx_cmdq_entry *cqe;
   5665 	struct mcx_dmamem mxm;
   5666 	struct mcx_cmd_set_flow_table_root_in *in;
   5667 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
   5668 	struct mcx_cmd_set_flow_table_root_out *out;
   5669 	int error;
   5670 	int token;
   5671 
   5672 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5673 	token = mcx_cmdq_token(sc);
   5674 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   5675 	    sizeof(*out), token);
   5676 
   5677 	in = mcx_cmdq_in(cqe);
   5678 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
   5679 	in->cmd_op_mod = htobe16(0);
   5680 
   5681 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5682 	    &cqe->cq_input_ptr, token) != 0) {
   5683 		printf("%s: unable to allocate set flow table root mailbox\n",
   5684 		    DEVNAME(sc));
   5685 		return (-1);
   5686 	}
   5687 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5688 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5689 	mbin->cmd_table_id = htobe32(flow_table_id);
   5690 
   5691 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5692 	mcx_cmdq_post(sc, cqe, 0);
   5693 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5694 	if (error != 0) {
   5695 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
   5696 		goto free;
   5697 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table root command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   5703 
   5704 	out = mcx_cmdq_out(cqe);
   5705 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5706 		printf("%s: set flow table root failed (%x, %x)\n",
   5707 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
   5708 		error = -1;
   5709 		goto free;
   5710 	}
   5711 
   5712 free:
   5713 	mcx_dmamem_free(sc, &mxm);
   5714 	return (error);
   5715 }
   5716 
   5717 static int
   5718 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
   5719 {
   5720 	struct mcx_cmdq_entry *cqe;
   5721 	struct mcx_dmamem mxm;
   5722 	struct mcx_cmd_destroy_flow_table_in *in;
   5723 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
   5724 	struct mcx_cmd_destroy_flow_table_out *out;
   5725 	int error;
   5726 	int token;
   5727 
   5728 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5729 	token = mcx_cmdq_token(sc);
   5730 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
   5731 
   5732 	in = mcx_cmdq_in(cqe);
   5733 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
   5734 	in->cmd_op_mod = htobe16(0);
   5735 
   5736 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   5737 	    &cqe->cq_input_ptr, token) != 0) {
   5738 		printf("%s: unable to allocate destroy flow table mailbox\n",
   5739 		    DEVNAME(sc));
   5740 		return (-1);
   5741 	}
   5742 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5743 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5744 	mb->cmd_table_id = htobe32(flow_table_id);
   5745 
   5746 	mcx_cmdq_mboxes_sign(&mxm, 1);
   5747 	mcx_cmdq_post(sc, cqe, 0);
   5748 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5749 	if (error != 0) {
   5750 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
   5751 		goto free;
   5752 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy flow table command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   5758 
   5759 	out = mcx_cmdq_out(cqe);
   5760 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5761 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
   5762 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5763 		error = -1;
   5764 		goto free;
   5765 	}
   5766 
   5767 free:
   5768 	mcx_dmamem_free(sc, &mxm);
   5769 	return (error);
   5770 }
   5771 
   5772 
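/*
 * A flow group owns entries [start, start + size - 1] of its table;
 * match_enable selects which classes of criteria apply and *match
 * supplies the mask.  Two mailboxes are used since the match criteria
 * push the input beyond a single mailbox's data area.
 */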
   5773 static int
   5774 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
   5775     int start, int size, int match_enable, struct mcx_flow_match *match)
   5776 {
   5777 	struct mcx_cmdq_entry *cqe;
   5778 	struct mcx_dmamem mxm;
   5779 	struct mcx_cmd_create_flow_group_in *in;
   5780 	struct mcx_cmd_create_flow_group_mb_in *mbin;
   5781 	struct mcx_cmd_create_flow_group_out *out;
   5782 	struct mcx_flow_group *mfg;
   5783 	int error;
   5784 	int token;
   5785 
   5786 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5787 	token = mcx_cmdq_token(sc);
   5788 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
   5789 	    token);
   5790 
   5791 	in = mcx_cmdq_in(cqe);
   5792 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
   5793 	in->cmd_op_mod = htobe16(0);
   5794 
   5795 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
   5796 	    != 0) {
   5797 		printf("%s: unable to allocate create flow group mailbox\n",
   5798 		    DEVNAME(sc));
   5799 		return (-1);
   5800 	}
   5801 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5802 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5803 	mbin->cmd_table_id = htobe32(flow_table_id);
   5804 	mbin->cmd_start_flow_index = htobe32(start);
   5805 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
   5806 
   5807 	mbin->cmd_match_criteria_enable = match_enable;
   5808 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
   5809 
   5810 	mcx_cmdq_mboxes_sign(&mxm, 2);
   5811 	mcx_cmdq_post(sc, cqe, 0);
   5812 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5813 	if (error != 0) {
   5814 		printf("%s: create flow group timeout\n", DEVNAME(sc));
   5815 		goto free;
   5816 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5821 
   5822 	out = mcx_cmdq_out(cqe);
   5823 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5824 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
   5825 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5826 		error = -1;
   5827 		goto free;
   5828 	}
   5829 
   5830 	mfg = &sc->sc_flow_group[group];
   5831 	mfg->g_id = mcx_get_id(out->cmd_group_id);
   5832 	mfg->g_table = flow_table_id;
   5833 	mfg->g_start = start;
   5834 	mfg->g_size = size;
   5835 
   5836 free:
   5837 	mcx_dmamem_free(sc, &mxm);
   5838 	return (error);
   5839 }
   5840 
   5841 static int
   5842 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
   5843 {
   5844 	struct mcx_cmdq_entry *cqe;
   5845 	struct mcx_dmamem mxm;
   5846 	struct mcx_cmd_destroy_flow_group_in *in;
   5847 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
   5848 	struct mcx_cmd_destroy_flow_group_out *out;
   5849 	struct mcx_flow_group *mfg;
   5850 	int error;
   5851 	int token;
   5852 
   5853 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5854 	token = mcx_cmdq_token(sc);
   5855 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
   5856 
   5857 	in = mcx_cmdq_in(cqe);
   5858 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
   5859 	in->cmd_op_mod = htobe16(0);
   5860 
   5861 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   5862 	    &cqe->cq_input_ptr, token) != 0) {
   5863 		printf("%s: unable to allocate destroy flow group mailbox\n",
   5864 		    DEVNAME(sc));
   5865 		return (-1);
   5866 	}
   5867 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5868 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5869 	mfg = &sc->sc_flow_group[group];
   5870 	mb->cmd_table_id = htobe32(mfg->g_table);
   5871 	mb->cmd_group_id = htobe32(mfg->g_id);
   5872 
   5873 	mcx_cmdq_mboxes_sign(&mxm, 2);
   5874 	mcx_cmdq_post(sc, cqe, 0);
   5875 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5876 	if (error != 0) {
   5877 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
   5878 		goto free;
   5879 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   5884 
   5885 	out = mcx_cmdq_out(cqe);
   5886 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5887 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
   5888 		    out->cmd_status, be32toh(out->cmd_syndrome));
   5889 		error = -1;
   5890 		goto free;
   5891 	}
   5892 
   5893 	mfg->g_id = -1;
   5894 	mfg->g_table = -1;
   5895 	mfg->g_size = 0;
   5896 	mfg->g_start = 0;
   5897 free:
   5898 	mcx_dmamem_free(sc, &mxm);
   5899 	return (error);
   5900 }
   5901 
   5902 static int
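/*
 * Flow table entries are written with a flow context that spans both
 * input mailboxes; the destination list starts at a fixed offset
 * (0x130) into the second one.  Each entry installed here carries a
 * single FORWARD destination.  A NULL macaddr leaves the match value
 * empty, effectively a catch-all within the group's criteria.
 */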
   5903 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
   5904     const uint8_t *macaddr, uint32_t dest)
   5905 {
   5906 	struct mcx_cmdq_entry *cqe;
   5907 	struct mcx_dmamem mxm;
   5908 	struct mcx_cmd_set_flow_table_entry_in *in;
   5909 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
   5910 	struct mcx_cmd_set_flow_table_entry_out *out;
   5911 	struct mcx_flow_group *mfg;
   5912 	uint32_t *pdest;
   5913 	int error;
   5914 	int token;
   5915 
   5916 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5917 	token = mcx_cmdq_token(sc);
   5918 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
   5919 	    sizeof(*out), token);
   5920 
   5921 	in = mcx_cmdq_in(cqe);
   5922 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
   5923 	in->cmd_op_mod = htobe16(0);
   5924 
   5925 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
   5926 	    != 0) {
   5927 		printf("%s: unable to allocate set flow table entry mailbox\n",
   5928 		    DEVNAME(sc));
   5929 		return (-1);
   5930 	}
   5931 
   5932 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   5933 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   5934 
   5935 	mfg = &sc->sc_flow_group[group];
   5936 	mbin->cmd_table_id = htobe32(mfg->g_table);
   5937 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
   5938 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
   5939 
   5940 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
   5941 	pdest = (uint32_t *)
   5942 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
   5943 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
   5944 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
   5945 	*pdest = htobe32(dest);
   5946 
   5947 	/* the only thing we match on at the moment is the dest mac address */
   5948 	if (macaddr != NULL) {
   5949 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
   5950 		    ETHER_ADDR_LEN);
   5951 	}
   5952 
   5953 	mcx_cmdq_mboxes_sign(&mxm, 2);
   5954 	mcx_cmdq_post(sc, cqe, 0);
   5955 	error = mcx_cmdq_poll(sc, cqe, 1000);
   5956 	if (error != 0) {
   5957 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
   5958 		goto free;
   5959 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table entry command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   5965 
   5966 	out = mcx_cmdq_out(cqe);
   5967 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   5968 		printf("%s: set flow table entry failed (%x, %x)\n",
   5969 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
   5970 		error = -1;
   5971 		goto free;
   5972 	}
   5973 
   5974 free:
   5975 	mcx_dmamem_free(sc, &mxm);
   5976 	return (error);
   5977 }
   5978 
   5979 static int
   5980 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
   5981     int ethertype, int ip_proto, uint32_t dest)
   5982 {
   5983 	struct mcx_cmdq_entry *cqe;
   5984 	struct mcx_dmamem mxm;
   5985 	struct mcx_cmd_set_flow_table_entry_in *in;
   5986 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
   5987 	struct mcx_cmd_set_flow_table_entry_out *out;
   5988 	struct mcx_flow_group *mfg;
   5989 	uint32_t *pdest;
   5990 	int error;
   5991 	int token;
   5992 
   5993 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   5994 	token = mcx_cmdq_token(sc);
   5995 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
   5996 	    sizeof(*out), token);
   5997 
   5998 	in = mcx_cmdq_in(cqe);
   5999 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
   6000 	in->cmd_op_mod = htobe16(0);
   6001 
   6002 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
   6003 	    != 0) {
   6004 		printf("%s: unable to allocate set flow table entry mailbox\n",
   6005 		    DEVNAME(sc));
   6006 		return (-1);
   6007 	}
   6008 
   6009 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6010 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   6011 
   6012 	mfg = &sc->sc_flow_group[group];
   6013 	mbin->cmd_table_id = htobe32(mfg->g_table);
   6014 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
   6015 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
   6016 
   6017 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
   6018 	pdest = (uint32_t *)
   6019 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
   6020 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
   6021 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
   6022 	*pdest = htobe32(dest);
   6023 
   6024 	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
   6025 	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
   6026 
   6027 	mcx_cmdq_mboxes_sign(&mxm, 2);
   6028 	mcx_cmdq_post(sc, cqe, 0);
   6029 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6030 	if (error != 0) {
   6031 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
   6032 		goto free;
   6033 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: set flow table entry command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   6039 
   6040 	out = mcx_cmdq_out(cqe);
   6041 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   6042 		printf("%s: set flow table entry failed (%x, %x)\n",
   6043 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
   6044 		error = -1;
   6045 		goto free;
   6046 	}
   6047 
   6048 free:
   6049 	mcx_dmamem_free(sc, &mxm);
   6050 	return (error);
   6051 }
   6052 
   6053 static int
   6054 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
   6055 {
   6056 	struct mcx_cmdq_entry *cqe;
   6057 	struct mcx_dmamem mxm;
   6058 	struct mcx_cmd_delete_flow_table_entry_in *in;
   6059 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
   6060 	struct mcx_cmd_delete_flow_table_entry_out *out;
   6061 	struct mcx_flow_group *mfg;
   6062 	int error;
   6063 	int token;
   6064 
   6065 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6066 	token = mcx_cmdq_token(sc);
   6067 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
   6068 	    token);
   6069 
   6070 	in = mcx_cmdq_in(cqe);
   6071 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
   6072 	in->cmd_op_mod = htobe16(0);
   6073 
   6074 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6075 	    &cqe->cq_input_ptr, token) != 0) {
   6076 		printf("%s: unable to allocate "
   6077 		    "delete flow table entry mailbox\n", DEVNAME(sc));
   6078 		return (-1);
   6079 	}
   6080 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6081 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
   6082 
   6083 	mfg = &sc->sc_flow_group[group];
   6084 	mbin->cmd_table_id = htobe32(mfg->g_table);
   6085 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
   6086 
   6087 	mcx_cmdq_mboxes_sign(&mxm, 2);
   6088 	mcx_cmdq_post(sc, cqe, 0);
   6089 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6090 	if (error != 0) {
   6091 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
   6092 		goto free;
   6093 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: delete flow table entry command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   6099 
   6100 	out = mcx_cmdq_out(cqe);
   6101 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   6102 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
   6103 		    DEVNAME(sc), group, index, out->cmd_status,
   6104 		    be32toh(out->cmd_syndrome));
   6105 		error = -1;
   6106 		goto free;
   6107 	}
   6108 
   6109 free:
   6110 	mcx_dmamem_free(sc, &mxm);
   6111 	return (error);
   6112 }
   6113 
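/*
 * The block below holds debug-only helpers that hex-dump flow table,
 * flow group and counter state; they are useful when bringing up flow
 * steering but are compiled out of normal builds.
 */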
   6114 #if 0
   6115 int
   6116 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
   6117 {
   6118 	struct mcx_dmamem mxm;
   6119 	struct mcx_cmdq_entry *cqe;
   6120 	struct mcx_cmd_query_flow_table_in *in;
   6121 	struct mcx_cmd_query_flow_table_mb_in *mbin;
   6122 	struct mcx_cmd_query_flow_table_out *out;
   6123 	struct mcx_cmd_query_flow_table_mb_out *mbout;
   6124 	uint8_t token = mcx_cmdq_token(sc);
   6125 	int error;
   6126 	int i;
   6127 	uint8_t *dump;
   6128 
   6129 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6130 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   6131 	    sizeof(*out) + sizeof(*mbout) + 16, token);
   6132 
   6133 	in = mcx_cmdq_in(cqe);
   6134 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
   6135 	in->cmd_op_mod = htobe16(0);
   6136 
   6137 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
   6138 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
   6139 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6140 	    &cqe->cq_output_ptr, token) != 0) {
   6141 		printf(", unable to allocate query flow table mailboxes\n");
   6142 		return (-1);
   6143 	}
   6144 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   6145 
   6146 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6147 	mbin->cmd_table_type = 0;
   6148 	mbin->cmd_table_id = htobe32(flow_table_id);
   6149 
   6150 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6151 
   6152 	mcx_cmdq_post(sc, cqe, 0);
   6153 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6154 	if (error != 0) {
   6155 		printf("%s: query flow table timeout\n", DEVNAME(sc));
   6156 		goto free;
   6157 	}
   6158 	error = mcx_cmdq_verify(cqe);
   6159 	if (error != 0) {
   6160 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
   6161 		goto free;
   6162 	}
   6163 
   6164 	out = mcx_cmdq_out(cqe);
   6165 	switch (out->cmd_status) {
   6166 	case MCX_CQ_STATUS_OK:
   6167 		break;
   6168 	default:
   6169 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
   6170 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6171 		error = -1;
   6172 		goto free;
   6173 	}
   6174 
	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
   6176 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6177 	dump = (uint8_t *)mbout + 8;
   6178 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
   6179 		printf("%.2x ", dump[i]);
   6180 		if (i % 16 == 15)
   6181 			printf("\n");
   6182 	}
   6183 free:
   6184 	mcx_cq_mboxes_free(sc, &mxm);
   6185 	return (error);
   6186 }

int
   6188 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
   6189 {
   6190 	struct mcx_dmamem mxm;
   6191 	struct mcx_cmdq_entry *cqe;
   6192 	struct mcx_cmd_query_flow_table_entry_in *in;
   6193 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
   6194 	struct mcx_cmd_query_flow_table_entry_out *out;
   6195 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
   6196 	uint8_t token = mcx_cmdq_token(sc);
   6197 	int error;
   6198 	int i;
   6199 	uint8_t *dump;
   6200 
   6201 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6202 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   6203 	    sizeof(*out) + sizeof(*mbout) + 16, token);
   6204 
   6205 	in = mcx_cmdq_in(cqe);
   6206 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
   6207 	in->cmd_op_mod = htobe16(0);
   6208 
   6209 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
   6210 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6211 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6212 	    &cqe->cq_output_ptr, token) != 0) {
   6213 		printf(", unable to allocate "
   6214 		    "query flow table entry mailboxes\n");
   6215 		return (-1);
   6216 	}
   6217 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   6218 
   6219 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6220 	mbin->cmd_table_type = 0;
   6221 	mbin->cmd_table_id = htobe32(flow_table_id);
   6222 	mbin->cmd_flow_index = htobe32(index);
   6223 
   6224 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6225 
   6226 	mcx_cmdq_post(sc, cqe, 0);
   6227 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6228 	if (error != 0) {
   6229 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
   6230 		goto free;
   6231 	}
   6232 	error = mcx_cmdq_verify(cqe);
   6233 	if (error != 0) {
   6234 		printf("%s: query flow table entry reply corrupt\n",
   6235 		    DEVNAME(sc));
   6236 		goto free;
   6237 	}
   6238 
   6239 	out = mcx_cmdq_out(cqe);
   6240 	switch (out->cmd_status) {
   6241 	case MCX_CQ_STATUS_OK:
   6242 		break;
   6243 	default:
   6244 		printf("%s: query flow table entry failed (%x/%x)\n",
   6245 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
   6246 		error = -1;
   6247 		goto free;
   6248 	}
   6249 
	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
   6251 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6252 	dump = (uint8_t *)mbout;
   6253 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
   6254 		printf("%.2x ", dump[i]);
   6255 		if (i % 16 == 15)
   6256 			printf("\n");
   6257 	}
   6258 
   6259 free:
   6260 	mcx_cq_mboxes_free(sc, &mxm);
   6261 	return (error);
   6262 }
   6263 
   6264 int
   6265 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
   6266 {
   6267 	struct mcx_dmamem mxm;
   6268 	struct mcx_cmdq_entry *cqe;
   6269 	struct mcx_cmd_query_flow_group_in *in;
   6270 	struct mcx_cmd_query_flow_group_mb_in *mbin;
   6271 	struct mcx_cmd_query_flow_group_out *out;
   6272 	struct mcx_cmd_query_flow_group_mb_out *mbout;
   6273 	uint8_t token = mcx_cmdq_token(sc);
   6274 	int error;
   6275 	int i;
   6276 	uint8_t *dump;
   6277 
   6278 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6279 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   6280 	    sizeof(*out) + sizeof(*mbout) + 16, token);
   6281 
   6282 	in = mcx_cmdq_in(cqe);
   6283 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
   6284 	in->cmd_op_mod = htobe16(0);
   6285 
   6286 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
   6287 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6288 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6289 	    &cqe->cq_output_ptr, token) != 0) {
   6290 		printf(", unable to allocate query flow group mailboxes\n");
   6291 		return (-1);
   6292 	}
   6293 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   6294 
   6295 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6296 	mbin->cmd_table_type = 0;
   6297 	mbin->cmd_table_id = htobe32(flow_table_id);
   6298 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
   6299 
   6300 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6301 
   6302 	mcx_cmdq_post(sc, cqe, 0);
   6303 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6304 	if (error != 0) {
   6305 		printf("%s: query flow group timeout\n", DEVNAME(sc));
   6306 		goto free;
   6307 	}
   6308 	error = mcx_cmdq_verify(cqe);
   6309 	if (error != 0) {
   6310 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
   6311 		goto free;
   6312 	}
   6313 
   6314 	out = mcx_cmdq_out(cqe);
   6315 	switch (out->cmd_status) {
   6316 	case MCX_CQ_STATUS_OK:
   6317 		break;
   6318 	default:
   6319 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
   6320 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6321 		error = -1;
   6322 		goto free;
   6323 	}
   6324 
	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
   6326 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6327 	dump = (uint8_t *)mbout;
   6328 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
   6329 		printf("%.2x ", dump[i]);
   6330 		if (i % 16 == 15)
   6331 			printf("\n");
   6332 	}
   6333 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
   6334 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
   6335 		printf("%.2x ", dump[i]);
   6336 		if (i % 16 == 15)
   6337 			printf("\n");
   6338 	}
   6339 
   6340 free:
   6341 	mcx_cq_mboxes_free(sc, &mxm);
   6342 	return (error);
   6343 }
   6344 
   6345 static int
   6346 mcx_dump_counters(struct mcx_softc *sc)
   6347 {
   6348 	struct mcx_dmamem mxm;
   6349 	struct mcx_cmdq_entry *cqe;
   6350 	struct mcx_cmd_query_vport_counters_in *in;
   6351 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
   6352 	struct mcx_cmd_query_vport_counters_out *out;
   6353 	struct mcx_nic_vport_counters *counters;
   6354 	int error, token;
   6355 
   6356 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6357 	token = mcx_cmdq_token(sc);
   6358 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
   6359 	    sizeof(*out) + sizeof(*counters), token);
   6360 
   6361 	in = mcx_cmdq_in(cqe);
   6362 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
   6363 	in->cmd_op_mod = htobe16(0);
   6364 
   6365 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   6366 	    &cqe->cq_output_ptr, token) != 0) {
   6367 		printf(", unable to allocate "
   6368 		    "query nic vport counters mailboxen\n");
   6369 		return (-1);
   6370 	}
   6371 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   6372 
   6373 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6374 	mbin->cmd_clear = 0x80;
   6375 
   6376 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6377 	mcx_cmdq_post(sc, cqe, 0);
   6378 
   6379 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6380 	if (error != 0) {
   6381 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
   6382 		goto free;
   6383 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query nic vport counters command corrupt\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
   6389 
   6390 	out = mcx_cmdq_out(cqe);
   6391 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   6392 		printf("%s: query nic vport counters failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
   6394 		error = -1;
   6395 		goto free;
   6396 	}
   6397 
   6398 	counters = (struct mcx_nic_vport_counters *)
   6399 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6400 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
   6401 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
   6402 	    counters->rx_err.packets + counters->tx_err.packets)
   6403 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
   6404 		    DEVNAME(sc),
		    be64toh(counters->tx_err.packets),
		    be64toh(counters->rx_err.packets),
		    be64toh(counters->tx_ucast.packets),
		    be64toh(counters->rx_ucast.packets),
		    be64toh(counters->tx_bcast.packets),
		    be64toh(counters->rx_bcast.packets));
   6411 free:
   6412 	mcx_dmamem_free(sc, &mxm);
   6413 
   6414 	return (error);
   6415 }
   6416 
   6417 static int
   6418 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
   6419 {
   6420 	struct mcx_dmamem mxm;
   6421 	struct mcx_cmdq_entry *cqe;
   6422 	struct mcx_cmd_query_flow_counter_in *in;
   6423 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
   6424 	struct mcx_cmd_query_flow_counter_out *out;
   6425 	struct mcx_counter *counters;
   6426 	int error, token;
   6427 
   6428 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6429 	token = mcx_cmdq_token(sc);
   6430 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
   6431 	    sizeof(*counters), token);
   6432 
   6433 	in = mcx_cmdq_in(cqe);
   6434 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
   6435 	in->cmd_op_mod = htobe16(0);
   6436 
   6437 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
   6438 	    &cqe->cq_output_ptr, token) != 0) {
   6439 		printf(", unable to allocate query flow counter mailboxen\n");
   6440 		return (-1);
   6441 	}
   6442 	cqe->cq_input_ptr = cqe->cq_output_ptr;
   6443 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
   6444 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
   6445 	mbin->cmd_clear = 0x80;
   6446 
   6447 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6448 	mcx_cmdq_post(sc, cqe, 0);
   6449 
   6450 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6451 	if (error != 0) {
   6452 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
   6453 		goto free;
   6454 	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
   6459 
   6460 	out = mcx_cmdq_out(cqe);
   6461 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
   6462 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
   6464 		error = -1;
   6465 		goto free;
   6466 	}
   6467 
   6468 	counters = (struct mcx_counter *)
   6469 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6470 	if (counters->packets)
   6471 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
		    be64toh(counters->packets));
   6473 free:
   6474 	mcx_dmamem_free(sc, &mxm);
   6475 
   6476 	return (error);
   6477 }
   6478 
   6479 #endif
   6480 
   6481 #if NKSTAT > 0
   6482 
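/*
 * Queue context query helpers, used by the kstat code to read RQ, SQ
 * and CQ state back from the device.  Only built when kstat support
 * is enabled.
 */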
   6483 int
   6484 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
   6485 {
   6486 	struct mcx_dmamem mxm;
   6487 	struct mcx_cmdq_entry *cqe;
   6488 	struct mcx_cmd_query_rq_in *in;
   6489 	struct mcx_cmd_query_rq_out *out;
   6490 	struct mcx_cmd_query_rq_mb_out *mbout;
   6491 	uint8_t token = mcx_cmdq_token(sc);
   6492 	int error;
   6493 
   6494 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6495 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
   6496 	    token);
   6497 
   6498 	in = mcx_cmdq_in(cqe);
   6499 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
   6500 	in->cmd_op_mod = htobe16(0);
   6501 	in->cmd_rqn = htobe32(rx->rx_rqn);
   6502 
   6503 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6504 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6505 	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query rq mailboxes\n",
		    DEVNAME(sc));
   6507 		return (-1);
   6508 	}
   6509 
   6510 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6511 
   6512 	mcx_cmdq_post(sc, cqe, 0);
   6513 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6514 	if (error != 0) {
   6515 		printf("%s: query rq timeout\n", DEVNAME(sc));
   6516 		goto free;
   6517 	}
   6518 	error = mcx_cmdq_verify(cqe);
   6519 	if (error != 0) {
   6520 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
   6521 		goto free;
   6522 	}
   6523 
   6524 	out = mcx_cmdq_out(cqe);
   6525 	switch (out->cmd_status) {
   6526 	case MCX_CQ_STATUS_OK:
   6527 		break;
   6528 	default:
   6529 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
   6530 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6531 		error = -1;
   6532 		goto free;
   6533 	}
   6534 
	mbout = (struct mcx_cmd_query_rq_mb_out *)
   6536 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6537 	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
   6538 
   6539 free:
   6540 	mcx_cq_mboxes_free(sc, &mxm);
   6541 	return (error);
   6542 }
   6543 
   6544 int
   6545 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
   6546 {
   6547 	struct mcx_dmamem mxm;
   6548 	struct mcx_cmdq_entry *cqe;
   6549 	struct mcx_cmd_query_sq_in *in;
   6550 	struct mcx_cmd_query_sq_out *out;
   6551 	struct mcx_cmd_query_sq_mb_out *mbout;
   6552 	uint8_t token = mcx_cmdq_token(sc);
   6553 	int error;
   6554 
   6555 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6556 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
   6557 	    token);
   6558 
   6559 	in = mcx_cmdq_in(cqe);
   6560 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
   6561 	in->cmd_op_mod = htobe16(0);
   6562 	in->cmd_sqn = htobe32(tx->tx_sqn);
   6563 
   6564 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6565 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6566 	    &cqe->cq_output_ptr, token) != 0) {
    6567 		printf("%s: unable to allocate query sq mailboxes\n",
         		    DEVNAME(sc));
   6568 		return (-1);
   6569 	}
   6570 
   6571 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6572 
   6573 	mcx_cmdq_post(sc, cqe, 0);
   6574 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6575 	if (error != 0) {
   6576 		printf("%s: query sq timeout\n", DEVNAME(sc));
   6577 		goto free;
   6578 	}
   6579 	error = mcx_cmdq_verify(cqe);
   6580 	if (error != 0) {
   6581 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
   6582 		goto free;
   6583 	}
   6584 
   6585 	out = mcx_cmdq_out(cqe);
   6586 	switch (out->cmd_status) {
   6587 	case MCX_CQ_STATUS_OK:
   6588 		break;
   6589 	default:
   6590 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
   6591 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6592 		error = -1;
   6593 		goto free;
   6594 	}
   6595 
    6596 	mbout = (struct mcx_cmd_query_sq_mb_out *)
   6597 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6598 	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
   6599 
   6600 free:
   6601 	mcx_cq_mboxes_free(sc, &mxm);
   6602 	return (error);
   6603 }
   6604 
   6605 int
   6606 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
   6607 {
   6608 	struct mcx_dmamem mxm;
   6609 	struct mcx_cmdq_entry *cqe;
   6610 	struct mcx_cmd_query_cq_in *in;
   6611 	struct mcx_cmd_query_cq_out *out;
   6612 	struct mcx_cq_ctx *ctx;
   6613 	uint8_t token = mcx_cmdq_token(sc);
   6614 	int error;
   6615 
   6616 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6617 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
   6618 	    token);
   6619 
   6620 	in = mcx_cmdq_in(cqe);
   6621 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
   6622 	in->cmd_op_mod = htobe16(0);
   6623 	in->cmd_cqn = htobe32(cq->cq_n);
   6624 
   6625 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6626 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6627 	    &cqe->cq_output_ptr, token) != 0) {
   6628 		printf("%s: unable to allocate query cq mailboxes\n",
   6629 		    DEVNAME(sc));
   6630 		return (-1);
   6631 	}
   6632 
   6633 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6634 
   6635 	mcx_cmdq_post(sc, cqe, 0);
   6636 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6637 	if (error != 0) {
   6638 		printf("%s: query cq timeout\n", DEVNAME(sc));
   6639 		goto free;
   6640 	}
   6641 	if (mcx_cmdq_verify(cqe) != 0) {
   6642 		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
   6643 		goto free;
   6644 	}
   6645 
   6646 	out = mcx_cmdq_out(cqe);
   6647 	switch (out->cmd_status) {
   6648 	case MCX_CQ_STATUS_OK:
   6649 		break;
   6650 	default:
    6651 		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
   6652 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6653 		error = -1;
   6654 		goto free;
   6655 	}
   6656 
   6657 	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6658 	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
   6659 free:
   6660 	mcx_dmamem_free(sc, &mxm);
   6661 	return (error);
   6662 }
   6663 
   6664 int
   6665 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
   6666 {
   6667 	struct mcx_dmamem mxm;
   6668 	struct mcx_cmdq_entry *cqe;
   6669 	struct mcx_cmd_query_eq_in *in;
   6670 	struct mcx_cmd_query_eq_out *out;
    6671 	struct mcx_eq_ctx *ctx;
   6672 	uint8_t token = mcx_cmdq_token(sc);
   6673 	int error;
   6674 
   6675 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
   6676 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
   6677 	    token);
   6678 
   6679 	in = mcx_cmdq_in(cqe);
   6680 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
   6681 	in->cmd_op_mod = htobe16(0);
   6682 	in->cmd_eqn = htobe32(eq->eq_n);
   6683 
   6684 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
   6685 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
   6686 	    &cqe->cq_output_ptr, token) != 0) {
   6687 		printf("%s: unable to allocate query eq mailboxes\n",
   6688 		    DEVNAME(sc));
   6689 		return (-1);
   6690 	}
   6691 
   6692 	mcx_cmdq_mboxes_sign(&mxm, 1);
   6693 
   6694 	mcx_cmdq_post(sc, cqe, 0);
   6695 	error = mcx_cmdq_poll(sc, cqe, 1000);
   6696 	if (error != 0) {
   6697 		printf("%s: query eq timeout\n", DEVNAME(sc));
   6698 		goto free;
   6699 	}
   6700 	if (mcx_cmdq_verify(cqe) != 0) {
   6701 		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
   6702 		goto free;
   6703 	}
   6704 
   6705 	out = mcx_cmdq_out(cqe);
   6706 	switch (out->cmd_status) {
   6707 	case MCX_CQ_STATUS_OK:
   6708 		break;
   6709 	default:
   6710 		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
   6711 		    out->cmd_status, be32toh(out->cmd_syndrome));
   6712 		error = -1;
   6713 		goto free;
   6714 	}
   6715 
   6716 	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
   6717 	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
   6718 free:
   6719 	mcx_dmamem_free(sc, &mxm);
   6720 	return (error);
   6721 }
   6722 
   6723 #endif /* NKSTAT > 0 */
   6724 
   6725 
   6726 static inline unsigned int
   6727 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
   6728 {
   6729 	struct mcx_rq_entry *ring, *rqe;
   6730 	struct mcx_slot *ms;
   6731 	struct mbuf *m;
   6732 	uint slot, p, fills;
   6733 
   6734 	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
   6735 	p = rx->rx_prod;
   6736 
   6737 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
   6738 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
   6739 
   6740 	slot = (p % (1 << MCX_LOG_RQ_SIZE));
   6741 	rqe = ring;
   6742 	for (fills = 0; fills < nslots; fills++) {
   6743 		slot = p % (1 << MCX_LOG_RQ_SIZE);
   6744 
   6745 		ms = &rx->rx_slots[slot];
   6746 		rqe = &ring[slot];
   6747 
   6748 		m = NULL;
   6749 		MGETHDR(m, M_DONTWAIT, MT_DATA);
   6750 		if (m == NULL)
   6751 			break;
   6752 
   6753 		MCLGET(m, M_DONTWAIT);
   6754 		if ((m->m_flags & M_EXT) == 0) {
   6755 			m_freem(m);
   6756 			break;
   6757 		}
   6758 
   6759 		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
   6760 		m_adj(m, m->m_ext.ext_size - sc->sc_rxbufsz);
   6761 		m_adj(m, ETHER_ALIGN);
   6762 
   6763 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
   6764 		    BUS_DMA_NOWAIT) != 0) {
   6765 			m_freem(m);
   6766 			break;
   6767 		}
    6768 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
         		    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
   6769 		ms->ms_m = m;
   6770 
   6771 		be32enc(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
   6772 		be64enc(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
   6773 		be32enc(&rqe->rqe_lkey, sc->sc_lkey);
   6774 
   6775 		p++;
   6776 	}
   6777 
   6778 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
   6779 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
   6780 
   6781 	rx->rx_prod = p;
   6782 
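         	/*
         	 * publish the new producer counter through the rq doorbell
         	 * record so the device can see the refilled slots.
         	 */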
   6783 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   6784 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
   6785 	be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
   6786 	    p & MCX_WQ_DOORBELL_MASK);
   6787 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   6788 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   6789 
   6790 	return (nslots - fills);
   6791 }
   6792 
   6793 static int
   6794 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
   6795 {
   6796 	u_int slots;
   6797 
   6798 	slots = mcx_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
   6799 	if (slots == 0)
   6800 		return (1);
   6801 
   6802 	slots = mcx_rx_fill_slots(sc, rx, slots);
   6803 	mcx_rxr_put(&rx->rx_rxr, slots);
   6804 	return (0);
   6805 }
   6806 
   6807 void
   6808 mcx_refill(void *xrx)
   6809 {
   6810 	struct mcx_rx *rx = xrx;
   6811 	struct mcx_softc *sc = rx->rx_softc;
   6812 
   6813 	mcx_rx_fill(sc, rx);
   6814 
   6815 	if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
   6816 		callout_schedule(&rx->rx_refill, 1);
   6817 }
   6818 
   6819 static int
   6820 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
   6821     struct mcx_cq_entry *cqe)
   6822 {
   6823 	struct mcx_slot *ms;
   6824 	bus_dmamap_t map;
   6825 	int slot, slots;
   6826 
   6827 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
   6828 
   6829 	ms = &tx->tx_slots[slot];
   6830 	map = ms->ms_map;
   6831 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   6832 	    BUS_DMASYNC_POSTWRITE);
   6833 
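         	/*
         	 * the first slot carries the ctrl and eth segments plus a
         	 * single data segment; mcx_send_common_locked() packs any
         	 * further data segments MCX_SQ_SEGS_PER_SLOT to a slot, so
         	 * account for those extra slots the same way here.
         	 */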
   6834 	slots = 1;
   6835 	if (map->dm_nsegs > 1)
    6836 		slots += (map->dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT;
   6837 
   6838 	bus_dmamap_unload(sc->sc_dmat, map);
   6839 	m_freem(ms->ms_m);
   6840 	ms->ms_m = NULL;
   6841 
   6842 	return (slots);
   6843 }
   6844 
   6845 static uint64_t
   6846 mcx_uptime(void)
   6847 {
   6848 	struct timespec ts;
   6849 
   6850 	nanouptime(&ts);
   6851 
   6852 	return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
   6853 }
   6854 
   6855 static void
   6856 mcx_calibrate_first(struct mcx_softc *sc)
   6857 {
   6858 	struct mcx_calibration *c = &sc->sc_calibration[0];
   6859 	int s;
   6860 
   6861 	sc->sc_calibration_gen = 0;
   6862 
   6863 	s = splhigh(); /* crit_enter? */
   6864 	c->c_ubase = mcx_uptime();
   6865 	c->c_tbase = mcx_timer(sc);
   6866 	splx(s);
   6867 	c->c_ratio = 0;
   6868 
   6869 #if notyet
   6870 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
   6871 #endif
   6872 }
   6873 
   6874 #define MCX_TIMESTAMP_SHIFT 24
   6875 
   6876 static void
   6877 mcx_calibrate(void *arg)
   6878 {
   6879 	struct mcx_softc *sc = arg;
   6880 	struct mcx_calibration *nc, *pc;
   6881 	uint64_t udiff, tdiff;
   6882 	unsigned int gen;
   6883 	int s;
   6884 
   6885 	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
   6886 		return;
   6887 
   6888 	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
   6889 
   6890 	gen = sc->sc_calibration_gen;
   6891 	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
   6892 	gen++;
   6893 	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
   6894 
   6895 	nc->c_uptime = pc->c_ubase;
   6896 	nc->c_timestamp = pc->c_tbase;
   6897 
   6898 	s = splhigh(); /* crit_enter? */
   6899 	nc->c_ubase = mcx_uptime();
   6900 	nc->c_tbase = mcx_timer(sc);
   6901 	splx(s);
   6902 
   6903 	udiff = nc->c_ubase - nc->c_uptime;
   6904 	tdiff = nc->c_tbase - nc->c_timestamp;
   6905 
   6906 	/*
   6907 	 * udiff is the wall clock time between calibration ticks,
   6908 	 * which should be 32 seconds or 32 billion nanoseconds. if
   6909 	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
   6910 	 * number, so 32 billion should still have a lot of high bits
   6911 	 * spare. we use this space by shifting the nanoseconds up
   6912 	 * 24 bits so we have a nice big number to divide by the
   6913 	 * number of mcx timer ticks.
   6914 	 */
   6915 	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
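         	/*
         	 * illustrative numbers only, since the real values depend on
         	 * the device's timer frequency: a timer ticking at, say,
         	 * 156.25MHz gives a tdiff of about 5e9 over the 32 second
         	 * interval, so the shifted udiff of (32e9 << 24) ~= 5.4e17
         	 * still fits in 64 bits and c_ratio comes out near 1.07e8.
         	 * converting a hardware timestamp is then one multiply and
         	 * one shift: ns = (ticks * c_ratio) >> MCX_TIMESTAMP_SHIFT.
         	 */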
   6916 
   6917 	membar_producer();
   6918 	sc->sc_calibration_gen = gen;
   6919 }
   6920 
   6921 static int
   6922 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
   6923     struct mcx_cq_entry *cqe, struct mcx_mbufq *mq,
   6924     const struct mcx_calibration *c)
   6925 {
   6926 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   6927 	struct mcx_slot *ms;
   6928 	struct mbuf *m;
   6929 	uint32_t flags, len;
   6930 	int slot;
   6931 
   6932 	len = be32dec(&cqe->cq_byte_cnt);
   6933 	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
   6934 
   6935 	ms = &rx->rx_slots[slot];
   6936 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
   6937 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
   6938 
   6939 	m = ms->ms_m;
   6940 	ms->ms_m = NULL;
   6941 
   6942 	m_set_rcvif(m, &sc->sc_ec.ec_if);
   6943 	m->m_pkthdr.len = m->m_len = len;
   6944 
   6945 #if 0
   6946 	if (cqe->cq_rx_hash_type) {
   6947 		m->m_pkthdr.ph_flowid = be32toh(cqe->cq_rx_hash);
   6948 		m->m_pkthdr.csum_flags |= M_FLOWID;
   6949 	}
   6950 #endif
   6951 
   6952 	flags = be32dec(&cqe->cq_flags);
   6953 	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK) {
   6954 		if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6955 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   6956 	}
   6957 	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK) {
   6958 		if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)
   6959 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
   6960 		if (ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx)
   6961 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
   6962 		if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)
   6963 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
   6964 		if (ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx)
   6965 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
   6966 	}
   6967 	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
   6968 		vlan_set_tag(m, flags & MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
   6969 	}
   6970 
   6971 #if notyet
   6972 	if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_LINK0) && c->c_ratio) {
   6973 		uint64_t t = be64dec(&cqe->cq_timestamp);
   6974 		t -= c->c_timestamp;
   6975 		t *= c->c_ratio;
   6976 		t >>= MCX_TIMESTAMP_SHIFT;
   6977 		t += c->c_uptime;
   6978 
   6979 		m->m_pkthdr.ph_timestamp = t;
   6980 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
   6981 	}
   6982 #endif
   6983 
   6984 	MBUFQ_ENQUEUE(mq, m);
   6985 
   6986 	return (1);
   6987 }
   6988 
   6989 static struct mcx_cq_entry *
   6990 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
   6991 {
   6992 	struct mcx_cq_entry *cqe;
   6993 	int next;
   6994 
   6995 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
   6996 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
   6997 
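         	/*
         	 * the owner bit in each entry toggles on every pass around
         	 * the ring, so an entry is ours when its owner bit matches
         	 * the parity of the current pass.
         	 */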
   6998 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
   6999 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
   7000 		return (&cqe[next]);
   7001 	}
   7002 
   7003 	return (NULL);
   7004 }
   7005 
   7006 static void
   7007 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
   7008 {
   7009 	struct mcx_cq_doorbell *db;
   7010 	bus_size_t offset;
   7011 	uint32_t val;
   7012 	uint64_t uval;
   7013 
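         	/*
         	 * arming takes two writes: update the cq's doorbell record
         	 * in host memory with the consumer index, then poke the uar
         	 * so the device re-reads it and raises an event on the next
         	 * completion.
         	 */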
   7014 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
   7015 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
   7016 
   7017 	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
   7018 
   7019 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   7020 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
   7021 
   7022 	be32enc(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
   7023 	be32enc(&db->db_arm_ci, val);
   7024 
   7025 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   7026 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
   7027 
   7028 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
   7029 
   7030 	uval = (uint64_t)val << 32;
   7031 	uval |= cq->cq_n;
   7032 
   7033 	bus_space_write_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
   7034 	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
   7035 }
   7036 
   7037 void
   7038 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
   7039 {
   7040 	struct mcx_rx *rx = &q->q_rx;
   7041 	struct mcx_tx *tx = &q->q_tx;
   7042 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   7043 	const struct mcx_calibration *c;
   7044 	unsigned int gen;
   7045 	struct mcx_cq_entry *cqe;
   7046 	struct mcx_mbufq mq;
   7047 	struct mbuf *m;
   7048 	int rxfree, txfree;
   7049 
   7050 	MBUFQ_INIT(&mq);
   7051 
   7052 	gen = sc->sc_calibration_gen;
   7053 	membar_consumer();
   7054 	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
   7055 
   7056 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
   7057 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
   7058 
   7059 	rxfree = 0;
   7060 	txfree = 0;
   7061 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
   7062 		uint8_t opcode;
   7063 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
   7064 		switch (opcode) {
   7065 		case MCX_CQ_ENTRY_OPCODE_REQ:
   7066 			txfree += mcx_process_txeof(sc, tx, cqe);
   7067 			break;
   7068 		case MCX_CQ_ENTRY_OPCODE_SEND:
   7069 			rxfree += mcx_process_rx(sc, rx, cqe, &mq, c);
   7070 			break;
   7071 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
   7072 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
   7073 			/* uint8_t *cqp = (uint8_t *)cqe; */
   7074 			/* printf("%s: cq completion error: %x\n",
   7075 			    DEVNAME(sc), cqp[0x37]); */
   7076 			break;
   7077 
   7078 		default:
   7079 			/* printf("%s: cq completion opcode %x??\n",
   7080 			    DEVNAME(sc), opcode); */
   7081 			break;
   7082 		}
   7083 
   7084 		cq->cq_cons++;
   7085 	}
   7086 
   7087 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
   7088 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
   7089 
   7090 	if (rxfree > 0) {
   7091 		mcx_rxr_put(&rx->rx_rxr, rxfree);
   7092 		while (MBUFQ_FIRST(&mq) != NULL) {
   7093 			MBUFQ_DEQUEUE(&mq, m);
   7094 			if_percpuq_enqueue(ifp->if_percpuq, m);
   7095 		}
   7096 
   7097 		mcx_rx_fill(sc, rx);
   7098 		if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
   7099 			callout_schedule(&rx->rx_refill, 1);
   7100 	}
   7101 
   7102 	cq->cq_count++;
   7103 	mcx_arm_cq(sc, cq, q->q_uar);
   7104 
   7105 	if (txfree > 0) {
   7106 		tx->tx_cons += txfree;
   7107 		if_schedule_deferred_start(ifp);
   7108 	}
   7109 }
   7110 
   7111 
   7112 static void
   7113 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
   7114 {
   7115 	bus_size_t offset;
   7116 	uint32_t val;
   7117 
   7118 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
   7119 	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
   7120 
   7121 	mcx_wr(sc, offset, val);
   7122 	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
   7123 }
   7124 
   7125 static struct mcx_eq_entry *
   7126 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
   7127 {
   7128 	struct mcx_eq_entry *eqe;
   7129 	int next;
   7130 
   7131 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
   7132 	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
   7133 	if ((eqe[next].eq_owner & 1) ==
   7134 	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
   7135 		eq->eq_cons++;
   7136 		return (&eqe[next]);
   7137 	}
   7138 	return (NULL);
   7139 }
   7140 
   7141 int
   7142 mcx_admin_intr(void *xsc)
   7143 {
   7144 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
   7145 	struct mcx_eq *eq = &sc->sc_admin_eq;
   7146 	struct mcx_eq_entry *eqe;
   7147 
   7148 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   7149 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
   7150 
   7151 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
   7152 		switch (eqe->eq_event_type) {
   7153 		case MCX_EVENT_TYPE_LAST_WQE:
   7154 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
   7155 			break;
   7156 
   7157 		case MCX_EVENT_TYPE_CQ_ERROR:
   7158 			/* printf("%s: cq error\n", DEVNAME(sc)); */
   7159 			break;
   7160 
   7161 		case MCX_EVENT_TYPE_CMD_COMPLETION:
   7162 			/* wakeup probably */
   7163 			break;
   7164 
   7165 		case MCX_EVENT_TYPE_PORT_CHANGE:
   7166 			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
   7167 			break;
   7168 
   7169 		default:
   7170 			/* printf("%s: something happened\n", DEVNAME(sc)); */
   7171 			break;
   7172 		}
   7173 	}
   7174 
   7175 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   7176 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
   7177 
   7178 	mcx_arm_eq(sc, eq, sc->sc_uar);
   7179 
   7180 	return (1);
   7181 }
   7182 
   7183 int
   7184 mcx_cq_intr(void *xq)
   7185 {
   7186 	struct mcx_queues *q = (struct mcx_queues *)xq;
   7187 	struct mcx_softc *sc = q->q_sc;
   7188 	struct mcx_eq *eq = &q->q_eq;
   7189 	struct mcx_eq_entry *eqe;
   7190 	int cqn;
   7191 
   7192 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   7193 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
   7194 
   7195 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
   7196 		switch (eqe->eq_event_type) {
   7197 		case MCX_EVENT_TYPE_COMPLETION:
   7198 			cqn = be32toh(eqe->eq_event_data[6]);
   7199 			if (cqn == q->q_cq.cq_n)
   7200 				mcx_process_cq(sc, q, &q->q_cq);
   7201 			break;
   7202 		}
   7203 	}
   7204 
   7205 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
   7206 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
   7207 
   7208 	mcx_arm_eq(sc, eq, q->q_uar);
   7209 
   7210 	return (1);
   7211 }
   7212 
   7213 static void
   7214 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
   7215     int total)
   7216 {
   7217 	struct mcx_slot *ms;
   7218 
   7219 	int i = allocated;
   7220 	while (i-- > 0) {
   7221 		ms = &slots[i];
   7222 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
   7223 		if (ms->ms_m != NULL)
   7224 			m_freem(ms->ms_m);
   7225 	}
   7226 	kmem_free(slots, total * sizeof(*ms));
   7227 }
   7228 
   7229 static int
   7230 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
   7231 {
   7232 	struct mcx_rx *rx;
   7233 	struct mcx_tx *tx;
   7234 	struct mcx_slot *ms;
   7235 	int i;
   7236 
   7237 	rx = &q->q_rx;
   7238 	rx->rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
   7239 	    KM_SLEEP);
   7240 
   7241 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
   7242 		ms = &rx->rx_slots[i];
   7243 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
   7244 		    sc->sc_hardmtu, 0,
   7245 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
   7246 		    &ms->ms_map) != 0) {
   7247 			printf("%s: failed to allocate rx dma maps\n",
   7248 			    DEVNAME(sc));
   7249 			goto destroy_rx_slots;
   7250 		}
   7251 	}
   7252 
   7253 	tx = &q->q_tx;
   7254 	tx->tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
   7255 	     KM_SLEEP);
   7256 
   7257 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
   7258 		ms = &tx->tx_slots[i];
   7259 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
   7260 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
   7261 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
   7262 		    &ms->ms_map) != 0) {
   7263 			printf("%s: failed to allocate tx dma maps\n",
   7264 			    DEVNAME(sc));
   7265 			goto destroy_tx_slots;
   7266 		}
   7267 	}
   7268 
   7269 	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
   7270 	    q->q_eq.eq_n) != 0)
   7271 		goto destroy_tx_slots;
   7272 
   7273 	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
   7274 	    != 0)
   7275 		goto destroy_cq;
   7276 
   7277 	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
   7278 		goto destroy_sq;
   7279 
   7280 	return 0;
   7281 
   7282 destroy_sq:
   7283 	mcx_destroy_sq(sc, tx);
   7284 destroy_cq:
   7285 	mcx_destroy_cq(sc, &q->q_cq);
   7286 destroy_tx_slots:
   7287 	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
   7288 	tx->tx_slots = NULL;
   7289 
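         	/* the rx loop completed if we got this far; free every rx slot */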
   7290 	i = (1 << MCX_LOG_RQ_SIZE);
   7291 destroy_rx_slots:
   7292 	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
   7293 	rx->rx_slots = NULL;
   7294 	return ENOMEM;
   7295 }
   7296 
   7297 static int
   7298 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
   7299 {
   7300 	int i;
   7301 	int count;
   7302 
   7303 	count = 0;
   7304 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
   7305 		if (mcx_rss_config[i].flow_group == group)
   7306 			count++;
   7307 	}
   7308 
   7309 	return count;
   7310 }
   7311 
   7312 static int
   7313 mcx_init(struct ifnet *ifp)
   7314 {
   7315 	struct mcx_softc *sc = ifp->if_softc;
   7316 	struct mcx_rx *rx;
   7317 	struct mcx_tx *tx;
   7318 	int i, start, count, flow_group, flow_index;
   7319 	struct mcx_flow_match match_crit;
   7320 	struct mcx_rss_rule *rss;
   7321 	uint32_t dest;
   7322 	int rqns[MCX_MAX_QUEUES] = { 0 };
   7323 
   7324 	if (ISSET(ifp->if_flags, IFF_RUNNING))
   7325 		mcx_stop(ifp, 0);
   7326 
   7327 	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
   7328 		goto down;
   7329 
   7330 	for (i = 0; i < sc->sc_nqueues; i++) {
   7331 		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
   7332 			goto down;
   7333 		}
   7334 	}
   7335 
   7336 	/* RSS flow table and flow groups */
   7337 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
   7338 	    &sc->sc_rss_flow_table_id) != 0)
   7339 		goto down;
   7340 
   7341 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
   7342 	    sc->sc_rss_flow_table_id;
   7343 
   7344 	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
   7345 	memset(&match_crit, 0, sizeof(match_crit));
   7346 	match_crit.mc_ethertype = 0xffff;
   7347 	match_crit.mc_ip_proto = 0xff;
   7348 	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
   7349 	start = 0;
   7350 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
   7351 	if (count != 0) {
   7352 		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
   7353 		    MCX_FLOW_GROUP_RSS_L4, start, count,
   7354 		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
   7355 			goto down;
   7356 		start += count;
   7357 	}
   7358 
   7359 	/* L3 RSS flow group (v4/v6, including fragments) */
   7360 	memset(&match_crit, 0, sizeof(match_crit));
   7361 	match_crit.mc_ethertype = 0xffff;
   7362 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
   7363 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
   7364 	    MCX_FLOW_GROUP_RSS_L3, start, count,
   7365 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
   7366 		goto down;
   7367 	start += count;
   7368 
   7369 	/* non-RSS flow group */
   7370 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
   7371 	memset(&match_crit, 0, sizeof(match_crit));
   7372 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
   7373 	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
   7374 		goto down;
   7375 
   7376 	/* Root flow table, matching packets based on mac address */
   7377 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
   7378 	    &sc->sc_mac_flow_table_id) != 0)
   7379 		goto down;
   7380 
   7381 	/* promisc flow group */
   7382 	start = 0;
   7383 	memset(&match_crit, 0, sizeof(match_crit));
   7384 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
   7385 	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
   7386 		goto down;
   7387 	sc->sc_promisc_flow_enabled = 0;
   7388 	start++;
   7389 
   7390 	/* all multicast flow group */
   7391 	match_crit.mc_dest_mac[0] = 0x01;
   7392 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
   7393 	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
   7394 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
   7395 		goto down;
   7396 	sc->sc_allmulti_flow_enabled = 0;
   7397 	start++;
   7398 
   7399 	/* mac address matching flow group */
   7400 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
   7401 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
   7402 	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
   7403 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
   7404 		goto down;
   7405 
   7406 	/* flow table entries for unicast and broadcast */
   7407 	start = 0;
   7408 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
   7409 	    LLADDR(satosdl(ifp->if_dl->ifa_addr)), dest) != 0)
   7410 		goto down;
   7411 	start++;
   7412 
   7413 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
   7414 	    etherbroadcastaddr, dest) != 0)
   7415 		goto down;
   7416 	start++;
   7417 
   7418 	/* multicast entries go after that */
   7419 	sc->sc_mcast_flow_base = start;
   7420 
   7421 	/* re-add any existing multicast flows */
   7422 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
   7423 		if (sc->sc_mcast_flows[i][0] != 0) {
   7424 			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
   7425 			    sc->sc_mcast_flow_base + i,
   7426 			    sc->sc_mcast_flows[i], dest);
   7427 		}
   7428 	}
   7429 
   7430 	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
   7431 		goto down;
   7432 
   7433 	/*
   7434 	 * the RQT can be any size as long as it's a power of two.
   7435 	 * since we also restrict the number of queues to a power of two,
   7436 	 * we can just put each rx queue in once.
   7437 	 */
   7438 	for (i = 0; i < sc->sc_nqueues; i++)
   7439 		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
   7440 
   7441 	if (mcx_create_rqt(sc, sc->sc_nqueues, rqns, &sc->sc_rqt) != 0)
   7442 		goto down;
   7443 
   7444 	start = 0;
   7445 	flow_index = 0;
   7446 	flow_group = -1;
   7447 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
   7448 		rss = &mcx_rss_config[i];
   7449 		if (rss->flow_group != flow_group) {
   7450 			flow_group = rss->flow_group;
   7451 			flow_index = 0;
   7452 		}
   7453 
   7454 		if (rss->hash_sel == 0) {
   7455 			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
   7456 			    &sc->sc_tir[i]) != 0)
   7457 				goto down;
   7458 		} else {
   7459 			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
   7460 			    rss->hash_sel, &sc->sc_tir[i]) != 0)
   7461 				goto down;
   7462 		}
   7463 
   7464 		if (mcx_set_flow_table_entry_proto(sc, flow_group,
   7465 		    flow_index, rss->ethertype, rss->ip_proto,
   7466 		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
   7467 			goto down;
   7468 		flow_index++;
   7469 	}
   7470 
   7471 	for (i = 0; i < sc->sc_nqueues; i++) {
   7472 		struct mcx_queues *q = &sc->sc_queues[i];
   7473 		rx = &q->q_rx;
   7474 		tx = &q->q_tx;
   7475 
   7476 		/* start the queues */
   7477 		if (mcx_ready_sq(sc, tx) != 0)
   7478 			goto down;
   7479 
   7480 		if (mcx_ready_rq(sc, rx) != 0)
   7481 			goto down;
   7482 
   7483 		mcx_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
   7484 		rx->rx_prod = 0;
   7485 		mcx_rx_fill(sc, rx);
   7486 
   7487 		tx->tx_cons = 0;
   7488 		tx->tx_prod = 0;
   7489 	}
   7490 
   7491 	mcx_calibrate_first(sc);
   7492 
   7493 	SET(ifp->if_flags, IFF_RUNNING);
   7494 	CLR(ifp->if_flags, IFF_OACTIVE);
   7495 	if_schedule_deferred_start(ifp);
   7496 
   7497 	return 0;
   7498 down:
   7499 	mcx_stop(ifp, 0);
   7500 	return EIO;
   7501 }
   7502 
   7503 static void
   7504 mcx_stop(struct ifnet *ifp, int disable)
   7505 {
   7506 	struct mcx_softc *sc = ifp->if_softc;
   7507 	struct mcx_rss_rule *rss;
   7508 	int group, i, flow_group, flow_index;
   7509 
   7510 	CLR(ifp->if_flags, IFF_RUNNING);
   7511 
   7512 	/*
   7513 	 * delete flow table entries first, so no packets can arrive
   7514 	 * after the barriers
   7515 	 */
   7516 	if (sc->sc_promisc_flow_enabled)
   7517 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
   7518 	if (sc->sc_allmulti_flow_enabled)
   7519 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
   7520 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
   7521 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
   7522 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
   7523 		if (sc->sc_mcast_flows[i][0] != 0) {
   7524 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
   7525 			    sc->sc_mcast_flow_base + i);
   7526 		}
   7527 	}
   7528 
   7529 	flow_group = -1;
   7530 	flow_index = 0;
   7531 	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
   7532 		rss = &mcx_rss_config[i];
   7533 		if (rss->flow_group != flow_group) {
   7534 			flow_group = rss->flow_group;
   7535 			flow_index = 0;
   7536 		}
   7537 
   7538 		mcx_delete_flow_table_entry(sc, flow_group, flow_index);
   7539 
   7540 		mcx_destroy_tir(sc, sc->sc_tir[i]);
   7541 		sc->sc_tir[i] = 0;
   7542 
   7543 		flow_index++;
   7544 	}
   7545 
   7546 	for (i = 0; i < sc->sc_nqueues; i++) {
   7547 		callout_halt(&sc->sc_queues[i].q_rx.rx_refill, NULL);
   7548 	}
   7549 
   7550 	callout_halt(&sc->sc_calibrate, NULL);
   7551 
   7552 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
   7553 		if (sc->sc_flow_group[group].g_id != -1)
   7554 			mcx_destroy_flow_group(sc, group);
   7555 	}
   7556 
   7557 	if (sc->sc_mac_flow_table_id != -1) {
   7558 		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
   7559 		sc->sc_mac_flow_table_id = -1;
   7560 	}
   7561 	if (sc->sc_rss_flow_table_id != -1) {
   7562 		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
   7563 		sc->sc_rss_flow_table_id = -1;
   7564 	}
   7565 	if (sc->sc_rqt != -1) {
   7566 		mcx_destroy_rqt(sc, sc->sc_rqt);
   7567 		sc->sc_rqt = -1;
   7568 	}
   7569 
   7570 	for (i = 0; i < sc->sc_nqueues; i++) {
   7571 		struct mcx_queues *q = &sc->sc_queues[i];
   7572 		struct mcx_rx *rx = &q->q_rx;
   7573 		struct mcx_tx *tx = &q->q_tx;
   7574 		struct mcx_cq *cq = &q->q_cq;
   7575 
   7576 		if (rx->rx_rqn != 0)
   7577 			mcx_destroy_rq(sc, rx);
   7578 
   7579 		if (tx->tx_sqn != 0)
   7580 			mcx_destroy_sq(sc, tx);
   7581 
   7582 		if (tx->tx_slots != NULL) {
   7583 			mcx_free_slots(sc, tx->tx_slots,
   7584 			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
   7585 			tx->tx_slots = NULL;
   7586 		}
   7587 		if (rx->rx_slots != NULL) {
   7588 			mcx_free_slots(sc, rx->rx_slots,
   7589 			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
   7590 			rx->rx_slots = NULL;
   7591 		}
   7592 
   7593 		if (cq->cq_n != 0)
   7594 			mcx_destroy_cq(sc, cq);
   7595 	}
   7596 	if (sc->sc_tis != 0) {
   7597 		mcx_destroy_tis(sc, sc->sc_tis);
   7598 		sc->sc_tis = 0;
   7599 	}
   7600 }
   7601 
   7602 static int
   7603 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   7604 {
   7605 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
   7606 	struct ifreq *ifr = (struct ifreq *)data;
   7607 	struct ethercom *ec = &sc->sc_ec;
   7608 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
   7609 	struct ether_multi *enm;
   7610 	struct ether_multistep step;
   7611 	int s, i, flags, error = 0;
   7612 	uint32_t dest;
   7613 
   7614 	s = splnet();
   7615 	switch (cmd) {
   7616 
   7617 	case SIOCADDMULTI:
   7618 		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
   7619 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
   7620 			if (error != 0) {
   7621 				splx(s);
   7622 				return (error);
   7623 			}
   7624 
   7625 			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
   7626 			    sc->sc_rss_flow_table_id;
   7627 
   7628 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
   7629 				if (sc->sc_mcast_flows[i][0] == 0) {
   7630 					memcpy(sc->sc_mcast_flows[i], addrlo,
   7631 					    ETHER_ADDR_LEN);
   7632 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
   7633 						mcx_set_flow_table_entry_mac(sc,
   7634 						    MCX_FLOW_GROUP_MAC,
   7635 						    sc->sc_mcast_flow_base + i,
   7636 						    sc->sc_mcast_flows[i], dest);
   7637 					}
   7638 					break;
   7639 				}
   7640 			}
   7641 
   7642 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
   7643 				if (i == MCX_NUM_MCAST_FLOWS) {
   7644 					SET(ifp->if_flags, IFF_ALLMULTI);
   7645 					sc->sc_extra_mcast++;
   7646 					error = ENETRESET;
   7647 				}
   7648 
   7649 				if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
   7650 					SET(ifp->if_flags, IFF_ALLMULTI);
   7651 					error = ENETRESET;
   7652 				}
   7653 			}
   7654 		}
   7655 		break;
   7656 
   7657 	case SIOCDELMULTI:
   7658 		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
   7659 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
   7660 			if (error != 0) {
   7661 				splx(s);
   7662 				return (error);
   7663 			}
   7664 
   7665 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
   7666 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
   7667 				    ETHER_ADDR_LEN) == 0) {
   7668 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
   7669 						mcx_delete_flow_table_entry(sc,
   7670 						    MCX_FLOW_GROUP_MAC,
   7671 						    sc->sc_mcast_flow_base + i);
   7672 					}
   7673 					sc->sc_mcast_flows[i][0] = 0;
   7674 					break;
   7675 				}
   7676 			}
   7677 
   7678 			if (i == MCX_NUM_MCAST_FLOWS)
   7679 				sc->sc_extra_mcast--;
   7680 
   7681 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
   7682 			    sc->sc_extra_mcast == 0) {
   7683 				flags = 0;
   7684 				ETHER_LOCK(ec);
   7685 				ETHER_FIRST_MULTI(step, ec, enm);
   7686 				while (enm != NULL) {
   7687 					if (memcmp(enm->enm_addrlo,
   7688 					    enm->enm_addrhi, ETHER_ADDR_LEN)) {
   7689 						SET(flags, IFF_ALLMULTI);
   7690 						break;
   7691 					}
   7692 					ETHER_NEXT_MULTI(step, enm);
   7693 				}
   7694 				ETHER_UNLOCK(ec);
   7695 				if (!ISSET(flags, IFF_ALLMULTI)) {
   7696 					CLR(ifp->if_flags, IFF_ALLMULTI);
   7697 					error = ENETRESET;
   7698 				}
   7699 			}
   7700 		}
   7701 		break;
   7702 
   7703 	default:
   7704 		error = ether_ioctl(ifp, cmd, data);
   7705 	}
   7706 
   7707 	if (error == ENETRESET) {
   7708 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   7709 		    (IFF_UP | IFF_RUNNING))
   7710 			mcx_iff(sc);
   7711 		error = 0;
   7712 	}
   7713 	splx(s);
   7714 
   7715 	return (error);
   7716 }
   7717 
   7718 #if 0
   7719 static int
   7720 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
   7721 {
   7722 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
   7723 	struct mcx_reg_mcia mcia;
   7724 	struct mcx_reg_pmlp pmlp;
   7725 	int offset, error;
   7726 
   7727 	/* get module number */
   7728 	memset(&pmlp, 0, sizeof(pmlp));
   7729 	pmlp.rp_local_port = 1;
   7730 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
   7731 	    sizeof(pmlp));
   7732 	if (error != 0) {
   7733 		printf("%s: unable to get eeprom module number\n",
   7734 		    DEVNAME(sc));
   7735 		return error;
   7736 	}
   7737 
   7738 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
   7739 		memset(&mcia, 0, sizeof(mcia));
   7740 		mcia.rm_l = 0;
   7741 		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
   7742 		    MCX_PMLP_MODULE_NUM_MASK;
   7743 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
   7744 		mcia.rm_page_num = sff->sff_page;
   7745 		mcia.rm_dev_addr = htobe16(offset);
   7746 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
   7747 
   7748 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
   7749 		    &mcia, sizeof(mcia));
   7750 		if (error != 0) {
   7751 			printf("%s: unable to read eeprom at %x\n",
   7752 			    DEVNAME(sc), offset);
   7753 			return error;
   7754 		}
   7755 
   7756 		memcpy(sff->sff_data + offset, mcia.rm_data,
   7757 		    MCX_MCIA_EEPROM_BYTES);
   7758 	}
   7759 
   7760 	return 0;
   7761 }
   7762 #endif
   7763 
   7764 static int
   7765 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
   7766 {
   7767 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
   7768 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
   7769 	case 0:
   7770 		break;
   7771 
   7772 	case EFBIG:
   7773 		if (m_defrag(m, M_DONTWAIT) != NULL &&
   7774 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
   7775 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
   7776 			break;
   7777 
   7778 		/* FALLTHROUGH */
   7779 	default:
   7780 		return (1);
   7781 	}
   7782 
   7783 	ms->ms_m = m;
   7784 	return (0);
   7785 }
   7786 
   7787 static void
   7788 mcx_send_common_locked(struct ifnet *ifp, struct mcx_tx *tx, bool is_transmit)
   7789 {
   7790 	struct mcx_softc *sc = ifp->if_softc;
   7791 	struct mcx_sq_entry *sq, *sqe;
   7792 	struct mcx_sq_entry_seg *sqs;
   7793 	struct mcx_slot *ms;
   7794 	bus_dmamap_t map;
   7795 	struct mbuf *m;
   7796 	u_int idx, free, used;
   7797 	uint64_t *bf;
   7798 	uint32_t csum;
   7799 	size_t bf_base;
   7800 	int i, seg, nseg;
   7801 
   7802 	KASSERT(mutex_owned(&tx->tx_lock));
   7803 
   7804 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7805 		return;
   7806 
   7807 	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
   7808 
   7809 	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
   7810 	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
   7811 
   7812 	used = 0;
   7813 	bf = NULL;
   7814 
   7815 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
   7816 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
   7817 
   7818 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
   7819 
   7820 	for (;;) {
   7821 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
   7822 			SET(ifp->if_flags, IFF_OACTIVE);
   7823 			break;
   7824 		}
   7825 
   7826 		if (is_transmit) {
   7827 			m = pcq_get(tx->tx_pcq);
   7828 		} else {
   7829 			IFQ_DEQUEUE(&ifp->if_snd, m);
   7830 		}
   7831 		if (m == NULL) {
   7832 			break;
   7833 		}
   7834 
   7835 		sqe = sq + idx;
   7836 		ms = &tx->tx_slots[idx];
   7837 		memset(sqe, 0, sizeof(*sqe));
   7838 
   7839 		/* ctrl segment */
   7840 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
   7841 		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
   7842 		/* always generate a completion event */
   7843 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
   7844 
   7845 		/* eth segment */
   7846 		csum = 0;
   7847 		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
   7848 			csum |= MCX_SQE_L3_CSUM;
   7849 		if (m->m_pkthdr.csum_flags &
   7850 		    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
   7851 			csum |= MCX_SQE_L4_CSUM;
   7852 		sqe->sqe_mss_csum = htobe32(csum);
   7853 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
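         		/*
         		 * the first MCX_SQ_INLINE_SIZE bytes of headers are
         		 * carried inline in the sqe itself; for tagged packets
         		 * the copied ethernet header is expanded into an
         		 * 802.1Q header here and the untagged header is
         		 * trimmed off the mbuf.
         		 */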
   7854 		if (vlan_has_tag(m)) {
   7855 			struct ether_vlan_header *evh;
   7856 			evh = (struct ether_vlan_header *)
   7857 			    &sqe->sqe_inline_headers;
   7858 
   7859 			m_copydata(m, 0, ETHER_HDR_LEN, evh);
   7860 			evh->evl_proto = evh->evl_encap_proto;
   7861 			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
   7862 			evh->evl_tag = htons(vlan_get_tag(m));
   7863 			m_adj(m, ETHER_HDR_LEN);
   7864 		} else {
   7865 			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
   7866 			    sqe->sqe_inline_headers);
   7867 			m_adj(m, MCX_SQ_INLINE_SIZE);
   7868 		}
   7869 
   7870 		if (mcx_load_mbuf(sc, ms, m) != 0) {
   7871 			m_freem(m);
   7872 			if_statinc(ifp, if_oerrors);
   7873 			continue;
   7874 		}
   7875 		bf = (uint64_t *)sqe;
   7876 
   7877 		if (ifp->if_bpf != NULL)
   7878 			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
   7879 			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
   7880 
   7881 		map = ms->ms_map;
   7882 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
   7883 		    BUS_DMASYNC_PREWRITE);
   7884 
   7885 		sqe->sqe_ds_sq_num =
   7886 		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
   7887 		    (map->dm_nsegs + 3));
   7888 
   7889 		/* data segment - first wqe has one segment */
   7890 		sqs = sqe->sqe_segs;
   7891 		seg = 0;
   7892 		nseg = 1;
   7893 		for (i = 0; i < map->dm_nsegs; i++) {
   7894 			if (seg == nseg) {
   7895 				/* next slot */
   7896 				idx++;
   7897 				if (idx == (1 << MCX_LOG_SQ_SIZE))
   7898 					idx = 0;
   7899 				tx->tx_prod++;
   7900 				used++;
   7901 
   7902 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
   7903 				seg = 0;
   7904 				nseg = MCX_SQ_SEGS_PER_SLOT;
   7905 			}
   7906 			sqs[seg].sqs_byte_count =
   7907 			    htobe32(map->dm_segs[i].ds_len);
   7908 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
   7909 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
   7910 			seg++;
   7911 		}
   7912 
   7913 		idx++;
   7914 		if (idx == (1 << MCX_LOG_SQ_SIZE))
   7915 			idx = 0;
   7916 		tx->tx_prod++;
   7917 		used++;
   7918 	}
   7919 
   7920 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
   7921 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
   7922 
   7923 	if (used) {
   7924 		bus_size_t blueflame;
   7925 
   7926 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   7927 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
   7928 		be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
   7929 		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
   7930 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
   7931 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   7932 
   7933 		/*
   7934 		 * write the first 64 bits of the last sqe we produced
   7935 		 * to the blue flame buffer
   7936 		 */
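         		/*
         		 * this mmio write is also what rings the sq doorbell;
         		 * handing the device the start of the wqe lets it begin
         		 * processing without first fetching the wqe from host
         		 * memory.
         		 */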
   7937 
   7938 		blueflame = bf_base + tx->tx_bf_offset;
   7939 		bus_space_write_8(sc->sc_memt, sc->sc_memh,
   7940 		    blueflame, *bf);
   7941 		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
   7942 
   7943 		/* next write goes to the other buffer */
   7944 		tx->tx_bf_offset ^= sc->sc_bf_size;
   7945 	}
   7946 }
   7947 
   7948 static void
   7949 mcx_start(struct ifnet *ifp)
   7950 {
   7951 	struct mcx_softc *sc = ifp->if_softc;
   7952 	/* mcx_start() always uses TX ring[0] */
   7953 	struct mcx_tx *tx = &sc->sc_queues[0].q_tx;
   7954 
   7955 	mutex_enter(&tx->tx_lock);
   7956 	if (!ISSET(ifp->if_flags, IFF_OACTIVE)) {
   7957 		mcx_send_common_locked(ifp, tx, false);
   7958 	}
   7959 	mutex_exit(&tx->tx_lock);
   7960 }
   7961 
   7962 static int
   7963 mcx_transmit(struct ifnet *ifp, struct mbuf *m)
   7964 {
   7965 	struct mcx_softc *sc = ifp->if_softc;
   7966 	struct mcx_tx *tx;
   7967 
   7968 	tx = &sc->sc_queues[cpu_index(curcpu()) % sc->sc_nqueues].q_tx;
   7969 	if (__predict_false(!pcq_put(tx->tx_pcq, m))) {
   7970 		m_freem(m);
   7971 		return ENOBUFS;
   7972 	}
   7973 
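         	/*
         	 * if someone else is already transmitting on this ring, the
         	 * lock holder (or the softint) will pick this packet up off
         	 * the pcq, so don't spin on the lock here.
         	 */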
   7974 	if (mutex_tryenter(&tx->tx_lock)) {
   7975 		mcx_send_common_locked(ifp, tx, true);
   7976 		mutex_exit(&tx->tx_lock);
   7977 	} else {
   7978 		softint_schedule(tx->tx_softint);
   7979 	}
   7980 
   7981 	return 0;
   7982 }
   7983 
   7984 static void
   7985 mcx_deferred_transmit(void *arg)
   7986 {
   7987 	struct mcx_tx *tx = arg;
   7988 	struct mcx_softc *sc = tx->tx_softc;
   7989 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   7990 
   7991 	mutex_enter(&tx->tx_lock);
   7992 	if (pcq_peek(tx->tx_pcq) != NULL) {
   7993 		mcx_send_common_locked(ifp, tx, true);
   7994 	}
   7995 	mutex_exit(&tx->tx_lock);
   7996 }
   7997 
   7998 static void
   7999 mcx_watchdog(struct ifnet *ifp)
   8000 {
   8001 }
   8002 
   8003 static void
   8004 mcx_media_add_types(struct mcx_softc *sc)
   8005 {
   8006 	struct mcx_reg_ptys ptys;
   8007 	int i;
   8008 	uint32_t proto_cap;
   8009 
   8010 	memset(&ptys, 0, sizeof(ptys));
   8011 	ptys.rp_local_port = 1;
   8012 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
   8013 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
   8014 	    sizeof(ptys)) != 0) {
   8015 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
   8016 		return;
   8017 	}
   8018 
   8019 	proto_cap = be32toh(ptys.rp_eth_proto_cap);
   8020 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
   8021 		const struct mcx_eth_proto_capability *cap;
   8022 		if (!ISSET(proto_cap, 1U << i))
   8023 			continue;
   8024 
   8025 		cap = &mcx_eth_cap_map[i];
   8026 		if (cap->cap_media == 0)
   8027 			continue;
   8028 
   8029 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
   8030 	}
   8031 }
   8032 
   8033 static void
   8034 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   8035 {
   8036 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
   8037 	struct mcx_reg_ptys ptys;
   8038 	int i;
   8039 	uint32_t proto_oper;
   8040 	uint64_t media_oper;
   8041 
   8042 	memset(&ptys, 0, sizeof(ptys));
   8043 	ptys.rp_local_port = 1;
   8044 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
   8045 
   8046 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
   8047 	    sizeof(ptys)) != 0) {
   8048 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
   8049 		return;
   8050 	}
   8051 
   8052 	proto_oper = be32toh(ptys.rp_eth_proto_oper);
   8053 
   8054 	media_oper = 0;
   8055 
   8056 	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
   8057 		const struct mcx_eth_proto_capability *cap;
   8058 		if (!ISSET(proto_oper, 1U << i))
   8059 			continue;
   8060 
   8061 		cap = &mcx_eth_cap_map[i];
   8062 
   8063 		if (cap->cap_media != 0)
   8064 			media_oper = cap->cap_media;
   8065 	}
   8066 
   8067 	ifmr->ifm_status = IFM_AVALID;
   8068 	if (proto_oper != 0) {
   8069 		ifmr->ifm_status |= IFM_ACTIVE;
   8070 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
   8071 		/* txpause, rxpause, duplex? */
   8072 	}
   8073 }
   8074 
   8075 static int
   8076 mcx_media_change(struct ifnet *ifp)
   8077 {
   8078 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
   8079 	struct mcx_reg_ptys ptys;
   8080 	struct mcx_reg_paos paos;
   8081 	uint32_t media;
   8082 	int i, error;
   8083 
   8084 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
   8085 		return EINVAL;
   8086 
   8087 	error = 0;
   8088 
   8089 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
   8090 		/* read ptys to get supported media */
   8091 		memset(&ptys, 0, sizeof(ptys));
   8092 		ptys.rp_local_port = 1;
   8093 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
   8094 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
   8095 		    &ptys, sizeof(ptys)) != 0) {
   8096 			printf("%s: unable to read port type/speed\n",
   8097 			    DEVNAME(sc));
   8098 			return EIO;
   8099 		}
   8100 
   8101 		media = be32toh(ptys.rp_eth_proto_cap);
   8102 	} else {
   8103 		/* map media type */
   8104 		media = 0;
   8105 		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
   8106 			const struct mcx_eth_proto_capability *cap;
   8107 
   8108 			cap = &mcx_eth_cap_map[i];
   8109 			if (cap->cap_media ==
   8110 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
   8111 				media = (1 << i);
   8112 				break;
   8113 			}
   8114 		}
   8115 	}
   8116 
   8117 	/* disable the port */
   8118 	memset(&paos, 0, sizeof(paos));
   8119 	paos.rp_local_port = 1;
   8120 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
   8121 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
   8122 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
   8123 	    sizeof(paos)) != 0) {
   8124 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
   8125 		return EIO;
   8126 	}
   8127 
   8128 	memset(&ptys, 0, sizeof(ptys));
   8129 	ptys.rp_local_port = 1;
   8130 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
   8131 	ptys.rp_eth_proto_admin = htobe32(media);
   8132 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
   8133 	    sizeof(ptys)) != 0) {
   8134 		printf("%s: unable to set port media type/speed\n",
   8135 		    DEVNAME(sc));
   8136 		error = EIO;
   8137 	}
   8138 
   8139 	/* re-enable the port to start negotiation */
   8140 	memset(&paos, 0, sizeof(paos));
   8141 	paos.rp_local_port = 1;
   8142 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
   8143 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
   8144 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
   8145 	    sizeof(paos)) != 0) {
   8146 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
   8147 		error = EIO;
   8148 	}
   8149 
   8150 	return error;
   8151 }
   8152 
   8153 static void
   8154 mcx_port_change(struct work *wk, void *xsc)
   8155 {
   8156 	struct mcx_softc *sc = xsc;
   8157 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   8158 	struct mcx_reg_ptys ptys = {
   8159 		.rp_local_port = 1,
   8160 		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
   8161 	};
   8162 	int link_state = LINK_STATE_DOWN;
   8163 
   8164 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
   8165 	    sizeof(ptys)) == 0) {
   8166 		uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
   8167 		uint64_t baudrate = 0;
   8168 		unsigned int i;
   8169 
   8170 		if (proto_oper != 0)
   8171 			link_state = LINK_STATE_UP;
   8172 
   8173 		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
   8174 			const struct mcx_eth_proto_capability *cap;
   8175 			if (!ISSET(proto_oper, 1U << i))
   8176 				continue;
   8177 
   8178 			cap = &mcx_eth_cap_map[i];
   8179 			if (cap->cap_baudrate == 0)
   8180 				continue;
   8181 
   8182 			baudrate = cap->cap_baudrate;
   8183 			break;
   8184 		}
   8185 
   8186 		ifp->if_baudrate = baudrate;
   8187 	}
   8188 
   8189 	if (link_state != ifp->if_link_state) {
   8190 		if_link_state_change(ifp, link_state);
   8191 	}
   8192 }
   8193 
   8194 
   8195 static inline uint32_t
   8196 mcx_rd(struct mcx_softc *sc, bus_size_t r)
   8197 {
   8198 	uint32_t word;
   8199 
   8200 	word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
   8201 
   8202 	return (be32toh(word));
   8203 }
   8204 
   8205 static inline void
   8206 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
   8207 {
   8208 	bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
   8209 }
   8210 
   8211 static inline void
   8212 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
   8213 {
   8214 #ifndef __NetBSD__
   8215 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
   8216 #endif
   8217 }
   8218 
   8219 static uint64_t
   8220 mcx_timer(struct mcx_softc *sc)
   8221 {
   8222 	uint32_t hi, lo, ni;
   8223 
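         	/*
         	 * the 64 bit timer is exposed as two 32 bit registers, so
         	 * read the high word on either side of the low word and
         	 * retry until it's stable; that way a carry between the two
         	 * reads can't hand back a torn value.
         	 */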
   8224 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
   8225 	for (;;) {
   8226 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
   8227 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
   8228 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
   8229 
   8230 		if (ni == hi)
   8231 			break;
   8232 
   8233 		hi = ni;
   8234 	}
   8235 
   8236 	return (((uint64_t)hi << 32) | (uint64_t)lo);
   8237 }
   8238 
   8239 static int
   8240 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
   8241     bus_size_t size, u_int align)
   8242 {
   8243 	mxm->mxm_size = size;
   8244 
   8245 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
   8246 	    mxm->mxm_size, 0,
   8247 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
   8248 	    &mxm->mxm_map) != 0)
   8249 		return (1);
   8250 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
   8251 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
   8252 	    BUS_DMA_WAITOK) != 0)
   8253 		goto destroy;
   8254 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
   8255 	    mxm->mxm_size, &mxm->mxm_kva,
   8256 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
   8257 		goto free;
   8258 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
   8259 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
   8260 		goto unmap;
   8261 
   8262 	mcx_dmamem_zero(mxm);
   8263 
   8264 	return (0);
   8265 unmap:
   8266 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
   8267 free:
   8268 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
   8269 destroy:
   8270 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
   8271 	return (1);
   8272 }
   8273 
   8274 static void
   8275 mcx_dmamem_zero(struct mcx_dmamem *mxm)
   8276 {
   8277 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
   8278 }
   8279 
   8280 static void
   8281 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
   8282 {
   8283 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
   8284 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
   8285 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
   8286 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
   8287 }
   8288 
   8289 static int
   8290 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
   8291 {
   8292 	bus_dma_segment_t *segs;
   8293 	bus_size_t len = pages * MCX_PAGE_SIZE;
   8294 	size_t seglen;
   8295 
   8296 	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
   8297 	seglen = sizeof(*segs) * pages;
   8298 
   8299 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
   8300 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
   8301 		goto free_segs;
   8302 
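         	/*
         	 * the allocation may have been satisfied by fewer, larger
         	 * segments than pages; if so, move the segment list into a
         	 * right-sized array rather than keeping the worst-case one.
         	 */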
   8303 	if (mhm->mhm_seg_count < pages) {
   8304 		size_t nseglen;
   8305 
   8306 		mhm->mhm_segs = kmem_alloc(
   8307 		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
   8308 
   8309 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
   8310 
   8311 		memcpy(mhm->mhm_segs, segs, nseglen);
   8312 
   8313 		kmem_free(segs, seglen);
   8314 
   8315 		segs = mhm->mhm_segs;
   8316 		seglen = nseglen;
   8317 	} else
   8318 		mhm->mhm_segs = segs;
   8319 
   8320 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
   8321 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
   8322 	    &mhm->mhm_map) != 0)
   8323 		goto free_dmamem;
   8324 
   8325 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
   8326 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
   8327 		goto destroy;
   8328 
   8329 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
   8330 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
   8331 
   8332 	mhm->mhm_npages = pages;
   8333 
   8334 	return (0);
   8335 
   8336 destroy:
   8337 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
   8338 free_dmamem:
   8339 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
   8340 free_segs:
   8341 	kmem_free(segs, seglen);
   8342 	mhm->mhm_segs = NULL;
   8343 
   8344 	return (-1);
   8345 }
   8346 
   8347 static void
   8348 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
   8349 {
   8350 	if (mhm->mhm_npages == 0)
   8351 		return;
   8352 
   8353 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
   8354 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
   8355 
   8356 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
   8357 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
   8358 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
   8359 	kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
   8360 
   8361 	mhm->mhm_npages = 0;
   8362 }
   8363 
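/*
 * Port, temperature and queue statistics, exported via OpenBSD's
 * kstat(4) key/value framework.  This block still uses the OpenBSD
 * interfaces (kstat_create(), mallocarray(), ...) and is compiled out
 * unless NKSTAT is configured to a non-zero value.
 */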
#if NKSTAT > 0
struct mcx_ppcnt {
	char			 name[KSTAT_KV_NAMELEN];
	enum kstat_kv_unit	 unit;
};

static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
	{ "Good Tx",		KSTAT_KV_U_BYTES, },
	{ "Good Rx",		KSTAT_KV_U_BYTES, },
	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
};
CTASSERT(__arraycount(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);

static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
};
CTASSERT(__arraycount(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);

static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
	{ "Octets",		KSTAT_KV_U_BYTES, },
	{ "Packets",		KSTAT_KV_U_PACKETS, },
	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
	{ "Undersize",		KSTAT_KV_U_PACKETS, },
	{ "Oversize",		KSTAT_KV_U_PACKETS, },
	{ "Fragments",		KSTAT_KV_U_PACKETS, },
	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
	{ "Collisions",		KSTAT_KV_U_NONE, },
	{ "64B",		KSTAT_KV_U_PACKETS, },
	{ "65-127B",		KSTAT_KV_U_PACKETS, },
	{ "128-255B",		KSTAT_KV_U_PACKETS, },
	{ "256-511B",		KSTAT_KV_U_PACKETS, },
	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
};
CTASSERT(__arraycount(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);

static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
	{ "Late Colls",		KSTAT_KV_U_NONE, },
	{ "Excess Colls",	KSTAT_KV_U_NONE, },
	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
	{ "Carrier Sense Errs",	KSTAT_KV_U_NONE, },
	{ "Too Long",		KSTAT_KV_U_PACKETS, },
	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
};
CTASSERT(__arraycount(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);

struct mcx_kstat_ppcnt {
	const char		*ksp_name;
	const struct mcx_ppcnt	*ksp_tpl;
	unsigned int		 ksp_n;
	uint8_t			 ksp_grp;
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
	.ksp_name =		"ieee802.3",
	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
	.ksp_n =		__arraycount(mcx_ppcnt_ieee8023_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
	.ksp_name =		"rfc2863",
	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
	.ksp_n =		__arraycount(mcx_ppcnt_rfc2863_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
	.ksp_name =		"rfc2819",
	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
	.ksp_n =		__arraycount(mcx_ppcnt_rfc2819_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
};

static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
	.ksp_name =		"rfc3635",
	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
	.ksp_n =		__arraycount(mcx_ppcnt_rfc3635_tpl),
	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
};

static int	mcx_kstat_ppcnt_read(struct kstat *);

static void	mcx_kstat_attach_tmps(struct mcx_softc *sc);
static void	mcx_kstat_attach_queues(struct mcx_softc *sc);

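/*
 * Create a kstat for one group of per-port counters (PPCNT register).
 * One 64-bit counter kv is allocated per template entry, and the
 * group descriptor is stashed in ks_ptr so the read function knows
 * which PPCNT group to query.
 */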
static struct kstat *
mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
    const struct mcx_kstat_ppcnt *ksp)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
	if (ks == NULL)
		return (NULL);

	kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < ksp->ksp_n; i++) {
		const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];

		kstat_kv_unit_init(&kvs[i], tpl->name,
		    KSTAT_KV_T_COUNTER64, tpl->unit);
	}

	ks->ks_softc = sc;
	ks->ks_ptr = (void *)ksp;
	ks->ks_data = kvs;
	ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
	ks->ks_read = mcx_kstat_ppcnt_read;

	kstat_install(ks);

	return (ks);
}

static void
mcx_kstat_attach(struct mcx_softc *sc)
{
	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_ieee8023);
	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc2863);
	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc2819);
	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
	    &mcx_kstat_ppcnt_rfc3635);

	mcx_kstat_attach_tmps(sc);
	mcx_kstat_attach_queues(sc);
}

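/*
 * Refresh a per-port counter kstat: fetch the whole counter set for
 * the group in one PPCNT register access and copy the big-endian
 * 64-bit counters out into the kv array.
 */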
static int
mcx_kstat_ppcnt_read(struct kstat *ks)
{
	struct mcx_softc *sc = ks->ks_softc;
	struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
	struct mcx_reg_ppcnt ppcnt = {
		.ppcnt_grp = ksp->ksp_grp,
		.ppcnt_local_port = 1,
	};
	struct kstat_kv *kvs = ks->ks_data;
	uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
	unsigned int i;
	int rv;

	KERNEL_LOCK(); /* XXX */
	rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
	    &ppcnt, sizeof(ppcnt));
	KERNEL_UNLOCK();
	if (rv != 0)
		return (EIO);

	nanouptime(&ks->ks_updated);

	for (i = 0; i < ksp->ksp_n; i++)
		kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);

	return (0);
}

struct mcx_kstat_mtmp {
	struct kstat_kv		ktmp_name;
	struct kstat_kv		ktmp_temperature;
	struct kstat_kv		ktmp_threshold_lo;
	struct kstat_kv		ktmp_threshold_hi;
};

static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
	KSTAT_KV_INITIALIZER("name",		KSTAT_KV_T_ISTR),
	KSTAT_KV_INITIALIZER("temperature",	KSTAT_KV_T_TEMP),
	KSTAT_KV_INITIALIZER("lo threshold",	KSTAT_KV_T_TEMP),
	KSTAT_KV_INITIALIZER("hi threshold",	KSTAT_KV_T_TEMP),
};

static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };

static int mcx_kstat_mtmp_read(struct kstat *);

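/*
 * Attach one kstat per temperature sensor.  The MCAM register is read
 * to check that the sensor map capability is present, MTCAP provides
 * the sensor count and map, and a "temperature" kstat is created for
 * each bit set in the map, using the bit index as the sensor index.
 */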
static void
mcx_kstat_attach_tmps(struct mcx_softc *sc)
{
	struct kstat *ks;
	struct mcx_reg_mcam mcam;
	struct mcx_reg_mtcap mtcap;
	struct mcx_kstat_mtmp *ktmp;
	uint64_t map;
	unsigned int i, n;

	memset(&mtcap, 0, sizeof(mtcap));
	memset(&mcam, 0, sizeof(mcam));

	if (sc->sc_mcam_reg == 0) {
		/* no management capabilities */
		return;
	}

	if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
	    &mcam, sizeof(mcam)) != 0) {
		/* unable to check management capabilities? */
		return;
	}

	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
		/* no sensor map */
		return;
	}

	if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
	    &mtcap, sizeof(mtcap)) != 0) {
		/* unable to find temperature sensors */
		return;
	}

	sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
	sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
	    sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);

	n = 0;
	map = bemtoh64(&mtcap.mtcap_sensor_map);
	for (i = 0; i < sizeof(map) * NBBY; i++) {
		if (!ISSET(map, (1ULL << i)))
			continue;

		ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
		    KSTAT_T_KV, 0);
		if (ks == NULL) {
			/* unable to attach temperature sensor %u, i */
			continue;
		}

		ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
		*ktmp = mcx_kstat_mtmp_tpl;

		ks->ks_data = ktmp;
		ks->ks_datalen = sizeof(*ktmp);
		TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
		ks->ks_read = mcx_kstat_mtmp_read;

		ks->ks_softc = sc;
		kstat_install(ks);

		sc->sc_kstat_mtmp[n++] = ks;
		if (n >= sc->sc_kstat_mtmp_count)
			break;
	}
}

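/*
 * Convert an MTMP temperature field (big-endian, signed, in units of
 * 0.125 degrees C) to micro-Kelvin for kstat.  For example, a raw
 * value of 200 is 25 C: 200 * 125000 + 273150000 = 298150000 uK.
 */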
static uint64_t
mcx_tmp_to_uK(uint16_t *t)
{
	int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
	mt *= 1000000 / 8; /* convert to uC */
	mt += 273150000; /* convert to uK */

	return (mt);
}

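/*
 * Refresh a temperature sensor kstat, rate-limited to once per
 * second.  The sensor index is taken from ks_unit, and the MTMP
 * register supplies the sensor name, the current temperature and the
 * low/high thresholds.
 */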
static int
mcx_kstat_mtmp_read(struct kstat *ks)
{
	struct mcx_softc *sc = ks->ks_softc;
	struct mcx_kstat_mtmp *ktmp = ks->ks_data;
	struct mcx_reg_mtmp mtmp;
	int rv;
	struct timeval updated;

	TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);

	if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
		return (0);

	memset(&mtmp, 0, sizeof(mtmp));
	htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);

	KERNEL_LOCK(); /* XXX */
	rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
	    &mtmp, sizeof(mtmp));
	KERNEL_UNLOCK();
	if (rv != 0)
		return (EIO);

	memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
	    sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
	memcpy(kstat_kv_istr(&ktmp->ktmp_name),
	    mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
	kstat_kv_temp(&ktmp->ktmp_temperature) =
	    mcx_tmp_to_uK(&mtmp.mtmp_temperature);
	kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
	kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);

	TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);

	return (0);
}

struct mcx_queuestat {
	char			 name[KSTAT_KV_NAMELEN];
	enum kstat_kv_type	 type;
};

static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
	{ "RQ SW prod",		KSTAT_KV_T_COUNTER64 },
	{ "RQ HW prod",		KSTAT_KV_T_COUNTER64 },
	{ "RQ HW cons",		KSTAT_KV_T_COUNTER64 },
	{ "RQ HW state",	KSTAT_KV_T_ISTR },

	{ "SQ SW prod",		KSTAT_KV_T_COUNTER64 },
	{ "SQ SW cons",		KSTAT_KV_T_COUNTER64 },
	{ "SQ HW prod",		KSTAT_KV_T_COUNTER64 },
	{ "SQ HW cons",		KSTAT_KV_T_COUNTER64 },
	{ "SQ HW state",	KSTAT_KV_T_ISTR },

	{ "CQ SW cons",		KSTAT_KV_T_COUNTER64 },
	{ "CQ HW prod",		KSTAT_KV_T_COUNTER64 },
	{ "CQ HW cons",		KSTAT_KV_T_COUNTER64 },
	{ "CQ HW notify",	KSTAT_KV_T_COUNTER64 },
	{ "CQ HW solicit",	KSTAT_KV_T_COUNTER64 },
	{ "CQ HW status",	KSTAT_KV_T_ISTR },
	{ "CQ HW state",	KSTAT_KV_T_ISTR },

	{ "EQ SW cons",		KSTAT_KV_T_COUNTER64 },
	{ "EQ HW prod",		KSTAT_KV_T_COUNTER64 },
	{ "EQ HW cons",		KSTAT_KV_T_COUNTER64 },
	{ "EQ HW status",	KSTAT_KV_T_ISTR },
	{ "EQ HW state",	KSTAT_KV_T_ISTR },
};

static int	mcx_kstat_queue_read(struct kstat *);

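/*
 * Create an "mcx-queues" kstat per queue pair, exposing the software
 * and hardware producer/consumer counters and states of the RQ, SQ,
 * CQ and EQ, matching mcx_queue_kstat_tpl above.
 */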
static void
mcx_kstat_attach_queues(struct mcx_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	int q, i;

	for (q = 0; q < sc->sc_nqueues; q++) {
		ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
		    KSTAT_T_KV, 0);
		if (ks == NULL) {
			/* unable to attach queue stats %u, q */
			continue;
		}

		kvs = mallocarray(__arraycount(mcx_queue_kstat_tpl),
		    sizeof(*kvs), M_DEVBUF, M_WAITOK);

		for (i = 0; i < __arraycount(mcx_queue_kstat_tpl); i++) {
			const struct mcx_queuestat *tpl =
			    &mcx_queue_kstat_tpl[i];

			kstat_kv_init(&kvs[i], tpl->name, tpl->type);
		}

		ks->ks_softc = &sc->sc_queues[q];
		ks->ks_data = kvs;
		ks->ks_datalen = __arraycount(mcx_queue_kstat_tpl) *
		    sizeof(*kvs);
		ks->ks_read = mcx_kstat_queue_read;

		sc->sc_queues[q].q_kstat = ks;
		kstat_install(ks);
	}
}

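/*
 * Fill in a queue kstat by querying the RQ, SQ, CQ and EQ contexts
 * from the hardware.  The kv array is walked in the same order as
 * mcx_queue_kstat_tpl, so the template and this function must be kept
 * in sync.
 */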
static int
mcx_kstat_queue_read(struct kstat *ks)
{
	struct mcx_queues *q = ks->ks_softc;
	struct mcx_softc *sc = q->q_sc;
	struct kstat_kv *kvs = ks->ks_data;
	union {
		struct mcx_rq_ctx rq;
		struct mcx_sq_ctx sq;
		struct mcx_cq_ctx cq;
		struct mcx_eq_ctx eq;
	} u;
	const char *text;
	int error = 0;

	KERNEL_LOCK();

	if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
	switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
	    MCX_RQ_CTX_STATE_SHIFT) {
	case MCX_RQ_CTX_STATE_RST:
		text = "RST";
		break;
	case MCX_RQ_CTX_STATE_RDY:
		text = "RDY";
		break;
	case MCX_RQ_CTX_STATE_ERR:
		text = "ERR";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
	kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
	switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
	    MCX_SQ_CTX_STATE_SHIFT) {
	case MCX_SQ_CTX_STATE_RST:
		text = "RST";
		break;
	case MCX_SQ_CTX_STATE_RDY:
		text = "RDY";
		break;
	case MCX_SQ_CTX_STATE_ERR:
		text = "ERR";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);

	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
	    MCX_CQ_CTX_STATUS_SHIFT) {
	case MCX_CQ_CTX_STATUS_OK:
		text = "OK";
		break;
	case MCX_CQ_CTX_STATUS_OVERFLOW:
		text = "overflow";
		break;
	case MCX_CQ_CTX_STATUS_WRITE_FAIL:
		text = "write fail";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
	    MCX_CQ_CTX_STATE_SHIFT) {
	case MCX_CQ_CTX_STATE_SOLICITED:
		text = "solicited";
		break;
	case MCX_CQ_CTX_STATE_ARMED:
		text = "armed";
		break;
	case MCX_CQ_CTX_STATE_FIRED:
		text = "fired";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);

	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
	    MCX_EQ_CTX_STATUS_SHIFT) {
	case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
		text = "write fail";
		break;
	case MCX_EQ_CTX_STATUS_OK:
		text = "OK";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
	    MCX_EQ_CTX_STATE_SHIFT) {
	case MCX_EQ_CTX_STATE_ARMED:
		text = "armed";
		break;
	case MCX_EQ_CTX_STATE_FIRED:
		text = "fired";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	nanouptime(&ks->ks_updated);
out:
	KERNEL_UNLOCK();
	return (error);
}

#endif /* NKSTAT > 0 */

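/*
 * Export the lower 32 bits of the device's free-running internal
 * timer as a timecounter.  sc->sc_khz holds the timer frequency in
 * kHz; the negative quality keeps this timecounter from being used
 * unless it is explicitly selected.
 */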
static unsigned int
mcx_timecounter_read(struct timecounter *tc)
{
	struct mcx_softc *sc = tc->tc_priv;

	return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
}

static void
mcx_timecounter_attach(struct mcx_softc *sc)
{
	struct timecounter *tc = &sc->sc_timecounter;

	tc->tc_get_timecount = mcx_timecounter_read;
	tc->tc_counter_mask = ~0U;
	tc->tc_frequency = sc->sc_khz * 1000;
	tc->tc_name = device_xname(sc->sc_dev);
	tc->tc_quality = -100;
	tc->tc_priv = sc;

	tc_init(tc);
}