/*	$NetBSD: if_mcx.c,v 1.15 2021/01/30 21:26:32 jmcneill Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.98 2021/01/27 07:46:11 dlg Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg (at) openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.15 2021/01/30 21:26:32 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_vlanvar.h>
#include <net/toeplitz.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* TODO: Port kstat key/value stuff to evcnt/sysmon */
#define NKSTAT 0

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE 1 */
/* #define CALLOUT_FLAGS CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS 0
/* #endif */

#define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER 0x0000
#define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v) ((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER 0x0004
#define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff)
#define MCX_CMDIF(_v) ((_v) >> 16)

#define MCX_ISSI 1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED 5

#define MCX_HARDMTU 9500

#define MCX_PAGE_SHIFT 12
#define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE 7
#define MCX_LOG_CQ_SIZE 12
#define MCX_LOG_RQ_SIZE 10
#define MCX_LOG_SQ_SIZE 11

#define MCX_MAX_QUEUES 16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD 50
#define MCX_CQ_MOD_COUNTER \
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
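/*
 * A worked example of the moderation arithmetic (just the math, nothing
 * from the PRM): with MCX_LOG_CQ_SIZE == 12 above, MCX_CQ_MOD_COUNTER
 * evaluates to ((1 << 11) * 9) / 10 == 1843, so the device raises a
 * completion event after at most 1843 completions, or when the
 * MCX_CQ_MOD_PERIOD moderation timer expires, whichever happens first.
 */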

#define MCX_LOG_SQ_ENTRY_SIZE 6
#define MCX_SQ_ENTRY_MAX_SLOTS 4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS \
	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)
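/*
 * How the segment count falls out (derived from the structures below,
 * nothing beyond them): an SQ slot is 1 << MCX_LOG_SQ_ENTRY_SIZE == 64
 * bytes, and struct mcx_sq_entry packs its control and ethernet segments
 * plus one 16-byte data segment into the first slot.  Each additional
 * slot holds sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg)
 * == 4 data segments, so a 4-slot WQE carries 1 + 3 * 4 = 13 segments.
 */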

#define MCX_LOG_FLOW_TABLE_SIZE 5
#define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE 18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE 0
#define MCX_CQ_DOORBELL_STRIDE 64

#define MCX_WQ_DOORBELL_BASE MCX_PAGE_SIZE/2
#define MCX_WQ_DOORBELL_STRIDE 64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);
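/*
 * In other words: one page of doorbells, CQ doorbells packed from the
 * start of the page and WQ doorbells from the halfway mark, each on a
 * 64-byte stride.  The asserts above simply check that MCX_MAX_QUEUES
 * doorbells of each kind fit in their half of the page.
 */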

#define MCX_WQ_DOORBELL_MASK 0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL 0x20
#define MCX_UAR_EQ_DOORBELL_ARM 0x40
#define MCX_UAR_EQ_DOORBELL 0x48
#define MCX_UAR_BF 0x800

#define MCX_CMDQ_ADDR_HI 0x0010
#define MCX_CMDQ_ADDR_LO 0x0014
#define MCX_CMDQ_ADDR_NMASK 0xfff
#define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK (0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8)

#define MCX_CMDQ_DOORBELL 0x0018

#define MCX_STATE 0x01fc
#define MCX_STATE_MASK (1U << 31)
#define MCX_STATE_INITIALIZING (1U << 31)
#define MCX_STATE_READY (0 << 31)
#define MCX_STATE_INTERFACE_MASK (0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED (0x1 << 24)

#define MCX_INTERNAL_TIMER 0x1000
#define MCX_INTERNAL_TIMER_H 0x1000
#define MCX_INTERNAL_TIMER_L 0x1004

#define MCX_CLEAR_INT 0x100c

#define MCX_REG_OP_WRITE 0
#define MCX_REG_OP_READ 1

#define MCX_REG_PMLP 0x5002
#define MCX_REG_PMTU 0x5003
#define MCX_REG_PTYS 0x5004
#define MCX_REG_PAOS 0x5006
#define MCX_REG_PFCC 0x5007
#define MCX_REG_PPCNT 0x5008
#define MCX_REG_MTCAP 0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP 0x900a /* mgmt temp */
#define MCX_REG_MCIA 0x9014
#define MCX_REG_MCAM 0x907f

#define MCX_ETHER_CAP_SGMII 0
#define MCX_ETHER_CAP_1000_KX 1
#define MCX_ETHER_CAP_10G_CX4 2
#define MCX_ETHER_CAP_10G_KX4 3
#define MCX_ETHER_CAP_10G_KR 4
#define MCX_ETHER_CAP_20G_KR2 5
#define MCX_ETHER_CAP_40G_CR4 6
#define MCX_ETHER_CAP_40G_KR4 7
#define MCX_ETHER_CAP_56G_R4 8
#define MCX_ETHER_CAP_10G_CR 12
#define MCX_ETHER_CAP_10G_SR 13
#define MCX_ETHER_CAP_10G_LR 14
#define MCX_ETHER_CAP_40G_SR4 15
#define MCX_ETHER_CAP_40G_LR4 16
#define MCX_ETHER_CAP_50G_SR2 18
#define MCX_ETHER_CAP_100G_CR4 20
#define MCX_ETHER_CAP_100G_SR4 21
#define MCX_ETHER_CAP_100G_KR4 22
#define MCX_ETHER_CAP_100G_LR4 23
#define MCX_ETHER_CAP_100_TX 24
#define MCX_ETHER_CAP_1000_T 25
#define MCX_ETHER_CAP_10G_T 26
#define MCX_ETHER_CAP_25G_CR 27
#define MCX_ETHER_CAP_25G_KR 28
#define MCX_ETHER_CAP_25G_SR 29
#define MCX_ETHER_CAP_50G_CR2 30
#define MCX_ETHER_CAP_50G_KR2 31

#define MCX_MAX_CQE 32

#define MCX_CMD_QUERY_HCA_CAP 0x100
#define MCX_CMD_QUERY_ADAPTER 0x101
#define MCX_CMD_INIT_HCA 0x102
#define MCX_CMD_TEARDOWN_HCA 0x103
#define MCX_CMD_ENABLE_HCA 0x104
#define MCX_CMD_DISABLE_HCA 0x105
#define MCX_CMD_QUERY_PAGES 0x107
#define MCX_CMD_MANAGE_PAGES 0x108
#define MCX_CMD_SET_HCA_CAP 0x109
#define MCX_CMD_QUERY_ISSI 0x10a
#define MCX_CMD_SET_ISSI 0x10b
#define MCX_CMD_SET_DRIVER_VERSION 0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS 0x203
#define MCX_CMD_CREATE_EQ 0x301
#define MCX_CMD_DESTROY_EQ 0x302
#define MCX_CMD_QUERY_EQ 0x303
#define MCX_CMD_CREATE_CQ 0x400
#define MCX_CMD_DESTROY_CQ 0x401
#define MCX_CMD_QUERY_CQ 0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT 0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
	0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS 0x770
#define MCX_CMD_ALLOC_PD 0x800
#define MCX_CMD_ALLOC_UAR 0x802
#define MCX_CMD_ACCESS_REG 0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN 0x816
#define MCX_CMD_CREATE_TIR 0x900
#define MCX_CMD_DESTROY_TIR 0x902
#define MCX_CMD_CREATE_SQ 0x904
#define MCX_CMD_MODIFY_SQ 0x905
#define MCX_CMD_DESTROY_SQ 0x906
#define MCX_CMD_QUERY_SQ 0x907
#define MCX_CMD_CREATE_RQ 0x908
#define MCX_CMD_MODIFY_RQ 0x909
#define MCX_CMD_DESTROY_RQ 0x90a
#define MCX_CMD_QUERY_RQ 0x90b
#define MCX_CMD_CREATE_TIS 0x912
#define MCX_CMD_DESTROY_TIS 0x914
#define MCX_CMD_CREATE_RQT 0x916
#define MCX_CMD_DESTROY_RQT 0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT 0x92f
#define MCX_CMD_CREATE_FLOW_TABLE 0x930
#define MCX_CMD_DESTROY_FLOW_TABLE 0x931
#define MCX_CMD_QUERY_FLOW_TABLE 0x932
#define MCX_CMD_CREATE_FLOW_GROUP 0x933
#define MCX_CMD_DESTROY_FLOW_GROUP 0x934
#define MCX_CMD_QUERY_FLOW_GROUP 0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY 0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY 0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY 0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER 0x939
#define MCX_CMD_QUERY_FLOW_COUNTER 0x93b

#define MCX_QUEUE_STATE_RST 0
#define MCX_QUEUE_STATE_RDY 1
#define MCX_QUEUE_STATE_ERR 3

#define MCX_FLOW_TABLE_TYPE_RX 0
#define MCX_FLOW_TABLE_TYPE_TX 1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t cq_type;
#define MCX_CMDQ_TYPE_PCIE 0x7
	uint8_t cq_reserved0[3];

	uint32_t cq_input_length;
	uint64_t cq_input_ptr;
	uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t cq_output_ptr;
	uint32_t cq_output_length;

	uint8_t cq_token;
	uint8_t cq_signature;
	uint8_t cq_reserved1[1];
	uint8_t cq_status;
#define MCX_CQ_STATUS_SHIFT 1
#define MCX_CQ_STATUS_MASK (0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK (0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR (0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE (0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM (0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE (0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE (0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY (0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM (0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE (0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX (0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES (0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN (0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN (0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
	(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE (0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK 0x1
#define MCX_CQ_STATUS_OWN_SW 0x0
#define MCX_CQ_STATUS_OWN_HW 0x1
} __packed __aligned(8);
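
/*
 * Rough shape of command execution implied by this layout (a summary,
 * not PRM text): commands whose payload fits in MCX_CMDQ_INLINE_DATASIZE
 * bytes live entirely in cq_input_data/cq_output_data; anything larger
 * goes through mailbox chains pointed to by cq_input_ptr/cq_output_ptr.
 * Software fills in the entry, sets the owner bit in cq_status to
 * MCX_CQ_STATUS_OWN_HW, rings MCX_CMDQ_DOORBELL, and waits for the
 * device to flip ownership back and deposit a status code.
 */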

#define MCX_CMDQ_MAILBOX_DATASIZE 512

struct mcx_cmdq_mailbox {
	uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t mb_reserved0[48];
	uint64_t mb_next_ptr;
	uint32_t mb_block_number;
	uint8_t mb_reserved1[1];
	uint8_t mb_token;
	uint8_t mb_ctrl_signature;
	uint8_t mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN (1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE roundup(sizeof(struct mcx_cmdq_mailbox), \
    MCX_CMDQ_MAILBOX_ALIGN)
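/*
 * Mailboxes chain through mb_next_ptr, each carrying
 * MCX_CMDQ_MAILBOX_DATASIZE bytes of payload and recording its position
 * in the chain in mb_block_number; MCX_CMDQ_MAILBOX_SIZE rounds each box
 * up to the 1 KB alignment given by MCX_CMDQ_MAILBOX_ALIGN.
 */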
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_function_id;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL 0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC 0x1
	uint16_t cmd_profile;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_register_id;
	uint32_t cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[2];
	uint16_t rp_max_mtu;
	uint8_t rp_reserved3[2];
	uint16_t rp_admin_mtu;
	uint8_t rp_reserved4[2];
	uint16_t rp_oper_mtu;
	uint8_t rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2;
	uint8_t rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH (1 << 2)
	uint8_t rp_reserved3[8];
	uint32_t rp_eth_proto_cap;
	uint8_t rp_reserved4[8];
	uint32_t rp_eth_proto_admin;
	uint8_t rp_reserved5[8];
	uint32_t rp_eth_proto_oper;
	uint8_t rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP 1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN 2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE 3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED 4
	uint8_t rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP 1
#define MCX_REG_PAOS_OPER_STATUS_DOWN 2
#define MCX_REG_PAOS_OPER_STATUS_FAILED 4
	uint8_t rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN (1 << 7)
	uint8_t rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[3];
	uint8_t rp_prio_mask_tx;
	uint8_t rp_reserved3;
	uint8_t rp_prio_mask_rx;
	uint8_t rp_pptx_aptx;
	uint8_t rp_pfctx;
	uint8_t rp_fctx_dis;
	uint8_t rp_reserved4;
	uint8_t rp_pprx_aprx;
	uint8_t rp_pfcrx;
	uint8_t rp_reserved5[2];
	uint16_t rp_dev_stall_min;
	uint16_t rp_dev_stall_crit;
	uint8_t rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK 0xff
struct mcx_reg_pmlp {
	uint8_t rp_rxtx;
	uint8_t rp_local_port;
	uint8_t rp_reserved0;
	uint8_t rp_width;
	uint32_t rp_lane0_mapping;
	uint32_t rp_lane1_mapping;
	uint32_t rp_lane2_mapping;
	uint32_t rp_lane3_mapping;
	uint8_t rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t ppcnt_swid;
	uint8_t ppcnt_local_port;
	uint8_t ppcnt_pnat;
	uint8_t ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023 0x00
#define MCX_REG_PPCNT_GRP_RFC2863 0x01
#define MCX_REG_PPCNT_GRP_RFC2819 0x02
#define MCX_REG_PPCNT_GRP_RFC3635 0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO 0x10
#define MCX_REG_PPCNT_GRP_PER_TC 0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER 0x11

	uint8_t ppcnt_clr;
	uint8_t ppcnt_reserved1[2];
	uint8_t ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR (1 << 7)

	uint8_t ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t _reserved1[1];
	uint8_t mcam_feature_group;
	uint8_t _reserved2[1];
	uint8_t mcam_access_reg_group;
	uint8_t _reserved3[4];
	uint8_t mcam_access_reg_cap_mask[16];
	uint8_t _reserved4[16];
	uint8_t mcam_feature_cap_mask[16];
	uint8_t _reserved5[16];
} __packed __aligned(4);
#define MCX_BITFIELD_BIT(bf, b) (bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP 6
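/*
 * Example use of the bitfield helper (illustrative only): the feature
 * masks in struct mcx_reg_mcam are big-endian byte arrays, so bit 0
 * lives in the last byte, which is why MCX_BITFIELD_BIT indexes from
 * the end of the array:
 *
 *	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
 *	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP))
 *		... sensor map is supported ...
 */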

struct mcx_reg_mtcap {
	uint8_t _reserved1[3];
	uint8_t mtcap_sensor_count;
	uint8_t _reserved2[4];

	uint64_t mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t _reserved1[2];
	uint16_t mtmp_sensor_index;

	uint8_t _reserved2[2];
	uint16_t mtmp_temperature;

	uint16_t mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE (1 << 15)
#define MCX_REG_MTMP_MTR (1 << 14)
	uint16_t mtmp_max_temperature;

	uint16_t mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE (0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE (1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE (2 << 14)
	uint16_t mtmp_temperature_threshold_hi;

	uint8_t _reserved3[2];
	uint16_t mtmp_temperature_threshold_lo;

	uint8_t _reserved4[4];

	uint8_t mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);

#define MCX_MCIA_EEPROM_BYTES 32
struct mcx_reg_mcia {
	uint8_t rm_l;
	uint8_t rm_module;
	uint8_t rm_reserved0;
	uint8_t rm_status;
	uint8_t rm_i2c_addr;
	uint8_t rm_page_num;
	uint16_t rm_dev_addr;
	uint16_t rm_reserved1;
	uint16_t rm_size;
	uint32_t rm_reserved2;
	uint8_t rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t cmd_reserved2[16];
	uint8_t cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT 0x01
#define MCX_CMD_QUERY_PAGES_INIT 0x02
#define MCX_CMD_QUERY_PAGES_REGULAR 0x03
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	int32_t cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
	0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
	0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
	0x02
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	uint32_t cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_output_num_entries;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1)
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN 0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__ __COUNTER__
#else
#define __counter__ __LINE__
#endif

#define __token(_tok, _num) _tok##_num
#define _token(_tok, _num) __token(_tok, _num)
#define __reserved__ _token(__reserved, __counter__)
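/*
 * The __reserved__ macro above token-pastes a unique counter (or the
 * line number on compilers without __COUNTER__) onto "__reserved", so
 * struct mcx_cap_device below can declare its many padding fields
 * without inventing a name for each one; every use expands to a
 * distinct member name such as __reserved0, __reserved1, and so on.
 */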

struct mcx_cap_device {
	uint8_t reserved0[16];

	uint8_t log_max_srq_sz;
	uint8_t log_max_qp_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f

	uint8_t __reserved__[1];
	uint8_t log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f
	uint8_t __reserved__[2];

	uint8_t __reserved__[1];
	uint8_t log_max_cq_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f

	uint8_t log_max_eq_sz;
	uint8_t log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f

	uint8_t max_indirection;
	uint8_t log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f
	uint8_t teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
	0x3f
	uint8_t log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
	0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
	0x3f

	uint8_t flags1;
#define MCX_CAP_DEVICE_END_PAD 0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
	0x20
#define MCX_CAP_DEVICE_START_PAD 0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
	0x08
	uint8_t __reserved__[1];
	uint16_t gid_table_size;

	uint16_t flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
	0x2000
#define MCX_CAP_DEVICE_DEBUG 0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
	0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK 0x03ff
	uint16_t pkey_table_size;

	uint8_t flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
	0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
	0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL 0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10
#define MCX_CAP_DEVICE_ETS 0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
	0x01
	uint8_t local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
	0x1f
#define MCX_CAP_DEVICE_MCAM_REG 0x40
	uint8_t port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
	0x80
#define MCX_CAP_DEVICE_PORT_TYPE 0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH 0x01
	uint8_t num_ports;

	uint8_t snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f
	uint8_t max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC 0x0f
	uint8_t flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80
#define MCX_CAP_DEVICE_DCBX 0x40
#define MCX_CAP_DEVICE_ROL_S 0x02
#define MCX_CAP_DEVICE_ROL_G 0x01
	uint8_t wol;
#define MCX_CAP_DEVICE_WOL_S 0x40
#define MCX_CAP_DEVICE_WOL_G 0x20
#define MCX_CAP_DEVICE_WOL_A 0x10
#define MCX_CAP_DEVICE_WOL_B 0x08
#define MCX_CAP_DEVICE_WOL_M 0x04
#define MCX_CAP_DEVICE_WOL_U 0x02
#define MCX_CAP_DEVICE_WOL_P 0x01

	uint16_t stat_rate_support;
	uint8_t __reserved__[1];
	uint8_t cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION 0x0f

	uint32_t flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
	0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
	0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
	0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400
#define MCX_CAP_DEVICE_SHO 0x00000100
#define MCX_CAP_DEVICE_TPH 0x00000080
#define MCX_CAP_DEVICE_RF 0x00000040
#define MCX_CAP_DEVICE_DCT 0x00000020
#define MCX_CAP_DEVICE_QOS 0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS 0x00000008
#define MCX_CAP_DEVICE_ROCE 0x00000004
#define MCX_CAP_DEVICE_ATOMIC 0x00000002

	uint32_t flags6;
#define MCX_CAP_DEVICE_CQ_OI 0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
	0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000
#define MCX_CAP_DEVICE_PG 0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
	0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
	0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
	0x00100000
#define MCX_CAP_DEVICE_CD 0x00080000
#define MCX_CAP_DEVICE_ATM 0x00040000
#define MCX_CAP_DEVICE_APM 0x00020000
#define MCX_CAP_DEVICE_IMAICL 0x00010000
#define MCX_CAP_DEVICE_QKV 0x00000200
#define MCX_CAP_DEVICE_PKV 0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080
#define MCX_CAP_DEVICE_XRC 0x00000008
#define MCX_CAP_DEVICE_UD 0x00000004
#define MCX_CAP_DEVICE_UC 0x00000002
#define MCX_CAP_DEVICE_RC 0x00000001

	uint8_t uar_flags;
#define MCX_CAP_DEVICE_UAR_4K 0x80
	uint8_t uar_sz; /* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ 0x3f
	uint8_t __reserved__[1];
	uint8_t log_pg_sz;

	uint8_t flags7;
#define MCX_CAP_DEVICE_BF 0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
	0x20
	uint8_t log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
	uint8_t __reserved__[2];

	uint16_t num_of_diagnostic_counters;
	uint16_t max_wqe_sz_sq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_rq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_sq_dc;

	uint32_t max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff

	uint8_t __reserved__[3];
	uint8_t log_max_mcq;

	uint8_t log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
	0x1f
	uint8_t log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
	uint8_t __reserved__[1];
	uint8_t log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f

	uint8_t __reserved__[2];
	uint16_t max_flow_counter;

	uint8_t log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
	uint8_t log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
	uint8_t log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
	uint8_t log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f

	uint8_t flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
	0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
	uint8_t log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
	uint8_t log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
	uint8_t log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
	0x1f

	uint8_t flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
	0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
	0x1f
	uint8_t log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
	0x1f
	uint8_t log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
	0x1f
	uint8_t log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
	0x1f

	uint8_t log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN 0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
	0x1f
	uint8_t log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
	0x1f
	uint8_t log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
	0x1f
	uint8_t log_max_wq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
	0x1f

	uint8_t log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
	0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
	0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
	0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
	0x1f
	uint8_t log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
	0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
	0x1f
	uint8_t log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
	0x1f
	uint8_t log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
	0x1f

	uint8_t __reserved__[4];

	uint32_t create_qp_start_hint; /* 24 bits */

	uint8_t log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX 0x1f
	uint8_t log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM 0x1f
	uint16_t max_num_eqs;

	uint8_t log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE 0x1f
	uint8_t __reserved__[1];
	uint16_t log_uar_page_sz;

	uint8_t __reserved__[8];

	uint32_t device_frequency_mhz;
	uint32_t device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR 0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC 0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU 0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint8_t cmd_allowed_list_type;
	uint8_t cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t vp_min_wqe_inline_mode;
	uint8_t vp_reserved0[32];
	uint32_t vp_mtu;
	uint8_t vp_reserved1[200];
	uint16_t vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC (0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC (1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN (2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL (1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST (1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST (1 << 15)
	uint16_t vp_allowed_list_size;
	uint64_t vp_perm_addr;
	uint8_t vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t packets;
	uint64_t octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter rx_err;
	struct mcx_counter tx_err;
	uint8_t reserved0[64]; /* 0x30 */
	struct mcx_counter rx_bcast;
	struct mcx_counter tx_bcast;
	struct mcx_counter rx_ucast;
	struct mcx_counter tx_ucast;
	struct mcx_counter rx_mcast;
	struct mcx_counter tx_mcast;
	uint8_t reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[5];
	uint16_t cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_uar;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t eq_status;
#define MCX_EQ_CTX_STATE_SHIFT 8
#define MCX_EQ_CTX_STATE_MASK (0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED 0x9
#define MCX_EQ_CTX_STATE_FIRED 0xa
#define MCX_EQ_CTX_OI_SHIFT 17
#define MCX_EQ_CTX_OI (1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT 18
#define MCX_EQ_CTX_EC (1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT 28
#define MCX_EQ_CTX_STATUS_MASK (0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK 0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t eq_reserved1;
	uint32_t eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT 5
	uint32_t eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT 24
	uint32_t eq_reserved2;
	uint8_t eq_reserved3[3];
	uint8_t eq_intr;
	uint32_t eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t eq_reserved4[3];
	uint32_t eq_consumer_counter;
	uint32_t eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK 0xffffff
	uint32_t eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx cmd_eq_ctx;
	uint8_t cmd_reserved0[8];
	uint64_t cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION 0x00
#define MCX_EVENT_TYPE_CQ_ERROR 0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR 0x08
#define MCX_EVENT_TYPE_PORT_CHANGE 0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION 0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST 0x0b
#define MCX_EVENT_TYPE_LAST_WQE 0x13
	uint8_t cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_eqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_eqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t eq_reserved1;
	uint8_t eq_event_type;
	uint8_t eq_reserved2;
	uint8_t eq_event_sub_type;

	uint8_t eq_reserved3[28];
	uint32_t eq_event_data[7];
	uint8_t eq_reserved4[2];
	uint8_t eq_signature;
	uint8_t eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT 1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
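
/*
 * EQ entries are written by hardware and polled by the driver; the
 * eq_owner byte (initialized to MCX_EQ_ENTRY_OWNER_INIT) is what lets
 * the consumer tell a freshly written entry from a stale one as the
 * producer counter wraps around the 1 << MCX_LOG_EQ_SIZE ring.  This
 * is the usual mlx5-style ownership scheme; the PRM is authoritative
 * on the exact toggling rules.
 */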

struct mcx_cmd_alloc_pd_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_pd;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t cmd_reserved0[20];
	uint32_t cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT 0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT 1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT 28
	uint8_t cmd_reserved1[8];
	uint32_t cmd_lro;
	uint8_t cmd_reserved2[8];
	uint32_t cmd_inline_rqn;
	uint32_t cmd_indir_table;
	uint32_t cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ 2
#define MCX_TIR_CTX_HASH_SHIFT 28
	uint8_t cmd_rx_hash_key[40];
	uint32_t cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP (1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP (1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT (1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT (1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4 (0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6 (1 << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP (0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP (1 << 30)
	uint32_t cmd_rx_hash_sel_inner;
	uint8_t cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t cmd_reserved[16];
	uint32_t cmd_prio;
	uint8_t cmd_reserved1[32];
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t cmd_reserved0[20];
	uint16_t cmd_reserved1;
	uint16_t cmd_rqt_max_size;
	uint16_t cmd_reserved2;
	uint16_t cmd_rqt_actual_size;
	uint8_t cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t cmd_reserved0[16];
	struct mcx_rqt_ctx cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_rqtn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rqtn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT 28
#define MCX_CQ_CTX_STATUS_MASK (0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK 0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW 0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL 0xa
#define MCX_CQ_CTX_STATE_SHIFT 8
#define MCX_CQ_CTX_STATE_MASK (0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED 0x6
#define MCX_CQ_CTX_STATE_ARMED 0x9
#define MCX_CQ_CTX_STATE_FIRED 0xa
	uint32_t cq_reserved1;
	uint32_t cq_page_offset;
	uint32_t cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT 24
	uint32_t cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT 16
	uint32_t cq_eqn;
	uint32_t cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t cq_reserved2;
	uint32_t cq_last_notified;
	uint32_t cq_last_solicit;
	uint32_t cq_consumer_counter;
	uint32_t cq_producer_counter;
	uint8_t cq_reserved3[8];
	uint64_t cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx cmd_cq_ctx;
	uint8_t cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t __reserved__;
	uint32_t cq_lro;
	uint32_t cq_lro_ack_seq_num;
	uint32_t cq_rx_hash;
	uint8_t cq_rx_hash_type;
	uint8_t cq_ml_path;
	uint16_t __reserved__;
	uint32_t cq_checksum;
	uint32_t __reserved__;
	uint32_t cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK (1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK (1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK (1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV (1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK (0xffff)

	uint32_t cq_lro_srqn;
	uint32_t __reserved__[2];
	uint32_t cq_byte_cnt;
	uint64_t cq_timestamp;
	uint8_t cq_rx_drops;
	uint8_t cq_flow_tag[3];
	uint16_t cq_wqe_count;
	uint8_t cq_signature;
	uint8_t cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT 2
#define MCX_CQ_ENTRY_OPCODE_SHIFT 4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3

#define MCX_CQ_ENTRY_OPCODE_REQ 0
#define MCX_CQ_ENTRY_OPCODE_SEND 2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
#define MCX_CQ_ENTRY_OPCODE_INVALID 15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t db_update_ci;
	uint32_t db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT 28
#define MCX_CQ_DOORBELL_ARM_CMD (1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK (0xffffff)
} __packed __aligned(8);
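
/*
 * To re-arm a CQ (as the fields above suggest; the PRM has the exact
 * rules): write the current consumer index into db_update_ci, then
 * write an arm word into db_arm_ci combining the sequence number
 * (shifted by MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT), MCX_CQ_DOORBELL_ARM_CMD
 * and the consumer index masked with MCX_CQ_DOORBELL_ARM_CI_MASK.
 */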

struct mcx_wq_ctx {
	uint8_t wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC (1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE (1 << 3)
	uint8_t wq_reserved0[5];
	uint16_t wq_lwm;
	uint32_t wq_pd;
	uint32_t wq_uar_page;
	uint64_t wq_doorbell;
	uint32_t wq_hw_counter;
	uint32_t wq_sw_counter;
	uint16_t wq_log_stride;
	uint8_t wq_log_page_sz;
	uint8_t wq_log_size;
	uint8_t wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t sq_flags;
#define MCX_SQ_CTX_RLKEY (1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
#define MCX_SQ_CTX_STATE_SHIFT 20
#define MCX_SQ_CTX_STATE_MASK (0xf << 20)
#define MCX_SQ_CTX_STATE_RST 0
#define MCX_SQ_CTX_STATE_RDY 1
#define MCX_SQ_CTX_STATE_ERR 3
	uint32_t sq_user_index;
	uint32_t sq_cqn;
	uint32_t sq_reserved1[5];
	uint32_t sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT 16
	uint32_t sq_reserved2[2];
	uint32_t sq_tis_num;
	struct mcx_wq_ctx sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t sqs_byte_count;
	uint32_t sqs_lkey;
	uint64_t sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT 8
#define MCX_SQE_WQE_OPCODE_NOP 0x00
#define MCX_SQE_WQE_OPCODE_SEND 0x0a
	uint32_t sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT 8
	uint32_t sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT 24
#define MCX_SQE_SOLICITED_EVENT 0x02
#define MCX_SQE_CE_CQE_ON_ERR 0x00
#define MCX_SQE_CE_CQE_FIRST_ERR 0x04
#define MCX_SQE_CE_CQE_ALWAYS 0x08
#define MCX_SQE_CE_CQE_SOLICIT 0x0C
#define MCX_SQE_FM_NO_FENCE 0x00
#define MCX_SQE_FM_SMALL_FENCE 0x40
	uint32_t sqe_mkey;

	/* ethernet segment */
	uint32_t sqe_reserved1;
	uint32_t sqe_mss_csum;
#define MCX_SQE_L4_CSUM (1 << 31)
#define MCX_SQE_L3_CSUM (1 << 30)
	uint32_t sqe_reserved2;
	uint16_t sqe_inline_header_size;
	uint16_t sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
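
/*
 * So a minimal single-slot send WQE looks like: a control segment
 * (opcode/index, ds count, signature, mkey), an ethernet segment whose
 * sqe_inline_headers hold the first MCX_SQ_INLINE_SIZE (18) bytes of
 * the frame - exactly an ethernet header plus VLAN tag, per the
 * CTASSERT near the top of the file - and one mcx_sq_entry_seg pointing
 * at the rest of the packet.  Larger packets chain additional 64-byte
 * slots full of segments, up to MCX_SQ_MAX_SEGMENTS in total.
 */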

struct mcx_cmd_create_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_sq_ctx cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);


struct mcx_rq_ctx {
	uint32_t rq_flags;
#define MCX_RQ_CTX_RLKEY (1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
#define MCX_RQ_CTX_STATE_SHIFT 20
#define MCX_RQ_CTX_STATE_MASK (0xf << 20)
#define MCX_RQ_CTX_STATE_RST 0
#define MCX_RQ_CTX_STATE_RDY 1
#define MCX_RQ_CTX_STATE_ERR 3
#define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
	uint32_t rq_user_index;
	uint32_t rq_cqn;
	uint32_t rq_reserved1;
	uint32_t rq_rmpn;
	uint32_t rq_reserved2[7];
	struct mcx_wq_ctx rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t rqe_byte_count;
	uint32_t rqe_lkey;
	uint64_t rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_rq_ctx cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t ft_miss_action;
	uint8_t ft_level;
	uint8_t ft_reserved0;
	uint8_t ft_log_size;
	uint32_t ft_table_miss_id;
	uint8_t ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_match {
	/* outer headers */
	uint8_t mc_src_mac[6];
	uint16_t mc_ethertype;
	uint8_t mc_dest_mac[6];
	uint16_t mc_first_vlan;
	uint8_t mc_ip_proto;
	uint8_t mc_ip_dscp_ecn;
	uint8_t mc_vlan_flags;
#define MCX_FLOW_MATCH_IP_FRAG (1 << 5)
	uint8_t mc_tcp_flags;
	uint16_t mc_tcp_sport;
	uint16_t mc_tcp_dport;
	uint32_t mc_reserved0;
	uint16_t mc_udp_sport;
	uint16_t mc_udp_dport;
	uint8_t mc_src_ip[16];
	uint8_t mc_dest_ip[16];

	/* misc parameters */
	uint8_t mc_reserved1[8];
	uint16_t mc_second_vlan;
	uint8_t mc_reserved2[2];
	uint8_t mc_second_vlan_flags;
	uint8_t mc_reserved3[15];
	uint32_t mc_outer_ipv6_flow_label;
	uint8_t mc_reserved4[32];
1974
1975 uint8_t mc_reserved[384];
1976 } __packed __aligned(4);
1977
1978 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1979
1980 struct mcx_cmd_create_flow_group_in {
1981 uint16_t cmd_opcode;
1982 uint8_t cmd_reserved0[4];
1983 uint16_t cmd_op_mod;
1984 uint8_t cmd_reserved1[8];
1985 } __packed __aligned(4);
1986
1987 struct mcx_cmd_create_flow_group_mb_in {
1988 uint8_t cmd_table_type;
1989 uint8_t cmd_reserved0[3];
1990 uint32_t cmd_table_id;
1991 uint8_t cmd_reserved1[4];
1992 uint32_t cmd_start_flow_index;
1993 uint8_t cmd_reserved2[4];
1994 uint32_t cmd_end_flow_index;
1995 uint8_t cmd_reserved3[23];
1996 uint8_t cmd_match_criteria_enable;
1997 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1998 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1999 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
2000 struct mcx_flow_match cmd_match_criteria;
2001 uint8_t cmd_reserved4[448];
2002 } __packed __aligned(4);
2003
2004 struct mcx_cmd_create_flow_group_out {
2005 uint8_t cmd_status;
2006 uint8_t cmd_reserved0[3];
2007 uint32_t cmd_syndrome;
2008 uint32_t cmd_group_id;
2009 uint8_t cmd_reserved1[4];
2010 } __packed __aligned(4);
2011
2012 struct mcx_flow_ctx {
2013 uint8_t fc_reserved0[4];
2014 uint32_t fc_group_id;
2015 uint32_t fc_flow_tag;
2016 uint32_t fc_action;
2017 #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
2018 #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
2019 #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
2020 #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
2021 uint32_t fc_dest_list_size;
2022 uint32_t fc_counter_list_size;
2023 uint8_t fc_reserved1[40];
2024 struct mcx_flow_match fc_match_value;
2025 uint8_t fc_reserved2[192];
2026 } __packed __aligned(4);
2027
2028 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
2029 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
2030
2031 struct mcx_cmd_destroy_flow_group_in {
2032 uint16_t cmd_opcode;
2033 uint8_t cmd_reserved0[4];
2034 uint16_t cmd_op_mod;
2035 uint8_t cmd_reserved1[8];
2036 } __packed __aligned(4);
2037
2038 struct mcx_cmd_destroy_flow_group_mb_in {
2039 uint8_t cmd_table_type;
2040 uint8_t cmd_reserved0[3];
2041 uint32_t cmd_table_id;
2042 uint32_t cmd_group_id;
2043 uint8_t cmd_reserved1[36];
2044 } __packed __aligned(4);
2045
2046 struct mcx_cmd_destroy_flow_group_out {
2047 uint8_t cmd_status;
2048 uint8_t cmd_reserved0[3];
2049 uint32_t cmd_syndrome;
2050 uint8_t cmd_reserved1[8];
2051 } __packed __aligned(4);
2052
2053 struct mcx_cmd_set_flow_table_entry_in {
2054 uint16_t cmd_opcode;
2055 uint8_t cmd_reserved0[4];
2056 uint16_t cmd_op_mod;
2057 uint8_t cmd_reserved1[8];
2058 } __packed __aligned(4);
2059
2060 struct mcx_cmd_set_flow_table_entry_mb_in {
2061 uint8_t cmd_table_type;
2062 uint8_t cmd_reserved0[3];
2063 uint32_t cmd_table_id;
2064 uint32_t cmd_modify_enable_mask;
2065 uint8_t cmd_reserved1[4];
2066 uint32_t cmd_flow_index;
2067 uint8_t cmd_reserved2[28];
2068 struct mcx_flow_ctx cmd_flow_ctx;
2069 } __packed __aligned(4);
2070
2071 struct mcx_cmd_set_flow_table_entry_out {
2072 uint8_t cmd_status;
2073 uint8_t cmd_reserved0[3];
2074 uint32_t cmd_syndrome;
2075 uint8_t cmd_reserved1[8];
2076 } __packed __aligned(4);
2077
2078 struct mcx_cmd_query_flow_table_entry_in {
2079 uint16_t cmd_opcode;
2080 uint8_t cmd_reserved0[4];
2081 uint16_t cmd_op_mod;
2082 uint8_t cmd_reserved1[8];
2083 } __packed __aligned(4);
2084
2085 struct mcx_cmd_query_flow_table_entry_mb_in {
2086 uint8_t cmd_table_type;
2087 uint8_t cmd_reserved0[3];
2088 uint32_t cmd_table_id;
2089 uint8_t cmd_reserved1[8];
2090 uint32_t cmd_flow_index;
2091 uint8_t cmd_reserved2[28];
2092 } __packed __aligned(4);
2093
2094 struct mcx_cmd_query_flow_table_entry_out {
2095 uint8_t cmd_status;
2096 uint8_t cmd_reserved0[3];
2097 uint32_t cmd_syndrome;
2098 uint8_t cmd_reserved1[8];
2099 } __packed __aligned(4);
2100
2101 struct mcx_cmd_query_flow_table_entry_mb_out {
2102 uint8_t cmd_reserved0[48];
2103 struct mcx_flow_ctx cmd_flow_ctx;
2104 } __packed __aligned(4);
2105
2106 struct mcx_cmd_delete_flow_table_entry_in {
2107 uint16_t cmd_opcode;
2108 uint8_t cmd_reserved0[4];
2109 uint16_t cmd_op_mod;
2110 uint8_t cmd_reserved1[8];
2111 } __packed __aligned(4);
2112
2113 struct mcx_cmd_delete_flow_table_entry_mb_in {
2114 uint8_t cmd_table_type;
2115 uint8_t cmd_reserved0[3];
2116 uint32_t cmd_table_id;
2117 uint8_t cmd_reserved1[8];
2118 uint32_t cmd_flow_index;
2119 uint8_t cmd_reserved2[28];
2120 } __packed __aligned(4);
2121
2122 struct mcx_cmd_delete_flow_table_entry_out {
2123 uint8_t cmd_status;
2124 uint8_t cmd_reserved0[3];
2125 uint32_t cmd_syndrome;
2126 uint8_t cmd_reserved1[8];
2127 } __packed __aligned(4);
2128
2129 struct mcx_cmd_query_flow_group_in {
2130 uint16_t cmd_opcode;
2131 uint8_t cmd_reserved0[4];
2132 uint16_t cmd_op_mod;
2133 uint8_t cmd_reserved1[8];
2134 } __packed __aligned(4);
2135
2136 struct mcx_cmd_query_flow_group_mb_in {
2137 uint8_t cmd_table_type;
2138 uint8_t cmd_reserved0[3];
2139 uint32_t cmd_table_id;
2140 uint32_t cmd_group_id;
2141 uint8_t cmd_reserved1[36];
2142 } __packed __aligned(4);
2143
2144 struct mcx_cmd_query_flow_group_out {
2145 uint8_t cmd_status;
2146 uint8_t cmd_reserved0[3];
2147 uint32_t cmd_syndrome;
2148 uint8_t cmd_reserved1[8];
2149 } __packed __aligned(4);
2150
2151 struct mcx_cmd_query_flow_group_mb_out {
2152 uint8_t cmd_reserved0[12];
2153 uint32_t cmd_start_flow_index;
2154 uint8_t cmd_reserved1[4];
2155 uint32_t cmd_end_flow_index;
2156 uint8_t cmd_reserved2[20];
2157 uint32_t cmd_match_criteria_enable;
2158 uint8_t cmd_match_criteria[512];
2159 uint8_t cmd_reserved4[448];
2160 } __packed __aligned(4);
2161
2162 struct mcx_cmd_query_flow_table_in {
2163 uint16_t cmd_opcode;
2164 uint8_t cmd_reserved0[4];
2165 uint16_t cmd_op_mod;
2166 uint8_t cmd_reserved1[8];
2167 } __packed __aligned(4);
2168
2169 struct mcx_cmd_query_flow_table_mb_in {
2170 uint8_t cmd_table_type;
2171 uint8_t cmd_reserved0[3];
2172 uint32_t cmd_table_id;
2173 uint8_t cmd_reserved1[40];
2174 } __packed __aligned(4);
2175
2176 struct mcx_cmd_query_flow_table_out {
2177 uint8_t cmd_status;
2178 uint8_t cmd_reserved0[3];
2179 uint32_t cmd_syndrome;
2180 uint8_t cmd_reserved1[8];
2181 } __packed __aligned(4);
2182
2183 struct mcx_cmd_query_flow_table_mb_out {
2184 uint8_t cmd_reserved0[4];
2185 struct mcx_flow_table_ctx cmd_ctx;
2186 } __packed __aligned(4);
2187
2188 struct mcx_cmd_alloc_flow_counter_in {
2189 uint16_t cmd_opcode;
2190 uint8_t cmd_reserved0[4];
2191 uint16_t cmd_op_mod;
2192 uint8_t cmd_reserved1[8];
2193 } __packed __aligned(4);
2194
2195 struct mcx_cmd_query_rq_in {
2196 uint16_t cmd_opcode;
2197 uint8_t cmd_reserved0[4];
2198 uint16_t cmd_op_mod;
2199 uint32_t cmd_rqn;
2200 uint8_t cmd_reserved1[4];
2201 } __packed __aligned(4);
2202
2203 struct mcx_cmd_query_rq_out {
2204 uint8_t cmd_status;
2205 uint8_t cmd_reserved0[3];
2206 uint32_t cmd_syndrome;
2207 uint8_t cmd_reserved1[8];
2208 } __packed __aligned(4);
2209
2210 struct mcx_cmd_query_rq_mb_out {
2211 uint8_t cmd_reserved0[16];
2212 struct mcx_rq_ctx cmd_ctx;
2213 } __packed __aligned(4);
2214
2215 struct mcx_cmd_query_sq_in {
2216 uint16_t cmd_opcode;
2217 uint8_t cmd_reserved0[4];
2218 uint16_t cmd_op_mod;
2219 uint32_t cmd_sqn;
2220 uint8_t cmd_reserved1[4];
2221 } __packed __aligned(4);
2222
2223 struct mcx_cmd_query_sq_out {
2224 uint8_t cmd_status;
2225 uint8_t cmd_reserved0[3];
2226 uint32_t cmd_syndrome;
2227 uint8_t cmd_reserved1[8];
2228 } __packed __aligned(4);
2229
2230 struct mcx_cmd_query_sq_mb_out {
2231 uint8_t cmd_reserved0[16];
2232 struct mcx_sq_ctx cmd_ctx;
2233 } __packed __aligned(4);
2234
2235 struct mcx_cmd_alloc_flow_counter_out {
2236 uint8_t cmd_status;
2237 uint8_t cmd_reserved0[3];
2238 uint32_t cmd_syndrome;
2239 uint8_t cmd_reserved1[2];
2240 uint16_t cmd_flow_counter_id;
2241 uint8_t cmd_reserved2[4];
2242 } __packed __aligned(4);
2243
2244 struct mcx_wq_doorbell {
2245 uint32_t db_recv_counter;
2246 uint32_t db_send_counter;
2247 } __packed __aligned(8);
2248
2249 struct mcx_dmamem {
2250 bus_dmamap_t mxm_map;
2251 bus_dma_segment_t mxm_seg;
2252 int mxm_nsegs;
2253 size_t mxm_size;
2254 void *mxm_kva;
2255 };
2256 #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
2257 #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
2258 #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
2259 #define MCX_DMA_OFF(_mxm, _off) ((void *)((char *)(_mxm)->mxm_kva + (_off)))
2260 #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
2261
2262 struct mcx_hwmem {
2263 bus_dmamap_t mhm_map;
2264 bus_dma_segment_t *mhm_segs;
2265 unsigned int mhm_seg_count;
2266 unsigned int mhm_npages;
2267 };
2268
2269 struct mcx_slot {
2270 bus_dmamap_t ms_map;
2271 struct mbuf *ms_m;
2272 };
2273
2274 struct mcx_eq {
2275 int eq_n;
2276 uint32_t eq_cons;
2277 struct mcx_dmamem eq_mem;
2278 };
2279
2280 struct mcx_cq {
2281 int cq_n;
2282 struct mcx_dmamem cq_mem;
2283 bus_addr_t cq_doorbell;
2284 uint32_t cq_cons;
2285 uint32_t cq_count;
2286 };
2287
2288 struct mcx_calibration {
2289 uint64_t c_timestamp; /* previous mcx chip time */
2290 uint64_t c_uptime; /* previous kernel nanouptime */
2291 uint64_t c_tbase; /* mcx chip time */
2292 uint64_t c_ubase; /* kernel nanouptime */
2293 uint64_t c_ratio;
2294 };
2295
2296 #define MCX_CALIBRATE_FIRST 2
2297 #define MCX_CALIBRATE_NORMAL 32
2298
2299 struct mcx_rxring {
2300 u_int rxr_total;
2301 u_int rxr_inuse;
2302 };
2303
2304 MBUFQ_HEAD(mcx_mbufq);
2305
2306 struct mcx_rx {
2307 struct mcx_softc *rx_softc;
2308
2309 int rx_rqn;
2310 struct mcx_dmamem rx_rq_mem;
2311 struct mcx_slot *rx_slots;
2312 bus_addr_t rx_doorbell;
2313
2314 uint32_t rx_prod;
2315 callout_t rx_refill;
2316 struct mcx_rxring rx_rxr;
2317 } __aligned(64);
2318
2319 struct mcx_tx {
2320 struct mcx_softc *tx_softc;
2321
2322 int tx_uar;
2323 int tx_sqn;
2324 struct mcx_dmamem tx_sq_mem;
2325 struct mcx_slot *tx_slots;
2326 bus_addr_t tx_doorbell;
2327 int tx_bf_offset;
2328
2329 uint32_t tx_cons;
2330 uint32_t tx_prod;
2331 } __aligned(64);
2332
2333 struct mcx_queues {
2334 void *q_ihc;
2335 struct mcx_softc *q_sc;
2336 int q_uar;
2337 int q_index;
2338 struct mcx_rx q_rx;
2339 struct mcx_tx q_tx;
2340 struct mcx_cq q_cq;
2341 struct mcx_eq q_eq;
2342 #if NKSTAT > 0
2343 struct kstat *q_kstat;
2344 #endif
2345 };
2346
2347 struct mcx_flow_group {
2348 int g_id;
2349 int g_table;
2350 int g_start;
2351 int g_size;
2352 };
2353
2354 #define MCX_FLOW_GROUP_PROMISC 0
2355 #define MCX_FLOW_GROUP_ALLMULTI 1
2356 #define MCX_FLOW_GROUP_MAC 2
2357 #define MCX_FLOW_GROUP_RSS_L4 3
2358 #define MCX_FLOW_GROUP_RSS_L3 4
2359 #define MCX_FLOW_GROUP_RSS_NONE 5
2360 #define MCX_NUM_FLOW_GROUPS 6
2361
2362 #define MCX_HASH_SEL_L3 (MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2363 MCX_TIR_CTX_HASH_SEL_DST_IP)
2364 #define MCX_HASH_SEL_L4 (MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2365 MCX_TIR_CTX_HASH_SEL_DPORT)
2366
2367 #define MCX_RSS_HASH_SEL_V4_TCP (MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2368 MCX_TIR_CTX_HASH_SEL_IPV4)
2369 #define MCX_RSS_HASH_SEL_V6_TCP (MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2370 MCX_TIR_CTX_HASH_SEL_IPV6)
2371 #define MCX_RSS_HASH_SEL_V4_UDP (MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2372 MCX_TIR_CTX_HASH_SEL_IPV4)
2373 #define MCX_RSS_HASH_SEL_V6_UDP (MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2374 MCX_TIR_CTX_HASH_SEL_IPV6)
2375 #define MCX_RSS_HASH_SEL_V4 (MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4)
2376 #define MCX_RSS_HASH_SEL_V6 (MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6)
2377
2378 /*
2379 * There are a few different pieces involved in configuring RSS.
2380 * A Receive Queue Table (RQT) is the indirection table that maps packets to
2381 * different rx queues based on a hash value. We only create one, because
2382 * we want to scatter any traffic we can apply RSS to across all our rx
2383 * queues. Anything else will only be delivered to the first rx queue,
2384 * which doesn't require an RQT.
2385 *
2386 * A Transport Interface Receive (TIR) delivers packets to either a single rx
2387 * queue or an RQT, and in the latter case, specifies the set of fields
2388 * hashed, the hash function, and the hash key. We need one of these for each
2389 * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2390 * and one for non-RSS traffic.
2391 *
2392 * Flow tables hold flow table entries in sequence. The first entry that
2393 * matches a packet is applied, sending the packet to either another flow
2394 * table or a TIR. We use one flow table to select packets based on
2395 * destination MAC address, and a second to apply RSS. The entries in the
2396 * first table send matching packets to the second, and the entries in the
2397 * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2398 *
2399 * The flow table entry that delivers packets to an RSS TIR must include match
2400 * criteria that ensure packets delivered to the TIR include all the fields
2401 * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2402 * only accept v4 TCP packets. Accordingly, we need flow table entries for
2403 * each TIR.
2404 *
2405 * All of this is a lot more flexible than we need, and we can describe most
2406 * of the stuff we need with a simple array.
2407 *
2408 * An RSS config creates a TIR with hashing enabled on a set of fields,
2409 * pointing to either the first rx queue or the RQT containing all the rx
2410 * queues, and a flow table entry that matches on an ether type and
2411  * optionally an ip proto, that delivers packets to the TIR; a sketch of
 * how this table is walked follows the array below.
2412 */
2413 static struct mcx_rss_rule {
2414 int hash_sel;
2415 int flow_group;
2416 int ethertype;
2417 int ip_proto;
2418 } mcx_rss_config[] = {
2419 /* udp and tcp for v4/v6 */
2420 { MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2421 ETHERTYPE_IP, IPPROTO_TCP },
2422 { MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2423 ETHERTYPE_IPV6, IPPROTO_TCP },
2424 { MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2425 ETHERTYPE_IP, IPPROTO_UDP },
2426 { MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2427 ETHERTYPE_IPV6, IPPROTO_UDP },
2428
2429 /* other v4/v6 */
2430 { MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2431 ETHERTYPE_IP, 0 },
2432 { MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2433 ETHERTYPE_IPV6, 0 },
2434
2435 /* non v4/v6 */
2436 { 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2437 };
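
/*
 * An illustrative sketch (not compiled) of how the table above is walked
 * at init time. The parameter order of mcx_create_tir_indirect() and
 * mcx_set_flow_table_entry_proto(), and the destination encoding, are
 * assumptions based on the prototypes and defines in this file.
 */
#if 0
	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
		struct mcx_rss_rule *rss = &mcx_rss_config[i];

		/* hash on the rule's fields, scattering across the RQT */
		if (mcx_create_tir_indirect(sc, sc->sc_rqt, rss->hash_sel,
		    &sc->sc_tir[i]) != 0)
			return (-1);

		/* steer matching ethertype/ip proto packets to that TIR */
		if (mcx_set_flow_table_entry_proto(sc, rss->flow_group, i,
		    rss->ethertype, rss->ip_proto,
		    sc->sc_tir[i] | MCX_FLOW_CONTEXT_DEST_TYPE_TIR) != 0)
			return (-1);
	}
#endif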
2438
2439 struct mcx_softc {
2440 device_t sc_dev;
2441 struct ethercom sc_ec;
2442 struct ifmedia sc_media;
2443 uint64_t sc_media_status;
2444 uint64_t sc_media_active;
2445 kmutex_t sc_media_mutex;
2446
2447 pci_chipset_tag_t sc_pc;
2448 pci_intr_handle_t *sc_intrs;
2449 void *sc_ihc;
2450 pcitag_t sc_tag;
2451
2452 bus_dma_tag_t sc_dmat;
2453 bus_space_tag_t sc_memt;
2454 bus_space_handle_t sc_memh;
2455 bus_size_t sc_mems;
2456
2457 struct mcx_dmamem sc_cmdq_mem;
2458 unsigned int sc_cmdq_mask;
2459 unsigned int sc_cmdq_size;
2460
2461 unsigned int sc_cmdq_token;
2462
2463 struct mcx_hwmem sc_boot_pages;
2464 struct mcx_hwmem sc_init_pages;
2465 struct mcx_hwmem sc_regular_pages;
2466
2467 int sc_uar;
2468 int sc_pd;
2469 int sc_tdomain;
2470 uint32_t sc_lkey;
2471 int sc_tis;
2472 int sc_tir[__arraycount(mcx_rss_config)];
2473 int sc_rqt;
2474
2475 struct mcx_dmamem sc_doorbell_mem;
2476
2477 struct mcx_eq sc_admin_eq;
2478 struct mcx_eq sc_queue_eq;
2479
2480 int sc_hardmtu;
2481 int sc_rxbufsz;
2482
2483 int sc_bf_size;
2484 int sc_max_rqt_size;
2485
2486 struct workqueue *sc_workq;
2487 struct work sc_port_change;
2488
2489 int sc_mac_flow_table_id;
2490 int sc_rss_flow_table_id;
2491 struct mcx_flow_group sc_flow_group[MCX_NUM_FLOW_GROUPS];
2492 int sc_promisc_flow_enabled;
2493 int sc_allmulti_flow_enabled;
2494 int sc_mcast_flow_base;
2495 int sc_extra_mcast;
2496 uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2497
2498 struct mcx_calibration sc_calibration[2];
2499 unsigned int sc_calibration_gen;
2500 callout_t sc_calibrate;
2501 uint32_t sc_mhz;
2502 uint32_t sc_khz;
2503
2504 struct mcx_queues *sc_queues;
2505 unsigned int sc_nqueues;
2506
2507 int sc_mcam_reg;
2508
2509 #if NKSTAT > 0
2510 struct kstat *sc_kstat_ieee8023;
2511 struct kstat *sc_kstat_rfc2863;
2512 struct kstat *sc_kstat_rfc2819;
2513 struct kstat *sc_kstat_rfc3635;
2514 unsigned int sc_kstat_mtmp_count;
2515 struct kstat **sc_kstat_mtmp;
2516 #endif
2517
2518 struct timecounter sc_timecounter;
2519 };
2520 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2521
2522 static int mcx_match(device_t, cfdata_t, void *);
2523 static void mcx_attach(device_t, device_t, void *);
2524
2525 static void * mcx_establish_intr(struct mcx_softc *, int, kcpuset_t *,
2526 int (*)(void *), void *, const char *);
2527
2528 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2529 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2530 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2531 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2532
2533 #if NKSTAT > 0
2534 static void mcx_kstat_attach(struct mcx_softc *);
2535 #endif
2536
2537 static void mcx_timecounter_attach(struct mcx_softc *);
2538
2539 static int mcx_version(struct mcx_softc *);
2540 static int mcx_init_wait(struct mcx_softc *);
2541 static int mcx_enable_hca(struct mcx_softc *);
2542 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2543 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2544 int);
2545 static int mcx_issi(struct mcx_softc *);
2546 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2547 static int mcx_hca_max_caps(struct mcx_softc *);
2548 static int mcx_hca_set_caps(struct mcx_softc *);
2549 static int mcx_init_hca(struct mcx_softc *);
2550 static int mcx_set_driver_version(struct mcx_softc *);
2551 static int mcx_iff(struct mcx_softc *);
2552 static int mcx_alloc_uar(struct mcx_softc *, int *);
2553 static int mcx_alloc_pd(struct mcx_softc *);
2554 static int mcx_alloc_tdomain(struct mcx_softc *);
2555 static int mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2556 uint64_t, int);
2557 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2558 static int mcx_query_special_contexts(struct mcx_softc *);
2559 static int mcx_set_port_mtu(struct mcx_softc *, int);
2560 static int mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2561 int);
2562 static int mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2563 static int mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2564 int);
2565 static int mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2566 static int mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2567 static int mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2568 static int mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2569 static int mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2570 static int mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2571 int *);
2572 static int mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2573 int *);
2574 static int mcx_destroy_tir(struct mcx_softc *, int);
2575 static int mcx_create_tis(struct mcx_softc *, int *);
2576 static int mcx_destroy_tis(struct mcx_softc *, int);
2577 static int mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2578 static int mcx_destroy_rqt(struct mcx_softc *, int);
2579 static int mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2580 static int mcx_set_flow_table_root(struct mcx_softc *, int);
2581 static int mcx_destroy_flow_table(struct mcx_softc *, int);
2582 static int mcx_create_flow_group(struct mcx_softc *, int, int, int,
2583 int, int, struct mcx_flow_match *);
2584 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2585 static int mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2586 const uint8_t *, uint32_t);
2587 static int mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2588 int, int, uint32_t);
2589 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2590
2591 #if NKSTAT > 0
2592 static int mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2593 static int mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2594 static int mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2595 static int mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2596 #endif
2597
2598 #if 0
2599 static int mcx_dump_flow_table(struct mcx_softc *, int);
2600 static int mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2601 static int mcx_dump_flow_group(struct mcx_softc *, int);
2602 #endif
2603
2604
2605 #if 0
2606 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2607 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2608 #endif
2609 static void mcx_refill(void *);
2610 static int mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2611 struct mcx_cq_entry *, struct mcx_mbufq *,
2612 const struct mcx_calibration *);
2613 static int mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2614 struct mcx_cq_entry *);
2615 static void mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2616 struct mcx_cq *);
2617
2618 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2619 static void mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2620 static int mcx_admin_intr(void *);
2621 static int mcx_cq_intr(void *);
2622
2623 static int mcx_init(struct ifnet *);
2624 static void mcx_stop(struct ifnet *, int);
2625 static int mcx_ioctl(struct ifnet *, u_long, void *);
2626 static void mcx_start(struct ifnet *);
2627 static void mcx_watchdog(struct ifnet *);
2628 static void mcx_media_add_types(struct mcx_softc *);
2629 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2630 static int mcx_media_change(struct ifnet *);
2631 #if 0
2632 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2633 #endif
2634 static void mcx_port_change(struct work *, void *);
2635
2636 static void mcx_calibrate_first(struct mcx_softc *);
2637 static void mcx_calibrate(void *);
2638
2639 static inline uint32_t
2640 mcx_rd(struct mcx_softc *, bus_size_t);
2641 static inline void
2642 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2643 static inline void
2644 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2645
2646 static uint64_t mcx_timer(struct mcx_softc *);
2647
2648 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2649 bus_size_t, u_int align);
2650 static void mcx_dmamem_zero(struct mcx_dmamem *);
2651 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2652
2653 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2654 unsigned int);
2655 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2656
2657 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2658
2659 static const struct {
2660 pci_vendor_id_t vendor;
2661 pci_product_id_t product;
2662 } mcx_devices[] = {
2663 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2664 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700VF },
2665 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2666 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710VF },
2667 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2668 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800VF },
2669 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2670 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800VF },
2671 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28908 },
2672 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT2892 },
2673 };
2674
2675 struct mcx_eth_proto_capability {
2676 uint64_t cap_media;
2677 uint64_t cap_baudrate;
2678 };
2679
2680 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2681 [MCX_ETHER_CAP_SGMII] = { IFM_1000_SGMII, IF_Gbps(1) },
2682 [MCX_ETHER_CAP_1000_KX] = { IFM_1000_KX, IF_Gbps(1) },
2683 [MCX_ETHER_CAP_10G_CX4] = { IFM_10G_CX4, IF_Gbps(10) },
2684 [MCX_ETHER_CAP_10G_KX4] = { IFM_10G_KX4, IF_Gbps(10) },
2685 [MCX_ETHER_CAP_10G_KR] = { IFM_10G_KR, IF_Gbps(10) },
2686 [MCX_ETHER_CAP_20G_KR2] = { IFM_20G_KR2, IF_Gbps(20) },
2687 [MCX_ETHER_CAP_40G_CR4] = { IFM_40G_CR4, IF_Gbps(40) },
2688 [MCX_ETHER_CAP_40G_KR4] = { IFM_40G_KR4, IF_Gbps(40) },
2689 [MCX_ETHER_CAP_56G_R4] = { IFM_56G_R4, IF_Gbps(56) },
2690 [MCX_ETHER_CAP_10G_CR] = { IFM_10G_CR1, IF_Gbps(10) },
2691 [MCX_ETHER_CAP_10G_SR] = { IFM_10G_SR, IF_Gbps(10) },
2692 [MCX_ETHER_CAP_10G_LR] = { IFM_10G_LR, IF_Gbps(10) },
2693 [MCX_ETHER_CAP_40G_SR4] = { IFM_40G_SR4, IF_Gbps(40) },
2694 [MCX_ETHER_CAP_40G_LR4] = { IFM_40G_LR4, IF_Gbps(40) },
2695 [MCX_ETHER_CAP_50G_SR2] = { IFM_50G_SR2, IF_Gbps(50) },
2696 [MCX_ETHER_CAP_100G_CR4] = { IFM_100G_CR4, IF_Gbps(100) },
2697 [MCX_ETHER_CAP_100G_SR4] = { IFM_100G_SR4, IF_Gbps(100) },
2698 [MCX_ETHER_CAP_100G_KR4] = { IFM_100G_KR4, IF_Gbps(100) },
2699 [MCX_ETHER_CAP_100G_LR4] = { IFM_100G_LR4, IF_Gbps(100) },
2700 [MCX_ETHER_CAP_100_TX] = { IFM_100_TX, IF_Mbps(100) },
2701 [MCX_ETHER_CAP_1000_T] = { IFM_1000_T, IF_Gbps(1) },
2702 [MCX_ETHER_CAP_10G_T] = { IFM_10G_T, IF_Gbps(10) },
2703 [MCX_ETHER_CAP_25G_CR] = { IFM_25G_CR, IF_Gbps(25) },
2704 [MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) },
2705 [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) },
2706 [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) },
2707 [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) },
2708 };
2709
2710 static int
2711 mcx_get_id(uint32_t val)
2712 {
2713 return be32toh(val) & 0x00ffffff;
2714 }
2715
2716 static int
2717 mcx_match(device_t parent, cfdata_t cf, void *aux)
2718 {
2719 struct pci_attach_args *pa = aux;
2720 int n;
2721
2722 for (n = 0; n < __arraycount(mcx_devices); n++) {
2723 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2724 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2725 return 1;
2726 }
2727
2728 return 0;
2729 }
2730
2731 static void
2732 mcx_attach(device_t parent, device_t self, void *aux)
2733 {
2734 struct mcx_softc *sc = device_private(self);
2735 struct ifnet *ifp = &sc->sc_ec.ec_if;
2736 struct pci_attach_args *pa = aux;
2737 uint8_t enaddr[ETHER_ADDR_LEN];
2738 int counts[PCI_INTR_TYPE_SIZE];
2739 char intrxname[32];
2740 pcireg_t memtype;
2741 uint32_t r;
2742 unsigned int cq_stride;
2743 unsigned int cq_size;
2744 int i, msix;
2745 kcpuset_t *affinity;
2746
2747 sc->sc_dev = self;
2748 sc->sc_pc = pa->pa_pc;
2749 sc->sc_tag = pa->pa_tag;
2750 if (pci_dma64_available(pa))
2751 sc->sc_dmat = pa->pa_dmat64;
2752 else
2753 sc->sc_dmat = pa->pa_dmat;
2754
2755 /* Map the PCI memory space */
2756 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2757 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2758 0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2759 NULL, &sc->sc_mems)) {
2760 aprint_error(": unable to map register memory\n");
2761 return;
2762 }
2763
2764 pci_aprint_devinfo(pa, "Ethernet controller");
2765
2766 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2767
2768 if (mcx_version(sc) != 0) {
2769 /* error printed by mcx_version */
2770 goto unmap;
2771 }
2772
2773 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2774 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2775 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2776 if (cq_size > MCX_MAX_CQE) {
2777 aprint_error_dev(self,
2778 "command queue size overflow %u\n", cq_size);
2779 goto unmap;
2780 }
2781 if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2782 aprint_error_dev(self,
2783 "command queue entry size underflow %u\n", cq_stride);
2784 goto unmap;
2785 }
2786 if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2787 aprint_error_dev(self, "command queue page overflow\n");
2788 goto unmap;
2789 }
2790
2791 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2792 MCX_PAGE_SIZE) != 0) {
2793 aprint_error_dev(self, "unable to allocate doorbell memory\n");
2794 goto unmap;
2795 }
2796
2797 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2798 MCX_PAGE_SIZE) != 0) {
2799 aprint_error_dev(self, "unable to allocate command queue\n");
2800 goto dbfree;
2801 }
2802
2803 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2804 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2805 BUS_SPACE_BARRIER_WRITE);
2806 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2807 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2808 BUS_SPACE_BARRIER_WRITE);
2809
2810 if (mcx_init_wait(sc) != 0) {
2811 aprint_error_dev(self, "timeout waiting for init\n");
2812 goto cqfree;
2813 }
2814
2815 sc->sc_cmdq_mask = cq_size - 1;
2816 sc->sc_cmdq_size = cq_stride;
2817
2818 if (mcx_enable_hca(sc) != 0) {
2819 /* error printed by mcx_enable_hca */
2820 goto cqfree;
2821 }
2822
2823 if (mcx_issi(sc) != 0) {
2824 /* error printed by mcx_issi */
2825 goto teardown;
2826 }
2827
2828 if (mcx_pages(sc, &sc->sc_boot_pages,
2829 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2830 /* error printed by mcx_pages */
2831 goto teardown;
2832 }
2833
2834 if (mcx_hca_max_caps(sc) != 0) {
2835 /* error printed by mcx_hca_max_caps */
2836 goto teardown;
2837 }
2838
2839 if (mcx_hca_set_caps(sc) != 0) {
2840 /* error printed by mcx_hca_set_caps */
2841 goto teardown;
2842 }
2843
2844 if (mcx_pages(sc, &sc->sc_init_pages,
2845 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2846 /* error printed by mcx_pages */
2847 goto teardown;
2848 }
2849
2850 if (mcx_init_hca(sc) != 0) {
2851 /* error printed by mcx_init_hca */
2852 goto teardown;
2853 }
2854
2855 if (mcx_pages(sc, &sc->sc_regular_pages,
2856 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2857 /* error printed by mcx_pages */
2858 goto teardown;
2859 }
2860
2861 /* apparently not necessary? */
2862 if (mcx_set_driver_version(sc) != 0) {
2863 /* error printed by mcx_set_driver_version */
2864 goto teardown;
2865 }
2866
2867 if (mcx_iff(sc) != 0) { /* modify nic vport context */
2868 /* error printed by mcx_iff? */
2869 goto teardown;
2870 }
2871
2872 if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2873 /* error printed by mcx_alloc_uar */
2874 goto teardown;
2875 }
2876
2877 if (mcx_alloc_pd(sc) != 0) {
2878 /* error printed by mcx_alloc_pd */
2879 goto teardown;
2880 }
2881
2882 if (mcx_alloc_tdomain(sc) != 0) {
2883 /* error printed by mcx_alloc_tdomain */
2884 goto teardown;
2885 }
2886
2887 /*
2888 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2889 * mellanox support tells me legacy interrupts are not supported,
2890 * so we're stuck with just msi-x.
2891 */
2892 counts[PCI_INTR_TYPE_MSIX] = -1;
2893 counts[PCI_INTR_TYPE_MSI] = 0;
2894 counts[PCI_INTR_TYPE_INTX] = 0;
2895 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2896 aprint_error_dev(self, "unable to allocate interrupt\n");
2897 goto teardown;
2898 }
2899 if (counts[PCI_INTR_TYPE_MSIX] < 2) {
2900 aprint_error_dev(self, "not enough MSI-X vectors\n");
2901 goto teardown;
2902 }
2903 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2904 snprintf(intrxname, sizeof(intrxname), "%s adminq", DEVNAME(sc));
2905 sc->sc_ihc = mcx_establish_intr(sc, 0, NULL, mcx_admin_intr, sc,
2906 intrxname);
2907 if (sc->sc_ihc == NULL) {
2908 aprint_error_dev(self, "couldn't establish adminq interrupt\n");
2909 goto teardown;
2910 }
2911
2912 if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2913 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2914 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2915 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2916 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2917 /* error printed by mcx_create_eq */
2918 goto teardown;
2919 }
2920
2921 if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2922 /* error printed by mcx_query_nic_vport_context */
2923 goto teardown;
2924 }
2925
2926 if (mcx_query_special_contexts(sc) != 0) {
2927 /* error printed by mcx_query_special_contexts */
2928 goto teardown;
2929 }
2930
2931 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2932 /* error printed by mcx_set_port_mtu */
2933 goto teardown;
2934 }
2935
2936 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2937 ether_sprintf(enaddr));
2938
2939 msix = counts[PCI_INTR_TYPE_MSIX];
2940 msix--; /* admin ops took one */
2941
2942 sc->sc_nqueues = uimin(MCX_MAX_QUEUES, msix);
2943 sc->sc_nqueues = uimin(sc->sc_nqueues, ncpu);
2944 sc->sc_queues = kmem_zalloc(sc->sc_nqueues * sizeof(*sc->sc_queues),
2945 KM_SLEEP);
2946
2947 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2948 ifp->if_softc = sc;
2949 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2950 #ifdef MCX_MPSAFE
2951 ifp->if_extflags = IFEF_MPSAFE;
2952 #endif
2953 ifp->if_init = mcx_init;
2954 ifp->if_stop = mcx_stop;
2955 ifp->if_ioctl = mcx_ioctl;
2956 ifp->if_start = mcx_start;
2957 ifp->if_watchdog = mcx_watchdog;
2958 ifp->if_mtu = sc->sc_hardmtu;
2959 ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
2960 IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx |
2961 IFCAP_CSUM_UDPv6_Rx | IFCAP_CSUM_UDPv6_Tx |
2962 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
2963 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_TCPv6_Tx;
2964 IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2965 IFQ_SET_READY(&ifp->if_snd);
2966
2967 sc->sc_ec.ec_capabilities = ETHERCAP_JUMBO_MTU |
2968 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2969 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2970
2971 sc->sc_ec.ec_ifmedia = &sc->sc_media;
2972 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2973 mcx_media_status, &sc->sc_media_mutex);
2974 mcx_media_add_types(sc);
2975 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2976 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2977
2978 if_attach(ifp);
2979 if_deferred_start_init(ifp, NULL);
2980
2981 ether_ifattach(ifp, enaddr);
2982
2983 kcpuset_create(&affinity, false);
2984 kcpuset_set(affinity, 0);
2985
2986 for (i = 0; i < sc->sc_nqueues; i++) {
2987 struct mcx_queues *q = &sc->sc_queues[i];
2988 struct mcx_rx *rx = &q->q_rx;
2989 struct mcx_tx *tx = &q->q_tx;
2990 int vec;
2991
2992 vec = i + 1;
2993 q->q_sc = sc;
2994 q->q_index = i;
2995
2996 if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
2997 aprint_error_dev(self, "unable to alloc uar %d\n", i);
2998 goto teardown;
2999 }
3000
3001 if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
3002 aprint_error_dev(self,
3003 "unable to create event queue %d\n", i);
3004 goto teardown;
3005 }
3006
3007 rx->rx_softc = sc;
3008 callout_init(&rx->rx_refill, CALLOUT_FLAGS);
3009 callout_setfunc(&rx->rx_refill, mcx_refill, rx);
3010
3011 tx->tx_softc = sc;
3012
3013 snprintf(intrxname, sizeof(intrxname), "%s queue %d",
3014 DEVNAME(sc), i);
3015 q->q_ihc = mcx_establish_intr(sc, vec, affinity, mcx_cq_intr,
3016 q, intrxname);
3017 }
3018 	kcpuset_destroy(affinity);

3019 callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
3020 callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
3021
3022 if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
3023 PRI_NONE, IPL_NET, 0) != 0) {
3024 aprint_error_dev(self, "couldn't create port change workq\n");
3025 goto teardown;
3026 }
3027
3028 mcx_port_change(&sc->sc_port_change, sc);
3029
3030 sc->sc_mac_flow_table_id = -1;
3031 sc->sc_rss_flow_table_id = -1;
3032 sc->sc_rqt = -1;
3033 for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3034 struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
3035 mfg->g_id = -1;
3036 mfg->g_table = -1;
3037 mfg->g_size = 0;
3038 mfg->g_start = 0;
3039 }
3040 sc->sc_extra_mcast = 0;
3041 memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3042
3043 #if NKSTAT > 0
3044 mcx_kstat_attach(sc);
3045 #endif
3046 mcx_timecounter_attach(sc);
3047 return;
3048
3049 teardown:
3050 mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3051 /* error printed by mcx_teardown_hca, and we're already unwinding */
3052 cqfree:
3053 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3054 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3055 BUS_SPACE_BARRIER_WRITE);
3056 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3057 MCX_CMDQ_INTERFACE_DISABLED);
3058 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3059 BUS_SPACE_BARRIER_WRITE);
3060
3061 mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3062 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3063 BUS_SPACE_BARRIER_WRITE);
3064 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3065
3066 mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3067 dbfree:
3068 mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3069 unmap:
3070 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3071 sc->sc_mems = 0;
3072 }
3073
3074 static void *
3075 mcx_establish_intr(struct mcx_softc *sc, int index, kcpuset_t *affinity,
3076 int (*func)(void *), void *arg, const char *xname)
3077 {
3078 char intrbuf[PCI_INTRSTR_LEN];
3079 const char *intrstr;
3080 void *ih;
3081
3082 pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[index], PCI_INTR_MPSAFE,
3083 true);
3084
3085 intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[index], intrbuf,
3086 sizeof(intrbuf));
3087 ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[index], IPL_NET,
3088 func, arg, xname);
3089 if (ih == NULL) {
3090 aprint_error_dev(sc->sc_dev,
3091 "unable to establish interrupt%s%s\n",
3092 intrstr ? " at " : "",
3093 intrstr ? intrstr : "");
3094 return NULL;
3095 }
3096
3097 if (affinity != NULL && index > 0) {
3098 /* Round-robin affinity */
3099 kcpuset_zero(affinity);
3100 kcpuset_set(affinity, (index - 1) % ncpu);
3101 interrupt_distribute(ih, affinity, NULL);
3102 }
3103
3104 return ih;
3105 }
3106
3107 static void
3108 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
3109 {
3110 rxr->rxr_total = hwm;
3111 rxr->rxr_inuse = 0;
3112 }
3113
3114 static u_int
3115 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
3116 {
3117 	const u_int taken = uimin(max, rxr->rxr_total - rxr->rxr_inuse);
3118
3119 rxr->rxr_inuse += taken;
3120
3121 return taken;
3122 }
3123
3124 static void
3125 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
3126 {
3127 rxr->rxr_inuse -= n;
3128 }
3129
3130 static u_int
3131 mcx_rxr_inuse(struct mcx_rxring *rxr)
3132 {
3133 return rxr->rxr_inuse;
3134 }
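
/*
 * A minimal usage sketch for the accounting above, assuming a
 * hypothetical refill path with "nslots" free ring entries:
 *
 *	u_int want = mcx_rxr_get(&rx->rx_rxr, nslots);
 *	... allocate and post up to "want" mbufs, "posted" succeed ...
 *	mcx_rxr_put(&rx->rx_rxr, want - posted);
 */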
3135
3136 static int
3137 mcx_version(struct mcx_softc *sc)
3138 {
3139 uint32_t fw0, fw1;
3140 uint16_t cmdif;
3141
3142 fw0 = mcx_rd(sc, MCX_FW_VER);
3143 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3144
3145 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
3146 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3147
3148 cmdif = MCX_CMDIF(fw1);
3149 if (cmdif != MCX_CMD_IF_SUPPORTED) {
3150 aprint_error_dev(sc->sc_dev,
3151 "unsupported command interface %u\n", cmdif);
3152 return (-1);
3153 }
3154
3155 return (0);
3156 }
3157
3158 static int
3159 mcx_init_wait(struct mcx_softc *sc)
3160 {
3161 unsigned int i;
3162 uint32_t r;
3163
3164 for (i = 0; i < 2000; i++) {
3165 r = mcx_rd(sc, MCX_STATE);
3166 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3167 return (0);
3168
3169 delay(1000);
3170 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3171 BUS_SPACE_BARRIER_READ);
3172 }
3173
3174 return (-1);
3175 }
3176
3177 static uint8_t
3178 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3179 unsigned int msec)
3180 {
3181 unsigned int i;
3182
3183 for (i = 0; i < msec; i++) {
3184 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3185 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3186
3187 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3188 MCX_CQ_STATUS_OWN_SW)
3189 return (0);
3190
3191 delay(1000);
3192 }
3193
3194 return (ETIMEDOUT);
3195 }
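
/*
 * Command queue entries and mailboxes carry 8-bit xor checksums.  The
 * helpers below fold 64-, 32- and 8-bit fields into a running 32-bit
 * xor; mcx_mix_done() then folds that down to a single byte.
 */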
3196
3197 static uint32_t
3198 mcx_mix_u64(uint32_t xor, uint64_t u64)
3199 {
3200 xor ^= u64 >> 32;
3201 xor ^= u64;
3202
3203 return (xor);
3204 }
3205
3206 static uint32_t
3207 mcx_mix_u32(uint32_t xor, uint32_t u32)
3208 {
3209 xor ^= u32;
3210
3211 return (xor);
3212 }
3213
3214 static uint32_t
3215 mcx_mix_u8(uint32_t xor, uint8_t u8)
3216 {
3217 xor ^= u8;
3218
3219 return (xor);
3220 }
3221
3222 static uint8_t
3223 mcx_mix_done(uint32_t xor)
3224 {
3225 xor ^= xor >> 16;
3226 xor ^= xor >> 8;
3227
3228 return (xor);
3229 }
3230
3231 static uint8_t
3232 mcx_xor(const void *buf, size_t len)
3233 {
3234 const uint32_t *dwords = buf;
3235 uint32_t xor = 0xff;
3236 size_t i;
3237
3238 len /= sizeof(*dwords);
3239
3240 for (i = 0; i < len; i++)
3241 xor ^= dwords[i];
3242
3243 return (mcx_mix_done(xor));
3244 }
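
/*
 * Tokens match completions back to the commands that produced them;
 * zero is reserved, so the counter wraps from 0xff back to 1.
 */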
3245
3246 static uint8_t
3247 mcx_cmdq_token(struct mcx_softc *sc)
3248 {
3249 uint8_t token;
3250
3251 do {
3252 token = ++sc->sc_cmdq_token;
3253 } while (token == 0);
3254
3255 return (token);
3256 }
3257
3258 static void
3259 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3260 uint32_t ilen, uint32_t olen, uint8_t token)
3261 {
3262 memset(cqe, 0, sc->sc_cmdq_size);
3263
3264 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3265 be32enc(&cqe->cq_input_length, ilen);
3266 be32enc(&cqe->cq_output_length, olen);
3267 cqe->cq_token = token;
3268 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3269 }
3270
3271 static void
3272 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3273 {
3274 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3275 }
3276
3277 static int
3278 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3279 {
3280 /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
3281 return (0);
3282 }
3283
3284 static void *
3285 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3286 {
3287 return (&cqe->cq_input_data);
3288 }
3289
3290 static void *
3291 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3292 {
3293 return (&cqe->cq_output_data);
3294 }
3295
3296 static void
3297 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3298 unsigned int slot)
3299 {
3300 mcx_cmdq_sign(cqe);
3301
3302 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3303 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3304
3305 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3306 mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3307 BUS_SPACE_BARRIER_WRITE);
3308 }
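
/*
 * Every command follows the same shape, of which mcx_enable_hca() below
 * is the simplest example: mcx_cmdq_init(), fill in the input data,
 * mcx_cmdq_post(), mcx_cmdq_poll() until ownership returns to software,
 * mcx_cmdq_verify(), then check the status byte in the output data.
 */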
3309
3310 static int
3311 mcx_enable_hca(struct mcx_softc *sc)
3312 {
3313 struct mcx_cmdq_entry *cqe;
3314 struct mcx_cmd_enable_hca_in *in;
3315 struct mcx_cmd_enable_hca_out *out;
3316 int error;
3317 uint8_t status;
3318
3319 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3320 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3321
3322 in = mcx_cmdq_in(cqe);
3323 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3324 in->cmd_op_mod = htobe16(0);
3325 in->cmd_function_id = htobe16(0);
3326
3327 mcx_cmdq_post(sc, cqe, 0);
3328
3329 error = mcx_cmdq_poll(sc, cqe, 1000);
3330 if (error != 0) {
3331 printf(", hca enable timeout\n");
3332 return (-1);
3333 }
3334 if (mcx_cmdq_verify(cqe) != 0) {
3335 printf(", hca enable command corrupt\n");
3336 return (-1);
3337 }
3338
3339 status = cqe->cq_output_data[0];
3340 if (status != MCX_CQ_STATUS_OK) {
3341 printf(", hca enable failed (%x)\n", status);
3342 return (-1);
3343 }
3344
3345 return (0);
3346 }
3347
3348 static int
3349 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3350 {
3351 struct mcx_cmdq_entry *cqe;
3352 struct mcx_cmd_teardown_hca_in *in;
3353 struct mcx_cmd_teardown_hca_out *out;
3354 int error;
3355 uint8_t status;
3356
3357 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3358 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3359
3360 in = mcx_cmdq_in(cqe);
3361 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3362 in->cmd_op_mod = htobe16(0);
3363 in->cmd_profile = profile;
3364
3365 mcx_cmdq_post(sc, cqe, 0);
3366
3367 error = mcx_cmdq_poll(sc, cqe, 1000);
3368 if (error != 0) {
3369 printf(", hca teardown timeout\n");
3370 return (-1);
3371 }
3372 if (mcx_cmdq_verify(cqe) != 0) {
3373 printf(", hca teardown command corrupt\n");
3374 return (-1);
3375 }
3376
3377 status = cqe->cq_output_data[0];
3378 if (status != MCX_CQ_STATUS_OK) {
3379 printf(", hca teardown failed (%x)\n", status);
3380 return (-1);
3381 }
3382
3383 return (0);
3384 }
3385
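/*
 * Commands whose input or output data doesn't fit inline in the queue
 * entry use a chain of mailboxes.  Each mailbox carries a block of
 * data, its block number, the command token, and a pointer to the next
 * mailbox; the allocator below links the chain as it fills each one in.
 */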
3386 static int
3387 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3388 unsigned int nmb, uint64_t *ptr, uint8_t token)
3389 {
3390 uint8_t *kva;
3391 uint64_t dva;
3392 int i;
3393 int error;
3394
3395 error = mcx_dmamem_alloc(sc, mxm,
3396 nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3397 if (error != 0)
3398 return (error);
3399
3400 mcx_dmamem_zero(mxm);
3401
3402 dva = MCX_DMA_DVA(mxm);
3403 kva = MCX_DMA_KVA(mxm);
3404 for (i = 0; i < nmb; i++) {
3405 struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3406
3407 /* patch the cqe or mbox pointing at this one */
3408 be64enc(ptr, dva);
3409
3410 /* fill in this mbox */
3411 be32enc(&mbox->mb_block_number, i);
3412 mbox->mb_token = token;
3413
3414 /* move to the next one */
3415 ptr = &mbox->mb_next_ptr;
3416
3417 dva += MCX_CMDQ_MAILBOX_SIZE;
3418 kva += MCX_CMDQ_MAILBOX_SIZE;
3419 }
3420
3421 return (0);
3422 }
3423
3424 static uint32_t
3425 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3426 {
3427 uint32_t xor = 0xff;
3428
3429 /* only 3 fields get set, so mix them directly */
3430 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3431 xor = mcx_mix_u32(xor, mb->mb_block_number);
3432 xor = mcx_mix_u8(xor, mb->mb_token);
3433
3434 return (mcx_mix_done(xor));
3435 }
3436
3437 static void
3438 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3439 {
3440 uint8_t *kva;
3441 int i;
3442
3443 kva = MCX_DMA_KVA(mxm);
3444
3445 for (i = 0; i < nmb; i++) {
3446 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3447 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3448 mb->mb_ctrl_signature = sig;
3449 mb->mb_signature = sig ^
3450 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3451
3452 kva += MCX_CMDQ_MAILBOX_SIZE;
3453 }
3454 }
3455
3456 static void
3457 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3458 {
3459 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3460 0, MCX_DMA_LEN(mxm), ops);
3461 }
3462
3463 static struct mcx_cmdq_mailbox *
3464 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3465 {
3466 uint8_t *kva;
3467
3468 kva = MCX_DMA_KVA(mxm);
3469 kva += i * MCX_CMDQ_MAILBOX_SIZE;
3470
3471 return ((struct mcx_cmdq_mailbox *)kva);
3472 }
3473
3474 static inline void *
3475 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3476 {
3477 return (&mb->mb_data);
3478 }
3479
3480 static void
3481 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3482 void *b, size_t len)
3483 {
3484 uint8_t *buf = b;
3485 struct mcx_cmdq_mailbox *mb;
3486 int i;
3487
3488 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3489 for (i = 0; i < nmb; i++) {
3490
3491 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
3492
3493 if (sizeof(mb->mb_data) >= len)
3494 break;
3495
3496 buf += sizeof(mb->mb_data);
3497 len -= sizeof(mb->mb_data);
3498 mb++;
3499 }
3500 }
3501
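/*
 * Scatter the page addresses of a buffer into the data regions of a
 * mailbox chain, starting at a byte offset into the first mailbox and
 * stepping to the next mailbox whenever a data region fills up.
 */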
3502 static void
3503 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3504 struct mcx_dmamem *buf)
3505 {
3506 uint64_t *pas;
3507 int mbox, mbox_pages, i;
3508
3509 mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3510 offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3511
3512 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3513 pas += (offset / sizeof(*pas));
3514 mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3515 for (i = 0; i < npages; i++) {
3516 if (i == mbox_pages) {
3517 mbox++;
3518 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3519 mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3520 }
3521 *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3522 pas++;
3523 }
3524 }
3525
3526 static void
3527 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3528 {
3529 uint8_t *buf = b;
3530 struct mcx_cmdq_mailbox *mb;
3531 int i;
3532
3533 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3534 for (i = 0; i < nmb; i++) {
3535 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
3536
3537 if (sizeof(mb->mb_data) >= len)
3538 break;
3539
3540 buf += sizeof(mb->mb_data);
3541 len -= sizeof(mb->mb_data);
3542 mb++;
3543 }
3544 }
3545
3546 static void
3547 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3548 {
3549 mcx_dmamem_free(sc, mxm);
3550 }
3551
3552 #if 0
3553 static void
3554 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3555 {
3556 unsigned int i;
3557
3558 printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3559 be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
3560
3561 printf(", idata ");
3562 for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3563 printf("%02x", cqe->cq_input_data[i]);
3564
3565 printf(", odata ");
3566 for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3567 printf("%02x", cqe->cq_output_data[i]);
3568
3569 printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3570 be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
3571 cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3572 }
3573
3574 static void
3575 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3576 {
3577 int i, j;
3578 uint8_t *d;
3579
3580 for (i = 0; i < num; i++) {
3581 struct mcx_cmdq_mailbox *mbox;
3582 mbox = mcx_cq_mbox(mboxes, i);
3583
3584 d = mcx_cq_mbox_data(mbox);
3585 for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3586 if (j != 0 && (j % 16 == 0))
3587 printf("\n");
3588 printf("%.2x ", d[j]);
3589 }
3590 }
3591 }
3592 #endif
3593
3594 static int
3595 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3596 int len)
3597 {
3598 struct mcx_dmamem mxm;
3599 struct mcx_cmdq_entry *cqe;
3600 struct mcx_cmd_access_reg_in *in;
3601 struct mcx_cmd_access_reg_out *out;
3602 uint8_t token = mcx_cmdq_token(sc);
3603 int error, nmb;
3604
3605 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3606 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3607 token);
3608
3609 in = mcx_cmdq_in(cqe);
3610 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3611 in->cmd_op_mod = htobe16(op);
3612 in->cmd_register_id = htobe16(reg);
3613
3614 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3615 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3616 &cqe->cq_output_ptr, token) != 0) {
3617 printf(", unable to allocate access reg mailboxen\n");
3618 return (-1);
3619 }
3620 cqe->cq_input_ptr = cqe->cq_output_ptr;
3621 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3622 mcx_cmdq_mboxes_sign(&mxm, nmb);
3623 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3624
3625 mcx_cmdq_post(sc, cqe, 0);
3626 error = mcx_cmdq_poll(sc, cqe, 1000);
3627 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3628
3629 if (error != 0) {
3630 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3631 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3632 goto free;
3633 }
3634 error = mcx_cmdq_verify(cqe);
3635 if (error != 0) {
3636 printf("%s: access reg (%s %x) reply corrupt\n",
3637 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
3638 reg);
3639 goto free;
3640 }
3641
3642 out = mcx_cmdq_out(cqe);
3643 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3644 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3645 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3646 reg, out->cmd_status, be32toh(out->cmd_syndrome));
3647 error = -1;
3648 goto free;
3649 }
3650
3651 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3652 free:
3653 mcx_dmamem_free(sc, &mxm);
3654
3655 return (error);
3656 }
3657
3658 static int
3659 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3660 unsigned int slot)
3661 {
3662 struct mcx_cmd_set_issi_in *in;
3663 struct mcx_cmd_set_issi_out *out;
3664 uint8_t status;
3665
3666 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3667
3668 in = mcx_cmdq_in(cqe);
3669 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3670 in->cmd_op_mod = htobe16(0);
3671 in->cmd_current_issi = htobe16(MCX_ISSI);
3672
3673 mcx_cmdq_post(sc, cqe, slot);
3674 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3675 return (-1);
3676 if (mcx_cmdq_verify(cqe) != 0)
3677 return (-1);
3678
3679 status = cqe->cq_output_data[0];
3680 if (status != MCX_CQ_STATUS_OK)
3681 return (-1);
3682
3683 return (0);
3684 }
3685
3686 static int
3687 mcx_issi(struct mcx_softc *sc)
3688 {
3689 struct mcx_dmamem mxm;
3690 struct mcx_cmdq_entry *cqe;
3691 struct mcx_cmd_query_issi_in *in;
3692 struct mcx_cmd_query_issi_il_out *out;
3693 struct mcx_cmd_query_issi_mb_out *mb;
3694 uint8_t token = mcx_cmdq_token(sc);
3695 uint8_t status;
3696 int error;
3697
3698 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3699 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3700
3701 in = mcx_cmdq_in(cqe);
3702 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3703 in->cmd_op_mod = htobe16(0);
3704
3705 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3706 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3707 &cqe->cq_output_ptr, token) != 0) {
3708 printf(", unable to allocate query issi mailbox\n");
3709 return (-1);
3710 }
3711 mcx_cmdq_mboxes_sign(&mxm, 1);
3712
3713 mcx_cmdq_post(sc, cqe, 0);
3714 error = mcx_cmdq_poll(sc, cqe, 1000);
3715 if (error != 0) {
3716 printf(", query issi timeout\n");
3717 goto free;
3718 }
3719 error = mcx_cmdq_verify(cqe);
3720 if (error != 0) {
3721 printf(", query issi reply corrupt\n");
3722 goto free;
3723 }
3724
3725 status = cqe->cq_output_data[0];
3726 switch (status) {
3727 case MCX_CQ_STATUS_OK:
3728 break;
3729 case MCX_CQ_STATUS_BAD_OPCODE:
3730 /* use ISSI 0 */
3731 goto free;
3732 default:
3733 printf(", query issi failed (%x)\n", status);
3734 error = -1;
3735 goto free;
3736 }
3737
3738 out = mcx_cmdq_out(cqe);
3739 if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3740 /* use ISSI 1 */
3741 goto free;
3742 }
3743
3744 /* don't need to read cqe anymore, can be used for SET ISSI */
3745
3746 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3747 CTASSERT(MCX_ISSI < NBBY);
3748 /* the 640-bit supported ISSI mask is big-endian: ISSIs 0-7 are in byte 79 */
3749 if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3750 /* use ISSI 0 */
3751 goto free;
3752 }
3753
3754 if (mcx_set_issi(sc, cqe, 0) != 0) {
3755 /* ignore the error, just use ISSI 0 */
3756 } else {
3757 /* use ISSI 1 */
3758 }
3759
3760 free:
3761 mcx_cq_mboxes_free(sc, &mxm);
3762 return (error);
3763 }
3764
3765 static int
3766 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3767 int32_t *npages, uint16_t *func_id)
3768 {
3769 struct mcx_cmdq_entry *cqe;
3770 struct mcx_cmd_query_pages_in *in;
3771 struct mcx_cmd_query_pages_out *out;
3772
3773 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3774 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3775
3776 in = mcx_cmdq_in(cqe);
3777 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3778 in->cmd_op_mod = type; /* callers pass this already byte-swapped */
3779
3780 mcx_cmdq_post(sc, cqe, 0);
3781 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3782 printf(", query pages timeout\n");
3783 return (-1);
3784 }
3785 if (mcx_cmdq_verify(cqe) != 0) {
3786 printf(", query pages reply corrupt\n");
3787 return (-1);
3788 }
3789
3790 out = mcx_cmdq_out(cqe);
3791 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3792 printf(", query pages failed (%x)\n", out->cmd_status);
3793 return (-1);
3794 }
3795
3796 *func_id = out->cmd_func_id;
3797 *npages = be32dec(&out->cmd_num_pages);
3798
3799 return (0);
3800 }
3801
3802 struct bus_dma_iter {
3803 bus_dmamap_t i_map;
3804 bus_size_t i_offset;
3805 unsigned int i_index;
3806 };
3807
3808 static void
3809 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3810 {
3811 i->i_map = map;
3812 i->i_offset = 0;
3813 i->i_index = 0;
3814 }
3815
3816 static bus_addr_t
3817 bus_dma_iter_addr(struct bus_dma_iter *i)
3818 {
3819 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3820 }
3821
3822 static void
3823 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3824 {
3825 bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3826 bus_size_t diff;
3827
3828 do {
3829 diff = seg->ds_len - i->i_offset;
3830 if (size < diff)
3831 break;
3832
3833 size -= diff;
3834
3835 seg++;
3836
3837 i->i_offset = 0;
3838 i->i_index++;
3839 } while (size > 0);
3840
3841 i->i_offset += size;
3842 }
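/*
 * usage sketch for the iterator above ("map" is assumed to be a loaded
 * bus_dmamap_t covering npages * MCX_PAGE_SIZE bytes):
 *
 *	struct bus_dma_iter iter;
 *	unsigned int n;
 *
 *	bus_dma_iter_init(&iter, map);
 *	for (n = 0; n < npages; n++) {
 *		bus_addr_t pa = bus_dma_iter_addr(&iter);
 *		(hand pa to the device)
 *		bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
 *	}
 *
 * mcx_add_pages() below does exactly this to fill the MANAGE_PAGES
 * mailboxes.
 */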
3843
3844 static int
3845 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3846 {
3847 struct mcx_dmamem mxm;
3848 struct mcx_cmdq_entry *cqe;
3849 struct mcx_cmd_manage_pages_in *in;
3850 struct mcx_cmd_manage_pages_out *out;
3851 unsigned int paslen, nmb, i, j, npages;
3852 struct bus_dma_iter iter;
3853 uint64_t *pas;
3854 uint8_t status;
3855 uint8_t token = mcx_cmdq_token(sc);
3856 int error;
3857
3858 npages = mhm->mhm_npages;
3859
3860 paslen = sizeof(*pas) * npages;
3861 nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
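/*
 * worked example: each mailbox carries MCX_CMDQ_MAILBOX_DATASIZE (512)
 * bytes of data, i.e. 64 8-byte page addresses, so a request for 100
 * pages needs two mailboxes: 64 PAs in the first and 36 in the second.
 */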
3862
3863 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3864 mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3865
3866 in = mcx_cmdq_in(cqe);
3867 in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3868 in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3869 in->cmd_func_id = func_id;
3870 be32enc(&in->cmd_input_num_entries, npages);
3871
3872 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3873 &cqe->cq_input_ptr, token) != 0) {
3874 printf(", unable to allocate manage pages mailboxen\n");
3875 return (-1);
3876 }
3877
3878 bus_dma_iter_init(&iter, mhm->mhm_map);
3879 for (i = 0; i < nmb; i++) {
3880 unsigned int lim;
3881
3882 pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3883 lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3884
3885 for (j = 0; j < lim; j++) {
3886 be64enc(&pas[j], bus_dma_iter_addr(&iter));
3887 bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3888 }
3889
3890 npages -= lim;
3891 }
3892
3893 mcx_cmdq_mboxes_sign(&mxm, nmb);
3894
3895 mcx_cmdq_post(sc, cqe, 0);
3896 error = mcx_cmdq_poll(sc, cqe, 1000);
3897 if (error != 0) {
3898 printf(", manage pages timeout\n");
3899 goto free;
3900 }
3901 error = mcx_cmdq_verify(cqe);
3902 if (error != 0) {
3903 printf(", manage pages reply corrupt\n");
3904 goto free;
3905 }
3906
3907 status = cqe->cq_output_data[0];
3908 if (status != MCX_CQ_STATUS_OK) {
3909 printf(", manage pages failed (%x)\n", status);
3910 error = -1;
3911 goto free;
3912 }
3913
3914 free:
3915 mcx_dmamem_free(sc, &mxm);
3916
3917 return (error);
3918 }
3919
3920 static int
3921 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3922 {
3923 int32_t npages;
3924 uint16_t func_id;
3925
3926 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3927 /* error printed by mcx_query_pages */
3928 return (-1);
3929 }
3930
3931 if (npages < 1)
3932 return (0);
3933
3934 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3935 printf(", unable to allocate hwmem\n");
3936 return (-1);
3937 }
3938
3939 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3940 printf(", unable to add hwmem\n");
3941 goto free;
3942 }
3943
3944 return (0);
3945
3946 free:
3947 mcx_hwmem_free(sc, mhm);
3948
3949 return (-1);
3950 }
3951
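/*
 * at attach time this is expected to be run once per page type, along
 * the lines of (a sketch; MCX_CMD_QUERY_PAGES_BOOT and sc_boot_pages
 * are assumed from the definitions elsewhere in this file):
 *
 *	if (mcx_pages(sc, &sc->sc_boot_pages,
 *	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0)
 *		goto teardown;
 *
 * the op_mod is byte-swapped by the caller, which is why
 * mcx_query_pages() stores it without a further htobe16().
 */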
3952 static int
3953 mcx_hca_max_caps(struct mcx_softc *sc)
3954 {
3955 struct mcx_dmamem mxm;
3956 struct mcx_cmdq_entry *cqe;
3957 struct mcx_cmd_query_hca_cap_in *in;
3958 struct mcx_cmd_query_hca_cap_out *out;
3959 struct mcx_cmdq_mailbox *mb;
3960 struct mcx_cap_device *hca;
3961 uint8_t status;
3962 uint8_t token = mcx_cmdq_token(sc);
3963 int error;
3964
3965 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3966 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3967 token);
3968
3969 in = mcx_cmdq_in(cqe);
3970 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3971 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3972 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3973
3974 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3975 &cqe->cq_output_ptr, token) != 0) {
3976 printf(", unable to allocate query hca caps mailboxen\n");
3977 return (-1);
3978 }
3979 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3980 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3981
3982 mcx_cmdq_post(sc, cqe, 0);
3983 error = mcx_cmdq_poll(sc, cqe, 1000);
3984 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3985
3986 if (error != 0) {
3987 printf(", query hca caps timeout\n");
3988 goto free;
3989 }
3990 error = mcx_cmdq_verify(cqe);
3991 if (error != 0) {
3992 printf(", query hca caps reply corrupt\n");
3993 goto free;
3994 }
3995
3996 status = cqe->cq_output_data[0];
3997 if (status != MCX_CQ_STATUS_OK) {
3998 printf(", query hca caps failed (%x)\n", status);
3999 error = -1;
4000 goto free;
4001 }
4002
4003 mb = mcx_cq_mbox(&mxm, 0);
4004 hca = mcx_cq_mbox_data(mb);
4005
4006 if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
4007 != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
4008 printf(", not in ethernet mode\n");
4009 error = -1;
4010 goto free;
4011 }
4012 if (hca->log_pg_sz > PAGE_SHIFT) {
4013 printf(", device minimum page shift %u exceeds the "
4014 "system page shift\n", hca->log_pg_sz);
4015 error = -1;
4016 goto free;
4017 }
4018 /*
4019 * blueflame register is split into two buffers, and we must alternate
4020 * between the two of them.
4021 */
4022 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
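/*
 * a blueflame writer would then alternate halves on each doorbell
 * write, e.g. (sketch; "bf_base" and "n" are illustrative only):
 *	offset = bf_base + (n++ & 1) * sc->sc_bf_size;
 */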
4023 sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
4024
4025 if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4026 sc->sc_mcam_reg = 1;
4027
4028 sc->sc_mhz = be32dec(&hca->device_frequency_mhz);
4029 sc->sc_khz = be32dec(&hca->device_frequency_khz);
4030
4031 free:
4032 mcx_dmamem_free(sc, &mxm);
4033
4034 return (error);
4035 }
4036
4037 static int
4038 mcx_hca_set_caps(struct mcx_softc *sc)
4039 {
4040 struct mcx_dmamem mxm;
4041 struct mcx_cmdq_entry *cqe;
4042 struct mcx_cmd_query_hca_cap_in *in;
4043 struct mcx_cmd_query_hca_cap_out *out;
4044 struct mcx_cmdq_mailbox *mb;
4045 struct mcx_cap_device *hca;
4046 uint8_t status;
4047 uint8_t token = mcx_cmdq_token(sc);
4048 int error;
4049
4050 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4051 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4052 token);
4053
4054 in = mcx_cmdq_in(cqe);
4055 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4056 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4057 MCX_CMD_QUERY_HCA_CAP_DEVICE);
4058
4059 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4060 &cqe->cq_output_ptr, token) != 0) {
4061 printf(", unable to allocate query hca caps mailboxen\n");
4062 return (-1);
4063 }
4064 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4065 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4066
4067 mcx_cmdq_post(sc, cqe, 0);
4068 error = mcx_cmdq_poll(sc, cqe, 1000);
4069 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4070
4071 if (error != 0) {
4072 printf(", query hca caps timeout\n");
4073 goto free;
4074 }
4075 error = mcx_cmdq_verify(cqe);
4076 if (error != 0) {
4077 printf(", query hca caps reply corrupt\n");
4078 goto free;
4079 }
4080
4081 status = cqe->cq_output_data[0];
4082 if (status != MCX_CQ_STATUS_OK) {
4083 printf(", query hca caps failed (%x)\n", status);
4084 error = -1;
4085 goto free;
4086 }
4087
4088 mb = mcx_cq_mbox(&mxm, 0);
4089 hca = mcx_cq_mbox_data(mb);
4090
4091 hca->log_pg_sz = PAGE_SHIFT;
4092
4093 free:
4094 mcx_dmamem_free(sc, &mxm);
4095
4096 return (error);
4097 }
4098
4100 static int
4101 mcx_init_hca(struct mcx_softc *sc)
4102 {
4103 struct mcx_cmdq_entry *cqe;
4104 struct mcx_cmd_init_hca_in *in;
4105 struct mcx_cmd_init_hca_out *out;
4106 int error;
4107 uint8_t status;
4108
4109 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4110 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4111
4112 in = mcx_cmdq_in(cqe);
4113 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4114 in->cmd_op_mod = htobe16(0);
4115
4116 mcx_cmdq_post(sc, cqe, 0);
4117
4118 error = mcx_cmdq_poll(sc, cqe, 1000);
4119 if (error != 0) {
4120 printf(", hca init timeout\n");
4121 return (-1);
4122 }
4123 if (mcx_cmdq_verify(cqe) != 0) {
4124 printf(", hca init command corrupt\n");
4125 return (-1);
4126 }
4127
4128 status = cqe->cq_output_data[0];
4129 if (status != MCX_CQ_STATUS_OK) {
4130 printf(", hca init failed (%x)\n", status);
4131 return (-1);
4132 }
4133
4134 return (0);
4135 }
4136
4137 static int
4138 mcx_set_driver_version(struct mcx_softc *sc)
4139 {
4140 struct mcx_dmamem mxm;
4141 struct mcx_cmdq_entry *cqe;
4142 struct mcx_cmd_set_driver_version_in *in;
4143 struct mcx_cmd_set_driver_version_out *out;
4144 int error;
4145 int token;
4146 uint8_t status;
4147
4148 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4149 token = mcx_cmdq_token(sc);
4150 mcx_cmdq_init(sc, cqe, sizeof(*in) +
4151 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4152
4153 in = mcx_cmdq_in(cqe);
4154 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4155 in->cmd_op_mod = htobe16(0);
4156
4157 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4158 &cqe->cq_input_ptr, token) != 0) {
4159 printf(", unable to allocate set driver version mailboxen\n");
4160 return (-1);
4161 }
4162 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4163 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4164
4165 mcx_cmdq_mboxes_sign(&mxm, 1);
4166 mcx_cmdq_post(sc, cqe, 0);
4167
4168 error = mcx_cmdq_poll(sc, cqe, 1000);
4169 if (error != 0) {
4170 printf(", set driver version timeout\n");
4171 goto free;
4172 }
4173 if ((error = mcx_cmdq_verify(cqe)) != 0) {
4174 printf(", set driver version command corrupt\n");
4175 goto free;
4176 }
4177
4178 status = cqe->cq_output_data[0];
4179 if (status != MCX_CQ_STATUS_OK) {
4180 printf(", set driver version failed (%x)\n", status);
4181 error = -1;
4182 goto free;
4183 }
4184
4185 free:
4186 mcx_dmamem_free(sc, &mxm);
4187
4188 return (error);
4189 }
4190
4191 static int
4192 mcx_iff(struct mcx_softc *sc)
4193 {
4194 struct ifnet *ifp = &sc->sc_ec.ec_if;
4195 struct mcx_dmamem mxm;
4196 struct mcx_cmdq_entry *cqe;
4197 struct mcx_cmd_modify_nic_vport_context_in *in;
4198 struct mcx_cmd_modify_nic_vport_context_out *out;
4199 struct mcx_nic_vport_ctx *ctx;
4200 int error;
4201 int token;
4202 int insize;
4203 uint32_t dest;
4204
4205 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4206 sc->sc_rss_flow_table_id;
4207
4208 /* enable or disable the promisc flow */
4209 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4210 if (sc->sc_promisc_flow_enabled == 0) {
4211 mcx_set_flow_table_entry_mac(sc,
4212 MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4213 sc->sc_promisc_flow_enabled = 1;
4214 }
4215 } else if (sc->sc_promisc_flow_enabled != 0) {
4216 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4217 sc->sc_promisc_flow_enabled = 0;
4218 }
4219
4220 /* enable or disable the all-multicast flow */
4221 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4222 if (sc->sc_allmulti_flow_enabled == 0) {
4223 uint8_t mcast[ETHER_ADDR_LEN];
4224
4225 memset(mcast, 0, sizeof(mcast));
4226 mcast[0] = 0x01;
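/* the low bit of the first octet is what marks an address as multicast */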
4227 mcx_set_flow_table_entry_mac(sc,
4228 MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4229 sc->sc_allmulti_flow_enabled = 1;
4230 }
4231 } else if (sc->sc_allmulti_flow_enabled != 0) {
4232 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4233 sc->sc_allmulti_flow_enabled = 0;
4234 }
4235
4236 insize = sizeof(struct mcx_nic_vport_ctx) + 240; /* ctx sits at +240 */
4237
4238 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4239 token = mcx_cmdq_token(sc);
4240 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4241
4242 in = mcx_cmdq_in(cqe);
4243 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4244 in->cmd_op_mod = htobe16(0);
4245 in->cmd_field_select = htobe32(
4246 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4247 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4248
4249 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4250 printf(", unable to allocate modify "
4251 "nic vport context mailboxen\n");
4252 return (-1);
4253 }
4254 ctx = (struct mcx_nic_vport_ctx *)
4255 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4256 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4257 /*
4258 * always leave promisc-all enabled on the vport since we
4259 * can't give it a vlan list, and we're already doing multicast
4260 * filtering in the flow table.
4261 */
4262 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4263
4264 mcx_cmdq_mboxes_sign(&mxm, 1);
4265 mcx_cmdq_post(sc, cqe, 0);
4266
4267 error = mcx_cmdq_poll(sc, cqe, 1000);
4268 if (error != 0) {
4269 printf(", modify nic vport context timeout\n");
4270 goto free;
4271 }
4272 if ((error = mcx_cmdq_verify(cqe)) != 0) {
4273 printf(", modify nic vport context command corrupt\n");
4274 goto free;
4275 }
4276
4277 out = mcx_cmdq_out(cqe);
4278 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4279 printf(", modify nic vport context failed (%x, %x)\n",
4280 out->cmd_status, be32toh(out->cmd_syndrome));
4281 error = -1;
4282 goto free;
4283 }
4284
4285 free:
4286 mcx_dmamem_free(sc, &mxm);
4287
4288 return (error);
4289 }
4290
4291 static int
4292 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4293 {
4294 struct mcx_cmdq_entry *cqe;
4295 struct mcx_cmd_alloc_uar_in *in;
4296 struct mcx_cmd_alloc_uar_out *out;
4297 int error;
4298
4299 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4300 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4301
4302 in = mcx_cmdq_in(cqe);
4303 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4304 in->cmd_op_mod = htobe16(0);
4305
4306 mcx_cmdq_post(sc, cqe, 0);
4307
4308 error = mcx_cmdq_poll(sc, cqe, 1000);
4309 if (error != 0) {
4310 printf(", alloc uar timeout\n");
4311 return (-1);
4312 }
4313 if (mcx_cmdq_verify(cqe) != 0) {
4314 printf(", alloc uar command corrupt\n");
4315 return (-1);
4316 }
4317
4318 out = mcx_cmdq_out(cqe);
4319 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4320 printf(", alloc uar failed (%x)\n", out->cmd_status);
4321 return (-1);
4322 }
4323
4324 *uar = mcx_get_id(out->cmd_uar);
4325 return (0);
4326 }
4327
4328 static int
4329 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4330 uint64_t events, int vector)
4331 {
4332 struct mcx_cmdq_entry *cqe;
4333 struct mcx_dmamem mxm;
4334 struct mcx_cmd_create_eq_in *in;
4335 struct mcx_cmd_create_eq_mb_in *mbin;
4336 struct mcx_cmd_create_eq_out *out;
4337 struct mcx_eq_entry *eqe;
4338 int error;
4339 uint64_t *pas;
4340 int insize, npages, paslen, i, token;
4341
4342 eq->eq_cons = 0;
4343
4344 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4345 MCX_PAGE_SIZE);
4346 paslen = npages * sizeof(*pas);
4347 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4348
4349 if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4350 MCX_PAGE_SIZE) != 0) {
4351 printf(", unable to allocate event queue memory\n");
4352 return (-1);
4353 }
4354
4355 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4356 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4357 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4358 }
4359
4360 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4361 token = mcx_cmdq_token(sc);
4362 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4363
4364 in = mcx_cmdq_in(cqe);
4365 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4366 in->cmd_op_mod = htobe16(0);
4367
4368 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4369 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4370 &cqe->cq_input_ptr, token) != 0) {
4371 printf(", unable to allocate create eq mailboxen\n");
4372 goto free_eq;
4373 }
4374 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4375 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4376 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4377 mbin->cmd_eq_ctx.eq_intr = vector;
4378 mbin->cmd_event_bitmask = htobe64(events);
4379
4380 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4381 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4382
4383 /* physical addresses follow the mailbox in data */
4384 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4385 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4386 mcx_cmdq_post(sc, cqe, 0);
4387
4388 error = mcx_cmdq_poll(sc, cqe, 1000);
4389 if (error != 0) {
4390 printf(", create eq timeout\n");
4391 goto free_mxm;
4392 }
4393 if (mcx_cmdq_verify(cqe) != 0) {
4394 printf(", create eq command corrupt\n");
4395 goto free_mxm;
4396 }
4397
4398 out = mcx_cmdq_out(cqe);
4399 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4400 printf(", create eq failed (%x, %x)\n", out->cmd_status,
4401 be32toh(out->cmd_syndrome));
4402 goto free_mxm;
4403 }
4404
4405 eq->eq_n = mcx_get_id(out->cmd_eqn);
4406
4407 mcx_dmamem_free(sc, &mxm);
4408
4409 mcx_arm_eq(sc, eq, uar);
4410
4411 return (0);
4412
4413 free_mxm:
4414 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4415 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4416 mcx_dmamem_free(sc, &mxm);
4417 free_eq:
4418 mcx_dmamem_free(sc, &eq->eq_mem);
4419 return (-1);
4420 }
4421
4422 static int
4423 mcx_alloc_pd(struct mcx_softc *sc)
4424 {
4425 struct mcx_cmdq_entry *cqe;
4426 struct mcx_cmd_alloc_pd_in *in;
4427 struct mcx_cmd_alloc_pd_out *out;
4428 int error;
4429
4430 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4431 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4432
4433 in = mcx_cmdq_in(cqe);
4434 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4435 in->cmd_op_mod = htobe16(0);
4436
4437 mcx_cmdq_post(sc, cqe, 0);
4438
4439 error = mcx_cmdq_poll(sc, cqe, 1000);
4440 if (error != 0) {
4441 printf(", alloc pd timeout\n");
4442 return (-1);
4443 }
4444 if (mcx_cmdq_verify(cqe) != 0) {
4445 printf(", alloc pd command corrupt\n");
4446 return (-1);
4447 }
4448
4449 out = mcx_cmdq_out(cqe);
4450 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4451 printf(", alloc pd failed (%x)\n", out->cmd_status);
4452 return (-1);
4453 }
4454
4455 sc->sc_pd = mcx_get_id(out->cmd_pd);
4456 return (0);
4457 }
4458
4459 static int
4460 mcx_alloc_tdomain(struct mcx_softc *sc)
4461 {
4462 struct mcx_cmdq_entry *cqe;
4463 struct mcx_cmd_alloc_td_in *in;
4464 struct mcx_cmd_alloc_td_out *out;
4465 int error;
4466
4467 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4468 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4469
4470 in = mcx_cmdq_in(cqe);
4471 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4472 in->cmd_op_mod = htobe16(0);
4473
4474 mcx_cmdq_post(sc, cqe, 0);
4475
4476 error = mcx_cmdq_poll(sc, cqe, 1000);
4477 if (error != 0) {
4478 printf(", alloc transport domain timeout\n");
4479 return (-1);
4480 }
4481 if (mcx_cmdq_verify(cqe) != 0) {
4482 printf(", alloc transport domain command corrupt\n");
4483 return (-1);
4484 }
4485
4486 out = mcx_cmdq_out(cqe);
4487 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4488 printf(", alloc transport domain failed (%x)\n",
4489 out->cmd_status);
4490 return (-1);
4491 }
4492
4493 sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4494 return (0);
4495 }
4496
4497 static int
4498 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
4499 {
4500 struct mcx_dmamem mxm;
4501 struct mcx_cmdq_entry *cqe;
4502 struct mcx_cmd_query_nic_vport_context_in *in;
4503 struct mcx_cmd_query_nic_vport_context_out *out;
4504 struct mcx_nic_vport_ctx *ctx;
4505 uint8_t *addr;
4506 int error, token, i;
4507
4508 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4509 token = mcx_cmdq_token(sc);
4510 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4511
4512 in = mcx_cmdq_in(cqe);
4513 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4514 in->cmd_op_mod = htobe16(0);
4515 in->cmd_allowed_list_type = 0;
4516
4517 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4518 &cqe->cq_output_ptr, token) != 0) {
4519 printf(", unable to allocate "
4520 "query nic vport context mailboxen\n");
4521 return (-1);
4522 }
4523 mcx_cmdq_mboxes_sign(&mxm, 1);
4524 mcx_cmdq_post(sc, cqe, 0);
4525
4526 error = mcx_cmdq_poll(sc, cqe, 1000);
4527 if (error != 0) {
4528 printf(", query nic vport context timeout\n");
4529 goto free;
4530 }
4531 if ((error = mcx_cmdq_verify(cqe)) != 0) {
4532 printf(", query nic vport context command corrupt\n");
4533 goto free;
4534 }
4535
4536 out = mcx_cmdq_out(cqe);
4537 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4538 printf(", query nic vport context failed (%x, %x)\n",
4539 out->cmd_status, be32toh(out->cmd_syndrome));
4540 error = -1;
4541 goto free;
4542 }
4543
4544 ctx = (struct mcx_nic_vport_ctx *)
4545 mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
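/* vp_perm_addr is 8 bytes wide with the MAC in the last 6, hence the +2 */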
4546 addr = (uint8_t *)&ctx->vp_perm_addr;
4547 for (i = 0; i < ETHER_ADDR_LEN; i++) {
4548 enaddr[i] = addr[i + 2];
4549 }
4550 free:
4551 mcx_dmamem_free(sc, &mxm);
4552
4553 return (error);
4554 }
4555
4556 static int
4557 mcx_query_special_contexts(struct mcx_softc *sc)
4558 {
4559 struct mcx_cmdq_entry *cqe;
4560 struct mcx_cmd_query_special_ctx_in *in;
4561 struct mcx_cmd_query_special_ctx_out *out;
4562 int error;
4563
4564 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4565 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4566
4567 in = mcx_cmdq_in(cqe);
4568 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4569 in->cmd_op_mod = htobe16(0);
4570
4571 mcx_cmdq_post(sc, cqe, 0);
4572
4573 error = mcx_cmdq_poll(sc, cqe, 1000);
4574 if (error != 0) {
4575 printf(", query special contexts timeout\n");
4576 return (-1);
4577 }
4578 if (mcx_cmdq_verify(cqe) != 0) {
4579 printf(", query special contexts command corrupt\n");
4580 return (-1);
4581 }
4582
4583 out = mcx_cmdq_out(cqe);
4584 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4585 printf(", query special contexts failed (%x)\n",
4586 out->cmd_status);
4587 return (-1);
4588 }
4589
4590 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
4591 return (0);
4592 }
4593
4594 static int
4595 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4596 {
4597 struct mcx_reg_pmtu pmtu;
4598 int error;
4599
4600 /* read max mtu */
4601 memset(&pmtu, 0, sizeof(pmtu));
4602 pmtu.rp_local_port = 1;
4603 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4604 sizeof(pmtu));
4605 if (error != 0) {
4606 printf(", unable to get port MTU\n");
4607 return error;
4608 }
4609
4610 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
4611 pmtu.rp_admin_mtu = htobe16(mtu);
4612 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4613 sizeof(pmtu));
4614 if (error != 0) {
4615 printf(", unable to set port MTU\n");
4616 return error;
4617 }
4618
4619 sc->sc_hardmtu = mtu;
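/*
 * e.g. an mtu of 9500 gives roundup(9500 + ETHER_ALIGN, 8) = 9504
 * bytes per rx buffer on an LP64 machine (sizeof(long) == 8).
 */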
4620 sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4621 return 0;
4622 }
4623
4624 static int
4625 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4626 {
4627 struct mcx_cmdq_entry *cmde;
4628 struct mcx_cq_entry *cqe;
4629 struct mcx_dmamem mxm;
4630 struct mcx_cmd_create_cq_in *in;
4631 struct mcx_cmd_create_cq_mb_in *mbin;
4632 struct mcx_cmd_create_cq_out *out;
4633 int error;
4634 uint64_t *pas;
4635 int insize, npages, paslen, i, token;
4636
4637 cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4638
4639 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4640 MCX_PAGE_SIZE);
4641 paslen = npages * sizeof(*pas);
4642 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4643
4644 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4645 MCX_PAGE_SIZE) != 0) {
4646 printf("%s: unable to allocate completion queue memory\n",
4647 DEVNAME(sc));
4648 return (-1);
4649 }
4650 cqe = MCX_DMA_KVA(&cq->cq_mem);
4651 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4652 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4653 }
4654
4655 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4656 token = mcx_cmdq_token(sc);
4657 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4658
4659 in = mcx_cmdq_in(cmde);
4660 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4661 in->cmd_op_mod = htobe16(0);
4662
4663 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4664 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4665 &cmde->cq_input_ptr, token) != 0) {
4666 printf("%s: unable to allocate create cq mailboxen\n",
4667 DEVNAME(sc));
4668 goto free_cq;
4669 }
4670 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4671 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4672 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4673 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4674 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4675 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4676 MCX_CQ_MOD_COUNTER);
4677 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4678 MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4679
4680 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4681 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4682
4683 /* physical addresses follow the mailbox in data */
4684 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4685 mcx_cmdq_post(sc, cmde, 0);
4686
4687 error = mcx_cmdq_poll(sc, cmde, 1000);
4688 if (error != 0) {
4689 printf("%s: create cq timeout\n", DEVNAME(sc));
4690 goto free_mxm;
4691 }
4692 if (mcx_cmdq_verify(cmde) != 0) {
4693 printf("%s: create cq command corrupt\n", DEVNAME(sc));
4694 goto free_mxm;
4695 }
4696
4697 out = mcx_cmdq_out(cmde);
4698 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4699 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4700 out->cmd_status, be32toh(out->cmd_syndrome));
4701 goto free_mxm;
4702 }
4703
4704 cq->cq_n = mcx_get_id(out->cmd_cqn);
4705 cq->cq_cons = 0;
4706 cq->cq_count = 0;
4707
4708 mcx_dmamem_free(sc, &mxm);
4709
4710 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4711 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4712 BUS_DMASYNC_PREWRITE);
4713
4714 mcx_arm_cq(sc, cq, uar);
4715
4716 return (0);
4717
4718 free_mxm:
4719 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4720 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4721 mcx_dmamem_free(sc, &mxm);
4722 free_cq:
4723 mcx_dmamem_free(sc, &cq->cq_mem);
4724 return (-1);
4725 }
4726
4727 static int
4728 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4729 {
4730 struct mcx_cmdq_entry *cqe;
4731 struct mcx_cmd_destroy_cq_in *in;
4732 struct mcx_cmd_destroy_cq_out *out;
4733 int error;
4734 int token;
4735
4736 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4737 token = mcx_cmdq_token(sc);
4738 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4739
4740 in = mcx_cmdq_in(cqe);
4741 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4742 in->cmd_op_mod = htobe16(0);
4743 in->cmd_cqn = htobe32(cq->cq_n);
4744
4745 mcx_cmdq_post(sc, cqe, 0);
4746 error = mcx_cmdq_poll(sc, cqe, 1000);
4747 if (error != 0) {
4748 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4749 return error;
4750 }
4751 if (mcx_cmdq_verify(cqe) != 0) {
4752 printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4753 return -1;
4754 }
4755
4756 out = mcx_cmdq_out(cqe);
4757 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4758 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4759 out->cmd_status, be32toh(out->cmd_syndrome));
4760 return -1;
4761 }
4762
4763 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4764 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4765 BUS_DMASYNC_POSTWRITE);
4766
4767 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4768 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4769 mcx_dmamem_free(sc, &cq->cq_mem);
4770
4771 cq->cq_n = 0;
4772 cq->cq_cons = 0;
4773 cq->cq_count = 0;
4774 return 0;
4775 }
4776
4777 static int
4778 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4779 {
4780 struct mcx_cmdq_entry *cqe;
4781 struct mcx_dmamem mxm;
4782 struct mcx_cmd_create_rq_in *in;
4783 struct mcx_cmd_create_rq_out *out;
4784 struct mcx_rq_ctx *mbin;
4785 int error;
4786 uint64_t *pas;
4787 uint32_t rq_flags;
4788 int insize, npages, paslen, token;
4789
4790 rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4791 (db * MCX_WQ_DOORBELL_STRIDE);
4792
4793 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4794 MCX_PAGE_SIZE);
4795 paslen = npages * sizeof(*pas);
4796 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4797
4798 if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4799 MCX_PAGE_SIZE) != 0) {
4800 printf("%s: unable to allocate receive queue memory\n",
4801 DEVNAME(sc));
4802 return (-1);
4803 }
4804
4805 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4806 token = mcx_cmdq_token(sc);
4807 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4808
4809 in = mcx_cmdq_in(cqe);
4810 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4811 in->cmd_op_mod = htobe16(0);
4812
4813 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4814 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4815 &cqe->cq_input_ptr, token) != 0) {
4816 printf("%s: unable to allocate create rq mailboxen\n",
4817 DEVNAME(sc));
4818 goto free_rq;
4819 }
4820 mbin = (struct mcx_rq_ctx *)
4821 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4822 rq_flags = MCX_RQ_CTX_RLKEY;
4823 mbin->rq_flags = htobe32(rq_flags);
4824 mbin->rq_cqn = htobe32(cqn);
4825 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4826 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4827 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4828 rx->rx_doorbell);
4829 mbin->rq_wq.wq_log_stride = htobe16(4); /* log2 of the 16-byte rq entry */
4830 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4831
4832 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4833 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4834
4835 /* physical addresses follow the mailbox in data */
4836 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4837 mcx_cmdq_post(sc, cqe, 0);
4838
4839 error = mcx_cmdq_poll(sc, cqe, 1000);
4840 if (error != 0) {
4841 printf("%s: create rq timeout\n", DEVNAME(sc));
4842 goto free_mxm;
4843 }
4844 if (mcx_cmdq_verify(cqe) != 0) {
4845 printf("%s: create rq command corrupt\n", DEVNAME(sc));
4846 goto free_mxm;
4847 }
4848
4849 out = mcx_cmdq_out(cqe);
4850 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4851 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4852 out->cmd_status, be32toh(out->cmd_syndrome));
4853 goto free_mxm;
4854 }
4855
4856 rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4857
4858 mcx_dmamem_free(sc, &mxm);
4859
4860 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4861 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4862
4863 return (0);
4864
4865 free_mxm:
4866 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4867 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4868 mcx_dmamem_free(sc, &mxm);
4869 free_rq:
4870 mcx_dmamem_free(sc, &rx->rx_rq_mem);
4871 return (-1);
4872 }
4873
4874 static int
4875 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4876 {
4877 struct mcx_cmdq_entry *cqe;
4878 struct mcx_dmamem mxm;
4879 struct mcx_cmd_modify_rq_in *in;
4880 struct mcx_cmd_modify_rq_mb_in *mbin;
4881 struct mcx_cmd_modify_rq_out *out;
4882 int error;
4883 int token;
4884
4885 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4886 token = mcx_cmdq_token(sc);
4887 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4888 sizeof(*out), token);
4889
4890 in = mcx_cmdq_in(cqe);
4891 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4892 in->cmd_op_mod = htobe16(0);
4893 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4894
4895 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4896 &cqe->cq_input_ptr, token) != 0) {
4897 printf("%s: unable to allocate modify rq mailbox\n",
4898 DEVNAME(sc));
4899 return (-1);
4900 }
4901 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4902 mbin->cmd_rq_ctx.rq_flags = htobe32(
4903 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4904
4905 mcx_cmdq_mboxes_sign(&mxm, 1);
4906 mcx_cmdq_post(sc, cqe, 0);
4907 error = mcx_cmdq_poll(sc, cqe, 1000);
4908 if (error != 0) {
4909 printf("%s: modify rq timeout\n", DEVNAME(sc));
4910 goto free;
4911 }
4912 if ((error = mcx_cmdq_verify(cqe)) != 0) {
4913 printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4914 goto free;
4915 }
4916
4917 out = mcx_cmdq_out(cqe);
4918 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4919 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4920 out->cmd_status, be32toh(out->cmd_syndrome));
4921 error = -1;
4922 goto free;
4923 }
4924
4925 free:
4926 mcx_dmamem_free(sc, &mxm);
4927 return (error);
4928 }
4929
4930 static int
4931 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4932 {
4933 struct mcx_cmdq_entry *cqe;
4934 struct mcx_cmd_destroy_rq_in *in;
4935 struct mcx_cmd_destroy_rq_out *out;
4936 int error;
4937 int token;
4938
4939 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4940 token = mcx_cmdq_token(sc);
4941 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4942
4943 in = mcx_cmdq_in(cqe);
4944 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4945 in->cmd_op_mod = htobe16(0);
4946 in->cmd_rqn = htobe32(rx->rx_rqn);
4947
4948 mcx_cmdq_post(sc, cqe, 0);
4949 error = mcx_cmdq_poll(sc, cqe, 1000);
4950 if (error != 0) {
4951 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4952 return error;
4953 }
4954 if (mcx_cmdq_verify(cqe) != 0) {
4955 printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4956 return -1;
4957 }
4958
4959 out = mcx_cmdq_out(cqe);
4960 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4961 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4962 out->cmd_status, be32toh(out->cmd_syndrome));
4963 return -1;
4964 }
4965
4966 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4967 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4968
4969 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4970 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4971 mcx_dmamem_free(sc, &rx->rx_rq_mem);
4972
4973 rx->rx_rqn = 0;
4974 return 0;
4975 }
4976
4977 static int
4978 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
4979 {
4980 struct mcx_cmdq_entry *cqe;
4981 struct mcx_dmamem mxm;
4982 struct mcx_cmd_create_tir_in *in;
4983 struct mcx_cmd_create_tir_mb_in *mbin;
4984 struct mcx_cmd_create_tir_out *out;
4985 int error;
4986 int token;
4987
4988 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4989 token = mcx_cmdq_token(sc);
4990 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4991 sizeof(*out), token);
4992
4993 in = mcx_cmdq_in(cqe);
4994 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4995 in->cmd_op_mod = htobe16(0);
4996
4997 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4998 &cqe->cq_input_ptr, token) != 0) {
4999 printf("%s: unable to allocate create tir mailbox\n",
5000 DEVNAME(sc));
5001 return (-1);
5002 }
5003 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5004 /* leave disp_type = 0, so packets get sent to the inline rqn */
5005 mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
5006 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5007
5008 mcx_cmdq_post(sc, cqe, 0);
5009 error = mcx_cmdq_poll(sc, cqe, 1000);
5010 if (error != 0) {
5011 printf("%s: create tir timeout\n", DEVNAME(sc));
5012 goto free;
5013 }
5014 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5015 printf("%s: create tir command corrupt\n", DEVNAME(sc));
5016 goto free;
5017 }
5018
5019 out = mcx_cmdq_out(cqe);
5020 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5021 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5022 out->cmd_status, be32toh(out->cmd_syndrome));
5023 error = -1;
5024 goto free;
5025 }
5026
5027 *tirn = mcx_get_id(out->cmd_tirn);
5028 free:
5029 mcx_dmamem_free(sc, &mxm);
5030 return (error);
5031 }
5032
5033 static int
5034 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
5035 int *tirn)
5036 {
5037 struct mcx_cmdq_entry *cqe;
5038 struct mcx_dmamem mxm;
5039 struct mcx_cmd_create_tir_in *in;
5040 struct mcx_cmd_create_tir_mb_in *mbin;
5041 struct mcx_cmd_create_tir_out *out;
5042 int error;
5043 int token;
5044
5045 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5046 token = mcx_cmdq_token(sc);
5047 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5048 sizeof(*out), token);
5049
5050 in = mcx_cmdq_in(cqe);
5051 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5052 in->cmd_op_mod = htobe16(0);
5053
5054 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5055 &cqe->cq_input_ptr, token) != 0) {
5056 printf("%s: unable to allocate create tir mailbox\n",
5057 DEVNAME(sc));
5058 return (-1);
5059 }
5060 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5061 mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5062 << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5063 mbin->cmd_indir_table = htobe32(rqtn);
5064 mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5065 MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5066 mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5067 stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5068 sizeof(mbin->cmd_rx_hash_key));
5069
5070 mcx_cmdq_post(sc, cqe, 0);
5071 error = mcx_cmdq_poll(sc, cqe, 1000);
5072 if (error != 0) {
5073 printf("%s: create tir timeout\n", DEVNAME(sc));
5074 goto free;
5075 }
5076 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5077 printf("%s: create tir command corrupt\n", DEVNAME(sc));
5078 goto free;
5079 }
5080
5081 out = mcx_cmdq_out(cqe);
5082 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5083 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5084 out->cmd_status, be32toh(out->cmd_syndrome));
5085 error = -1;
5086 goto free;
5087 }
5088
5089 *tirn = mcx_get_id(out->cmd_tirn);
5090 free:
5091 mcx_dmamem_free(sc, &mxm);
5092 return (error);
5093 }
5094
5095 static int
5096 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5097 {
5098 struct mcx_cmdq_entry *cqe;
5099 struct mcx_cmd_destroy_tir_in *in;
5100 struct mcx_cmd_destroy_tir_out *out;
5101 int error;
5102 int token;
5103
5104 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5105 token = mcx_cmdq_token(sc);
5106 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5107
5108 in = mcx_cmdq_in(cqe);
5109 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5110 in->cmd_op_mod = htobe16(0);
5111 in->cmd_tirn = htobe32(tirn);
5112
5113 mcx_cmdq_post(sc, cqe, 0);
5114 error = mcx_cmdq_poll(sc, cqe, 1000);
5115 if (error != 0) {
5116 printf("%s: destroy tir timeout\n", DEVNAME(sc));
5117 return error;
5118 }
5119 if (mcx_cmdq_verify(cqe) != 0) {
5120 printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5121 return -1;
5122 }
5123
5124 out = mcx_cmdq_out(cqe);
5125 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5126 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5127 out->cmd_status, be32toh(out->cmd_syndrome));
5128 return -1;
5129 }
5130
5131 return (0);
5132 }
5133
5134 static int
5135 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5136 int cqn)
5137 {
5138 struct mcx_cmdq_entry *cqe;
5139 struct mcx_dmamem mxm;
5140 struct mcx_cmd_create_sq_in *in;
5141 struct mcx_sq_ctx *mbin;
5142 struct mcx_cmd_create_sq_out *out;
5143 int error;
5144 uint64_t *pas;
5145 int insize, npages, paslen, token;
5146
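/*
 * the rx and tx sides of a queue pair share a doorbell slot: the RQ
 * counter occupies the first 4 bytes and the SQ counter the next 4,
 * hence the "+ 4" below.
 */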
5147 tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5148 (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5149
5150 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5151 MCX_PAGE_SIZE);
5152 paslen = npages * sizeof(*pas);
5153 insize = sizeof(struct mcx_sq_ctx) + paslen;
5154
5155 if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5156 MCX_PAGE_SIZE) != 0) {
5157 printf("%s: unable to allocate send queue memory\n",
5158 DEVNAME(sc));
5159 return (-1);
5160 }
5161
5162 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5163 token = mcx_cmdq_token(sc);
5164 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
5165 token);
5166
5167 in = mcx_cmdq_in(cqe);
5168 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5169 in->cmd_op_mod = htobe16(0);
5170
5171 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5172 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5173 &cqe->cq_input_ptr, token) != 0) {
5174 printf("%s: unable to allocate create sq mailboxen\n",
5175 DEVNAME(sc));
5176 goto free_sq;
5177 }
5178 mbin = (struct mcx_sq_ctx *)
5179 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5180 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5181 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5182 mbin->sq_cqn = htobe32(cqn);
5183 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5184 mbin->sq_tis_num = htobe32(sc->sc_tis);
5185 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5186 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5187 mbin->sq_wq.wq_uar_page = htobe32(uar);
5188 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5189 tx->tx_doorbell);
5190 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5191 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5192
5193 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5194 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5195
5196 /* physical addresses follow the mailbox in data */
5197 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5198 npages, &tx->tx_sq_mem);
5199 mcx_cmdq_post(sc, cqe, 0);
5200
5201 error = mcx_cmdq_poll(sc, cqe, 1000);
5202 if (error != 0) {
5203 printf("%s: create sq timeout\n", DEVNAME(sc));
5204 goto free_mxm;
5205 }
5206 if (mcx_cmdq_verify(cqe) != 0) {
5207 printf("%s: create sq command corrupt\n", DEVNAME(sc));
5208 goto free_mxm;
5209 }
5210
5211 out = mcx_cmdq_out(cqe);
5212 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5213 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5214 out->cmd_status, be32toh(out->cmd_syndrome));
5215 goto free_mxm;
5216 }
5217
5218 tx->tx_uar = uar;
5219 tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5220
5221 mcx_dmamem_free(sc, &mxm);
5222
5223 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5224 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5225
5226 return (0);
5227
5228 free_mxm:
5229 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5230 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5231 mcx_dmamem_free(sc, &mxm);
5232 free_sq:
5233 mcx_dmamem_free(sc, &tx->tx_sq_mem);
5234 return (-1);
5235 }
5236
5237 static int
5238 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5239 {
5240 struct mcx_cmdq_entry *cqe;
5241 struct mcx_cmd_destroy_sq_in *in;
5242 struct mcx_cmd_destroy_sq_out *out;
5243 int error;
5244 int token;
5245
5246 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5247 token = mcx_cmdq_token(sc);
5248 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5249
5250 in = mcx_cmdq_in(cqe);
5251 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5252 in->cmd_op_mod = htobe16(0);
5253 in->cmd_sqn = htobe32(tx->tx_sqn);
5254
5255 mcx_cmdq_post(sc, cqe, 0);
5256 error = mcx_cmdq_poll(sc, cqe, 1000);
5257 if (error != 0) {
5258 printf("%s: destroy sq timeout\n", DEVNAME(sc));
5259 return error;
5260 }
5261 if (mcx_cmdq_verify(cqe) != 0) {
5262 printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5263 return -1;
5264 }
5265
5266 out = mcx_cmdq_out(cqe);
5267 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5268 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5269 out->cmd_status, be32toh(out->cmd_syndrome));
5270 return -1;
5271 }
5272
5273 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5274 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5275
5276 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5277 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5278 mcx_dmamem_free(sc, &tx->tx_sq_mem);
5279
5280 tx->tx_sqn = 0;
5281 return 0;
5282 }
5283
5284 static int
5285 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5286 {
5287 struct mcx_cmdq_entry *cqe;
5288 struct mcx_dmamem mxm;
5289 struct mcx_cmd_modify_sq_in *in;
5290 struct mcx_cmd_modify_sq_mb_in *mbin;
5291 struct mcx_cmd_modify_sq_out *out;
5292 int error;
5293 int token;
5294
5295 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5296 token = mcx_cmdq_token(sc);
5297 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5298 sizeof(*out), token);
5299
5300 in = mcx_cmdq_in(cqe);
5301 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5302 in->cmd_op_mod = htobe16(0);
5303 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5304
5305 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5306 &cqe->cq_input_ptr, token) != 0) {
5307 printf("%s: unable to allocate modify sq mailbox\n",
5308 DEVNAME(sc));
5309 return (-1);
5310 }
5311 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5312 mbin->cmd_sq_ctx.sq_flags = htobe32(
5313 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5314
5315 mcx_cmdq_mboxes_sign(&mxm, 1);
5316 mcx_cmdq_post(sc, cqe, 0);
5317 error = mcx_cmdq_poll(sc, cqe, 1000);
5318 if (error != 0) {
5319 printf("%s: modify sq timeout\n", DEVNAME(sc));
5320 goto free;
5321 }
5322 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5323 printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5324 goto free;
5325 }
5326
5327 out = mcx_cmdq_out(cqe);
5328 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5329 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5330 out->cmd_status, be32toh(out->cmd_syndrome));
5331 error = -1;
5332 goto free;
5333 }
5334
5335 free:
5336 mcx_dmamem_free(sc, &mxm);
5337 return (error);
5338 }
5339
5340 static int
5341 mcx_create_tis(struct mcx_softc *sc, int *tis)
5342 {
5343 struct mcx_cmdq_entry *cqe;
5344 struct mcx_dmamem mxm;
5345 struct mcx_cmd_create_tis_in *in;
5346 struct mcx_cmd_create_tis_mb_in *mbin;
5347 struct mcx_cmd_create_tis_out *out;
5348 int error;
5349 int token;
5350
5351 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5352 token = mcx_cmdq_token(sc);
5353 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5354 sizeof(*out), token);
5355
5356 in = mcx_cmdq_in(cqe);
5357 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5358 in->cmd_op_mod = htobe16(0);
5359
5360 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5361 &cqe->cq_input_ptr, token) != 0) {
5362 printf("%s: unable to allocate create tis mailbox\n",
5363 DEVNAME(sc));
5364 return (-1);
5365 }
5366 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5367 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5368
5369 mcx_cmdq_mboxes_sign(&mxm, 1);
5370 mcx_cmdq_post(sc, cqe, 0);
5371 error = mcx_cmdq_poll(sc, cqe, 1000);
5372 if (error != 0) {
5373 printf("%s: create tis timeout\n", DEVNAME(sc));
5374 goto free;
5375 }
5376 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5377 printf("%s: create tis command corrupt\n", DEVNAME(sc));
5378 goto free;
5379 }
5380
5381 out = mcx_cmdq_out(cqe);
5382 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5383 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5384 out->cmd_status, be32toh(out->cmd_syndrome));
5385 error = -1;
5386 goto free;
5387 }
5388
5389 *tis = mcx_get_id(out->cmd_tisn);
5390 free:
5391 mcx_dmamem_free(sc, &mxm);
5392 return (error);
5393 }
5394
5395 static int
5396 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5397 {
5398 struct mcx_cmdq_entry *cqe;
5399 struct mcx_cmd_destroy_tis_in *in;
5400 struct mcx_cmd_destroy_tis_out *out;
5401 int error;
5402 int token;
5403
5404 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5405 token = mcx_cmdq_token(sc);
5406 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5407
5408 in = mcx_cmdq_in(cqe);
5409 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5410 in->cmd_op_mod = htobe16(0);
5411 in->cmd_tisn = htobe32(tis);
5412
5413 mcx_cmdq_post(sc, cqe, 0);
5414 error = mcx_cmdq_poll(sc, cqe, 1000);
5415 if (error != 0) {
5416 printf("%s: destroy tis timeout\n", DEVNAME(sc));
5417 return error;
5418 }
5419 if (mcx_cmdq_verify(cqe) != 0) {
5420 printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5421 return -1;
5422 }
5423
5424 out = mcx_cmdq_out(cqe);
5425 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5426 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5427 out->cmd_status, be32toh(out->cmd_syndrome));
5428 return -1;
5429 }
5430
5431 return 0;
5432 }
5433
5434 static int
5435 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5436 {
5437 struct mcx_cmdq_entry *cqe;
5438 struct mcx_dmamem mxm;
5439 struct mcx_cmd_create_rqt_in *in;
5440 struct mcx_cmd_create_rqt_mb_in *mbin;
5441 struct mcx_cmd_create_rqt_out *out;
5442 struct mcx_rqt_ctx *rqt_ctx;
5443 int *rqtn;
5444 int error;
5445 int token;
5446 int i;
5447
5448 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5449 token = mcx_cmdq_token(sc);
5450 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5451 (size * sizeof(int)), sizeof(*out), token);
5452
5453 in = mcx_cmdq_in(cqe);
5454 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5455 in->cmd_op_mod = htobe16(0);
5456
5457 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5458 &cqe->cq_input_ptr, token) != 0) {
5459 printf("%s: unable to allocate create rqt mailbox\n",
5460 DEVNAME(sc));
5461 return (-1);
5462 }
5463 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5464 rqt_ctx = &mbin->cmd_rqt;
5465 rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5466 rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5467
5468 /* rqt list follows the rqt context */
5469 rqtn = (int *)(rqt_ctx + 1);
5470 for (i = 0; i < size; i++) {
5471 rqtn[i] = htobe32(rqns[i]);
5472 }
5473
5474 mcx_cmdq_mboxes_sign(&mxm, 1);
5475 mcx_cmdq_post(sc, cqe, 0);
5476 error = mcx_cmdq_poll(sc, cqe, 1000);
5477 if (error != 0) {
5478 printf("%s: create rqt timeout\n", DEVNAME(sc));
5479 goto free;
5480 }
5481 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5482 printf("%s: create rqt command corrupt\n", DEVNAME(sc));
5483 goto free;
5484 }
5485
5486 out = mcx_cmdq_out(cqe);
5487 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5488 printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5489 out->cmd_status, be32toh(out->cmd_syndrome));
5490 error = -1;
5491 goto free;
5492 }
5493
5494 *rqt = mcx_get_id(out->cmd_rqtn);
5496 free:
5497 mcx_dmamem_free(sc, &mxm);
5498 return (error);
5499 }
5500
5501 static int
5502 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5503 {
5504 struct mcx_cmdq_entry *cqe;
5505 struct mcx_cmd_destroy_rqt_in *in;
5506 struct mcx_cmd_destroy_rqt_out *out;
5507 int error;
5508 int token;
5509
5510 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5511 token = mcx_cmdq_token(sc);
5512 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5513
5514 in = mcx_cmdq_in(cqe);
5515 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5516 in->cmd_op_mod = htobe16(0);
5517 in->cmd_rqtn = htobe32(rqt);
5518
5519 mcx_cmdq_post(sc, cqe, 0);
5520 error = mcx_cmdq_poll(sc, cqe, 1000);
5521 if (error != 0) {
5522 printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5523 return error;
5524 }
5525 if (mcx_cmdq_verify(cqe) != 0) {
5526 printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
5527 return -1;
5528 }
5529
5530 out = mcx_cmdq_out(cqe);
5531 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5532 printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5533 out->cmd_status, be32toh(out->cmd_syndrome));
5534 return -1;
5535 }
5536
5537 return 0;
5538 }
5539
5540 #if 0
5541 static int
5542 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5543 {
5544 struct mcx_cmdq_entry *cqe;
5545 struct mcx_cmd_alloc_flow_counter_in *in;
5546 struct mcx_cmd_alloc_flow_counter_out *out;
5547 int error;
5548
5549 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5550 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5551
5552 in = mcx_cmdq_in(cqe);
5553 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5554 in->cmd_op_mod = htobe16(0);
5555
5556 mcx_cmdq_post(sc, cqe, 0);
5557
5558 error = mcx_cmdq_poll(sc, cqe, 1000);
5559 if (error != 0) {
5560 printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5561 return (-1);
5562 }
5563 if (mcx_cmdq_verify(cqe) != 0) {
5564 printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5565 return (-1);
5566 }
5567
5568 out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5569 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5570 printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5571 out->cmd_status);
5572 return (-1);
5573 }
5574
5575 sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
5576 printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5577
5578 return (0);
5579 }
5580 #endif
5581
5582 static int
5583 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5584 int *flow_table_id)
5585 {
5586 struct mcx_cmdq_entry *cqe;
5587 struct mcx_dmamem mxm;
5588 struct mcx_cmd_create_flow_table_in *in;
5589 struct mcx_cmd_create_flow_table_mb_in *mbin;
5590 struct mcx_cmd_create_flow_table_out *out;
5591 int error;
5592 int token;
5593
5594 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5595 token = mcx_cmdq_token(sc);
5596 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5597 sizeof(*out), token);
5598
5599 in = mcx_cmdq_in(cqe);
5600 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5601 in->cmd_op_mod = htobe16(0);
5602
5603 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5604 &cqe->cq_input_ptr, token) != 0) {
5605 printf("%s: unable to allocate create flow table mailbox\n",
5606 DEVNAME(sc));
5607 return (-1);
5608 }
5609 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5610 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5611 mbin->cmd_ctx.ft_log_size = log_size;
5612 mbin->cmd_ctx.ft_level = level;
5613
5614 mcx_cmdq_mboxes_sign(&mxm, 1);
5615 mcx_cmdq_post(sc, cqe, 0);
5616 error = mcx_cmdq_poll(sc, cqe, 1000);
5617 if (error != 0) {
5618 printf("%s: create flow table timeout\n", DEVNAME(sc));
5619 goto free;
5620 }
5621 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5622 printf("%s: create flow table command corrupt\n", DEVNAME(sc));
5623 goto free;
5624 }
5625
5626 out = mcx_cmdq_out(cqe);
5627 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5628 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5629 out->cmd_status, be32toh(out->cmd_syndrome));
5630 error = -1;
5631 goto free;
5632 }
5633
5634 *flow_table_id = mcx_get_id(out->cmd_table_id);
5635 free:
5636 mcx_dmamem_free(sc, &mxm);
5637 return (error);
5638 }
5639
5640 static int
5641 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5642 {
5643 struct mcx_cmdq_entry *cqe;
5644 struct mcx_dmamem mxm;
5645 struct mcx_cmd_set_flow_table_root_in *in;
5646 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5647 struct mcx_cmd_set_flow_table_root_out *out;
5648 int error;
5649 int token;
5650
5651 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5652 token = mcx_cmdq_token(sc);
5653 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5654 sizeof(*out), token);
5655
5656 in = mcx_cmdq_in(cqe);
5657 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5658 in->cmd_op_mod = htobe16(0);
5659
5660 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5661 &cqe->cq_input_ptr, token) != 0) {
5662 printf("%s: unable to allocate set flow table root mailbox\n",
5663 DEVNAME(sc));
5664 return (-1);
5665 }
5666 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5667 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5668 mbin->cmd_table_id = htobe32(flow_table_id);
5669
5670 mcx_cmdq_mboxes_sign(&mxm, 1);
5671 mcx_cmdq_post(sc, cqe, 0);
5672 error = mcx_cmdq_poll(sc, cqe, 1000);
5673 if (error != 0) {
5674 printf("%s: set flow table root timeout\n", DEVNAME(sc));
5675 goto free;
5676 }
5677 if ((error = mcx_cmdq_verify(cqe)) != 0) {
5678 printf("%s: set flow table root command corrupt\n",
5679 DEVNAME(sc));
5680 goto free;
5681 }
5682
5683 out = mcx_cmdq_out(cqe);
5684 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5685 printf("%s: set flow table root failed (%x, %x)\n",
5686 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5687 error = -1;
5688 goto free;
5689 }
5690
5691 free:
5692 mcx_dmamem_free(sc, &mxm);
5693 return (error);
5694 }
5695
5696 static int
5697 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5698 {
5699 struct mcx_cmdq_entry *cqe;
5700 struct mcx_dmamem mxm;
5701 struct mcx_cmd_destroy_flow_table_in *in;
5702 struct mcx_cmd_destroy_flow_table_mb_in *mb;
5703 struct mcx_cmd_destroy_flow_table_out *out;
5704 int error;
5705 int token;
5706
5707 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5708 token = mcx_cmdq_token(sc);
5709 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5710
5711 in = mcx_cmdq_in(cqe);
5712 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5713 in->cmd_op_mod = htobe16(0);
5714
5715 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5716 &cqe->cq_input_ptr, token) != 0) {
5717 printf("%s: unable to allocate destroy flow table mailbox\n",
5718 DEVNAME(sc));
5719 return (-1);
5720 }
5721 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5722 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5723 mb->cmd_table_id = htobe32(flow_table_id);
5724
5725 mcx_cmdq_mboxes_sign(&mxm, 1);
5726 mcx_cmdq_post(sc, cqe, 0);
5727 error = mcx_cmdq_poll(sc, cqe, 1000);
5728 if (error != 0) {
5729 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5730 goto free;
5731 }
5732 if (mcx_cmdq_verify(cqe) != 0) {
5733 printf("%s: destroy flow table command corrupt\n",
5734 DEVNAME(sc));
5735 goto free;
5736 }
5737
5738 out = mcx_cmdq_out(cqe);
5739 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5740 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5741 out->cmd_status, be32toh(out->cmd_syndrome));
5742 error = -1;
5743 goto free;
5744 }
5745
5746 free:
5747 mcx_dmamem_free(sc, &mxm);
5748 return (error);
5749 }
5750
5751
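/*
 * A flow group is a contiguous range of entries in a table that all
 * match on the same set of fields; the entries themselves just supply
 * values for those fields.  The geometry of each group is kept in
 * sc_flow_group[] so entries can be addressed by a group-relative
 * index later.
 */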
5752 static int
5753 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5754 int start, int size, int match_enable, struct mcx_flow_match *match)
5755 {
5756 struct mcx_cmdq_entry *cqe;
5757 struct mcx_dmamem mxm;
5758 struct mcx_cmd_create_flow_group_in *in;
5759 struct mcx_cmd_create_flow_group_mb_in *mbin;
5760 struct mcx_cmd_create_flow_group_out *out;
5761 struct mcx_flow_group *mfg;
5762 int error;
5763 int token;
5764
5765 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5766 token = mcx_cmdq_token(sc);
5767 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5768 token);
5769
5770 in = mcx_cmdq_in(cqe);
5771 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5772 in->cmd_op_mod = htobe16(0);
5773
5774 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5775 != 0) {
5776 printf("%s: unable to allocate create flow group mailbox\n",
5777 DEVNAME(sc));
5778 return (-1);
5779 }
5780 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5781 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5782 mbin->cmd_table_id = htobe32(flow_table_id);
5783 mbin->cmd_start_flow_index = htobe32(start);
5784 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5785
5786 mbin->cmd_match_criteria_enable = match_enable;
5787 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5788
5789 mcx_cmdq_mboxes_sign(&mxm, 2);
5790 mcx_cmdq_post(sc, cqe, 0);
5791 error = mcx_cmdq_poll(sc, cqe, 1000);
5792 if (error != 0) {
5793 printf("%s: create flow group timeout\n", DEVNAME(sc));
5794 goto free;
5795 }
5796 if (mcx_cmdq_verify(cqe) != 0) {
5797 printf("%s: create flow group command corrupt\n", DEVNAME(sc));
5798 goto free;
5799 }
5800
5801 out = mcx_cmdq_out(cqe);
5802 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5803 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5804 out->cmd_status, be32toh(out->cmd_syndrome));
5805 error = -1;
5806 goto free;
5807 }
5808
5809 mfg = &sc->sc_flow_group[group];
5810 mfg->g_id = mcx_get_id(out->cmd_group_id);
5811 mfg->g_table = flow_table_id;
5812 mfg->g_start = start;
5813 mfg->g_size = size;
5814
5815 free:
5816 mcx_dmamem_free(sc, &mxm);
5817 return (error);
5818 }
5819
5820 static int
5821 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5822 {
5823 struct mcx_cmdq_entry *cqe;
5824 struct mcx_dmamem mxm;
5825 struct mcx_cmd_destroy_flow_group_in *in;
5826 struct mcx_cmd_destroy_flow_group_mb_in *mb;
5827 struct mcx_cmd_destroy_flow_group_out *out;
5828 struct mcx_flow_group *mfg;
5829 int error;
5830 int token;
5831
5832 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5833 token = mcx_cmdq_token(sc);
5834 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5835
5836 in = mcx_cmdq_in(cqe);
5837 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5838 in->cmd_op_mod = htobe16(0);
5839
5840 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5841 &cqe->cq_input_ptr, token) != 0) {
5842 printf("%s: unable to allocate destroy flow group mailbox\n",
5843 DEVNAME(sc));
5844 return (-1);
5845 }
5846 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5847 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5848 mfg = &sc->sc_flow_group[group];
5849 mb->cmd_table_id = htobe32(mfg->g_table);
5850 mb->cmd_group_id = htobe32(mfg->g_id);
5851
5852 mcx_cmdq_mboxes_sign(&mxm, 2);
5853 mcx_cmdq_post(sc, cqe, 0);
5854 error = mcx_cmdq_poll(sc, cqe, 1000);
5855 if (error != 0) {
5856 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5857 goto free;
5858 }
5859 if (mcx_cmdq_verify(cqe) != 0) {
5860 printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
5861 goto free;
5862 }
5863
5864 out = mcx_cmdq_out(cqe);
5865 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5866 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5867 out->cmd_status, be32toh(out->cmd_syndrome));
5868 error = -1;
5869 goto free;
5870 }
5871
5872 mfg->g_id = -1;
5873 mfg->g_table = -1;
5874 mfg->g_size = 0;
5875 mfg->g_start = 0;
5876 free:
5877 mcx_dmamem_free(sc, &mxm);
5878 return (error);
5879 }
5880
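/*
 * SET_FLOW_TABLE_ENTRY writes one rule: a flow context holding the
 * group id, match values and action, followed by a destination list.
 * The context is bigger than one mailbox, so the command spans two;
 * with 0x200 data bytes per mailbox, the destination list that starts
 * where the fixed context ends at offset 0x330 lands 0x130 bytes into
 * the second mailbox, which is where pdest points below.
 */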
5881 static int
5882 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5883 const uint8_t *macaddr, uint32_t dest)
5884 {
5885 struct mcx_cmdq_entry *cqe;
5886 struct mcx_dmamem mxm;
5887 struct mcx_cmd_set_flow_table_entry_in *in;
5888 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5889 struct mcx_cmd_set_flow_table_entry_out *out;
5890 struct mcx_flow_group *mfg;
5891 uint32_t *pdest;
5892 int error;
5893 int token;
5894
5895 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5896 token = mcx_cmdq_token(sc);
5897 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5898 sizeof(*out), token);
5899
5900 in = mcx_cmdq_in(cqe);
5901 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5902 in->cmd_op_mod = htobe16(0);
5903
5904 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5905 != 0) {
5906 printf("%s: unable to allocate set flow table entry mailbox\n",
5907 DEVNAME(sc));
5908 return (-1);
5909 }
5910
5911 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5912 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5913
5914 mfg = &sc->sc_flow_group[group];
5915 mbin->cmd_table_id = htobe32(mfg->g_table);
5916 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5917 mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5918
5919 /* flow context ends at offset 0x330, 0x130 into the second mbox */
5920 pdest = (uint32_t *)
5921 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5922 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5923 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5924 *pdest = htobe32(dest);
5925
5926 /* the only thing we match on at the moment is the dest mac address */
5927 if (macaddr != NULL) {
5928 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5929 ETHER_ADDR_LEN);
5930 }
5931
5932 mcx_cmdq_mboxes_sign(&mxm, 2);
5933 mcx_cmdq_post(sc, cqe, 0);
5934 error = mcx_cmdq_poll(sc, cqe, 1000);
5935 if (error != 0) {
5936 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5937 goto free;
5938 }
5939 if (mcx_cmdq_verify(cqe) != 0) {
5940 printf("%s: set flow table entry command corrupt\n",
5941 DEVNAME(sc));
5942 goto free;
5943 }
5944
5945 out = mcx_cmdq_out(cqe);
5946 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5947 printf("%s: set flow table entry failed (%x, %x)\n",
5948 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5949 error = -1;
5950 goto free;
5951 }
5952
5953 free:
5954 mcx_dmamem_free(sc, &mxm);
5955 return (error);
5956 }
5957
5958 static int
5959 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5960 int ethertype, int ip_proto, uint32_t dest)
5961 {
5962 struct mcx_cmdq_entry *cqe;
5963 struct mcx_dmamem mxm;
5964 struct mcx_cmd_set_flow_table_entry_in *in;
5965 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5966 struct mcx_cmd_set_flow_table_entry_out *out;
5967 struct mcx_flow_group *mfg;
5968 uint32_t *pdest;
5969 int error;
5970 int token;
5971
5972 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5973 token = mcx_cmdq_token(sc);
5974 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5975 sizeof(*out), token);
5976
5977 in = mcx_cmdq_in(cqe);
5978 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5979 in->cmd_op_mod = htobe16(0);
5980
5981 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5982 != 0) {
5983 printf("%s: unable to allocate set flow table entry mailbox\n",
5984 DEVNAME(sc));
5985 return (-1);
5986 }
5987
5988 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5989 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5990
5991 mfg = &sc->sc_flow_group[group];
5992 mbin->cmd_table_id = htobe32(mfg->g_table);
5993 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5994 mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5995
5996 /* flow context ends at offset 0x330, 0x130 into the second mbox */
5997 pdest = (uint32_t *)
5998 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5999 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
6000 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
6001 *pdest = htobe32(dest);
6002
6003 mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
6004 mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
6005
6006 mcx_cmdq_mboxes_sign(&mxm, 2);
6007 mcx_cmdq_post(sc, cqe, 0);
6008 error = mcx_cmdq_poll(sc, cqe, 1000);
6009 if (error != 0) {
6010 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
6011 goto free;
6012 }
6013 if (mcx_cmdq_verify(cqe) != 0) {
6014 printf("%s: set flow table entry command corrupt\n",
6015 DEVNAME(sc));
6016 goto free;
6017 }
6018
6019 out = mcx_cmdq_out(cqe);
6020 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6021 printf("%s: set flow table entry failed (%x, %x)\n",
6022 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6023 error = -1;
6024 goto free;
6025 }
6026
6027 free:
6028 mcx_dmamem_free(sc, &mxm);
6029 return (error);
6030 }
6031
6032 static int
6033 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
6034 {
6035 struct mcx_cmdq_entry *cqe;
6036 struct mcx_dmamem mxm;
6037 struct mcx_cmd_delete_flow_table_entry_in *in;
6038 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
6039 struct mcx_cmd_delete_flow_table_entry_out *out;
6040 struct mcx_flow_group *mfg;
6041 int error;
6042 int token;
6043
6044 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6045 token = mcx_cmdq_token(sc);
6046 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
6047 token);
6048
6049 in = mcx_cmdq_in(cqe);
6050 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
6051 in->cmd_op_mod = htobe16(0);
6052
6053 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6054 &cqe->cq_input_ptr, token) != 0) {
6055 printf("%s: unable to allocate "
6056 "delete flow table entry mailbox\n", DEVNAME(sc));
6057 return (-1);
6058 }
6059 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6060 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6061
6062 mfg = &sc->sc_flow_group[group];
6063 mbin->cmd_table_id = htobe32(mfg->g_table);
6064 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6065
6066 mcx_cmdq_mboxes_sign(&mxm, 2);
6067 mcx_cmdq_post(sc, cqe, 0);
6068 error = mcx_cmdq_poll(sc, cqe, 1000);
6069 if (error != 0) {
6070 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
6071 goto free;
6072 }
6073 if (mcx_cmdq_verify(cqe) != 0) {
6074 printf("%s: delete flow table entry command corrupt\n",
6075 DEVNAME(sc));
6076 goto free;
6077 }
6078
6079 out = mcx_cmdq_out(cqe);
6080 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6081 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
6082 DEVNAME(sc), group, index, out->cmd_status,
6083 be32toh(out->cmd_syndrome));
6084 error = -1;
6085 goto free;
6086 }
6087
6088 free:
6089 mcx_dmamem_free(sc, &mxm);
6090 return (error);
6091 }
6092
6093 #if 0
6094 int
6095 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6096 {
6097 struct mcx_dmamem mxm;
6098 struct mcx_cmdq_entry *cqe;
6099 struct mcx_cmd_query_flow_table_in *in;
6100 struct mcx_cmd_query_flow_table_mb_in *mbin;
6101 struct mcx_cmd_query_flow_table_out *out;
6102 struct mcx_cmd_query_flow_table_mb_out *mbout;
6103 uint8_t token = mcx_cmdq_token(sc);
6104 int error;
6105 int i;
6106 uint8_t *dump;
6107
6108 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6109 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6110 sizeof(*out) + sizeof(*mbout) + 16, token);
6111
6112 in = mcx_cmdq_in(cqe);
6113 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6114 in->cmd_op_mod = htobe16(0);
6115
6116 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6117 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6118 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6119 &cqe->cq_output_ptr, token) != 0) {
6120 printf(", unable to allocate query flow table mailboxes\n");
6121 return (-1);
6122 }
6123 cqe->cq_input_ptr = cqe->cq_output_ptr;
6124
6125 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6126 mbin->cmd_table_type = 0;
6127 mbin->cmd_table_id = htobe32(flow_table_id);
6128
6129 mcx_cmdq_mboxes_sign(&mxm, 1);
6130
6131 mcx_cmdq_post(sc, cqe, 0);
6132 error = mcx_cmdq_poll(sc, cqe, 1000);
6133 if (error != 0) {
6134 printf("%s: query flow table timeout\n", DEVNAME(sc));
6135 goto free;
6136 }
6137 error = mcx_cmdq_verify(cqe);
6138 if (error != 0) {
6139 printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6140 goto free;
6141 }
6142
6143 out = mcx_cmdq_out(cqe);
6144 switch (out->cmd_status) {
6145 case MCX_CQ_STATUS_OK:
6146 break;
6147 default:
6148 printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6149 out->cmd_status, be32toh(out->cmd_syndrome));
6150 error = -1;
6151 goto free;
6152 }
6153
6154 mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6155 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6156 dump = (uint8_t *)mbout + 8;
6157 for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6158 printf("%.2x ", dump[i]);
6159 if (i % 16 == 15)
6160 printf("\n");
6161 }
6162 free:
6163 mcx_cq_mboxes_free(sc, &mxm);
6164 return (error);
6165 }
6166 int
6167 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6168 {
6169 struct mcx_dmamem mxm;
6170 struct mcx_cmdq_entry *cqe;
6171 struct mcx_cmd_query_flow_table_entry_in *in;
6172 struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6173 struct mcx_cmd_query_flow_table_entry_out *out;
6174 struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6175 uint8_t token = mcx_cmdq_token(sc);
6176 int error;
6177 int i;
6178 uint8_t *dump;
6179
6180 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6181 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6182 sizeof(*out) + sizeof(*mbout) + 16, token);
6183
6184 in = mcx_cmdq_in(cqe);
6185 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6186 in->cmd_op_mod = htobe16(0);
6187
6188 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6189 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6190 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6191 &cqe->cq_output_ptr, token) != 0) {
6192 printf(", unable to allocate "
6193 "query flow table entry mailboxes\n");
6194 return (-1);
6195 }
6196 cqe->cq_input_ptr = cqe->cq_output_ptr;
6197
6198 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6199 mbin->cmd_table_type = 0;
6200 mbin->cmd_table_id = htobe32(flow_table_id);
6201 mbin->cmd_flow_index = htobe32(index);
6202
6203 mcx_cmdq_mboxes_sign(&mxm, 1);
6204
6205 mcx_cmdq_post(sc, cqe, 0);
6206 error = mcx_cmdq_poll(sc, cqe, 1000);
6207 if (error != 0) {
6208 printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6209 goto free;
6210 }
6211 error = mcx_cmdq_verify(cqe);
6212 if (error != 0) {
6213 printf("%s: query flow table entry reply corrupt\n",
6214 DEVNAME(sc));
6215 goto free;
6216 }
6217
6218 out = mcx_cmdq_out(cqe);
6219 switch (out->cmd_status) {
6220 case MCX_CQ_STATUS_OK:
6221 break;
6222 default:
6223 printf("%s: query flow table entry failed (%x/%x)\n",
6224 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6225 error = -1;
6226 goto free;
6227 }
6228
6229 mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6230 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6231 dump = (uint8_t *)mbout;
6232 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6233 printf("%.2x ", dump[i]);
6234 if (i % 16 == 15)
6235 printf("\n");
6236 }
6237
6238 free:
6239 mcx_cq_mboxes_free(sc, &mxm);
6240 return (error);
6241 }
6242
6243 int
6244 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6245 {
6246 struct mcx_dmamem mxm;
6247 struct mcx_cmdq_entry *cqe;
6248 struct mcx_cmd_query_flow_group_in *in;
6249 struct mcx_cmd_query_flow_group_mb_in *mbin;
6250 struct mcx_cmd_query_flow_group_out *out;
6251 struct mcx_cmd_query_flow_group_mb_out *mbout;
6252 uint8_t token = mcx_cmdq_token(sc);
6253 int error;
6254 int i;
6255 uint8_t *dump;
6256
6257 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6258 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6259 sizeof(*out) + sizeof(*mbout) + 16, token);
6260
6261 in = mcx_cmdq_in(cqe);
6262 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6263 in->cmd_op_mod = htobe16(0);
6264
6265 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6266 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6267 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6268 &cqe->cq_output_ptr, token) != 0) {
6269 printf(", unable to allocate query flow group mailboxes\n");
6270 return (-1);
6271 }
6272 cqe->cq_input_ptr = cqe->cq_output_ptr;
6273
6274 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6275 mbin->cmd_table_type = 0;
6276 mbin->cmd_table_id = htobe32(flow_table_id);
6277 mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6278
6279 mcx_cmdq_mboxes_sign(&mxm, 1);
6280
6281 mcx_cmdq_post(sc, cqe, 0);
6282 error = mcx_cmdq_poll(sc, cqe, 1000);
6283 if (error != 0) {
6284 printf("%s: query flow group timeout\n", DEVNAME(sc));
6285 goto free;
6286 }
6287 error = mcx_cmdq_verify(cqe);
6288 if (error != 0) {
6289 printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6290 goto free;
6291 }
6292
6293 out = mcx_cmdq_out(cqe);
6294 switch (out->cmd_status) {
6295 case MCX_CQ_STATUS_OK:
6296 break;
6297 default:
6298 printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6299 out->cmd_status, be32toh(out->cmd_syndrome));
6300 error = -1;
6301 goto free;
6302 }
6303
6304 mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6305 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6306 dump = (uint8_t *)mbout;
6307 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6308 printf("%.2x ", dump[i]);
6309 if (i % 16 == 15)
6310 printf("\n");
6311 }
6312 dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6313 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6314 printf("%.2x ", dump[i]);
6315 if (i % 16 == 15)
6316 printf("\n");
6317 }
6318
6319 free:
6320 mcx_cq_mboxes_free(sc, &mxm);
6321 return (error);
6322 }
6323
6324 static int
6325 mcx_dump_counters(struct mcx_softc *sc)
6326 {
6327 struct mcx_dmamem mxm;
6328 struct mcx_cmdq_entry *cqe;
6329 struct mcx_cmd_query_vport_counters_in *in;
6330 struct mcx_cmd_query_vport_counters_mb_in *mbin;
6331 struct mcx_cmd_query_vport_counters_out *out;
6332 struct mcx_nic_vport_counters *counters;
6333 int error, token;
6334
6335 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6336 token = mcx_cmdq_token(sc);
6337 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6338 sizeof(*out) + sizeof(*counters), token);
6339
6340 in = mcx_cmdq_in(cqe);
6341 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6342 in->cmd_op_mod = htobe16(0);
6343
6344 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6345 &cqe->cq_output_ptr, token) != 0) {
6346 printf(", unable to allocate "
6347 "query nic vport counters mailboxen\n");
6348 return (-1);
6349 }
6350 cqe->cq_input_ptr = cqe->cq_output_ptr;
6351
6352 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6353 mbin->cmd_clear = 0x80;
6354
6355 mcx_cmdq_mboxes_sign(&mxm, 1);
6356 mcx_cmdq_post(sc, cqe, 0);
6357
6358 error = mcx_cmdq_poll(sc, cqe, 1000);
6359 if (error != 0) {
6360 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6361 goto free;
6362 }
6363 if (mcx_cmdq_verify(cqe) != 0) {
6364 printf("%s: query nic vport counters command corrupt\n",
6365 DEVNAME(sc));
6366 goto free;
6367 }
6368
6369 out = mcx_cmdq_out(cqe);
6370 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6371 printf("%s: query nic vport counters failed (%x, %x)\n",
6372 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6373 error = -1;
6374 goto free;
6375 }
6376
6377 counters = (struct mcx_nic_vport_counters *)
6378 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6379 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6380 counters->rx_ucast.packets + counters->tx_ucast.packets +
6381 counters->rx_err.packets + counters->tx_err.packets)
6382 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6383 DEVNAME(sc),
6384 be64toh(counters->tx_err.packets),
6385 be64toh(counters->rx_err.packets),
6386 be64toh(counters->tx_ucast.packets),
6387 be64toh(counters->rx_ucast.packets),
6388 be64toh(counters->tx_bcast.packets),
6389 be64toh(counters->rx_bcast.packets));
6390 free:
6391 mcx_dmamem_free(sc, &mxm);
6392
6393 return (error);
6394 }
6395
6396 static int
6397 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6398 {
6399 struct mcx_dmamem mxm;
6400 struct mcx_cmdq_entry *cqe;
6401 struct mcx_cmd_query_flow_counter_in *in;
6402 struct mcx_cmd_query_flow_counter_mb_in *mbin;
6403 struct mcx_cmd_query_flow_counter_out *out;
6404 struct mcx_counter *counters;
6405 int error, token;
6406
6407 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6408 token = mcx_cmdq_token(sc);
6409 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6410 sizeof(*counters), token);
6411
6412 in = mcx_cmdq_in(cqe);
6413 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6414 in->cmd_op_mod = htobe16(0);
6415
6416 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6417 &cqe->cq_output_ptr, token) != 0) {
6418 printf(", unable to allocate query flow counter mailboxen\n");
6419 return (-1);
6420 }
6421 cqe->cq_input_ptr = cqe->cq_output_ptr;
6422 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6423 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6424 mbin->cmd_clear = 0x80;
6425
6426 mcx_cmdq_mboxes_sign(&mxm, 1);
6427 mcx_cmdq_post(sc, cqe, 0);
6428
6429 error = mcx_cmdq_poll(sc, cqe, 1000);
6430 if (error != 0) {
6431 printf("%s: query flow counter timeout\n", DEVNAME(sc));
6432 goto free;
6433 }
6434 if (mcx_cmdq_verify(cqe) != 0) {
6435 printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
6436 goto free;
6437 }
6438
6439 out = mcx_cmdq_out(cqe);
6440 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6441 printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
6442 out->cmd_status, be32toh(out->cmd_syndrome));
6443 error = -1;
6444 goto free;
6445 }
6446
6447 counters = (struct mcx_counter *)
6448 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6449 if (counters->packets)
6450 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
6451 be64toh(counters->packets));
6452 free:
6453 mcx_dmamem_free(sc, &mxm);
6454
6455 return (error);
6456 }
6457
6458 #endif
6459
6460 #if NKSTAT > 0
6461
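/*
 * Queue state queries used by the kstat code: each one snapshots the
 * hardware context of an rq, sq, cq or eq into a caller-supplied
 * structure so queue pointers and counters can be reported.
 */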
6462 int
6463 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6464 {
6465 struct mcx_dmamem mxm;
6466 struct mcx_cmdq_entry *cqe;
6467 struct mcx_cmd_query_rq_in *in;
6468 struct mcx_cmd_query_rq_out *out;
6469 struct mcx_cmd_query_rq_mb_out *mbout;
6470 uint8_t token = mcx_cmdq_token(sc);
6471 int error;
6472
6473 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6474 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6475 token);
6476
6477 in = mcx_cmdq_in(cqe);
6478 in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6479 in->cmd_op_mod = htobe16(0);
6480 in->cmd_rqn = htobe32(rx->rx_rqn);
6481
6482 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6483 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6484 &cqe->cq_output_ptr, token) != 0) {
6485 printf("%s: unable to allocate query rq mailboxes\n", DEVNAME(sc));
6486 return (-1);
6487 }
6488
6489 mcx_cmdq_mboxes_sign(&mxm, 1);
6490
6491 mcx_cmdq_post(sc, cqe, 0);
6492 error = mcx_cmdq_poll(sc, cqe, 1000);
6493 if (error != 0) {
6494 printf("%s: query rq timeout\n", DEVNAME(sc));
6495 goto free;
6496 }
6497 error = mcx_cmdq_verify(cqe);
6498 if (error != 0) {
6499 printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6500 goto free;
6501 }
6502
6503 out = mcx_cmdq_out(cqe);
6504 switch (out->cmd_status) {
6505 case MCX_CQ_STATUS_OK:
6506 break;
6507 default:
6508 printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6509 out->cmd_status, be32toh(out->cmd_syndrome));
6510 error = -1;
6511 goto free;
6512 }
6513
6514 mbout = (struct mcx_cmd_query_rq_mb_out *)
6515 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6516 memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6517
6518 free:
6519 mcx_cq_mboxes_free(sc, &mxm);
6520 return (error);
6521 }
6522
6523 int
6524 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6525 {
6526 struct mcx_dmamem mxm;
6527 struct mcx_cmdq_entry *cqe;
6528 struct mcx_cmd_query_sq_in *in;
6529 struct mcx_cmd_query_sq_out *out;
6530 struct mcx_cmd_query_sq_mb_out *mbout;
6531 uint8_t token = mcx_cmdq_token(sc);
6532 int error;
6533
6534 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6535 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6536 token);
6537
6538 in = mcx_cmdq_in(cqe);
6539 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6540 in->cmd_op_mod = htobe16(0);
6541 in->cmd_sqn = htobe32(tx->tx_sqn);
6542
6543 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6544 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6545 &cqe->cq_output_ptr, token) != 0) {
6546 printf("%s: unable to allocate query sq mailboxes\n", DEVNAME(sc));
6547 return (-1);
6548 }
6549
6550 mcx_cmdq_mboxes_sign(&mxm, 1);
6551
6552 mcx_cmdq_post(sc, cqe, 0);
6553 error = mcx_cmdq_poll(sc, cqe, 1000);
6554 if (error != 0) {
6555 printf("%s: query sq timeout\n", DEVNAME(sc));
6556 goto free;
6557 }
6558 error = mcx_cmdq_verify(cqe);
6559 if (error != 0) {
6560 printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6561 goto free;
6562 }
6563
6564 out = mcx_cmdq_out(cqe);
6565 switch (out->cmd_status) {
6566 case MCX_CQ_STATUS_OK:
6567 break;
6568 default:
6569 printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6570 out->cmd_status, be32toh(out->cmd_syndrome));
6571 error = -1;
6572 goto free;
6573 }
6574
6575 mbout = (struct mcx_cmd_query_sq_mb_out *)
6576 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6577 memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6578
6579 free:
6580 mcx_cq_mboxes_free(sc, &mxm);
6581 return (error);
6582 }
6583
6584 int
6585 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6586 {
6587 struct mcx_dmamem mxm;
6588 struct mcx_cmdq_entry *cqe;
6589 struct mcx_cmd_query_cq_in *in;
6590 struct mcx_cmd_query_cq_out *out;
6591 struct mcx_cq_ctx *ctx;
6592 uint8_t token = mcx_cmdq_token(sc);
6593 int error;
6594
6595 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6596 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6597 token);
6598
6599 in = mcx_cmdq_in(cqe);
6600 in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6601 in->cmd_op_mod = htobe16(0);
6602 in->cmd_cqn = htobe32(cq->cq_n);
6603
6604 CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6605 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6606 &cqe->cq_output_ptr, token) != 0) {
6607 printf("%s: unable to allocate query cq mailboxes\n",
6608 DEVNAME(sc));
6609 return (-1);
6610 }
6611
6612 mcx_cmdq_mboxes_sign(&mxm, 1);
6613
6614 mcx_cmdq_post(sc, cqe, 0);
6615 error = mcx_cmdq_poll(sc, cqe, 1000);
6616 if (error != 0) {
6617 printf("%s: query cq timeout\n", DEVNAME(sc));
6618 goto free;
6619 }
6620 if (mcx_cmdq_verify(cqe) != 0) {
6621 printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6622 goto free;
6623 }
6624
6625 out = mcx_cmdq_out(cqe);
6626 switch (out->cmd_status) {
6627 case MCX_CQ_STATUS_OK:
6628 break;
6629 default:
6630 printf("%s: query qc failed (%x/%x)\n", DEVNAME(sc),
6631 out->cmd_status, be32toh(out->cmd_syndrome));
6632 error = -1;
6633 goto free;
6634 }
6635
6636 ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6637 memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6638 free:
6639 mcx_dmamem_free(sc, &mxm);
6640 return (error);
6641 }
6642
6643 int
6644 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6645 {
6646 struct mcx_dmamem mxm;
6647 struct mcx_cmdq_entry *cqe;
6648 struct mcx_cmd_query_eq_in *in;
6649 struct mcx_cmd_query_eq_out *out;
6650 struct mcx_eq_ctx *ctx;
6651 uint8_t token = mcx_cmdq_token(sc);
6652 int error;
6653
6654 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6655 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6656 token);
6657
6658 in = mcx_cmdq_in(cqe);
6659 in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6660 in->cmd_op_mod = htobe16(0);
6661 in->cmd_eqn = htobe32(eq->eq_n);
6662
6663 CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6664 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6665 &cqe->cq_output_ptr, token) != 0) {
6666 printf("%s: unable to allocate query eq mailboxes\n",
6667 DEVNAME(sc));
6668 return (-1);
6669 }
6670
6671 mcx_cmdq_mboxes_sign(&mxm, 1);
6672
6673 mcx_cmdq_post(sc, cqe, 0);
6674 error = mcx_cmdq_poll(sc, cqe, 1000);
6675 if (error != 0) {
6676 printf("%s: query eq timeout\n", DEVNAME(sc));
6677 goto free;
6678 }
6679 if (mcx_cmdq_verify(cqe) != 0) {
6680 printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6681 goto free;
6682 }
6683
6684 out = mcx_cmdq_out(cqe);
6685 switch (out->cmd_status) {
6686 case MCX_CQ_STATUS_OK:
6687 break;
6688 default:
6689 printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6690 out->cmd_status, be32toh(out->cmd_syndrome));
6691 error = -1;
6692 goto free;
6693 }
6694
6695 ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6696 memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6697 free:
6698 mcx_dmamem_free(sc, &mxm);
6699 return (error);
6700 }
6701
6702 #endif /* NKSTAT > 0 */
6703
6704
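/*
 * Fill up to nslots rx descriptors with fresh mbuf clusters and ring
 * the rq doorbell with the new producer counter.  Returns how many of
 * the requested slots were NOT filled, so the caller can hand that
 * many reservations back to the rxr.
 */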
6705 static inline unsigned int
6706 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6707 {
6708 struct mcx_rq_entry *ring, *rqe;
6709 struct mcx_slot *ms;
6710 struct mbuf *m;
6711 uint slot, p, fills;
6712
6713 ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6714 p = rx->rx_prod;
6715
6716 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6717 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6718
6719 slot = (p % (1 << MCX_LOG_RQ_SIZE));
6720 rqe = ring;
6721 for (fills = 0; fills < nslots; fills++) {
6722 slot = p % (1 << MCX_LOG_RQ_SIZE);
6723
6724 ms = &rx->rx_slots[slot];
6725 rqe = &ring[slot];
6726
6727 m = NULL;
6728 MGETHDR(m, M_DONTWAIT, MT_DATA);
6729 if (m == NULL)
6730 break;
6731
6732 MCLGET(m, M_DONTWAIT);
6733 if ((m->m_flags & M_EXT) == 0) {
6734 m_freem(m);
6735 break;
6736 }
6737
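/*
 * trim the front so the payload offered to the device fits in
 * sc_rxbufsz and starts ETHER_ALIGN bytes in, keeping the IP
 * header behind the ethernet header aligned.
 */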
6738 m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6739 m_adj(m, m->m_ext.ext_size - sc->sc_rxbufsz);
6740 m_adj(m, ETHER_ALIGN);
6741
6742 if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6743 BUS_DMA_NOWAIT) != 0) {
6744 m_freem(m);
6745 break;
6746 }
6747 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
6748 ms->ms_m = m;
6749
6750 be32enc(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6751 be64enc(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6752 be32enc(&rqe->rqe_lkey, sc->sc_lkey);
6753
6754 p++;
6755 }
6756
6757 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6758 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6759
6760 rx->rx_prod = p;
6761
6762 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6763 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6764 be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6765 p & MCX_WQ_DOORBELL_MASK);
6766 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6767 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6768
6769 return (nslots - fills);
6770 }
6771
6772 static int
6773 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6774 {
6775 u_int slots;
6776
6777 slots = mcx_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6778 if (slots == 0)
6779 return (1);
6780
6781 slots = mcx_rx_fill_slots(sc, rx, slots);
6782 mcx_rxr_put(&rx->rx_rxr, slots);
6783 return (0);
6784 }
6785
6786 void
6787 mcx_refill(void *xrx)
6788 {
6789 struct mcx_rx *rx = xrx;
6790 struct mcx_softc *sc = rx->rx_softc;
6791
6792 mcx_rx_fill(sc, rx);
6793
6794 if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
6795 callout_schedule(&rx->rx_refill, 1);
6796 }
6797
6798 static int
6799 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6800 struct mcx_cq_entry *cqe)
6801 {
6802 struct mcx_slot *ms;
6803 bus_dmamap_t map;
6804 int slot, slots;
6805
6806 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6807
6808 ms = &tx->tx_slots[slot];
6809 map = ms->ms_map;
6810 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6811 BUS_DMASYNC_POSTWRITE);
6812
6813 slots = 1;
6814 if (map->dm_nsegs > 1)
6815 slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
6816
6817 bus_dmamap_unload(sc->sc_dmat, map);
6818 m_freem(ms->ms_m);
6819 ms->ms_m = NULL;
6820
6821 return (slots);
6822 }
6823
6824 static uint64_t
6825 mcx_uptime(void)
6826 {
6827 struct timespec ts;
6828
6829 nanouptime(&ts);
6830
6831 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
6832 }
6833
6834 static void
6835 mcx_calibrate_first(struct mcx_softc *sc)
6836 {
6837 struct mcx_calibration *c = &sc->sc_calibration[0];
6838 int s;
6839
6840 sc->sc_calibration_gen = 0;
6841
6842 s = splhigh(); /* crit_enter? */
6843 c->c_ubase = mcx_uptime();
6844 c->c_tbase = mcx_timer(sc);
6845 splx(s);
6846 c->c_ratio = 0;
6847
6848 #if notyet
6849 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
6850 #endif
6851 }
6852
6853 #define MCX_TIMESTAMP_SHIFT 24
6854
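/*
 * A worked example of the fixed-point conversion set up below (the
 * numbers are made up; real ones come from the device): if 32e9 ns
 * of uptime pass while the device timer advances 4e9 ticks, then
 * c_ratio = (32e9 << 24) / 4e9 = 8 << 24, and mcx_process_rx()
 * converts a raw timestamp t with
 * ((t - c_timestamp) * c_ratio >> 24) + c_uptime.
 */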
6855 static void
6856 mcx_calibrate(void *arg)
6857 {
6858 struct mcx_softc *sc = arg;
6859 struct mcx_calibration *nc, *pc;
6860 uint64_t udiff, tdiff;
6861 unsigned int gen;
6862 int s;
6863
6864 if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
6865 return;
6866
6867 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
6868
6869 gen = sc->sc_calibration_gen;
6870 pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6871 gen++;
6872 nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6873
6874 nc->c_uptime = pc->c_ubase;
6875 nc->c_timestamp = pc->c_tbase;
6876
6877 s = splhigh(); /* crit_enter? */
6878 nc->c_ubase = mcx_uptime();
6879 nc->c_tbase = mcx_timer(sc);
6880 splx(s);
6881
6882 udiff = nc->c_ubase - nc->c_uptime;
6883 tdiff = nc->c_tbase - nc->c_timestamp;
6884
6885 /*
6886 * udiff is the wall clock time between calibration ticks,
6887 * which should be 32 seconds or 32 billion nanoseconds. if
6888 * we squint, 1 billion nanoseconds is kind of like a 32 bit
6889 * number, so 32 billion should still have a lot of high bits
6890 * spare. we use this space by shifting the nanoseconds up
6891 * 24 bits so we have a nice big number to divide by the
6892 * number of mcx timer ticks.
6893 */
6894 nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6895
6896 membar_producer();
6897 sc->sc_calibration_gen = gen;
6898 }
6899
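/*
 * Turn one rx completion into an mbuf on mq: recover the slot from
 * the cqe's wqe counter, take the packet length from the cqe, and
 * translate the hardware's checksum and vlan flags into mbuf
 * metadata.
 */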
6900 static int
6901 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6902 struct mcx_cq_entry *cqe, struct mcx_mbufq *mq,
6903 const struct mcx_calibration *c)
6904 {
6905 struct ifnet *ifp = &sc->sc_ec.ec_if;
6906 struct mcx_slot *ms;
6907 struct mbuf *m;
6908 uint32_t flags;
6909 int slot;
6910
6911 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6912
6913 ms = &rx->rx_slots[slot];
6914 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
6915 BUS_DMASYNC_POSTREAD);
6916 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6917
6918 m = ms->ms_m;
6919 ms->ms_m = NULL;
6920
6921 m_set_rcvif(m, &sc->sc_ec.ec_if);
6922 m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
6923
6924 #if 0
6925 if (cqe->cq_rx_hash_type) {
6926 m->m_pkthdr.ph_flowid = be32toh(cqe->cq_rx_hash);
6927 m->m_pkthdr.csum_flags |= M_FLOWID;
6928 }
6929 #endif
6930
6931 flags = be32dec(&cqe->cq_flags);
6932 if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK) {
6933 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6934 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6935 }
6936 if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK) {
6937 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)
6938 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
6939 if (ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx)
6940 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
6941 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)
6942 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
6943 if (ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx)
6944 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
6945 }
6946 if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6947 vlan_set_tag(m, flags & MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6948 }
6949
6950 #if notyet
6951 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_LINK0) && c->c_ratio) {
6952 uint64_t t = be64dec(&cqe->cq_timestamp);
6953 t -= c->c_timestamp;
6954 t *= c->c_ratio;
6955 t >>= MCX_TIMESTAMP_SHIFT;
6956 t += c->c_uptime;
6957
6958 m->m_pkthdr.ph_timestamp = t;
6959 SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6960 }
6961 #endif
6962
6963 MBUFQ_ENQUEUE(mq, m);
6964
6965 return (1);
6966 }
6967
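/*
 * cq entries are validated by an ownership bit that the hardware
 * flips on every pass around the ring; (cq_cons >> MCX_LOG_CQ_SIZE)
 * & 1 is the phase software expects, so no hardware index ever has
 * to be read back to find fresh entries.
 */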
6968 static struct mcx_cq_entry *
6969 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
6970 {
6971 struct mcx_cq_entry *cqe;
6972 int next;
6973
6974 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
6975 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
6976
6977 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
6978 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
6979 return (&cqe[next]);
6980 }
6981
6982 return (NULL);
6983 }
6984
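/*
 * Re-arm the cq after draining it: update the doorbell record in
 * host memory with the consumer index and arm sequence number, then
 * write the same value together with the cqn to the UAR doorbell so
 * the device generates the next completion event.
 */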
6985 static void
6986 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
6987 {
6988 struct mcx_cq_doorbell *db;
6989 bus_size_t offset;
6990 uint32_t val;
6991 uint64_t uval;
6992
6993 val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
6994 val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6995
6996 db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
6997
6998 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6999 cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
7000
7001 be32enc(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
7002 be32enc(&db->db_arm_ci, val);
7003
7004 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7005 cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
7006
7007 offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
7008
7009 uval = (uint64_t)val << 32;
7010 uval |= cq->cq_n;
7011
7012 bus_space_write_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
7013 mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
7014 }
7015
7016 void
7017 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
7018 {
7019 struct mcx_rx *rx = &q->q_rx;
7020 struct mcx_tx *tx = &q->q_tx;
7021 struct ifnet *ifp = &sc->sc_ec.ec_if;
7022 const struct mcx_calibration *c;
7023 unsigned int gen;
7024 struct mcx_cq_entry *cqe;
7025 struct mcx_mbufq mq;
7026 struct mbuf *m;
7027 int rxfree, txfree;
7028
7029 MBUFQ_INIT(&mq);
7030
7031 gen = sc->sc_calibration_gen;
7032 membar_consumer();
7033 c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
7034
7035 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7036 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
7037
7038 rxfree = 0;
7039 txfree = 0;
7040 while ((cqe = mcx_next_cq_entry(sc, cq))) {
7041 uint8_t opcode;
7042 opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
7043 switch (opcode) {
7044 case MCX_CQ_ENTRY_OPCODE_REQ:
7045 txfree += mcx_process_txeof(sc, tx, cqe);
7046 break;
7047 case MCX_CQ_ENTRY_OPCODE_SEND:
7048 rxfree += mcx_process_rx(sc, rx, cqe, &mq, c);
7049 break;
7050 case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
7051 case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
7052 /* uint8_t *cqp = (uint8_t *)cqe; */
7053 /* printf("%s: cq completion error: %x\n",
7054 DEVNAME(sc), cqp[0x37]); */
7055 break;
7056
7057 default:
7058 /* printf("%s: cq completion opcode %x??\n",
7059 DEVNAME(sc), opcode); */
7060 break;
7061 }
7062
7063 cq->cq_cons++;
7064 }
7065
7066 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7067 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
7068
7069 cq->cq_count++;
7070 mcx_arm_cq(sc, cq, q->q_uar);
7071
7072 if (rxfree > 0) {
7073 mcx_rxr_put(&rx->rx_rxr, rxfree);
7074 while (MBUFQ_FIRST(&mq) != NULL) {
7075 MBUFQ_DEQUEUE(&mq, m);
7076 if_percpuq_enqueue(ifp->if_percpuq, m);
7077 }
7078
7079 mcx_rx_fill(sc, rx);
7080 if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
7081 callout_schedule(&rx->rx_refill, 1);
7082 }
7083 if (txfree > 0) {
7084 tx->tx_cons += txfree;
7085 if_schedule_deferred_start(ifp);
7086 }
7087 }
7088
7089
7090 static void
7091 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
7092 {
7093 bus_size_t offset;
7094 uint32_t val;
7095
7096 offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
7097 val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
7098
7099 mcx_wr(sc, offset, val);
7100 mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
7101 }
7102
7103 static struct mcx_eq_entry *
7104 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
7105 {
7106 struct mcx_eq_entry *eqe;
7107 int next;
7108
7109 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
7110 next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
7111 if ((eqe[next].eq_owner & 1) ==
7112 ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
7113 eq->eq_cons++;
7114 return (&eqe[next]);
7115 }
7116 return (NULL);
7117 }
7118
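/*
 * The admin eq carries async events rather than completions.  Only
 * port change events are acted on; they are deferred to a workqueue,
 * presumably because reporting link state involves further firmware
 * commands that are better not issued from interrupt context.
 */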
7119 int
7120 mcx_admin_intr(void *xsc)
7121 {
7122 struct mcx_softc *sc = (struct mcx_softc *)xsc;
7123 struct mcx_eq *eq = &sc->sc_admin_eq;
7124 struct mcx_eq_entry *eqe;
7125
7126 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7127 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7128
7129 while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7130 switch (eqe->eq_event_type) {
7131 case MCX_EVENT_TYPE_LAST_WQE:
7132 /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7133 break;
7134
7135 case MCX_EVENT_TYPE_CQ_ERROR:
7136 /* printf("%s: cq error\n", DEVNAME(sc)); */
7137 break;
7138
7139 case MCX_EVENT_TYPE_CMD_COMPLETION:
7140 /* wakeup probably */
7141 break;
7142
7143 case MCX_EVENT_TYPE_PORT_CHANGE:
7144 workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
7145 break;
7146
7147 default:
7148 /* printf("%s: something happened\n", DEVNAME(sc)); */
7149 break;
7150 }
7151 }
7152
7153 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7154 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7155
7156 mcx_arm_eq(sc, eq, sc->sc_uar);
7157
7158 return (1);
7159 }
7160
7161 int
7162 mcx_cq_intr(void *xq)
7163 {
7164 struct mcx_queues *q = (struct mcx_queues *)xq;
7165 struct mcx_softc *sc = q->q_sc;
7166 struct mcx_eq *eq = &q->q_eq;
7167 struct mcx_eq_entry *eqe;
7168 int cqn;
7169
7170 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7171 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7172
7173 while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7174 switch (eqe->eq_event_type) {
7175 case MCX_EVENT_TYPE_COMPLETION:
7176 cqn = be32toh(eqe->eq_event_data[6]);
7177 if (cqn == q->q_cq.cq_n)
7178 mcx_process_cq(sc, q, &q->q_cq);
7179 break;
7180 }
7181 }
7182
7183 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7184 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7185
7186 mcx_arm_eq(sc, eq, q->q_uar);
7187
7188 return (1);
7189 }
7190
7191 static void
7192 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7193 int total)
7194 {
7195 struct mcx_slot *ms;
7196
7197 int i = allocated;
7198 while (i-- > 0) {
7199 ms = &slots[i];
7200 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7201 if (ms->ms_m != NULL)
7202 m_freem(ms->ms_m);
7203 }
7204 kmem_free(slots, total * sizeof(*ms));
7205 }
7206
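/*
 * Bring up one rx/tx queue pair: dma maps for the rx and tx slots
 * first, then the cq both queues complete into, then the sq and rq.
 * The unwind path tears things down in reverse, using i so only the
 * dma maps that were actually created get destroyed.
 */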
7207 static int
7208 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7209 {
7210 struct mcx_rx *rx;
7211 struct mcx_tx *tx;
7212 struct mcx_slot *ms;
7213 int i;
7214
7215 rx = &q->q_rx;
7216 rx->rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
7217 KM_SLEEP);
7218
7219 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7220 ms = &rx->rx_slots[i];
7221 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7222 sc->sc_hardmtu, 0,
7223 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7224 &ms->ms_map) != 0) {
7225 printf("%s: failed to allocate rx dma maps\n",
7226 DEVNAME(sc));
7227 goto destroy_rx_slots;
7228 }
7229 }
7230
7231 tx = &q->q_tx;
7232 tx->tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
7233 KM_SLEEP);
7234
7235 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7236 ms = &tx->tx_slots[i];
7237 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7238 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7239 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7240 &ms->ms_map) != 0) {
7241 printf("%s: failed to allocate tx dma maps\n",
7242 DEVNAME(sc));
7243 goto destroy_tx_slots;
7244 }
7245 }
7246
7247 if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7248 q->q_eq.eq_n) != 0)
7249 goto destroy_tx_slots;
7250
7251 if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7252 != 0)
7253 goto destroy_cq;
7254
7255 if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7256 goto destroy_sq;
7257
7258 return 0;
7259
7260 destroy_sq:
7261 mcx_destroy_sq(sc, tx);
7262 destroy_cq:
7263 mcx_destroy_cq(sc, &q->q_cq);
7264 destroy_tx_slots:
7265 mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7266 tx->tx_slots = NULL;
7267
7268 i = (1 << MCX_LOG_RQ_SIZE);
7269 destroy_rx_slots:
7270 mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7271 rx->rx_slots = NULL;
7272 return ENOMEM;
7273 }
7274
7275 static int
7276 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7277 {
7278 int i;
7279 int count;
7280
7281 count = 0;
7282 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7283 if (mcx_rss_config[i].flow_group == group)
7284 count++;
7285 }
7286
7287 return count;
7288 }
7289
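/*
 * Receive steering is a two level tree: the root table matches on
 * destination mac address (promisc and allmulti groups first, then
 * exact matches) and forwards everything into the rss table, whose
 * groups match on ethertype and ip protocol and spread flows across
 * the rx queues through per-rule tirs.
 */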
7290 static int
7291 mcx_init(struct ifnet *ifp)
7292 {
7293 struct mcx_softc *sc = ifp->if_softc;
7294 struct mcx_rx *rx;
7295 struct mcx_tx *tx;
7296 int i, start, count, flow_group, flow_index;
7297 struct mcx_flow_match match_crit;
7298 struct mcx_rss_rule *rss;
7299 uint32_t dest;
7300 int rqns[MCX_MAX_QUEUES] = { 0 };
7301
7302 if (ISSET(ifp->if_flags, IFF_RUNNING))
7303 mcx_stop(ifp, 0);
7304
7305 if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7306 goto down;
7307
7308 for (i = 0; i < sc->sc_nqueues; i++) {
7309 if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7310 goto down;
7311 }
7312 }
7313
7314 /* RSS flow table and flow groups */
7315 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7316 &sc->sc_rss_flow_table_id) != 0)
7317 goto down;
7318
7319 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7320 sc->sc_rss_flow_table_id;
7321
7322 /* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7323 memset(&match_crit, 0, sizeof(match_crit));
7324 match_crit.mc_ethertype = 0xffff;
7325 match_crit.mc_ip_proto = 0xff;
7326 match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7327 start = 0;
7328 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7329 if (count != 0) {
7330 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7331 MCX_FLOW_GROUP_RSS_L4, start, count,
7332 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7333 goto down;
7334 start += count;
7335 }
7336
7337 /* L3 RSS flow group (v4/v6, including fragments) */
7338 memset(&match_crit, 0, sizeof(match_crit));
7339 match_crit.mc_ethertype = 0xffff;
7340 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7341 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7342 MCX_FLOW_GROUP_RSS_L3, start, count,
7343 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7344 goto down;
7345 start += count;
7346
7347 /* non-RSS flow group */
7348 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7349 memset(&match_crit, 0, sizeof(match_crit));
7350 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7351 MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7352 goto down;
7353
7354 /* Root flow table, matching packets based on mac address */
7355 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7356 &sc->sc_mac_flow_table_id) != 0)
7357 goto down;
7358
7359 /* promisc flow group */
7360 start = 0;
7361 memset(&match_crit, 0, sizeof(match_crit));
7362 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7363 MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7364 goto down;
7365 sc->sc_promisc_flow_enabled = 0;
7366 start++;
7367
7368 /* all multicast flow group */
7369 match_crit.mc_dest_mac[0] = 0x01;
7370 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7371 MCX_FLOW_GROUP_ALLMULTI, start, 1,
7372 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7373 goto down;
7374 sc->sc_allmulti_flow_enabled = 0;
7375 start++;
7376
7377 /* mac address matching flow group */
7378 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7379 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7380 MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7381 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7382 goto down;
7383
7384 /* flow table entries for unicast and broadcast */
7385 start = 0;
7386 if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7387 LLADDR(satosdl(ifp->if_dl->ifa_addr)), dest) != 0)
7388 goto down;
7389 start++;
7390
7391 if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7392 etherbroadcastaddr, dest) != 0)
7393 goto down;
7394 start++;
7395
7396 /* multicast entries go after that */
7397 sc->sc_mcast_flow_base = start;
7398
7399 /* re-add any existing multicast flows */
7400 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7401 if (sc->sc_mcast_flows[i][0] != 0) {
7402 mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7403 sc->sc_mcast_flow_base + i,
7404 sc->sc_mcast_flows[i], dest);
7405 }
7406 }
7407
7408 if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7409 goto down;
7410
7411 /*
7412 * the RQT can be any size as long as it's a power of two.
7413 * since we also restrict the number of queues to a power of two,
7414 * we can just put each rx queue in once.
7415 */
7416 for (i = 0; i < sc->sc_nqueues; i++)
7417 rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7418
7419 if (mcx_create_rqt(sc, sc->sc_nqueues, rqns, &sc->sc_rqt) != 0)
7420 goto down;
7421
7422 start = 0;
7423 flow_index = 0;
7424 flow_group = -1;
7425 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7426 rss = &mcx_rss_config[i];
7427 if (rss->flow_group != flow_group) {
7428 flow_group = rss->flow_group;
7429 flow_index = 0;
7430 }
7431
7432 if (rss->hash_sel == 0) {
7433 if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7434 &sc->sc_tir[i]) != 0)
7435 goto down;
7436 } else {
7437 if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7438 rss->hash_sel, &sc->sc_tir[i]) != 0)
7439 goto down;
7440 }
7441
7442 if (mcx_set_flow_table_entry_proto(sc, flow_group,
7443 flow_index, rss->ethertype, rss->ip_proto,
7444 MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7445 goto down;
7446 flow_index++;
7447 }
7448
7449 for (i = 0; i < sc->sc_nqueues; i++) {
7450 struct mcx_queues *q = &sc->sc_queues[i];
7451 rx = &q->q_rx;
7452 tx = &q->q_tx;
7453
7454 /* start the queues */
7455 if (mcx_ready_sq(sc, tx) != 0)
7456 goto down;
7457
7458 if (mcx_ready_rq(sc, rx) != 0)
7459 goto down;
7460
7461 mcx_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7462 rx->rx_prod = 0;
7463 mcx_rx_fill(sc, rx);
7464
7465 tx->tx_cons = 0;
7466 tx->tx_prod = 0;
7467 }
7468
7469 mcx_calibrate_first(sc);
7470
7471 SET(ifp->if_flags, IFF_RUNNING);
7472 CLR(ifp->if_flags, IFF_OACTIVE);
7473 if_schedule_deferred_start(ifp);
7474
7475 return 0;
7476 down:
7477 mcx_stop(ifp, 0);
7478 return EIO;
7479 }
7480
7481 static void
7482 mcx_stop(struct ifnet *ifp, int disable)
7483 {
7484 struct mcx_softc *sc = ifp->if_softc;
7485 struct mcx_rss_rule *rss;
7486 int group, i, flow_group, flow_index;
7487
7488 CLR(ifp->if_flags, IFF_RUNNING);
7489
7490 /*
7491 * delete flow table entries first, so no packets can arrive
7492 * after the barriers
7493 */
7494 if (sc->sc_promisc_flow_enabled)
7495 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7496 if (sc->sc_allmulti_flow_enabled)
7497 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7498 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7499 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7500 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7501 if (sc->sc_mcast_flows[i][0] != 0) {
7502 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7503 sc->sc_mcast_flow_base + i);
7504 }
7505 }
7506
7507 flow_group = -1;
7508 flow_index = 0;
7509 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7510 rss = &mcx_rss_config[i];
7511 if (rss->flow_group != flow_group) {
7512 flow_group = rss->flow_group;
7513 flow_index = 0;
7514 }
7515
7516 mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7517
7518 mcx_destroy_tir(sc, sc->sc_tir[i]);
7519 sc->sc_tir[i] = 0;
7520
7521 flow_index++;
7522 }
7523
7524 for (i = 0; i < sc->sc_nqueues; i++) {
7525 callout_halt(&sc->sc_queues[i].q_rx.rx_refill, NULL);
7526 }
7527
7528 callout_halt(&sc->sc_calibrate, NULL);
7529
7530 for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7531 if (sc->sc_flow_group[group].g_id != -1)
7532 mcx_destroy_flow_group(sc, group);
7533 }
7534
7535 if (sc->sc_mac_flow_table_id != -1) {
7536 mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7537 sc->sc_mac_flow_table_id = -1;
7538 }
7539 if (sc->sc_rss_flow_table_id != -1) {
7540 mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7541 sc->sc_rss_flow_table_id = -1;
7542 }
7543 if (sc->sc_rqt != -1) {
7544 mcx_destroy_rqt(sc, sc->sc_rqt);
7545 sc->sc_rqt = -1;
7546 }
7547
7548 for (i = 0; i < sc->sc_nqueues; i++) {
7549 struct mcx_queues *q = &sc->sc_queues[i];
7550 struct mcx_rx *rx = &q->q_rx;
7551 struct mcx_tx *tx = &q->q_tx;
7552 struct mcx_cq *cq = &q->q_cq;
7553
7554 if (rx->rx_rqn != 0)
7555 mcx_destroy_rq(sc, rx);
7556
7557 if (tx->tx_sqn != 0)
7558 mcx_destroy_sq(sc, tx);
7559
7560 if (tx->tx_slots != NULL) {
7561 mcx_free_slots(sc, tx->tx_slots,
7562 (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7563 tx->tx_slots = NULL;
7564 }
7565 if (rx->rx_slots != NULL) {
7566 mcx_free_slots(sc, rx->rx_slots,
7567 (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7568 rx->rx_slots = NULL;
7569 }
7570
7571 if (cq->cq_n != 0)
7572 mcx_destroy_cq(sc, cq);
7573 }
7574 if (sc->sc_tis != 0) {
7575 mcx_destroy_tis(sc, sc->sc_tis);
7576 sc->sc_tis = 0;
7577 }
7578 }
7579
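/*
 * Only the multicast ioctls need driver-specific handling: each
 * multicast address gets its own entry in the mac flow group, and
 * the interface falls back to IFF_ALLMULTI when the
 * MCX_NUM_MCAST_FLOWS entries run out or a multicast range is
 * requested.
 */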
7580 static int
7581 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
7582 {
7583 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7584 struct ifreq *ifr = (struct ifreq *)data;
7585 struct ethercom *ec = &sc->sc_ec;
7586 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7587 struct ether_multi *enm;
7588 struct ether_multistep step;
7589 int s, i, flags, error = 0;
7590 uint32_t dest;
7591
7592 s = splnet();
7593 switch (cmd) {
7594
7595 case SIOCADDMULTI:
7596 if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
7597 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7598 if (error != 0) {
7599 splx(s);
7600 return (error);
7601 }
7602
7603 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7604 sc->sc_rss_flow_table_id;
7605
7606 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7607 if (sc->sc_mcast_flows[i][0] == 0) {
7608 memcpy(sc->sc_mcast_flows[i], addrlo,
7609 ETHER_ADDR_LEN);
7610 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7611 mcx_set_flow_table_entry_mac(sc,
7612 MCX_FLOW_GROUP_MAC,
7613 sc->sc_mcast_flow_base + i,
7614 sc->sc_mcast_flows[i], dest);
7615 }
7616 break;
7617 }
7618 }
7619
7620 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7621 if (i == MCX_NUM_MCAST_FLOWS) {
7622 SET(ifp->if_flags, IFF_ALLMULTI);
7623 sc->sc_extra_mcast++;
7624 error = ENETRESET;
7625 }
7626
7627 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
7628 SET(ifp->if_flags, IFF_ALLMULTI);
7629 error = ENETRESET;
7630 }
7631 }
7632 }
7633 break;
7634
7635 case SIOCDELMULTI:
7636 if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
7637 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7638 if (error != 0) {
7639 splx(s);
7640 return (error);
7641 }
7642
7643 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7644 if (memcmp(sc->sc_mcast_flows[i], addrlo,
7645 ETHER_ADDR_LEN) == 0) {
7646 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7647 mcx_delete_flow_table_entry(sc,
7648 MCX_FLOW_GROUP_MAC,
7649 sc->sc_mcast_flow_base + i);
7650 }
7651 sc->sc_mcast_flows[i][0] = 0;
7652 break;
7653 }
7654 }
7655
7656 if (i == MCX_NUM_MCAST_FLOWS)
7657 sc->sc_extra_mcast--;
7658
7659 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7660 sc->sc_extra_mcast == 0) {
7661 flags = 0;
7662 ETHER_LOCK(ec);
7663 ETHER_FIRST_MULTI(step, ec, enm);
7664 while (enm != NULL) {
7665 if (memcmp(enm->enm_addrlo,
7666 enm->enm_addrhi, ETHER_ADDR_LEN)) {
7667 SET(flags, IFF_ALLMULTI);
7668 break;
7669 }
7670 ETHER_NEXT_MULTI(step, enm);
7671 }
7672 ETHER_UNLOCK(ec);
7673 if (!ISSET(flags, IFF_ALLMULTI)) {
7674 CLR(ifp->if_flags, IFF_ALLMULTI);
7675 error = ENETRESET;
7676 }
7677 }
7678 }
7679 break;
7680
7681 default:
7682 error = ether_ioctl(ifp, cmd, data);
7683 }
7684
7685 if (error == ENETRESET) {
7686 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7687 (IFF_UP | IFF_RUNNING))
7688 mcx_iff(sc);
7689 error = 0;
7690 }
7691 splx(s);
7692
7693 return (error);
7694 }
7695
7696 #if 0
7697 static int
7698 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7699 {
7700 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7701 struct mcx_reg_mcia mcia;
7702 struct mcx_reg_pmlp pmlp;
7703 int offset, error;
7704
7705 /* get module number */
7706 memset(&pmlp, 0, sizeof(pmlp));
7707 pmlp.rp_local_port = 1;
7708 error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7709 sizeof(pmlp));
7710 if (error != 0) {
7711 printf("%s: unable to get eeprom module number\n",
7712 DEVNAME(sc));
7713 return error;
7714 }
7715
7716 for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7717 memset(&mcia, 0, sizeof(mcia));
7718 mcia.rm_l = 0;
7719 mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
7720 MCX_PMLP_MODULE_NUM_MASK;
7721 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* 8-bit to 7-bit i2c address, apparently */
7722 mcia.rm_page_num = sff->sff_page;
7723 mcia.rm_dev_addr = htobe16(offset);
7724 mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7725
7726 error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7727 &mcia, sizeof(mcia));
7728 if (error != 0) {
7729 printf("%s: unable to read eeprom at %x\n",
7730 DEVNAME(sc), offset);
7731 return error;
7732 }
7733
7734 memcpy(sff->sff_data + offset, mcia.rm_data,
7735 MCX_MCIA_EEPROM_BYTES);
7736 }
7737
7738 return 0;
7739 }
7740 #endif
7741
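/*
 * Load an mbuf chain into the slot's DMA map.  If the chain has too
 * many segments (EFBIG), defragment it and retry the load once before
 * giving up.
 */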
7742 static int
7743 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7744 {
7745 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7746 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7747 case 0:
7748 break;
7749
7750 case EFBIG:
7751 if (m_defrag(m, M_DONTWAIT) != NULL &&
7752 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7753 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7754 break;
7755
7756 /* FALLTHROUGH */
7757 default:
7758 return (1);
7759 }
7760
7761 ms->ms_m = m;
7762 return (0);
7763 }
7764
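/*
 * Transmit a batch of packets on TX ring 0.  Each packet takes one or
 * more SQ slots: the first slot holds the ctrl and eth segments, with
 * MCX_SQ_INLINE_SIZE bytes of headers copied inline, and the packet's
 * DMA segments spill into data segments in the following slots as
 * needed.  After the doorbell record is updated, the first 64 bits of
 * the last WQE are also written to the UAR blue flame region to notify
 * the hardware.
 */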
7765 static void
7766 mcx_start(struct ifnet *ifp)
7767 {
7768 struct mcx_softc *sc = ifp->if_softc;
7769 /* mcx_start() always uses TX ring[0] */
7770 struct mcx_tx *tx = &sc->sc_queues[0].q_tx;
7771 struct mcx_sq_entry *sq, *sqe;
7772 struct mcx_sq_entry_seg *sqs;
7773 struct mcx_slot *ms;
7774 bus_dmamap_t map;
7775 struct mbuf *m;
7776 u_int idx, free, used;
7777 uint64_t *bf;
7778 uint32_t csum;
7779 size_t bf_base;
7780 int i, seg, nseg;
7781
7782 bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7783
7784 idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7785 free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7786
7787 used = 0;
7788 bf = NULL;
7789
7790 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7791 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7792
7793 sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7794
7795 for (;;) {
7796 if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7797 SET(ifp->if_flags, IFF_OACTIVE);
7798 break;
7799 }
7800
7801 IFQ_DEQUEUE(&ifp->if_snd, m);
7802 if (m == NULL) {
7803 break;
7804 }
7805
7806 sqe = sq + idx;
7807 ms = &tx->tx_slots[idx];
7808 memset(sqe, 0, sizeof(*sqe));
7809
7810 /* ctrl segment */
7811 sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7812 ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7813 /* always generate a completion event */
7814 sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7815
7816 /* eth segment */
7817 csum = 0;
7818 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
7819 csum |= MCX_SQE_L3_CSUM;
7820 if (m->m_pkthdr.csum_flags &
7821 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
7822 csum |= MCX_SQE_L4_CSUM;
7823 sqe->sqe_mss_csum = htobe32(csum);
7824 sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7825 if (vlan_has_tag(m)) {
7826 struct ether_vlan_header *evh;
7827 evh = (struct ether_vlan_header *)
7828 &sqe->sqe_inline_headers;
7829
7830 m_copydata(m, 0, ETHER_HDR_LEN, evh);
7831 evh->evl_proto = evh->evl_encap_proto;
7832 evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7833 evh->evl_tag = htons(vlan_get_tag(m));
7834 m_adj(m, ETHER_HDR_LEN);
7835 } else {
7836 m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7837 sqe->sqe_inline_headers);
7838 m_adj(m, MCX_SQ_INLINE_SIZE);
7839 }
7840
7841 if (mcx_load_mbuf(sc, ms, m) != 0) {
7842 m_freem(m);
7843 if_statinc(ifp, if_oerrors);
7844 continue;
7845 }
7846 bf = (uint64_t *)sqe;
7847
7848 if (ifp->if_bpf != NULL)
7849 bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
7850 MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
7851
7852 map = ms->ms_map;
7853 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7854 BUS_DMASYNC_PREWRITE);
7855
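		/*
		 * The low bits of ds_sq_num hold the WQE size in
		 * 16-byte units; per the PRM the ctrl and eth segments
		 * (with the inline headers) account for 3 units, plus
		 * one per data segment filled in below.
		 */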
7856 sqe->sqe_ds_sq_num =
7857 htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7858 (map->dm_nsegs + 3));
7859
7860 		/* data segments - the first slot only has room for one */
7861 sqs = sqe->sqe_segs;
7862 seg = 0;
7863 nseg = 1;
7864 for (i = 0; i < map->dm_nsegs; i++) {
7865 if (seg == nseg) {
7866 /* next slot */
7867 idx++;
7868 if (idx == (1 << MCX_LOG_SQ_SIZE))
7869 idx = 0;
7870 tx->tx_prod++;
7871 used++;
7872
7873 sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7874 seg = 0;
7875 nseg = MCX_SQ_SEGS_PER_SLOT;
7876 }
7877 sqs[seg].sqs_byte_count =
7878 htobe32(map->dm_segs[i].ds_len);
7879 sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7880 sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7881 seg++;
7882 }
7883
7884 idx++;
7885 if (idx == (1 << MCX_LOG_SQ_SIZE))
7886 idx = 0;
7887 tx->tx_prod++;
7888 used++;
7889 }
7890
7891 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7892 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7893
7894 if (used) {
7895 bus_size_t blueflame;
7896
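		/*
		 * Publish the new producer counter in the doorbell
		 * record before ringing the blue flame doorbell below.
		 */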
7897 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7898 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7899 be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7900 tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7901 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7902 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7903
7904 /*
7905 * write the first 64 bits of the last sqe we produced
7906 * to the blue flame buffer
7907 */
7908
7909 blueflame = bf_base + tx->tx_bf_offset;
7910 bus_space_write_8(sc->sc_memt, sc->sc_memh,
7911 blueflame, *bf);
7912 mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7913
7914 /* next write goes to the other buffer */
7915 tx->tx_bf_offset ^= sc->sc_bf_size;
7916 }
7917 }
7918
7919 static void
7920 mcx_watchdog(struct ifnet *ifp)
7921 {
7922 }
7923
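/*
 * Populate the supported media list: read the PTYS ethernet protocol
 * capability mask and translate each set bit into an ifmedia type via
 * mcx_eth_cap_map.
 */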
7924 static void
7925 mcx_media_add_types(struct mcx_softc *sc)
7926 {
7927 struct mcx_reg_ptys ptys;
7928 int i;
7929 uint32_t proto_cap;
7930
7931 memset(&ptys, 0, sizeof(ptys));
7932 ptys.rp_local_port = 1;
7933 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7934 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7935 sizeof(ptys)) != 0) {
7936 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7937 return;
7938 }
7939
7940 proto_cap = be32toh(ptys.rp_eth_proto_cap);
7941 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
7942 const struct mcx_eth_proto_capability *cap;
7943 if (!ISSET(proto_cap, 1U << i))
7944 continue;
7945
7946 cap = &mcx_eth_cap_map[i];
7947 if (cap->cap_media == 0)
7948 continue;
7949
7950 ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
7951 }
7952 }
7953
7954 static void
7955 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
7956 {
7957 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7958 struct mcx_reg_ptys ptys;
7959 int i;
7960 uint32_t proto_oper;
7961 uint64_t media_oper;
7962
7963 memset(&ptys, 0, sizeof(ptys));
7964 ptys.rp_local_port = 1;
7965 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7966
7967 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7968 sizeof(ptys)) != 0) {
7969 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7970 return;
7971 }
7972
7973 proto_oper = be32toh(ptys.rp_eth_proto_oper);
7974
7975 media_oper = 0;
7976
7977 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
7978 const struct mcx_eth_proto_capability *cap;
7979 if (!ISSET(proto_oper, 1U << i))
7980 continue;
7981
7982 cap = &mcx_eth_cap_map[i];
7983
7984 if (cap->cap_media != 0)
7985 media_oper = cap->cap_media;
7986 }
7987
7988 ifmr->ifm_status = IFM_AVALID;
7989 if (proto_oper != 0) {
7990 ifmr->ifm_status |= IFM_ACTIVE;
7991 ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
7992 /* txpause, rxpause, duplex? */
7993 }
7994 }
7995
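/*
 * Media change: map the requested media to a PTYS protocol bitmask
 * (autoselect uses the whole capability mask), then take the port down
 * via PAOS, write the new admin protocol set, and bring the port back
 * up to renegotiate.
 */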
7996 static int
7997 mcx_media_change(struct ifnet *ifp)
7998 {
7999 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8000 struct mcx_reg_ptys ptys;
8001 struct mcx_reg_paos paos;
8002 uint32_t media;
8003 int i, error;
8004
8005 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
8006 return EINVAL;
8007
8008 error = 0;
8009
8010 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
8011 /* read ptys to get supported media */
8012 memset(&ptys, 0, sizeof(ptys));
8013 ptys.rp_local_port = 1;
8014 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8015 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
8016 &ptys, sizeof(ptys)) != 0) {
8017 printf("%s: unable to read port type/speed\n",
8018 DEVNAME(sc));
8019 return EIO;
8020 }
8021
8022 media = be32toh(ptys.rp_eth_proto_cap);
8023 } else {
8024 /* map media type */
8025 media = 0;
8026 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8027 const struct mcx_eth_proto_capability *cap;
8028
8029 cap = &mcx_eth_cap_map[i];
8030 if (cap->cap_media ==
8031 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8032 media = (1 << i);
8033 break;
8034 }
8035 }
8036 }
8037
8038 /* disable the port */
8039 memset(&paos, 0, sizeof(paos));
8040 paos.rp_local_port = 1;
8041 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
8042 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8043 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8044 sizeof(paos)) != 0) {
8045 printf("%s: unable to set port state to down\n", DEVNAME(sc));
8046 return EIO;
8047 }
8048
8049 memset(&ptys, 0, sizeof(ptys));
8050 ptys.rp_local_port = 1;
8051 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8052 ptys.rp_eth_proto_admin = htobe32(media);
8053 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
8054 sizeof(ptys)) != 0) {
8055 printf("%s: unable to set port media type/speed\n",
8056 DEVNAME(sc));
8057 error = EIO;
8058 }
8059
8060 /* re-enable the port to start negotiation */
8061 memset(&paos, 0, sizeof(paos));
8062 paos.rp_local_port = 1;
8063 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
8064 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8065 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8066 sizeof(paos)) != 0) {
8067 printf("%s: unable to set port state to up\n", DEVNAME(sc));
8068 error = EIO;
8069 }
8070
8071 return error;
8072 }
8073
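/*
 * Link state work handler: read the operational protocol set from
 * PTYS, derive the baudrate from the first matching entry in
 * mcx_eth_cap_map, and report link up/down transitions to the stack.
 */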
8074 static void
8075 mcx_port_change(struct work *wk, void *xsc)
8076 {
8077 struct mcx_softc *sc = xsc;
8078 struct ifnet *ifp = &sc->sc_ec.ec_if;
8079 struct mcx_reg_ptys ptys = {
8080 .rp_local_port = 1,
8081 .rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8082 };
8083 int link_state = LINK_STATE_DOWN;
8084
8085 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8086 sizeof(ptys)) == 0) {
8087 uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
8088 uint64_t baudrate = 0;
8089 unsigned int i;
8090
8091 if (proto_oper != 0)
8092 link_state = LINK_STATE_UP;
8093
8094 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8095 const struct mcx_eth_proto_capability *cap;
8096 if (!ISSET(proto_oper, 1U << i))
8097 continue;
8098
8099 cap = &mcx_eth_cap_map[i];
8100 if (cap->cap_baudrate == 0)
8101 continue;
8102
8103 baudrate = cap->cap_baudrate;
8104 break;
8105 }
8106
8107 ifp->if_baudrate = baudrate;
8108 }
8109
8110 if (link_state != ifp->if_link_state) {
8111 if_link_state_change(ifp, link_state);
8112 }
8113 }
8114
8116 static inline uint32_t
8117 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8118 {
8119 uint32_t word;
8120
8121 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
8122
8123 return (be32toh(word));
8124 }
8125
8126 static inline void
8127 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8128 {
8129 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8130 }
8131
8132 static inline void
8133 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8134 {
8135 bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8136 }
8137
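/*
 * Read the 64-bit internal timer.  The high and low words live in
 * separate registers, so re-read the high word until it is stable to
 * avoid tearing when the low word wraps between the two reads.
 */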
8138 static uint64_t
8139 mcx_timer(struct mcx_softc *sc)
8140 {
8141 uint32_t hi, lo, ni;
8142
8143 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8144 for (;;) {
8145 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8146 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8147 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8148
8149 if (ni == hi)
8150 break;
8151
8152 hi = ni;
8153 }
8154
8155 return (((uint64_t)hi << 32) | (uint64_t)lo);
8156 }
8157
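/*
 * Allocate a single physically contiguous DMA segment, map it into
 * kernel virtual address space and zero it, unwinding each step on
 * failure.
 */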
8158 static int
8159 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8160 bus_size_t size, u_int align)
8161 {
8162 mxm->mxm_size = size;
8163
8164 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8165 mxm->mxm_size, 0,
8166 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
8167 &mxm->mxm_map) != 0)
8168 return (1);
8169 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8170 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8171 BUS_DMA_WAITOK) != 0)
8172 goto destroy;
8173 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8174 mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
8175 goto free;
8176 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8177 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8178 goto unmap;
8179
8180 mcx_dmamem_zero(mxm);
8181
8182 return (0);
8183 unmap:
8184 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8185 free:
8186 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8187 destroy:
8188 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8189 return (1);
8190 }
8191
8192 static void
8193 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8194 {
8195 memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8196 }
8197
8198 static void
8199 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8200 {
8201 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8202 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8203 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8204 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8205 }
8206
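/*
 * Allocate pages to hand to the firmware.  The pages need not be
 * contiguous, so ask for up to one segment per page and trim the
 * segment array if the allocator returned fewer segments.
 */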
8207 static int
8208 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8209 {
8210 bus_dma_segment_t *segs;
8211 bus_size_t len = pages * MCX_PAGE_SIZE;
8212 size_t seglen;
8213
8214 segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
8215 seglen = sizeof(*segs) * pages;
8216
8217 if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8218 segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
8219 goto free_segs;
8220
8221 if (mhm->mhm_seg_count < pages) {
8222 size_t nseglen;
8223
8224 mhm->mhm_segs = kmem_alloc(
8225 sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
8226
8227 nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8228
8229 memcpy(mhm->mhm_segs, segs, nseglen);
8230
8231 kmem_free(segs, seglen);
8232
8233 segs = mhm->mhm_segs;
8234 seglen = nseglen;
8235 } else
8236 mhm->mhm_segs = segs;
8237
8238 if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8239 MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
8240 &mhm->mhm_map) != 0)
8241 goto free_dmamem;
8242
8243 if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8244 mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8245 goto destroy;
8246
8247 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8248 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8249
8250 mhm->mhm_npages = pages;
8251
8252 return (0);
8253
8254 destroy:
8255 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8256 free_dmamem:
8257 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8258 free_segs:
8259 kmem_free(segs, seglen);
8260 mhm->mhm_segs = NULL;
8261
8262 return (-1);
8263 }
8264
8265 static void
8266 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8267 {
8268 if (mhm->mhm_npages == 0)
8269 return;
8270
8271 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8272 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8273
8274 bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8275 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8276 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8277 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8278
8279 mhm->mhm_npages = 0;
8280 }
8281
8282 #if NKSTAT > 0
8283 struct mcx_ppcnt {
8284 char name[KSTAT_KV_NAMELEN];
8285 enum kstat_kv_unit unit;
8286 };
8287
8288 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8289 { "Good Tx", KSTAT_KV_U_PACKETS, },
8290 { "Good Rx", KSTAT_KV_U_PACKETS, },
8291 { "FCS errs", KSTAT_KV_U_PACKETS, },
8292 { "Alignment Errs", KSTAT_KV_U_PACKETS, },
8293 { "Good Tx", KSTAT_KV_U_BYTES, },
8294 { "Good Rx", KSTAT_KV_U_BYTES, },
8295 { "Multicast Tx", KSTAT_KV_U_PACKETS, },
8296 { "Broadcast Tx", KSTAT_KV_U_PACKETS, },
8297 { "Multicast Rx", KSTAT_KV_U_PACKETS, },
8298 { "Broadcast Rx", KSTAT_KV_U_PACKETS, },
8299 { "In Range Len", KSTAT_KV_U_PACKETS, },
8300 { "Out Of Range Len", KSTAT_KV_U_PACKETS, },
8301 { "Frame Too Long", KSTAT_KV_U_PACKETS, },
8302 { "Symbol Errs", KSTAT_KV_U_PACKETS, },
8303 { "MAC Ctrl Tx", KSTAT_KV_U_PACKETS, },
8304 { "MAC Ctrl Rx", KSTAT_KV_U_PACKETS, },
8305 { "MAC Ctrl Unsup", KSTAT_KV_U_PACKETS, },
8306 { "Pause Rx", KSTAT_KV_U_PACKETS, },
8307 { "Pause Tx", KSTAT_KV_U_PACKETS, },
8308 };
8309 CTASSERT(__arraycount(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8310
8311 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8312 { "Rx Bytes", KSTAT_KV_U_BYTES, },
8313 { "Rx Unicast", KSTAT_KV_U_PACKETS, },
8314 { "Rx Discards", KSTAT_KV_U_PACKETS, },
8315 { "Rx Errors", KSTAT_KV_U_PACKETS, },
8316 { "Rx Unknown Proto", KSTAT_KV_U_PACKETS, },
8317 { "Tx Bytes", KSTAT_KV_U_BYTES, },
8318 { "Tx Unicast", KSTAT_KV_U_PACKETS, },
8319 { "Tx Discards", KSTAT_KV_U_PACKETS, },
8320 { "Tx Errors", KSTAT_KV_U_PACKETS, },
8321 { "Rx Multicast", KSTAT_KV_U_PACKETS, },
8322 { "Rx Broadcast", KSTAT_KV_U_PACKETS, },
8323 { "Tx Multicast", KSTAT_KV_U_PACKETS, },
8324 { "Tx Broadcast", KSTAT_KV_U_PACKETS, },
8325 };
8326 CTASSERT(__arraycount(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8327
8328 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8329 { "Drop Events", KSTAT_KV_U_PACKETS, },
8330 { "Octets", KSTAT_KV_U_BYTES, },
8331 { "Packets", KSTAT_KV_U_PACKETS, },
8332 { "Broadcasts", KSTAT_KV_U_PACKETS, },
8333 { "Multicasts", KSTAT_KV_U_PACKETS, },
8334 { "CRC Align Errs", KSTAT_KV_U_PACKETS, },
8335 { "Undersize", KSTAT_KV_U_PACKETS, },
8336 { "Oversize", KSTAT_KV_U_PACKETS, },
8337 { "Fragments", KSTAT_KV_U_PACKETS, },
8338 { "Jabbers", KSTAT_KV_U_PACKETS, },
8339 { "Collisions", KSTAT_KV_U_NONE, },
8340 { "64B", KSTAT_KV_U_PACKETS, },
8341 { "65-127B", KSTAT_KV_U_PACKETS, },
8342 { "128-255B", KSTAT_KV_U_PACKETS, },
8343 { "256-511B", KSTAT_KV_U_PACKETS, },
8344 { "512-1023B", KSTAT_KV_U_PACKETS, },
8345 { "1024-1518B", KSTAT_KV_U_PACKETS, },
8346 { "1519-2047B", KSTAT_KV_U_PACKETS, },
8347 { "2048-4095B", KSTAT_KV_U_PACKETS, },
8348 { "4096-8191B", KSTAT_KV_U_PACKETS, },
8349 { "8192-10239B", KSTAT_KV_U_PACKETS, },
8350 };
8351 CTASSERT(__arraycount(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8352
8353 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8354 { "Alignment Errs", KSTAT_KV_U_PACKETS, },
8355 { "FCS Errs", KSTAT_KV_U_PACKETS, },
8356 { "Single Colls", KSTAT_KV_U_PACKETS, },
8357 { "Multiple Colls", KSTAT_KV_U_PACKETS, },
8358 { "SQE Test Errs", KSTAT_KV_U_NONE, },
8359 { "Deferred Tx", KSTAT_KV_U_PACKETS, },
8360 { "Late Colls", KSTAT_KV_U_NONE, },
8361 	{ "Excess Colls",	KSTAT_KV_U_NONE, },
8362 { "Int MAC Tx Errs", KSTAT_KV_U_PACKETS, },
8363 	{ "Carrier Sense Errs",	KSTAT_KV_U_NONE, },
8364 { "Too Long", KSTAT_KV_U_PACKETS, },
8365 { "Int MAC Rx Errs", KSTAT_KV_U_PACKETS, },
8366 { "Symbol Errs", KSTAT_KV_U_NONE, },
8367 { "Unknown Control", KSTAT_KV_U_PACKETS, },
8368 { "Pause Rx", KSTAT_KV_U_PACKETS, },
8369 { "Pause Tx", KSTAT_KV_U_PACKETS, },
8370 };
8371 CTASSERT(__arraycount(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8372
8373 struct mcx_kstat_ppcnt {
8374 const char *ksp_name;
8375 const struct mcx_ppcnt *ksp_tpl;
8376 unsigned int ksp_n;
8377 uint8_t ksp_grp;
8378 };
8379
8380 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8381 .ksp_name = "ieee802.3",
8382 .ksp_tpl = mcx_ppcnt_ieee8023_tpl,
8383 .ksp_n = __arraycount(mcx_ppcnt_ieee8023_tpl),
8384 .ksp_grp = MCX_REG_PPCNT_GRP_IEEE8023,
8385 };
8386
8387 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8388 .ksp_name = "rfc2863",
8389 .ksp_tpl = mcx_ppcnt_rfc2863_tpl,
8390 .ksp_n = __arraycount(mcx_ppcnt_rfc2863_tpl),
8391 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2863,
8392 };
8393
8394 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8395 .ksp_name = "rfc2819",
8396 .ksp_tpl = mcx_ppcnt_rfc2819_tpl,
8397 .ksp_n = __arraycount(mcx_ppcnt_rfc2819_tpl),
8398 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2819,
8399 };
8400
8401 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8402 .ksp_name = "rfc3635",
8403 .ksp_tpl = mcx_ppcnt_rfc3635_tpl,
8404 .ksp_n = __arraycount(mcx_ppcnt_rfc3635_tpl),
8405 .ksp_grp = MCX_REG_PPCNT_GRP_RFC3635,
8406 };
8407
8408 static int mcx_kstat_ppcnt_read(struct kstat *);
8409
8410 static void mcx_kstat_attach_tmps(struct mcx_softc *sc);
8411 static void mcx_kstat_attach_queues(struct mcx_softc *sc);
8412
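/*
 * Attach a kstat exporting one PPCNT counter group.  Each template
 * entry names one 64-bit counter, in the order the firmware returns
 * them in the PPCNT counter set.
 */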
8413 static struct kstat *
8414 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8415 const struct mcx_kstat_ppcnt *ksp)
8416 {
8417 struct kstat *ks;
8418 struct kstat_kv *kvs;
8419 unsigned int i;
8420
8421 ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8422 if (ks == NULL)
8423 return (NULL);
8424
8425 kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8426 M_DEVBUF, M_WAITOK);
8427
8428 for (i = 0; i < ksp->ksp_n; i++) {
8429 const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8430
8431 kstat_kv_unit_init(&kvs[i], tpl->name,
8432 KSTAT_KV_T_COUNTER64, tpl->unit);
8433 }
8434
8435 ks->ks_softc = sc;
8436 ks->ks_ptr = (void *)ksp;
8437 ks->ks_data = kvs;
8438 ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8439 ks->ks_read = mcx_kstat_ppcnt_read;
8440
8441 kstat_install(ks);
8442
8443 return (ks);
8444 }
8445
8446 static void
8447 mcx_kstat_attach(struct mcx_softc *sc)
8448 {
8449 sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8450 &mcx_kstat_ppcnt_ieee8023);
8451 sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8452 &mcx_kstat_ppcnt_rfc2863);
8453 sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8454 &mcx_kstat_ppcnt_rfc2819);
8455 sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8456 &mcx_kstat_ppcnt_rfc3635);
8457
8458 mcx_kstat_attach_tmps(sc);
8459 mcx_kstat_attach_queues(sc);
8460 }
8461
8462 static int
8463 mcx_kstat_ppcnt_read(struct kstat *ks)
8464 {
8465 struct mcx_softc *sc = ks->ks_softc;
8466 struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8467 struct mcx_reg_ppcnt ppcnt = {
8468 .ppcnt_grp = ksp->ksp_grp,
8469 .ppcnt_local_port = 1,
8470 };
8471 struct kstat_kv *kvs = ks->ks_data;
8472 uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8473 unsigned int i;
8474 int rv;
8475
8476 KERNEL_LOCK(); /* XXX */
8477 rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8478 &ppcnt, sizeof(ppcnt));
8479 KERNEL_UNLOCK();
8480 if (rv != 0)
8481 return (EIO);
8482
8483 nanouptime(&ks->ks_updated);
8484
8485 for (i = 0; i < ksp->ksp_n; i++)
8486 kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8487
8488 return (0);
8489 }
8490
8491 struct mcx_kstat_mtmp {
8492 struct kstat_kv ktmp_name;
8493 struct kstat_kv ktmp_temperature;
8494 struct kstat_kv ktmp_threshold_lo;
8495 struct kstat_kv ktmp_threshold_hi;
8496 };
8497
8498 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8499 KSTAT_KV_INITIALIZER("name", KSTAT_KV_T_ISTR),
8500 KSTAT_KV_INITIALIZER("temperature", KSTAT_KV_T_TEMP),
8501 KSTAT_KV_INITIALIZER("lo threshold", KSTAT_KV_T_TEMP),
8502 KSTAT_KV_INITIALIZER("hi threshold", KSTAT_KV_T_TEMP),
8503 };
8504
8505 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8506
8507 static int mcx_kstat_mtmp_read(struct kstat *);
8508
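/*
 * Attach one temperature kstat per sensor advertised in the MTCAP
 * sensor map, but only if MCAM says the sensor map is implemented.
 * Reads are rate-limited to once per second via mcx_kstat_mtmp_rate.
 */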
8509 static void
8510 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8511 {
8512 struct kstat *ks;
8513 struct mcx_reg_mcam mcam;
8514 struct mcx_reg_mtcap mtcap;
8515 struct mcx_kstat_mtmp *ktmp;
8516 uint64_t map;
8517 unsigned int i, n;
8518
8519 memset(&mtcap, 0, sizeof(mtcap));
8520 memset(&mcam, 0, sizeof(mcam));
8521
8522 if (sc->sc_mcam_reg == 0) {
8523 /* no management capabilities */
8524 return;
8525 }
8526
8527 if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8528 &mcam, sizeof(mcam)) != 0) {
8529 /* unable to check management capabilities? */
8530 return;
8531 }
8532
8533 if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8534 MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8535 /* no sensor map */
8536 return;
8537 }
8538
8539 if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8540 &mtcap, sizeof(mtcap)) != 0) {
8541 /* unable to find temperature sensors */
8542 return;
8543 }
8544
8545 sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8546 sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8547 sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8548
8549 n = 0;
8550 map = bemtoh64(&mtcap.mtcap_sensor_map);
8551 for (i = 0; i < sizeof(map) * NBBY; i++) {
8552 if (!ISSET(map, (1ULL << i)))
8553 continue;
8554
8555 ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8556 KSTAT_T_KV, 0);
8557 if (ks == NULL) {
8558 /* unable to attach temperature sensor %u, i */
8559 continue;
8560 }
8561
8562 ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8563 *ktmp = mcx_kstat_mtmp_tpl;
8564
8565 ks->ks_data = ktmp;
8566 ks->ks_datalen = sizeof(*ktmp);
8567 TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8568 ks->ks_read = mcx_kstat_mtmp_read;
8569
8570 ks->ks_softc = sc;
8571 kstat_install(ks);
8572
8573 sc->sc_kstat_mtmp[n++] = ks;
8574 if (n >= sc->sc_kstat_mtmp_count)
8575 break;
8576 }
8577 }
8578
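/*
 * MTMP reports temperatures as signed 16-bit values in units of
 * 0.125 degrees Celsius; convert to the microkelvin representation
 * kstat uses.
 */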
8579 static uint64_t
8580 mcx_tmp_to_uK(uint16_t *t)
8581 {
8582 int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8583 mt *= 1000000 / 8; /* convert to uC */
8584 mt += 273150000; /* convert to uK */
8585
8586 return (mt);
8587 }
8588
8589 static int
8590 mcx_kstat_mtmp_read(struct kstat *ks)
8591 {
8592 struct mcx_softc *sc = ks->ks_softc;
8593 struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8594 struct mcx_reg_mtmp mtmp;
8595 int rv;
8596 struct timeval updated;
8597
8598 TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8599
8600 if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8601 return (0);
8602
8603 memset(&mtmp, 0, sizeof(mtmp));
8604 htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8605
8606 KERNEL_LOCK(); /* XXX */
8607 rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8608 &mtmp, sizeof(mtmp));
8609 KERNEL_UNLOCK();
8610 if (rv != 0)
8611 return (EIO);
8612
8613 memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8614 sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8615 memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8616 mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8617 kstat_kv_temp(&ktmp->ktmp_temperature) =
8618 mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8619 kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8620 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8621 kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8622 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8623
8624 TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8625
8626 return (0);
8627 }
8628
8629 struct mcx_queuestat {
8630 char name[KSTAT_KV_NAMELEN];
8631 enum kstat_kv_type type;
8632 };
8633
8634 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8635 { "RQ SW prod", KSTAT_KV_T_COUNTER64 },
8636 { "RQ HW prod", KSTAT_KV_T_COUNTER64 },
8637 { "RQ HW cons", KSTAT_KV_T_COUNTER64 },
8638 { "RQ HW state", KSTAT_KV_T_ISTR },
8639
8640 { "SQ SW prod", KSTAT_KV_T_COUNTER64 },
8641 { "SQ SW cons", KSTAT_KV_T_COUNTER64 },
8642 { "SQ HW prod", KSTAT_KV_T_COUNTER64 },
8643 { "SQ HW cons", KSTAT_KV_T_COUNTER64 },
8644 { "SQ HW state", KSTAT_KV_T_ISTR },
8645
8646 { "CQ SW cons", KSTAT_KV_T_COUNTER64 },
8647 { "CQ HW prod", KSTAT_KV_T_COUNTER64 },
8648 { "CQ HW cons", KSTAT_KV_T_COUNTER64 },
8649 { "CQ HW notify", KSTAT_KV_T_COUNTER64 },
8650 { "CQ HW solicit", KSTAT_KV_T_COUNTER64 },
8651 { "CQ HW status", KSTAT_KV_T_ISTR },
8652 { "CQ HW state", KSTAT_KV_T_ISTR },
8653
8654 { "EQ SW cons", KSTAT_KV_T_COUNTER64 },
8655 { "EQ HW prod", KSTAT_KV_T_COUNTER64 },
8656 { "EQ HW cons", KSTAT_KV_T_COUNTER64 },
8657 { "EQ HW status", KSTAT_KV_T_ISTR },
8658 { "EQ HW state", KSTAT_KV_T_ISTR },
8659 };
8660
8661 static int mcx_kstat_queue_read(struct kstat *);
8662
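/*
 * Attach a per-queue kstat that pairs the driver's software
 * producer/consumer counters with the hardware's view of the RQ, SQ,
 * CQ and EQ contexts, which is handy for spotting stalled queues.
 */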
8663 static void
8664 mcx_kstat_attach_queues(struct mcx_softc *sc)
8665 {
8666 struct kstat *ks;
8667 struct kstat_kv *kvs;
8668 int q, i;
8669
8670 for (q = 0; q < sc->sc_nqueues; q++) {
8671 ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8672 KSTAT_T_KV, 0);
8673 if (ks == NULL) {
8674 /* unable to attach queue stats %u, q */
8675 continue;
8676 }
8677
8678 		kvs = mallocarray(__arraycount(mcx_queue_kstat_tpl),
8679 sizeof(*kvs), M_DEVBUF, M_WAITOK);
8680
8681 		for (i = 0; i < __arraycount(mcx_queue_kstat_tpl); i++) {
8682 const struct mcx_queuestat *tpl =
8683 &mcx_queue_kstat_tpl[i];
8684
8685 kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8686 }
8687
8688 ks->ks_softc = &sc->sc_queues[q];
8689 ks->ks_data = kvs;
8690 		ks->ks_datalen = __arraycount(mcx_queue_kstat_tpl) * sizeof(*kvs);
8691 ks->ks_read = mcx_kstat_queue_read;
8692
8693 sc->sc_queues[q].q_kstat = ks;
8694 kstat_install(ks);
8695 }
8696 }
8697
8698 static int
8699 mcx_kstat_queue_read(struct kstat *ks)
8700 {
8701 struct mcx_queues *q = ks->ks_softc;
8702 struct mcx_softc *sc = q->q_sc;
8703 struct kstat_kv *kvs = ks->ks_data;
8704 union {
8705 struct mcx_rq_ctx rq;
8706 struct mcx_sq_ctx sq;
8707 struct mcx_cq_ctx cq;
8708 struct mcx_eq_ctx eq;
8709 } u;
8710 const char *text;
8711 int error = 0;
8712
8713 KERNEL_LOCK();
8714
8715 if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
8716 error = EIO;
8717 goto out;
8718 }
8719
8720 kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
8721 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
8722 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
8723 switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
8724 MCX_RQ_CTX_STATE_SHIFT) {
8725 case MCX_RQ_CTX_STATE_RST:
8726 text = "RST";
8727 break;
8728 case MCX_RQ_CTX_STATE_RDY:
8729 text = "RDY";
8730 break;
8731 case MCX_RQ_CTX_STATE_ERR:
8732 text = "ERR";
8733 break;
8734 default:
8735 text = "unknown";
8736 break;
8737 }
8738 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8739 kvs++;
8740
8741 if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
8742 error = EIO;
8743 goto out;
8744 }
8745
8746 kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
8747 kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
8748 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
8749 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
8750 switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
8751 MCX_SQ_CTX_STATE_SHIFT) {
8752 case MCX_SQ_CTX_STATE_RST:
8753 text = "RST";
8754 break;
8755 case MCX_SQ_CTX_STATE_RDY:
8756 text = "RDY";
8757 break;
8758 case MCX_SQ_CTX_STATE_ERR:
8759 text = "ERR";
8760 break;
8761 default:
8762 text = "unknown";
8763 break;
8764 }
8765 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8766 kvs++;
8767
8768 if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
8769 error = EIO;
8770 goto out;
8771 }
8772
8773 kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
8774 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
8775 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
8776 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
8777 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);
8778
8779 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
8780 MCX_CQ_CTX_STATUS_SHIFT) {
8781 case MCX_CQ_CTX_STATUS_OK:
8782 text = "OK";
8783 break;
8784 case MCX_CQ_CTX_STATUS_OVERFLOW:
8785 text = "overflow";
8786 break;
8787 case MCX_CQ_CTX_STATUS_WRITE_FAIL:
8788 text = "write fail";
8789 break;
8790 default:
8791 text = "unknown";
8792 break;
8793 }
8794 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8795 kvs++;
8796
8797 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
8798 MCX_CQ_CTX_STATE_SHIFT) {
8799 case MCX_CQ_CTX_STATE_SOLICITED:
8800 text = "solicited";
8801 break;
8802 case MCX_CQ_CTX_STATE_ARMED:
8803 text = "armed";
8804 break;
8805 case MCX_CQ_CTX_STATE_FIRED:
8806 text = "fired";
8807 break;
8808 default:
8809 text = "unknown";
8810 break;
8811 }
8812 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8813 kvs++;
8814
8815 if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
8816 error = EIO;
8817 goto out;
8818 }
8819
8820 kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
8821 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
8822 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);
8823
8824 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
8825 MCX_EQ_CTX_STATUS_SHIFT) {
8826 case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
8827 text = "write fail";
8828 break;
8829 case MCX_EQ_CTX_STATUS_OK:
8830 text = "OK";
8831 break;
8832 default:
8833 text = "unknown";
8834 break;
8835 }
8836 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8837 kvs++;
8838
8839 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
8840 MCX_EQ_CTX_STATE_SHIFT) {
8841 case MCX_EQ_CTX_STATE_ARMED:
8842 text = "armed";
8843 break;
8844 case MCX_EQ_CTX_STATE_FIRED:
8845 text = "fired";
8846 break;
8847 default:
8848 text = "unknown";
8849 break;
8850 }
8851 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8852 kvs++;
8853
8854 nanouptime(&ks->ks_updated);
8855 out:
8856 KERNEL_UNLOCK();
8857 return (error);
8858 }
8859
8860 #endif /* NKSTAT > 0 */
8861
8862 static unsigned int
8863 mcx_timecounter_read(struct timecounter *tc)
8864 {
8865 struct mcx_softc *sc = tc->tc_priv;
8866
8867 return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
8868 }
8869
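/*
 * Expose the low 32 bits of the internal timer as a timecounter.  The
 * negative quality keeps it from being selected automatically; the
 * frequency derives from the device clock rate in kHz (sc->sc_khz).
 */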
8870 static void
8871 mcx_timecounter_attach(struct mcx_softc *sc)
8872 {
8873 struct timecounter *tc = &sc->sc_timecounter;
8874
8875 tc->tc_get_timecount = mcx_timecounter_read;
8876 tc->tc_counter_mask = ~0U;
8877 tc->tc_frequency = sc->sc_khz * 1000;
8878 tc->tc_name = device_xname(sc->sc_dev);
8879 tc->tc_quality = -100;
8880 tc->tc_priv = sc;
8881
8882 tc_init(tc);
8883 }
8884