/*	$NetBSD: if_mcx.c,v 1.28 2024/07/05 04:31:51 rin Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.101 2021/06/02 19:16:11 patrick Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg (at) openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.28 2024/07/05 04:31:51 rin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/pcq.h>
#include <sys/cpu.h>
#include <sys/bitops.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_vlanvar.h>
#include <net/toeplitz.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* TODO: Port kstat key/value stuff to evcnt/sysmon */
#define NKSTAT 0

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE 1 */
/* #define CALLOUT_FLAGS CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS 0
/* #endif */

#define MCX_TXQ_NUM 2048

#define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER 0x0000
#define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v) ((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER 0x0004
#define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff)
#define MCX_CMDIF(_v) ((_v) >> 16)
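/*
 * Illustrative sketch only (mcx_read() is a hypothetical 32-bit
 * register accessor, not a name from this driver): the firmware
 * version exposed through BAR 0 would be assembled as
 *
 *	uint32_t fw = mcx_read(sc, MCX_FW_VER);
 *	uint32_t sub = mcx_read(sc, MCX_CMDIF_FW_SUBVER);
 *	printf("fw %u.%u.%u cmdif %u\n",
 *	    MCX_FW_VER_MAJOR(fw), MCX_FW_VER_MINOR(fw),
 *	    MCX_FW_VER_SUBMINOR(sub), MCX_CMDIF(sub));
 */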

#define MCX_ISSI 1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED 5

#define MCX_HARDMTU 9500

#define MCX_PAGE_SHIFT 12
#define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE 7
#define MCX_LOG_CQ_SIZE 12
#define MCX_LOG_RQ_SIZE 10
#define MCX_LOG_SQ_SIZE 11

#define MCX_MAX_QUEUES 16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD 50
#define MCX_CQ_MOD_COUNTER \
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
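/*
 * For reference: with MCX_LOG_CQ_SIZE 12, MCX_CQ_MOD_COUNTER evaluates
 * to ((1 << 11) * 9) / 10 = 1843, i.e. the device may coalesce at most
 * that many completions into a single event, with MCX_CQ_MOD_PERIOD
 * bounding how long it may hold an event back.
 */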

#define MCX_LOG_SQ_ENTRY_SIZE 6
#define MCX_SQ_ENTRY_MAX_SLOTS 4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS \
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
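/*
 * For reference: a 64-byte SQ entry holds four 16-byte segments, so
 * MCX_SQ_SEGS_PER_SLOT is 4 and MCX_SQ_MAX_SEGMENTS evaluates to
 * 1 + (3 * 4) = 13; the first slot contributes only one data segment
 * because the control and ethernet segments occupy the other three.
 */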

#define MCX_LOG_FLOW_TABLE_SIZE 5
#define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE 18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE 0
#define MCX_CQ_DOORBELL_STRIDE 64

#define MCX_WQ_DOORBELL_BASE (MCX_PAGE_SIZE / 2)
#define MCX_WQ_DOORBELL_STRIDE 64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);
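/*
 * The doorbell page is therefore split in half: CQ doorbells take
 * 64-byte slots from the start of the page and WQ doorbells take
 * 64-byte slots from MCX_WQ_DOORBELL_BASE, with the assertions above
 * guaranteeing that MCX_MAX_QUEUES of each fit without overlapping.
 */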

#define MCX_WQ_DOORBELL_MASK 0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL 0x20
#define MCX_UAR_EQ_DOORBELL_ARM 0x40
#define MCX_UAR_EQ_DOORBELL 0x48
#define MCX_UAR_BF 0x800

#define MCX_CMDQ_ADDR_HI 0x0010
#define MCX_CMDQ_ADDR_LO 0x0014
#define MCX_CMDQ_ADDR_NMASK 0xfff
#define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK (0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8)
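/*
 * Illustrative sketch only (mcx_read() stands in for a 32-bit register
 * read): the command queue geometry comes out of the low address dword
 * before the queue memory is handed to the device:
 *
 *	uint32_t lo = mcx_read(sc, MCX_CMDQ_ADDR_LO);
 *	int nentries = 1 << MCX_CMDQ_LOG_SIZE(lo);
 *	size_t stride = 1 << MCX_CMDQ_LOG_STRIDE(lo);
 *	if ((lo & MCX_CMDQ_INTERFACE_MASK) != MCX_CMDQ_INTERFACE_FULL_DRIVER)
 *		return ENXIO;
 *
 * since only the full driver interface is usable here.
 */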

#define MCX_CMDQ_DOORBELL 0x0018

#define MCX_STATE 0x01fc
#define MCX_STATE_MASK (1U << 31)
#define MCX_STATE_INITIALIZING (1U << 31)
#define MCX_STATE_READY (0 << 31)
#define MCX_STATE_INTERFACE_MASK (0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED (0x1 << 24)

#define MCX_INTERNAL_TIMER 0x1000
#define MCX_INTERNAL_TIMER_H 0x1000
#define MCX_INTERNAL_TIMER_L 0x1004

#define MCX_CLEAR_INT 0x100c

#define MCX_REG_OP_WRITE 0
#define MCX_REG_OP_READ 1

#define MCX_REG_PMLP 0x5002
#define MCX_REG_PMTU 0x5003
#define MCX_REG_PTYS 0x5004
#define MCX_REG_PAOS 0x5006
#define MCX_REG_PFCC 0x5007
#define MCX_REG_PPCNT 0x5008
#define MCX_REG_MTCAP 0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP 0x900a /* mgmt temp */
#define MCX_REG_MCIA 0x9014
#define MCX_REG_MCAM 0x907f

#define MCX_ETHER_CAP_SGMII 0
#define MCX_ETHER_CAP_1000_KX 1
#define MCX_ETHER_CAP_10G_CX4 2
#define MCX_ETHER_CAP_10G_KX4 3
#define MCX_ETHER_CAP_10G_KR 4
#define MCX_ETHER_CAP_20G_KR2 5
#define MCX_ETHER_CAP_40G_CR4 6
#define MCX_ETHER_CAP_40G_KR4 7
#define MCX_ETHER_CAP_56G_R4 8
#define MCX_ETHER_CAP_10G_CR 12
#define MCX_ETHER_CAP_10G_SR 13
#define MCX_ETHER_CAP_10G_LR 14
#define MCX_ETHER_CAP_40G_SR4 15
#define MCX_ETHER_CAP_40G_LR4 16
#define MCX_ETHER_CAP_50G_SR2 18
#define MCX_ETHER_CAP_100G_CR4 20
#define MCX_ETHER_CAP_100G_SR4 21
#define MCX_ETHER_CAP_100G_KR4 22
#define MCX_ETHER_CAP_100G_LR4 23
#define MCX_ETHER_CAP_100_TX 24
#define MCX_ETHER_CAP_1000_T 25
#define MCX_ETHER_CAP_10G_T 26
#define MCX_ETHER_CAP_25G_CR 27
#define MCX_ETHER_CAP_25G_KR 28
#define MCX_ETHER_CAP_25G_SR 29
#define MCX_ETHER_CAP_50G_CR2 30
#define MCX_ETHER_CAP_50G_KR2 31

#define MCX_MAX_CQE 32

#define MCX_CMD_QUERY_HCA_CAP 0x100
#define MCX_CMD_QUERY_ADAPTER 0x101
#define MCX_CMD_INIT_HCA 0x102
#define MCX_CMD_TEARDOWN_HCA 0x103
#define MCX_CMD_ENABLE_HCA 0x104
#define MCX_CMD_DISABLE_HCA 0x105
#define MCX_CMD_QUERY_PAGES 0x107
#define MCX_CMD_MANAGE_PAGES 0x108
#define MCX_CMD_SET_HCA_CAP 0x109
#define MCX_CMD_QUERY_ISSI 0x10a
#define MCX_CMD_SET_ISSI 0x10b
#define MCX_CMD_SET_DRIVER_VERSION 0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS 0x203
#define MCX_CMD_CREATE_EQ 0x301
#define MCX_CMD_DESTROY_EQ 0x302
#define MCX_CMD_QUERY_EQ 0x303
#define MCX_CMD_CREATE_CQ 0x400
#define MCX_CMD_DESTROY_CQ 0x401
#define MCX_CMD_QUERY_CQ 0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT 0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
	0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS 0x770
#define MCX_CMD_ALLOC_PD 0x800
#define MCX_CMD_ALLOC_UAR 0x802
#define MCX_CMD_ACCESS_REG 0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN 0x816
#define MCX_CMD_CREATE_TIR 0x900
#define MCX_CMD_DESTROY_TIR 0x902
#define MCX_CMD_CREATE_SQ 0x904
#define MCX_CMD_MODIFY_SQ 0x905
#define MCX_CMD_DESTROY_SQ 0x906
#define MCX_CMD_QUERY_SQ 0x907
#define MCX_CMD_CREATE_RQ 0x908
#define MCX_CMD_MODIFY_RQ 0x909
#define MCX_CMD_DESTROY_RQ 0x90a
#define MCX_CMD_QUERY_RQ 0x90b
#define MCX_CMD_CREATE_TIS 0x912
#define MCX_CMD_DESTROY_TIS 0x914
#define MCX_CMD_CREATE_RQT 0x916
#define MCX_CMD_DESTROY_RQT 0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT 0x92f
#define MCX_CMD_CREATE_FLOW_TABLE 0x930
#define MCX_CMD_DESTROY_FLOW_TABLE 0x931
#define MCX_CMD_QUERY_FLOW_TABLE 0x932
#define MCX_CMD_CREATE_FLOW_GROUP 0x933
#define MCX_CMD_DESTROY_FLOW_GROUP 0x934
#define MCX_CMD_QUERY_FLOW_GROUP 0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY 0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY 0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY 0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER 0x939
#define MCX_CMD_QUERY_FLOW_COUNTER 0x93b

#define MCX_QUEUE_STATE_RST 0
#define MCX_QUEUE_STATE_RDY 1
#define MCX_QUEUE_STATE_ERR 3

#define MCX_FLOW_TABLE_TYPE_RX 0
#define MCX_FLOW_TABLE_TYPE_TX 1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t cq_type;
#define MCX_CMDQ_TYPE_PCIE 0x7
	uint8_t cq_reserved0[3];

	uint32_t cq_input_length;
	uint64_t cq_input_ptr;
	uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t cq_output_ptr;
	uint32_t cq_output_length;

	uint8_t cq_token;
	uint8_t cq_signature;
	uint8_t cq_reserved1[1];
	uint8_t cq_status;
#define MCX_CQ_STATUS_SHIFT 1
#define MCX_CQ_STATUS_MASK (0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK (0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR (0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE (0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM (0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE (0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE (0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY (0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM (0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE (0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX (0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES (0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN (0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN (0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
	(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE (0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK 0x1
#define MCX_CQ_STATUS_OWN_SW 0x0
#define MCX_CQ_STATUS_OWN_HW 0x1
} __packed __aligned(8);
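/*
 * Commands with at most MCX_CMDQ_INLINE_DATASIZE bytes of input and
 * output travel entirely within this entry; larger transfers are
 * chained through mailboxes via cq_input_ptr/cq_output_ptr.  The entry
 * is handed to the hardware by setting MCX_CQ_STATUS_OWN_HW in
 * cq_status and ringing MCX_CMDQ_DOORBELL; the device flips the bit
 * back to MCX_CQ_STATUS_OWN_SW when the command completes.
 */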

#define MCX_CMDQ_MAILBOX_DATASIZE 512

struct mcx_cmdq_mailbox {
	uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t mb_reserved0[48];
	uint64_t mb_next_ptr;
	uint32_t mb_block_number;
	uint8_t mb_reserved1[1];
	uint8_t mb_token;
	uint8_t mb_ctrl_signature;
	uint8_t mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN (1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE roundup(sizeof(struct mcx_cmdq_mailbox), \
	MCX_CMDQ_MAILBOX_ALIGN)
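/*
 * Mailboxes are chained: mb_next_ptr points at the next 1kB-aligned
 * mailbox and mb_block_number gives its position in the chain, so a
 * payload of N bytes needs howmany(N, MCX_CMDQ_MAILBOX_DATASIZE)
 * mailboxes, all carrying the token of the command they belong to.
 */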
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_function_id;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL 0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC 0x1
	uint16_t cmd_profile;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_register_id;
	uint32_t cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[2];
	uint16_t rp_max_mtu;
	uint8_t rp_reserved3[2];
	uint16_t rp_admin_mtu;
	uint8_t rp_reserved4[2];
	uint16_t rp_oper_mtu;
	uint8_t rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2;
	uint8_t rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH (1 << 2)
	uint8_t rp_reserved3[8];
	uint32_t rp_eth_proto_cap;
	uint8_t rp_reserved4[8];
	uint32_t rp_eth_proto_admin;
	uint8_t rp_reserved5[8];
	uint32_t rp_eth_proto_oper;
	uint8_t rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP 1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN 2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE 3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED 4
	uint8_t rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP 1
#define MCX_REG_PAOS_OPER_STATUS_DOWN 2
#define MCX_REG_PAOS_OPER_STATUS_FAILED 4
	uint8_t rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN (1 << 7)
	uint8_t rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[3];
	uint8_t rp_prio_mask_tx;
	uint8_t rp_reserved3;
	uint8_t rp_prio_mask_rx;
	uint8_t rp_pptx_aptx;
	uint8_t rp_pfctx;
	uint8_t rp_fctx_dis;
	uint8_t rp_reserved4;
	uint8_t rp_pprx_aprx;
	uint8_t rp_pfcrx;
	uint8_t rp_reserved5[2];
	uint16_t rp_dev_stall_min;
	uint16_t rp_dev_stall_crit;
	uint8_t rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK 0xff
struct mcx_reg_pmlp {
	uint8_t rp_rxtx;
	uint8_t rp_local_port;
	uint8_t rp_reserved0;
	uint8_t rp_width;
	uint32_t rp_lane0_mapping;
	uint32_t rp_lane1_mapping;
	uint32_t rp_lane2_mapping;
	uint32_t rp_lane3_mapping;
	uint8_t rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t ppcnt_swid;
	uint8_t ppcnt_local_port;
	uint8_t ppcnt_pnat;
	uint8_t ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023 0x00
#define MCX_REG_PPCNT_GRP_RFC2863 0x01
#define MCX_REG_PPCNT_GRP_RFC2819 0x02
#define MCX_REG_PPCNT_GRP_RFC3635 0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO 0x10
#define MCX_REG_PPCNT_GRP_PER_TC 0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER 0x11

	uint8_t ppcnt_clr;
	uint8_t ppcnt_reserved1[2];
	uint8_t ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR (1 << 7)

	uint8_t ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t _reserved1[1];
	uint8_t mcam_feature_group;
	uint8_t _reserved2[1];
	uint8_t mcam_access_reg_group;
	uint8_t _reserved3[4];
	uint8_t mcam_access_reg_cap_mask[16];
	uint8_t _reserved4[16];
	uint8_t mcam_feature_cap_mask[16];
	uint8_t _reserved5[16];
} __packed __aligned(4);

/* note: mask with a shifted 1, not with the bit index itself */
#define MCX_BITFIELD_BIT(bf, b) \
	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))
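/*
 * The capability bitfields are stored big-endian, so bit b lives in
 * byte (sizeof bf - 1) - (b / 8) of the array; e.g. testing
 * MCX_MCAM_FEATURE_CAP_SENSOR_MAP (bit 6) in the 16-byte
 * mcam_feature_cap_mask above reads bit 6 of the last byte.
 */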

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP 6

struct mcx_reg_mtcap {
	uint8_t _reserved1[3];
	uint8_t mtcap_sensor_count;
	uint8_t _reserved2[4];

	uint64_t mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t _reserved1[2];
	uint16_t mtmp_sensor_index;

	uint8_t _reserved2[2];
	uint16_t mtmp_temperature;

	uint16_t mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE (1 << 15)
#define MCX_REG_MTMP_MTR (1 << 14)
	uint16_t mtmp_max_temperature;

	uint16_t mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE (0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE (1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE (2 << 14)
	uint16_t mtmp_temperature_threshold_hi;

	uint8_t _reserved3[2];
	uint16_t mtmp_temperature_threshold_lo;

	uint8_t _reserved4[4];

	uint8_t mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);

#define MCX_MCIA_EEPROM_BYTES 32
struct mcx_reg_mcia {
	uint8_t rm_l;
	uint8_t rm_module;
	uint8_t rm_reserved0;
	uint8_t rm_status;
	uint8_t rm_i2c_addr;
	uint8_t rm_page_num;
	uint16_t rm_dev_addr;
	uint16_t rm_reserved1;
	uint16_t rm_size;
	uint32_t rm_reserved2;
	uint8_t rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t cmd_reserved2[16];
	uint8_t cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT 0x01
#define MCX_CMD_QUERY_PAGES_INIT 0x02
#define MCX_CMD_QUERY_PAGES_REGULAR 0x03
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	int32_t cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
	0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
	0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
	0x02
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	uint32_t cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_output_num_entries;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1)
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN 0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__ __COUNTER__
#else
#define __counter__ __LINE__
#endif

#define __token(_tok, _num) _tok##_num
#define _token(_tok, _num) __token(_tok, _num)
#define __reserved__ _token(__reserved, __counter__)

struct mcx_cap_device {
	uint8_t reserved0[16];

	uint8_t log_max_srq_sz;
	uint8_t log_max_qp_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f

	uint8_t __reserved__[1];
	uint8_t log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f
	uint8_t __reserved__[2];

	uint8_t __reserved__[1];
	uint8_t log_max_cq_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f

	uint8_t log_max_eq_sz;
	uint8_t log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f

	uint8_t max_indirection;
	uint8_t log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f
	uint8_t teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
	0x3f
	uint8_t log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
	0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
	0x3f

	uint8_t flags1;
#define MCX_CAP_DEVICE_END_PAD 0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
	0x20
#define MCX_CAP_DEVICE_START_PAD 0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
	0x08
	uint8_t __reserved__[1];
	uint16_t gid_table_size;

	uint16_t flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
	0x2000
#define MCX_CAP_DEVICE_DEBUG 0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
	0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK 0x03ff
	uint16_t pkey_table_size;

	uint8_t flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
	0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
	0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL 0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10
#define MCX_CAP_DEVICE_ETS 0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
	0x01
	uint8_t local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
	0x1f
#define MCX_CAP_DEVICE_MCAM_REG 0x40
	uint8_t port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
	0x80
#define MCX_CAP_DEVICE_PORT_TYPE 0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH 0x01
	uint8_t num_ports;

	uint8_t snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f
	uint8_t max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC 0x0f
	uint8_t flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80
#define MCX_CAP_DEVICE_DCBX 0x40
#define MCX_CAP_DEVICE_ROL_S 0x02
#define MCX_CAP_DEVICE_ROL_G 0x01
	uint8_t wol;
#define MCX_CAP_DEVICE_WOL_S 0x40
#define MCX_CAP_DEVICE_WOL_G 0x20
#define MCX_CAP_DEVICE_WOL_A 0x10
#define MCX_CAP_DEVICE_WOL_B 0x08
#define MCX_CAP_DEVICE_WOL_M 0x04
#define MCX_CAP_DEVICE_WOL_U 0x02
#define MCX_CAP_DEVICE_WOL_P 0x01

	uint16_t stat_rate_support;
	uint8_t __reserved__[1];
	uint8_t cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION 0x0f

	uint32_t flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
	0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
	0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
	0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400
#define MCX_CAP_DEVICE_SHO 0x00000100
#define MCX_CAP_DEVICE_TPH 0x00000080
#define MCX_CAP_DEVICE_RF 0x00000040
#define MCX_CAP_DEVICE_DCT 0x00000020
#define MCX_CAP_DEVICE_QOS 0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS 0x00000008
#define MCX_CAP_DEVICE_ROCE 0x00000004
#define MCX_CAP_DEVICE_ATOMIC 0x00000002

	uint32_t flags6;
#define MCX_CAP_DEVICE_CQ_OI 0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
	0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000
#define MCX_CAP_DEVICE_PG 0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
	0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
	0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
	0x00100000
#define MCX_CAP_DEVICE_CD 0x00080000
#define MCX_CAP_DEVICE_ATM 0x00040000
#define MCX_CAP_DEVICE_APM 0x00020000
#define MCX_CAP_DEVICE_IMAICL 0x00010000
#define MCX_CAP_DEVICE_QKV 0x00000200
#define MCX_CAP_DEVICE_PKV 0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080
#define MCX_CAP_DEVICE_XRC 0x00000008
#define MCX_CAP_DEVICE_UD 0x00000004
#define MCX_CAP_DEVICE_UC 0x00000002
#define MCX_CAP_DEVICE_RC 0x00000001

	uint8_t uar_flags;
#define MCX_CAP_DEVICE_UAR_4K 0x80
	uint8_t uar_sz; /* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ 0x3f
	uint8_t __reserved__[1];
	uint8_t log_pg_sz;

	uint8_t flags7;
#define MCX_CAP_DEVICE_BF 0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
	0x20
	uint8_t log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
	uint8_t __reserved__[2];

	uint16_t num_of_diagnostic_counters;
	uint16_t max_wqe_sz_sq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_rq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_sq_dc;

	uint32_t max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff

	uint8_t __reserved__[3];
	uint8_t log_max_mcq;

	uint8_t log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
	0x1f
	uint8_t log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
	uint8_t __reserved__[1];
	uint8_t log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f

	uint8_t __reserved__[2];
	uint16_t max_flow_counter;

	uint8_t log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
	uint8_t log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
	uint8_t log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
	uint8_t log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f

	uint8_t flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
	0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
	uint8_t log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
	uint8_t log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
	uint8_t log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
	0x1f

	uint8_t flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
	0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
	0x1f
	uint8_t log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
	0x1f
	uint8_t log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
	0x1f
	uint8_t log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
	0x1f

	uint8_t log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN 0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
	0x1f
	uint8_t log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
	0x1f
	uint8_t log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
	0x1f
	uint8_t log_max_mq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
	0x1f

	uint8_t log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
	0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
	0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
	0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
	0x1f
	uint8_t log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
	0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
	0x1f
	uint8_t log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
	0x1f
	uint8_t log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
	0x1f

	uint8_t __reserved__[4];

	uint32_t create_qp_start_hint; /* 24 bits */

	uint8_t log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX 0x1f
	uint8_t log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM 0x1f
	uint16_t max_num_eqs;

	uint8_t log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE 0x1f
	uint8_t __reserved__[1];
	uint16_t log_uar_page_sz;

	uint8_t __reserved__[8];

	uint32_t device_frequency_mhz;
	uint32_t device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR 0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC 0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU 0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint8_t cmd_allowed_list_type;
	uint8_t cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t vp_min_wqe_inline_mode;
	uint8_t vp_reserved0[32];
	uint32_t vp_mtu;
	uint8_t vp_reserved1[200];
	uint16_t vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC (0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC (1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN (2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL (1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST (1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST (1 << 15)
	uint16_t vp_allowed_list_size;
	uint64_t vp_perm_addr;
	uint8_t vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t packets;
	uint64_t octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter rx_err;
	struct mcx_counter tx_err;
	uint8_t reserved0[64]; /* 0x30 */
	struct mcx_counter rx_bcast;
	struct mcx_counter tx_bcast;
	struct mcx_counter rx_ucast;
	struct mcx_counter tx_ucast;
	struct mcx_counter rx_mcast;
	struct mcx_counter tx_mcast;
	uint8_t reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[5];
	uint16_t cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_uar;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t eq_status;
#define MCX_EQ_CTX_STATE_SHIFT 8
#define MCX_EQ_CTX_STATE_MASK (0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED 0x9
#define MCX_EQ_CTX_STATE_FIRED 0xa
#define MCX_EQ_CTX_OI_SHIFT 17
#define MCX_EQ_CTX_OI (1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT 18
#define MCX_EQ_CTX_EC (1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT 28
#define MCX_EQ_CTX_STATUS_MASK (0xfU << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK 0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t eq_reserved1;
	uint32_t eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT 5
	uint32_t eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT 24
	uint32_t eq_reserved2;
	uint8_t eq_reserved3[3];
	uint8_t eq_intr;
	uint32_t eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t eq_reserved4[3];
	uint32_t eq_consumer_counter;
	uint32_t eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK 0xffffff
	uint32_t eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx cmd_eq_ctx;
	uint8_t cmd_reserved0[8];
	uint64_t cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION 0x00
#define MCX_EVENT_TYPE_CQ_ERROR 0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR 0x08
#define MCX_EVENT_TYPE_PORT_CHANGE 0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION 0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST 0x0b
#define MCX_EVENT_TYPE_LAST_WQE 0x13
	uint8_t cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_eqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_eqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t eq_reserved1;
	uint8_t eq_event_type;
	uint8_t eq_reserved2;
	uint8_t eq_event_sub_type;

	uint8_t eq_reserved3[28];
	uint32_t eq_event_data[7];
	uint8_t eq_reserved4[2];
	uint8_t eq_signature;
	uint8_t eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT 1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
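/*
 * EQ entries are initialized with eq_owner set to
 * MCX_EQ_ENTRY_OWNER_INIT; the device inverts the ownership value on
 * each pass through the ring, which is how software distinguishes
 * freshly written events from stale ones without keeping a producer
 * index in host memory.
 */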

struct mcx_cmd_alloc_pd_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_pd;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t cmd_reserved0[20];
	uint32_t cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT 0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT 1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT 28
	uint8_t cmd_reserved1[8];
	uint32_t cmd_lro;
	uint8_t cmd_reserved2[8];
	uint32_t cmd_inline_rqn;
	uint32_t cmd_indir_table;
	uint32_t cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ 2
#define MCX_TIR_CTX_HASH_SHIFT 28
	uint8_t cmd_rx_hash_key[40];
	uint32_t cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP (1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP (1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT (1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT (1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4 (0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6 (1U << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP (0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP (1 << 30)
	uint32_t cmd_rx_hash_sel_inner;
	uint8_t cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t cmd_reserved[16];
	uint32_t cmd_prio;
	uint8_t cmd_reserved1[32];
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t cmd_reserved0[20];
	uint16_t cmd_reserved1;
	uint16_t cmd_rqt_max_size;
	uint16_t cmd_reserved2;
	uint16_t cmd_rqt_actual_size;
	uint8_t cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t cmd_reserved0[16];
	struct mcx_rqt_ctx cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_rqtn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rqtn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT 28
#define MCX_CQ_CTX_STATUS_MASK (0xfU << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK 0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW 0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL 0xa
#define MCX_CQ_CTX_STATE_SHIFT 8
#define MCX_CQ_CTX_STATE_MASK (0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED 0x6
#define MCX_CQ_CTX_STATE_ARMED 0x9
#define MCX_CQ_CTX_STATE_FIRED 0xa
	uint32_t cq_reserved1;
	uint32_t cq_page_offset;
	uint32_t cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT 24
	uint32_t cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT 16
	uint32_t cq_eqn;
	uint32_t cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t cq_reserved2;
	uint32_t cq_last_notified;
	uint32_t cq_last_solicit;
	uint32_t cq_consumer_counter;
	uint32_t cq_producer_counter;
	uint8_t cq_reserved3[8];
	uint64_t cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx cmd_cq_ctx;
	uint8_t cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t __reserved__;
	uint32_t cq_lro;
	uint32_t cq_lro_ack_seq_num;
	uint32_t cq_rx_hash;
	uint8_t cq_rx_hash_type;
	uint8_t cq_ml_path;
	uint16_t __reserved__;
	uint32_t cq_checksum;
	uint32_t __reserved__;
	uint32_t cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK (1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK (1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK (1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV (1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK (0xffff)

	uint32_t cq_lro_srqn;
	uint32_t __reserved__[2];
	uint32_t cq_byte_cnt;
	uint64_t cq_timestamp;
	uint8_t cq_rx_drops;
	uint8_t cq_flow_tag[3];
	uint16_t cq_wqe_count;
	uint8_t cq_signature;
	uint8_t cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT 2
#define MCX_CQ_ENTRY_OPCODE_SHIFT 4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3

#define MCX_CQ_ENTRY_OPCODE_REQ 0
#define MCX_CQ_ENTRY_OPCODE_SEND 2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
#define MCX_CQ_ENTRY_OPCODE_INVALID 15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t db_update_ci;
	uint32_t db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT 28
#define MCX_CQ_DOORBELL_ARM_CMD (1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK (0xffffff)
} __packed __aligned(8);
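/*
 * Re-arming a CQ takes two writes: the current consumer index into
 * db_update_ci, then the arm word (sequence number shifted by
 * MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT, MCX_CQ_DOORBELL_ARM_CMD, and the
 * consumer index masked with MCX_CQ_DOORBELL_ARM_CI_MASK) into
 * db_arm_ci, followed by the same arm word written to the UAR
 * MCX_UAR_CQ_DOORBELL register so the device picks it up.
 */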

struct mcx_wq_ctx {
	uint8_t wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC (1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE (1 << 3)
	uint8_t wq_reserved0[5];
	uint16_t wq_lwm;
	uint32_t wq_pd;
	uint32_t wq_uar_page;
	uint64_t wq_doorbell;
	uint32_t wq_hw_counter;
	uint32_t wq_sw_counter;
	uint16_t wq_log_stride;
	uint8_t wq_log_page_sz;
	uint8_t wq_log_size;
	uint8_t wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t sq_flags;
#define MCX_SQ_CTX_RLKEY (1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
#define MCX_SQ_CTX_STATE_SHIFT 20
#define MCX_SQ_CTX_STATE_MASK (0xf << 20)
#define MCX_SQ_CTX_STATE_RST 0
#define MCX_SQ_CTX_STATE_RDY 1
#define MCX_SQ_CTX_STATE_ERR 3
	uint32_t sq_user_index;
	uint32_t sq_cqn;
	uint32_t sq_reserved1[5];
	uint32_t sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT 16
	uint32_t sq_reserved2[2];
	uint32_t sq_tis_num;
	struct mcx_wq_ctx sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t sqs_byte_count;
	uint32_t sqs_lkey;
	uint64_t sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT 8
#define MCX_SQE_WQE_OPCODE_NOP 0x00
#define MCX_SQE_WQE_OPCODE_SEND 0x0a
	uint32_t sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT 8
	uint32_t sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT 24
#define MCX_SQE_SOLICITED_EVENT 0x02
#define MCX_SQE_CE_CQE_ON_ERR 0x00
#define MCX_SQE_CE_CQE_FIRST_ERR 0x04
#define MCX_SQE_CE_CQE_ALWAYS 0x08
#define MCX_SQE_CE_CQE_SOLICIT 0x0C
#define MCX_SQE_FM_NO_FENCE 0x00
#define MCX_SQE_FM_SMALL_FENCE 0x40
	uint32_t sqe_mkey;

	/* ethernet segment */
	uint32_t sqe_reserved1;
	uint32_t sqe_mss_csum;
#define MCX_SQE_L4_CSUM (1U << 31)
#define MCX_SQE_L3_CSUM (1 << 30)
	uint32_t sqe_reserved2;
	uint16_t sqe_inline_header_size;
	uint16_t sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
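/*
 * A send WQE always begins with the control and ethernet segments laid
 * out above (48 bytes, including MCX_SQ_INLINE_SIZE bytes of inline
 * headers), so sqe_segs[] names only the first data segment; further
 * segments continue into the following 64-byte slots, up to
 * MCX_SQ_ENTRY_MAX_SLOTS slots and MCX_SQ_MAX_SEGMENTS segments per
 * packet.
 */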

struct mcx_cmd_create_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_sq_ctx cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rq_ctx {
	uint32_t rq_flags;
#define MCX_RQ_CTX_RLKEY (1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
#define MCX_RQ_CTX_STATE_SHIFT 20
#define MCX_RQ_CTX_STATE_MASK (0xf << 20)
#define MCX_RQ_CTX_STATE_RST 0
#define MCX_RQ_CTX_STATE_RDY 1
#define MCX_RQ_CTX_STATE_ERR 3
#define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
	uint32_t rq_user_index;
	uint32_t rq_cqn;
	uint32_t rq_reserved1;
	uint32_t rq_rmpn;
	uint32_t rq_reserved2[7];
	struct mcx_wq_ctx rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t rqe_byte_count;
	uint32_t rqe_lkey;
	uint64_t rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_rq_ctx cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);
1879
1880 struct mcx_cmd_create_flow_table_in {
1881 uint16_t cmd_opcode;
1882 uint8_t cmd_reserved0[4];
1883 uint16_t cmd_op_mod;
1884 uint8_t cmd_reserved1[8];
1885 } __packed __aligned(4);
1886
1887 struct mcx_flow_table_ctx {
1888 uint8_t ft_miss_action;
1889 uint8_t ft_level;
1890 uint8_t ft_reserved0;
1891 uint8_t ft_log_size;
1892 uint32_t ft_table_miss_id;
1893 uint8_t ft_reserved1[28];
1894 } __packed __aligned(4);
1895
1896 struct mcx_cmd_create_flow_table_mb_in {
1897 uint8_t cmd_table_type;
1898 uint8_t cmd_reserved0[7];
1899 struct mcx_flow_table_ctx cmd_ctx;
1900 } __packed __aligned(4);
1901
1902 struct mcx_cmd_create_flow_table_out {
1903 uint8_t cmd_status;
1904 uint8_t cmd_reserved0[3];
1905 uint32_t cmd_syndrome;
1906 uint32_t cmd_table_id;
1907 uint8_t cmd_reserved1[4];
1908 } __packed __aligned(4);
1909
1910 struct mcx_cmd_destroy_flow_table_in {
1911 uint16_t cmd_opcode;
1912 uint8_t cmd_reserved0[4];
1913 uint16_t cmd_op_mod;
1914 uint8_t cmd_reserved1[8];
1915 } __packed __aligned(4);
1916
1917 struct mcx_cmd_destroy_flow_table_mb_in {
1918 uint8_t cmd_table_type;
1919 uint8_t cmd_reserved0[3];
1920 uint32_t cmd_table_id;
1921 uint8_t cmd_reserved1[40];
1922 } __packed __aligned(4);
1923
1924 struct mcx_cmd_destroy_flow_table_out {
1925 uint8_t cmd_status;
1926 uint8_t cmd_reserved0[3];
1927 uint32_t cmd_syndrome;
1928 uint8_t cmd_reserved1[8];
1929 } __packed __aligned(4);
1930
1931 struct mcx_cmd_set_flow_table_root_in {
1932 uint16_t cmd_opcode;
1933 uint8_t cmd_reserved0[4];
1934 uint16_t cmd_op_mod;
1935 uint8_t cmd_reserved1[8];
1936 } __packed __aligned(4);
1937
1938 struct mcx_cmd_set_flow_table_root_mb_in {
1939 uint8_t cmd_table_type;
1940 uint8_t cmd_reserved0[3];
1941 uint32_t cmd_table_id;
1942 uint8_t cmd_reserved1[56];
1943 } __packed __aligned(4);
1944
1945 struct mcx_cmd_set_flow_table_root_out {
1946 uint8_t cmd_status;
1947 uint8_t cmd_reserved0[3];
1948 uint32_t cmd_syndrome;
1949 uint8_t cmd_reserved1[8];
1950 } __packed __aligned(4);
1951
1952 struct mcx_flow_match {
1953 /* outer headers */
1954 uint8_t mc_src_mac[6];
1955 uint16_t mc_ethertype;
1956 uint8_t mc_dest_mac[6];
1957 uint16_t mc_first_vlan;
1958 uint8_t mc_ip_proto;
1959 uint8_t mc_ip_dscp_ecn;
1960 uint8_t mc_vlan_flags;
1961 #define MCX_FLOW_MATCH_IP_FRAG (1 << 5)
1962 uint8_t mc_tcp_flags;
1963 uint16_t mc_tcp_sport;
1964 uint16_t mc_tcp_dport;
1965 uint32_t mc_reserved0;
1966 uint16_t mc_udp_sport;
1967 uint16_t mc_udp_dport;
1968 uint8_t mc_src_ip[16];
1969 uint8_t mc_dest_ip[16];
1970
1971 /* misc parameters */
1972 uint8_t mc_reserved1[8];
1973 uint16_t mc_second_vlan;
1974 uint8_t mc_reserved2[2];
1975 uint8_t mc_second_vlan_flags;
1976 uint8_t mc_reserved3[15];
1977 uint32_t mc_outer_ipv6_flow_label;
1978 uint8_t mc_reserved4[32];
1979
1980 uint8_t mc_reserved[384];
1981 } __packed __aligned(4);
1982
1983 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1984
1985 struct mcx_cmd_create_flow_group_in {
1986 uint16_t cmd_opcode;
1987 uint8_t cmd_reserved0[4];
1988 uint16_t cmd_op_mod;
1989 uint8_t cmd_reserved1[8];
1990 } __packed __aligned(4);
1991
1992 struct mcx_cmd_create_flow_group_mb_in {
1993 uint8_t cmd_table_type;
1994 uint8_t cmd_reserved0[3];
1995 uint32_t cmd_table_id;
1996 uint8_t cmd_reserved1[4];
1997 uint32_t cmd_start_flow_index;
1998 uint8_t cmd_reserved2[4];
1999 uint32_t cmd_end_flow_index;
2000 uint8_t cmd_reserved3[23];
2001 uint8_t cmd_match_criteria_enable;
2002 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
2003 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
2004 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
2005 struct mcx_flow_match cmd_match_criteria;
2006 uint8_t cmd_reserved4[448];
2007 } __packed __aligned(4);
2008
2009 struct mcx_cmd_create_flow_group_out {
2010 uint8_t cmd_status;
2011 uint8_t cmd_reserved0[3];
2012 uint32_t cmd_syndrome;
2013 uint32_t cmd_group_id;
2014 uint8_t cmd_reserved1[4];
2015 } __packed __aligned(4);
2016
2017 struct mcx_flow_ctx {
2018 uint8_t fc_reserved0[4];
2019 uint32_t fc_group_id;
2020 uint32_t fc_flow_tag;
2021 uint32_t fc_action;
2022 #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
2023 #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
2024 #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
2025 #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
2026 uint32_t fc_dest_list_size;
2027 uint32_t fc_counter_list_size;
2028 uint8_t fc_reserved1[40];
2029 struct mcx_flow_match fc_match_value;
2030 uint8_t fc_reserved2[192];
2031 } __packed __aligned(4);
2032
2033 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
2034 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
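
/*
 * Flow destinations appear to be encoded as a 32-bit word with the
 * destination type in the top byte and the table/TIR id in the low
 * 24 bits; cf. mcx_get_id() below, which masks ids with 0x00ffffff.
 */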
2035
2036 struct mcx_cmd_destroy_flow_group_in {
2037 uint16_t cmd_opcode;
2038 uint8_t cmd_reserved0[4];
2039 uint16_t cmd_op_mod;
2040 uint8_t cmd_reserved1[8];
2041 } __packed __aligned(4);
2042
2043 struct mcx_cmd_destroy_flow_group_mb_in {
2044 uint8_t cmd_table_type;
2045 uint8_t cmd_reserved0[3];
2046 uint32_t cmd_table_id;
2047 uint32_t cmd_group_id;
2048 uint8_t cmd_reserved1[36];
2049 } __packed __aligned(4);
2050
2051 struct mcx_cmd_destroy_flow_group_out {
2052 uint8_t cmd_status;
2053 uint8_t cmd_reserved0[3];
2054 uint32_t cmd_syndrome;
2055 uint8_t cmd_reserved1[8];
2056 } __packed __aligned(4);
2057
2058 struct mcx_cmd_set_flow_table_entry_in {
2059 uint16_t cmd_opcode;
2060 uint8_t cmd_reserved0[4];
2061 uint16_t cmd_op_mod;
2062 uint8_t cmd_reserved1[8];
2063 } __packed __aligned(4);
2064
2065 struct mcx_cmd_set_flow_table_entry_mb_in {
2066 uint8_t cmd_table_type;
2067 uint8_t cmd_reserved0[3];
2068 uint32_t cmd_table_id;
2069 uint32_t cmd_modify_enable_mask;
2070 uint8_t cmd_reserved1[4];
2071 uint32_t cmd_flow_index;
2072 uint8_t cmd_reserved2[28];
2073 struct mcx_flow_ctx cmd_flow_ctx;
2074 } __packed __aligned(4);
2075
2076 struct mcx_cmd_set_flow_table_entry_out {
2077 uint8_t cmd_status;
2078 uint8_t cmd_reserved0[3];
2079 uint32_t cmd_syndrome;
2080 uint8_t cmd_reserved1[8];
2081 } __packed __aligned(4);
2082
2083 struct mcx_cmd_query_flow_table_entry_in {
2084 uint16_t cmd_opcode;
2085 uint8_t cmd_reserved0[4];
2086 uint16_t cmd_op_mod;
2087 uint8_t cmd_reserved1[8];
2088 } __packed __aligned(4);
2089
2090 struct mcx_cmd_query_flow_table_entry_mb_in {
2091 uint8_t cmd_table_type;
2092 uint8_t cmd_reserved0[3];
2093 uint32_t cmd_table_id;
2094 uint8_t cmd_reserved1[8];
2095 uint32_t cmd_flow_index;
2096 uint8_t cmd_reserved2[28];
2097 } __packed __aligned(4);
2098
2099 struct mcx_cmd_query_flow_table_entry_out {
2100 uint8_t cmd_status;
2101 uint8_t cmd_reserved0[3];
2102 uint32_t cmd_syndrome;
2103 uint8_t cmd_reserved1[8];
2104 } __packed __aligned(4);
2105
2106 struct mcx_cmd_query_flow_table_entry_mb_out {
2107 uint8_t cmd_reserved0[48];
2108 struct mcx_flow_ctx cmd_flow_ctx;
2109 } __packed __aligned(4);
2110
2111 struct mcx_cmd_delete_flow_table_entry_in {
2112 uint16_t cmd_opcode;
2113 uint8_t cmd_reserved0[4];
2114 uint16_t cmd_op_mod;
2115 uint8_t cmd_reserved1[8];
2116 } __packed __aligned(4);
2117
2118 struct mcx_cmd_delete_flow_table_entry_mb_in {
2119 uint8_t cmd_table_type;
2120 uint8_t cmd_reserved0[3];
2121 uint32_t cmd_table_id;
2122 uint8_t cmd_reserved1[8];
2123 uint32_t cmd_flow_index;
2124 uint8_t cmd_reserved2[28];
2125 } __packed __aligned(4);
2126
2127 struct mcx_cmd_delete_flow_table_entry_out {
2128 uint8_t cmd_status;
2129 uint8_t cmd_reserved0[3];
2130 uint32_t cmd_syndrome;
2131 uint8_t cmd_reserved1[8];
2132 } __packed __aligned(4);
2133
2134 struct mcx_cmd_query_flow_group_in {
2135 uint16_t cmd_opcode;
2136 uint8_t cmd_reserved0[4];
2137 uint16_t cmd_op_mod;
2138 uint8_t cmd_reserved1[8];
2139 } __packed __aligned(4);
2140
2141 struct mcx_cmd_query_flow_group_mb_in {
2142 uint8_t cmd_table_type;
2143 uint8_t cmd_reserved0[3];
2144 uint32_t cmd_table_id;
2145 uint32_t cmd_group_id;
2146 uint8_t cmd_reserved1[36];
2147 } __packed __aligned(4);
2148
2149 struct mcx_cmd_query_flow_group_out {
2150 uint8_t cmd_status;
2151 uint8_t cmd_reserved0[3];
2152 uint32_t cmd_syndrome;
2153 uint8_t cmd_reserved1[8];
2154 } __packed __aligned(4);
2155
2156 struct mcx_cmd_query_flow_group_mb_out {
2157 uint8_t cmd_reserved0[12];
2158 uint32_t cmd_start_flow_index;
2159 uint8_t cmd_reserved1[4];
2160 uint32_t cmd_end_flow_index;
2161 uint8_t cmd_reserved2[20];
2162 uint32_t cmd_match_criteria_enable;
2163 uint8_t cmd_match_criteria[512];
2164 uint8_t cmd_reserved4[448];
2165 } __packed __aligned(4);
2166
2167 struct mcx_cmd_query_flow_table_in {
2168 uint16_t cmd_opcode;
2169 uint8_t cmd_reserved0[4];
2170 uint16_t cmd_op_mod;
2171 uint8_t cmd_reserved1[8];
2172 } __packed __aligned(4);
2173
2174 struct mcx_cmd_query_flow_table_mb_in {
2175 uint8_t cmd_table_type;
2176 uint8_t cmd_reserved0[3];
2177 uint32_t cmd_table_id;
2178 uint8_t cmd_reserved1[40];
2179 } __packed __aligned(4);
2180
2181 struct mcx_cmd_query_flow_table_out {
2182 uint8_t cmd_status;
2183 uint8_t cmd_reserved0[3];
2184 uint32_t cmd_syndrome;
2185 uint8_t cmd_reserved1[8];
2186 } __packed __aligned(4);
2187
2188 struct mcx_cmd_query_flow_table_mb_out {
2189 uint8_t cmd_reserved0[4];
2190 struct mcx_flow_table_ctx cmd_ctx;
2191 } __packed __aligned(4);
2192
2193 struct mcx_cmd_alloc_flow_counter_in {
2194 uint16_t cmd_opcode;
2195 uint8_t cmd_reserved0[4];
2196 uint16_t cmd_op_mod;
2197 uint8_t cmd_reserved1[8];
2198 } __packed __aligned(4);
2199
2200 struct mcx_cmd_query_rq_in {
2201 uint16_t cmd_opcode;
2202 uint8_t cmd_reserved0[4];
2203 uint16_t cmd_op_mod;
2204 uint32_t cmd_rqn;
2205 uint8_t cmd_reserved1[4];
2206 } __packed __aligned(4);
2207
2208 struct mcx_cmd_query_rq_out {
2209 uint8_t cmd_status;
2210 uint8_t cmd_reserved0[3];
2211 uint32_t cmd_syndrome;
2212 uint8_t cmd_reserved1[8];
2213 } __packed __aligned(4);
2214
2215 struct mcx_cmd_query_rq_mb_out {
2216 uint8_t cmd_reserved0[16];
2217 struct mcx_rq_ctx cmd_ctx;
} __packed __aligned(4);
2219
2220 struct mcx_cmd_query_sq_in {
2221 uint16_t cmd_opcode;
2222 uint8_t cmd_reserved0[4];
2223 uint16_t cmd_op_mod;
2224 uint32_t cmd_sqn;
2225 uint8_t cmd_reserved1[4];
2226 } __packed __aligned(4);
2227
2228 struct mcx_cmd_query_sq_out {
2229 uint8_t cmd_status;
2230 uint8_t cmd_reserved0[3];
2231 uint32_t cmd_syndrome;
2232 uint8_t cmd_reserved1[8];
2233 } __packed __aligned(4);
2234
2235 struct mcx_cmd_query_sq_mb_out {
2236 uint8_t cmd_reserved0[16];
2237 struct mcx_sq_ctx cmd_ctx;
} __packed __aligned(4);
2239
2240 struct mcx_cmd_alloc_flow_counter_out {
2241 uint8_t cmd_status;
2242 uint8_t cmd_reserved0[3];
2243 uint32_t cmd_syndrome;
2244 uint8_t cmd_reserved1[2];
2245 uint16_t cmd_flow_counter_id;
2246 uint8_t cmd_reserved2[4];
2247 } __packed __aligned(4);
2248
2249 struct mcx_wq_doorbell {
2250 uint32_t db_recv_counter;
2251 uint32_t db_send_counter;
2252 } __packed __aligned(8);
2253
2254 struct mcx_dmamem {
2255 bus_dmamap_t mxm_map;
2256 bus_dma_segment_t mxm_seg;
2257 int mxm_nsegs;
2258 size_t mxm_size;
2259 void *mxm_kva;
2260 };
2261 #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
2262 #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
2263 #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
2264 #define MCX_DMA_OFF(_mxm, _off) ((void *)((char *)(_mxm)->mxm_kva + (_off)))
2265 #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
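
/*
 * A minimal usage sketch for the mcx_dmamem helpers, guarded out like
 * the other example code in this file; size and alignment here are
 * arbitrary, and the helper functions used are declared further down.
 */
#if 0
static int
mcx_dmamem_sketch(struct mcx_softc *sc)
{
	struct mcx_dmamem mem;

	/* one page of DMA-safe memory, page aligned */
	if (mcx_dmamem_alloc(sc, &mem, MCX_PAGE_SIZE, MCX_PAGE_SIZE) != 0)
		return (ENOMEM);

	mcx_dmamem_zero(&mem);

	/* MCX_DMA_KVA() is the CPU's view, MCX_DMA_DVA() the device's */
	memset(MCX_DMA_KVA(&mem), 0xff, MCX_DMA_LEN(&mem));

	mcx_dmamem_free(sc, &mem);
	return (0);
}
#endif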
2266
2267 struct mcx_hwmem {
2268 bus_dmamap_t mhm_map;
2269 bus_dma_segment_t *mhm_segs;
2270 unsigned int mhm_seg_count;
2271 unsigned int mhm_npages;
2272 };
2273
2274 struct mcx_slot {
2275 bus_dmamap_t ms_map;
2276 struct mbuf *ms_m;
2277 };
2278
2279 struct mcx_eq {
2280 int eq_n;
2281 uint32_t eq_cons;
2282 struct mcx_dmamem eq_mem;
2283 };
2284
2285 struct mcx_cq {
2286 int cq_n;
2287 struct mcx_dmamem cq_mem;
2288 bus_addr_t cq_doorbell;
2289 uint32_t cq_cons;
2290 uint32_t cq_count;
2291 };
2292
2293 struct mcx_calibration {
2294 uint64_t c_timestamp; /* previous mcx chip time */
2295 uint64_t c_uptime; /* previous kernel nanouptime */
2296 uint64_t c_tbase; /* mcx chip time */
2297 uint64_t c_ubase; /* kernel nanouptime */
2298 uint64_t c_ratio;
2299 };
2300
2301 #define MCX_CALIBRATE_FIRST 2
2302 #define MCX_CALIBRATE_NORMAL 32
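
/*
 * A sketch of the calibration idea only, not the driver's exact
 * fixed-point arithmetic: pairs of (chip time, kernel nanouptime)
 * samples define a line, and a later chip timestamp is converted to
 * kernel time by interpolating along it.  The scaling of c_ratio in
 * this sketch is an assumption made for illustration.
 */
#if 0
static uint64_t
mcx_calibration_sketch(const struct mcx_calibration *c, uint64_t chip_ts)
{
	/* assume c_ratio holds chip ticks per nanosecond between the
	 * two samples; the real driver scales this into fixed point */
	return (c->c_ubase + (chip_ts - c->c_tbase) / c->c_ratio);
}
#endif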
2303
2304 struct mcx_rxring {
2305 u_int rxr_total;
2306 u_int rxr_inuse;
2307 };
2308
2309 MBUFQ_HEAD(mcx_mbufq);
2310
2311 struct mcx_rx {
2312 struct mcx_softc *rx_softc;
2313
2314 int rx_rqn;
2315 struct mcx_dmamem rx_rq_mem;
2316 struct mcx_slot *rx_slots;
2317 bus_addr_t rx_doorbell;
2318
2319 uint32_t rx_prod;
2320 callout_t rx_refill;
2321 struct mcx_rxring rx_rxr;
2322 } __aligned(64);
2323
2324 struct mcx_tx {
2325 struct mcx_softc *tx_softc;
2326 kmutex_t tx_lock;
2327 pcq_t *tx_pcq;
2328 void *tx_softint;
2329
2330 int tx_uar;
2331 int tx_sqn;
2332 struct mcx_dmamem tx_sq_mem;
2333 struct mcx_slot *tx_slots;
2334 bus_addr_t tx_doorbell;
2335 int tx_bf_offset;
2336
2337 uint32_t tx_cons;
2338 uint32_t tx_prod;
2339 } __aligned(64);
2340
2341 struct mcx_queues {
2342 void *q_ihc;
2343 struct mcx_softc *q_sc;
2344 int q_uar;
2345 int q_index;
2346 struct mcx_rx q_rx;
2347 struct mcx_tx q_tx;
2348 struct mcx_cq q_cq;
2349 struct mcx_eq q_eq;
2350 #if NKSTAT > 0
2351 struct kstat *q_kstat;
2352 #endif
2353 };
2354
2355 struct mcx_flow_group {
2356 int g_id;
2357 int g_table;
2358 int g_start;
2359 int g_size;
2360 };
2361
2362 #define MCX_FLOW_GROUP_PROMISC 0
2363 #define MCX_FLOW_GROUP_ALLMULTI 1
2364 #define MCX_FLOW_GROUP_MAC 2
2365 #define MCX_FLOW_GROUP_RSS_L4 3
2366 #define MCX_FLOW_GROUP_RSS_L3 4
2367 #define MCX_FLOW_GROUP_RSS_NONE 5
2368 #define MCX_NUM_FLOW_GROUPS 6
2369
#define MCX_HASH_SEL_L3		(MCX_TIR_CTX_HASH_SEL_SRC_IP | \
				MCX_TIR_CTX_HASH_SEL_DST_IP)
#define MCX_HASH_SEL_L4		(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
				MCX_TIR_CTX_HASH_SEL_DPORT)

#define MCX_RSS_HASH_SEL_V4_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
				MCX_TIR_CTX_HASH_SEL_IPV4)
#define MCX_RSS_HASH_SEL_V6_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
				MCX_TIR_CTX_HASH_SEL_IPV6)
#define MCX_RSS_HASH_SEL_V4_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
				MCX_TIR_CTX_HASH_SEL_IPV4)
#define MCX_RSS_HASH_SEL_V6_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
				MCX_TIR_CTX_HASH_SEL_IPV6)
#define MCX_RSS_HASH_SEL_V4	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4)
#define MCX_RSS_HASH_SEL_V6	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6)
2385
2386 /*
2387 * There are a few different pieces involved in configuring RSS.
2388 * A Receive Queue Table (RQT) is the indirection table that maps packets to
2389 * different rx queues based on a hash value. We only create one, because
2390 * we want to scatter any traffic we can apply RSS to across all our rx
2391 * queues. Anything else will only be delivered to the first rx queue,
2392 * which doesn't require an RQT.
2393 *
2394 * A Transport Interface Receive (TIR) delivers packets to either a single rx
2395 * queue or an RQT, and in the latter case, specifies the set of fields
2396 * hashed, the hash function, and the hash key. We need one of these for each
2397 * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2398 * and one for non-RSS traffic.
2399 *
2400 * Flow tables hold flow table entries in sequence. The first entry that
2401 * matches a packet is applied, sending the packet to either another flow
2402 * table or a TIR. We use one flow table to select packets based on
2403 * destination MAC address, and a second to apply RSS. The entries in the
2404 * first table send matching packets to the second, and the entries in the
2405 * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2406 *
2407 * The flow table entry that delivers packets to an RSS TIR must include match
2408 * criteria that ensure packets delivered to the TIR include all the fields
2409 * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2410 * only accept v4 TCP packets. Accordingly, we need flow table entries for
2411 * each TIR.
2412 *
 * All of this is a lot more flexible than we need, and we can describe
 * most of what we need with a simple array.
 *
 * An RSS config creates a TIR with hashing enabled on a set of fields,
 * pointing to either the first rx queue or the RQT containing all the
 * rx queues, and a flow table entry, matching on an ethertype and
 * optionally an IP protocol, that delivers packets to the TIR.  A
 * sketch of how the array below is consumed follows it.
 */
2421 static struct mcx_rss_rule {
2422 int hash_sel;
2423 int flow_group;
2424 int ethertype;
2425 int ip_proto;
2426 } mcx_rss_config[] = {
2427 /* udp and tcp for v4/v6 */
2428 { MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2429 ETHERTYPE_IP, IPPROTO_TCP },
2430 { MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2431 ETHERTYPE_IPV6, IPPROTO_TCP },
2432 { MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2433 ETHERTYPE_IP, IPPROTO_UDP },
2434 { MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2435 ETHERTYPE_IPV6, IPPROTO_UDP },
2436
2437 /* other v4/v6 */
2438 { MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2439 ETHERTYPE_IP, 0 },
2440 { MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2441 ETHERTYPE_IPV6, 0 },
2442
2443 /* non v4/v6 */
2444 { 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2445 };
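
/*
 * A sketch (not the driver's literal code) of how mcx_rss_config is
 * consumed: one TIR per rule, hashing on the rule's fields over the
 * RQT, plus a flow table entry steering the rule's ethertype/ip_proto
 * to that TIR.  The meaning of each helper argument, and whether the
 * destination must be tagged with MCX_FLOW_CONTEXT_DEST_TYPE_TIR, is
 * inferred from the declarations below rather than confirmed.
 */
#if 0
static int
mcx_rss_sketch(struct mcx_softc *sc)
{
	struct mcx_rss_rule *rule;
	int i;

	for (i = 0; i < __arraycount(mcx_rss_config); i++) {
		rule = &mcx_rss_config[i];

		/* hash over all rx queues via the RQT */
		if (mcx_create_tir_indirect(sc, sc->sc_rqt,
		    rule->hash_sel, &sc->sc_tir[i]) != 0)
			return (-1);

		/* steer matching packets to this TIR */
		if (mcx_set_flow_table_entry_proto(sc, rule->flow_group,
		    i, rule->ethertype, rule->ip_proto,
		    sc->sc_tir[i]) != 0)
			return (-1);
	}
	return (0);
}
#endif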
2446
2447 struct mcx_softc {
2448 device_t sc_dev;
2449 struct ethercom sc_ec;
2450 struct ifmedia sc_media;
2451 uint64_t sc_media_status;
2452 uint64_t sc_media_active;
2453 kmutex_t sc_media_mutex;
2454
2455 pci_chipset_tag_t sc_pc;
2456 pci_intr_handle_t *sc_intrs;
2457 void *sc_ihc;
2458 pcitag_t sc_tag;
2459
2460 bus_dma_tag_t sc_dmat;
2461 bus_space_tag_t sc_memt;
2462 bus_space_handle_t sc_memh;
2463 bus_size_t sc_mems;
2464
2465 struct mcx_dmamem sc_cmdq_mem;
2466 unsigned int sc_cmdq_mask;
2467 unsigned int sc_cmdq_size;
2468
2469 unsigned int sc_cmdq_token;
2470
2471 struct mcx_hwmem sc_boot_pages;
2472 struct mcx_hwmem sc_init_pages;
2473 struct mcx_hwmem sc_regular_pages;
2474
2475 int sc_uar;
2476 int sc_pd;
2477 int sc_tdomain;
2478 uint32_t sc_lkey;
2479 int sc_tis;
2480 int sc_tir[__arraycount(mcx_rss_config)];
2481 int sc_rqt;
2482
2483 struct mcx_dmamem sc_doorbell_mem;
2484
2485 struct mcx_eq sc_admin_eq;
2486 struct mcx_eq sc_queue_eq;
2487
2488 int sc_hardmtu;
2489 int sc_rxbufsz;
2490
2491 int sc_bf_size;
2492 int sc_max_rqt_size;
2493
2494 struct workqueue *sc_workq;
2495 struct work sc_port_change;
2496
2497 int sc_mac_flow_table_id;
2498 int sc_rss_flow_table_id;
2499 struct mcx_flow_group sc_flow_group[MCX_NUM_FLOW_GROUPS];
2500 int sc_promisc_flow_enabled;
2501 int sc_allmulti_flow_enabled;
2502 int sc_mcast_flow_base;
2503 int sc_extra_mcast;
2504 uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2505
2506 struct mcx_calibration sc_calibration[2];
2507 unsigned int sc_calibration_gen;
2508 callout_t sc_calibrate;
2509 uint32_t sc_mhz;
2510 uint32_t sc_khz;
2511
2512 struct mcx_queues *sc_queues;
2513 unsigned int sc_nqueues;
2514
2515 int sc_mcam_reg;
2516
2517 #if NKSTAT > 0
2518 struct kstat *sc_kstat_ieee8023;
2519 struct kstat *sc_kstat_rfc2863;
2520 struct kstat *sc_kstat_rfc2819;
2521 struct kstat *sc_kstat_rfc3635;
2522 unsigned int sc_kstat_mtmp_count;
2523 struct kstat **sc_kstat_mtmp;
2524 #endif
2525
2526 struct timecounter sc_timecounter;
2527 };
2528 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2529
2530 static int mcx_match(device_t, cfdata_t, void *);
2531 static void mcx_attach(device_t, device_t, void *);
2532
2533 static void * mcx_establish_intr(struct mcx_softc *, int, kcpuset_t *,
2534 int (*)(void *), void *, const char *);
2535
2536 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2537 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2538 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2539 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2540
2541 #if NKSTAT > 0
2542 static void mcx_kstat_attach(struct mcx_softc *);
2543 #endif
2544
2545 static void mcx_timecounter_attach(struct mcx_softc *);
2546
2547 static int mcx_version(struct mcx_softc *);
2548 static int mcx_init_wait(struct mcx_softc *);
2549 static int mcx_enable_hca(struct mcx_softc *);
2550 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2551 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2552 int);
2553 static int mcx_issi(struct mcx_softc *);
2554 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2555 static int mcx_hca_max_caps(struct mcx_softc *);
2556 static int mcx_hca_set_caps(struct mcx_softc *);
2557 static int mcx_init_hca(struct mcx_softc *);
2558 static int mcx_set_driver_version(struct mcx_softc *);
2559 static int mcx_iff(struct mcx_softc *);
2560 static int mcx_alloc_uar(struct mcx_softc *, int *);
2561 static int mcx_alloc_pd(struct mcx_softc *);
2562 static int mcx_alloc_tdomain(struct mcx_softc *);
2563 static int mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2564 uint64_t, int);
2565 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2566 static int mcx_query_special_contexts(struct mcx_softc *);
2567 static int mcx_set_port_mtu(struct mcx_softc *, int);
2568 static int mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2569 int);
2570 static int mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2571 static int mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2572 int);
2573 static int mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2574 static int mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2575 static int mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2576 static int mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2577 static int mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2578 static int mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2579 int *);
2580 static int mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2581 int *);
2582 static int mcx_destroy_tir(struct mcx_softc *, int);
2583 static int mcx_create_tis(struct mcx_softc *, int *);
2584 static int mcx_destroy_tis(struct mcx_softc *, int);
2585 static int mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2586 static int mcx_destroy_rqt(struct mcx_softc *, int);
2587 static int mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2588 static int mcx_set_flow_table_root(struct mcx_softc *, int);
2589 static int mcx_destroy_flow_table(struct mcx_softc *, int);
2590 static int mcx_create_flow_group(struct mcx_softc *, int, int, int,
2591 int, int, struct mcx_flow_match *);
2592 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2593 static int mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2594 const uint8_t *, uint32_t);
2595 static int mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2596 int, int, uint32_t);
2597 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2598
2599 #if NKSTAT > 0
2600 static int mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2601 static int mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2602 static int mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2603 static int mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2604 #endif
2605
2606 #if 0
2607 static int mcx_dump_flow_table(struct mcx_softc *, int);
2608 static int mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2609 static int mcx_dump_flow_group(struct mcx_softc *, int);
2610 #endif
2611
2612
#if 0
static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
#endif
2617 static void mcx_refill(void *);
2618 static int mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2619 struct mcx_cq_entry *, struct mcx_mbufq *,
2620 const struct mcx_calibration *);
2621 static int mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2622 struct mcx_cq_entry *);
2623 static void mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2624 struct mcx_cq *);
2625
2626 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2627 static void mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2628 static int mcx_admin_intr(void *);
2629 static int mcx_cq_intr(void *);
2630
2631 static int mcx_init(struct ifnet *);
2632 static void mcx_stop(struct ifnet *, int);
2633 static int mcx_ioctl(struct ifnet *, u_long, void *);
2634 static void mcx_start(struct ifnet *);
2635 static int mcx_transmit(struct ifnet *, struct mbuf *);
2636 static void mcx_deferred_transmit(void *);
2637 static void mcx_media_add_types(struct mcx_softc *);
2638 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2639 static int mcx_media_change(struct ifnet *);
2640 #if 0
2641 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2642 #endif
2643 static void mcx_port_change(struct work *, void *);
2644
2645 static void mcx_calibrate_first(struct mcx_softc *);
2646 static void mcx_calibrate(void *);
2647
2648 static inline uint32_t
2649 mcx_rd(struct mcx_softc *, bus_size_t);
2650 static inline void
2651 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2652 static inline void
2653 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2654
2655 static uint64_t mcx_timer(struct mcx_softc *);
2656
2657 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2658 bus_size_t, u_int align);
2659 static void mcx_dmamem_zero(struct mcx_dmamem *);
2660 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2661
2662 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2663 unsigned int);
2664 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2665
2666 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2667
2668 static const struct {
2669 pci_vendor_id_t vendor;
2670 pci_product_id_t product;
2671 } mcx_devices[] = {
2672 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2673 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700VF },
2674 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2675 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710VF },
2676 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2677 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800VF },
2678 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2679 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800VF },
2680 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28908 },
2681 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT2892 },
2682 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT2894 },
2683 };
2684
2685 struct mcx_eth_proto_capability {
2686 uint64_t cap_media;
2687 uint64_t cap_baudrate;
2688 };
2689
2690 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2691 [MCX_ETHER_CAP_SGMII] = { IFM_1000_SGMII, IF_Gbps(1) },
2692 [MCX_ETHER_CAP_1000_KX] = { IFM_1000_KX, IF_Gbps(1) },
2693 [MCX_ETHER_CAP_10G_CX4] = { IFM_10G_CX4, IF_Gbps(10) },
2694 [MCX_ETHER_CAP_10G_KX4] = { IFM_10G_KX4, IF_Gbps(10) },
2695 [MCX_ETHER_CAP_10G_KR] = { IFM_10G_KR, IF_Gbps(10) },
2696 [MCX_ETHER_CAP_20G_KR2] = { IFM_20G_KR2, IF_Gbps(20) },
2697 [MCX_ETHER_CAP_40G_CR4] = { IFM_40G_CR4, IF_Gbps(40) },
2698 [MCX_ETHER_CAP_40G_KR4] = { IFM_40G_KR4, IF_Gbps(40) },
2699 [MCX_ETHER_CAP_56G_R4] = { IFM_56G_R4, IF_Gbps(56) },
2700 [MCX_ETHER_CAP_10G_CR] = { IFM_10G_CR1, IF_Gbps(10) },
2701 [MCX_ETHER_CAP_10G_SR] = { IFM_10G_SR, IF_Gbps(10) },
2702 [MCX_ETHER_CAP_10G_LR] = { IFM_10G_LR, IF_Gbps(10) },
2703 [MCX_ETHER_CAP_40G_SR4] = { IFM_40G_SR4, IF_Gbps(40) },
2704 [MCX_ETHER_CAP_40G_LR4] = { IFM_40G_LR4, IF_Gbps(40) },
2705 [MCX_ETHER_CAP_50G_SR2] = { IFM_50G_SR2, IF_Gbps(50) },
2706 [MCX_ETHER_CAP_100G_CR4] = { IFM_100G_CR4, IF_Gbps(100) },
2707 [MCX_ETHER_CAP_100G_SR4] = { IFM_100G_SR4, IF_Gbps(100) },
2708 [MCX_ETHER_CAP_100G_KR4] = { IFM_100G_KR4, IF_Gbps(100) },
2709 [MCX_ETHER_CAP_100G_LR4] = { IFM_100G_LR4, IF_Gbps(100) },
2710 [MCX_ETHER_CAP_100_TX] = { IFM_100_TX, IF_Mbps(100) },
2711 [MCX_ETHER_CAP_1000_T] = { IFM_1000_T, IF_Gbps(1) },
2712 [MCX_ETHER_CAP_10G_T] = { IFM_10G_T, IF_Gbps(10) },
2713 [MCX_ETHER_CAP_25G_CR] = { IFM_25G_CR, IF_Gbps(25) },
2714 [MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) },
2715 [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) },
2716 [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) },
2717 [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) },
2718 };
2719
2720 static int
2721 mcx_get_id(uint32_t val)
2722 {
2723 return be32toh(val) & 0x00ffffff;
2724 }
2725
2726 static int
2727 mcx_match(device_t parent, cfdata_t cf, void *aux)
2728 {
2729 struct pci_attach_args *pa = aux;
2730 int n;
2731
2732 for (n = 0; n < __arraycount(mcx_devices); n++) {
2733 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2734 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2735 return 1;
2736 }
2737
2738 return 0;
2739 }
2740
static void
2742 mcx_attach(device_t parent, device_t self, void *aux)
2743 {
2744 struct mcx_softc *sc = device_private(self);
2745 struct ifnet *ifp = &sc->sc_ec.ec_if;
2746 struct pci_attach_args *pa = aux;
2747 struct ifcapreq ifcr;
2748 uint8_t enaddr[ETHER_ADDR_LEN];
2749 int counts[PCI_INTR_TYPE_SIZE];
2750 char intrxname[32];
2751 pcireg_t memtype;
2752 uint32_t r;
2753 unsigned int cq_stride;
2754 unsigned int cq_size;
2755 int i, msix;
2756 kcpuset_t *affinity;
2757
2758 sc->sc_dev = self;
2759 sc->sc_pc = pa->pa_pc;
2760 sc->sc_tag = pa->pa_tag;
2761 if (pci_dma64_available(pa))
2762 sc->sc_dmat = pa->pa_dmat64;
2763 else
2764 sc->sc_dmat = pa->pa_dmat;
2765
2766 /* Map the PCI memory space */
2767 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2768 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2769 #ifdef __NetBSD__
2770 0,
2771 #else
2772 BUS_SPACE_MAP_PREFETCHABLE,
2773 #endif
2774 &sc->sc_memt, &sc->sc_memh,
2775 NULL, &sc->sc_mems)) {
2776 aprint_error(": unable to map register memory\n");
2777 return;
2778 }
2779
2780 pci_aprint_devinfo(pa, "Ethernet controller");
2781
2782 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2783
2784 if (mcx_version(sc) != 0) {
2785 /* error printed by mcx_version */
2786 goto unmap;
2787 }
2788
2789 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2790 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2791 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2792 if (cq_size > MCX_MAX_CQE) {
2793 aprint_error_dev(self,
2794 "command queue size overflow %u\n", cq_size);
2795 goto unmap;
2796 }
2797 if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2798 aprint_error_dev(self,
2799 "command queue entry size underflow %u\n", cq_stride);
2800 goto unmap;
2801 }
2802 if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2803 aprint_error_dev(self, "command queue page overflow\n");
2804 goto unmap;
2805 }
2806
2807 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2808 MCX_PAGE_SIZE) != 0) {
2809 aprint_error_dev(self, "unable to allocate doorbell memory\n");
2810 goto unmap;
2811 }
2812
2813 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2814 MCX_PAGE_SIZE) != 0) {
2815 aprint_error_dev(self, "unable to allocate command queue\n");
2816 goto dbfree;
2817 }
2818
2819 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2820 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2821 BUS_SPACE_BARRIER_WRITE);
2822 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2823 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2824 BUS_SPACE_BARRIER_WRITE);
2825
2826 if (mcx_init_wait(sc) != 0) {
2827 aprint_error_dev(self, "timeout waiting for init\n");
2828 goto cqfree;
2829 }
2830
2831 sc->sc_cmdq_mask = cq_size - 1;
2832 sc->sc_cmdq_size = cq_stride;
2833
2834 if (mcx_enable_hca(sc) != 0) {
2835 /* error printed by mcx_enable_hca */
2836 goto cqfree;
2837 }
2838
2839 if (mcx_issi(sc) != 0) {
2840 /* error printed by mcx_issi */
2841 goto teardown;
2842 }
2843
2844 if (mcx_pages(sc, &sc->sc_boot_pages,
2845 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2846 /* error printed by mcx_pages */
2847 goto teardown;
2848 }
2849
2850 if (mcx_hca_max_caps(sc) != 0) {
2851 /* error printed by mcx_hca_max_caps */
2852 goto teardown;
2853 }
2854
2855 if (mcx_hca_set_caps(sc) != 0) {
2856 /* error printed by mcx_hca_set_caps */
2857 goto teardown;
2858 }
2859
2860 if (mcx_pages(sc, &sc->sc_init_pages,
2861 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2862 /* error printed by mcx_pages */
2863 goto teardown;
2864 }
2865
2866 if (mcx_init_hca(sc) != 0) {
2867 /* error printed by mcx_init_hca */
2868 goto teardown;
2869 }
2870
2871 if (mcx_pages(sc, &sc->sc_regular_pages,
2872 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2873 /* error printed by mcx_pages */
2874 goto teardown;
2875 }
2876
2877 /* apparently not necessary? */
2878 if (mcx_set_driver_version(sc) != 0) {
2879 /* error printed by mcx_set_driver_version */
2880 goto teardown;
2881 }
2882
2883 if (mcx_iff(sc) != 0) { /* modify nic vport context */
		/* error printed by mcx_iff */
2885 goto teardown;
2886 }
2887
2888 if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2889 /* error printed by mcx_alloc_uar */
2890 goto teardown;
2891 }
2892
2893 if (mcx_alloc_pd(sc) != 0) {
2894 /* error printed by mcx_alloc_pd */
2895 goto teardown;
2896 }
2897
2898 if (mcx_alloc_tdomain(sc) != 0) {
2899 /* error printed by mcx_alloc_tdomain */
2900 goto teardown;
2901 }
2902
2903 /*
2904 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2905 * mellanox support tells me legacy interrupts are not supported,
2906 * so we're stuck with just msi-x.
2907 */
2908 counts[PCI_INTR_TYPE_MSIX] = -1;
2909 counts[PCI_INTR_TYPE_MSI] = 0;
2910 counts[PCI_INTR_TYPE_INTX] = 0;
2911 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2912 aprint_error_dev(self, "unable to allocate interrupt\n");
2913 goto teardown;
2914 }
2915 if (counts[PCI_INTR_TYPE_MSIX] < 2) {
2916 aprint_error_dev(self, "not enough MSI-X vectors\n");
2917 goto teardown;
2918 }
2919 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2920 snprintf(intrxname, sizeof(intrxname), "%s adminq", DEVNAME(sc));
2921 sc->sc_ihc = mcx_establish_intr(sc, 0, NULL, mcx_admin_intr, sc,
2922 intrxname);
2923 if (sc->sc_ihc == NULL) {
2924 aprint_error_dev(self, "couldn't establish adminq interrupt\n");
2925 goto teardown;
2926 }
2927
2928 if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2929 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2930 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2931 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2932 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2933 /* error printed by mcx_create_eq */
2934 goto teardown;
2935 }
2936
2937 if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2938 /* error printed by mcx_query_nic_vport_context */
2939 goto teardown;
2940 }
2941
2942 if (mcx_query_special_contexts(sc) != 0) {
2943 /* error printed by mcx_query_special_contexts */
2944 goto teardown;
2945 }
2946
2947 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2948 /* error printed by mcx_set_port_mtu */
2949 goto teardown;
2950 }
2951
2952 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2953 ether_sprintf(enaddr));
2954
2955 msix = counts[PCI_INTR_TYPE_MSIX];
2956 msix--; /* admin ops took one */
2957
2958 sc->sc_nqueues = uimin(MCX_MAX_QUEUES, msix);
2959 sc->sc_nqueues = uimin(sc->sc_nqueues, ncpu);
2960 /* Round down to a power of two. */
2961 sc->sc_nqueues = 1U << ilog2(sc->sc_nqueues);
2962 sc->sc_queues = kmem_zalloc(sc->sc_nqueues * sizeof(*sc->sc_queues),
2963 KM_SLEEP);
2964
2965 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2966 ifp->if_softc = sc;
2967 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2968 #ifdef MCX_MPSAFE
2969 ifp->if_extflags = IFEF_MPSAFE;
2970 #endif
2971 ifp->if_init = mcx_init;
2972 ifp->if_stop = mcx_stop;
2973 ifp->if_ioctl = mcx_ioctl;
2974 ifp->if_start = mcx_start;
2975 if (sc->sc_nqueues > 1) {
2976 ifp->if_transmit = mcx_transmit;
2977 }
2978 ifp->if_mtu = sc->sc_hardmtu;
2979 ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
2980 IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx |
2981 IFCAP_CSUM_UDPv6_Rx | IFCAP_CSUM_UDPv6_Tx |
2982 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
2983 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_TCPv6_Tx;
2984 IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2985 IFQ_SET_READY(&ifp->if_snd);
2986
2987 sc->sc_ec.ec_capabilities = ETHERCAP_JUMBO_MTU |
2988 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2989 sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2990
2991 sc->sc_ec.ec_ifmedia = &sc->sc_media;
2992 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2993 mcx_media_status, &sc->sc_media_mutex);
2994 mcx_media_add_types(sc);
2995 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2996 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2997
2998 if_attach(ifp);
2999
3000 /* Enable hardware offload by default */
3001 memset(&ifcr, 0, sizeof(ifcr));
3002 ifcr.ifcr_capenable = ifp->if_capabilities;
3003 ifioctl_common(ifp, SIOCSIFCAP, &ifcr);
3004
3005 if_deferred_start_init(ifp, NULL);
3006
3007 ether_ifattach(ifp, enaddr);
3008
3009 kcpuset_create(&affinity, false);
3010 kcpuset_set(affinity, 0);
3011
3012 for (i = 0; i < sc->sc_nqueues; i++) {
3013 struct mcx_queues *q = &sc->sc_queues[i];
3014 struct mcx_rx *rx = &q->q_rx;
3015 struct mcx_tx *tx = &q->q_tx;
3016 int vec;
3017
3018 vec = i + 1;
3019 q->q_sc = sc;
3020 q->q_index = i;
3021
3022 if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
3023 aprint_error_dev(self, "unable to alloc uar %d\n", i);
3024 goto teardown;
3025 }
3026
3027 if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
3028 aprint_error_dev(self,
3029 "unable to create event queue %d\n", i);
3030 goto teardown;
3031 }
3032
3033 rx->rx_softc = sc;
3034 callout_init(&rx->rx_refill, CALLOUT_FLAGS);
3035 callout_setfunc(&rx->rx_refill, mcx_refill, rx);
3036
3037 tx->tx_softc = sc;
3038 mutex_init(&tx->tx_lock, MUTEX_DEFAULT, IPL_NET);
3039 tx->tx_pcq = pcq_create(MCX_TXQ_NUM, KM_SLEEP);
3040 tx->tx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
3041 mcx_deferred_transmit, tx);
3042
3043 snprintf(intrxname, sizeof(intrxname), "%s queue %d",
3044 DEVNAME(sc), i);
		q->q_ihc = mcx_establish_intr(sc, vec, affinity, mcx_cq_intr,
		    q, intrxname);
		if (q->q_ihc == NULL) {
			aprint_error_dev(self,
			    "unable to establish interrupt for queue %d\n", i);
			goto teardown;
		}
	}

	kcpuset_destroy(affinity);
3048
3049 callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
3050 callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
3051
3052 if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
3053 PRI_NONE, IPL_NET, 0) != 0) {
3054 aprint_error_dev(self, "couldn't create port change workq\n");
3055 goto teardown;
3056 }
3057
3058 mcx_port_change(&sc->sc_port_change, sc);
3059
3060 sc->sc_mac_flow_table_id = -1;
3061 sc->sc_rss_flow_table_id = -1;
3062 sc->sc_rqt = -1;
3063 for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3064 struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
3065 mfg->g_id = -1;
3066 mfg->g_table = -1;
3067 mfg->g_size = 0;
3068 mfg->g_start = 0;
3069 }
3070 sc->sc_extra_mcast = 0;
3071 memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3072
3073 #if NKSTAT > 0
3074 mcx_kstat_attach(sc);
3075 #endif
3076 mcx_timecounter_attach(sc);
3077 return;
3078
3079 teardown:
3080 mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3081 /* error printed by mcx_teardown_hca, and we're already unwinding */
3082 cqfree:
3083 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3084 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3085 BUS_SPACE_BARRIER_WRITE);
3086 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3087 MCX_CMDQ_INTERFACE_DISABLED);
3088 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3089 BUS_SPACE_BARRIER_WRITE);
3090
3091 mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3092 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3093 BUS_SPACE_BARRIER_WRITE);
3094 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3095
3096 mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3097 dbfree:
3098 mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3099 unmap:
3100 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3101 sc->sc_mems = 0;
3102 }
3103
3104 static void *
3105 mcx_establish_intr(struct mcx_softc *sc, int index, kcpuset_t *affinity,
3106 int (*func)(void *), void *arg, const char *xname)
3107 {
3108 char intrbuf[PCI_INTRSTR_LEN];
3109 const char *intrstr;
3110 void *ih;
3111
3112 pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[index], PCI_INTR_MPSAFE,
3113 true);
3114
3115 intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[index], intrbuf,
3116 sizeof(intrbuf));
3117 ih = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[index], IPL_NET,
3118 func, arg, xname);
3119 if (ih == NULL) {
3120 aprint_error_dev(sc->sc_dev,
3121 "unable to establish interrupt%s%s\n",
3122 intrstr ? " at " : "",
3123 intrstr ? intrstr : "");
3124 return NULL;
3125 }
3126
3127 if (affinity != NULL && index > 0) {
3128 /* Round-robin affinity */
3129 kcpuset_zero(affinity);
3130 kcpuset_set(affinity, (index - 1) % ncpu);
3131 interrupt_distribute(ih, affinity, NULL);
3132 }
3133
3134 return ih;
3135 }
3136
3137 static void
3138 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
3139 {
3140 rxr->rxr_total = hwm;
3141 rxr->rxr_inuse = 0;
3142 }
3143
3144 static u_int
3145 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
3146 {
3147 const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
3148
3149 rxr->rxr_inuse += taken;
3150
3151 return taken;
3152 }
3153
3154 static void
3155 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
3156 {
3157 rxr->rxr_inuse -= n;
3158 }
3159
3160 static u_int
3161 mcx_rxr_inuse(struct mcx_rxring *rxr)
3162 {
3163 return rxr->rxr_inuse;
3164 }
3165
3166 static int
3167 mcx_version(struct mcx_softc *sc)
3168 {
3169 uint32_t fw0, fw1;
3170 uint16_t cmdif;
3171
3172 fw0 = mcx_rd(sc, MCX_FW_VER);
3173 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3174
3175 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
3176 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3177
3178 cmdif = MCX_CMDIF(fw1);
3179 if (cmdif != MCX_CMD_IF_SUPPORTED) {
3180 aprint_error_dev(sc->sc_dev,
3181 "unsupported command interface %u\n", cmdif);
3182 return (-1);
3183 }
3184
3185 return (0);
3186 }
3187
3188 static int
3189 mcx_init_wait(struct mcx_softc *sc)
3190 {
3191 unsigned int i;
3192 uint32_t r;
3193
3194 for (i = 0; i < 2000; i++) {
3195 r = mcx_rd(sc, MCX_STATE);
3196 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3197 return (0);
3198
3199 delay(1000);
3200 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3201 BUS_SPACE_BARRIER_READ);
3202 }
3203
3204 return (-1);
3205 }
3206
3207 static uint8_t
3208 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3209 unsigned int msec)
3210 {
3211 unsigned int i;
3212
3213 for (i = 0; i < msec; i++) {
3214 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3215 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3216
3217 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3218 MCX_CQ_STATUS_OWN_SW)
3219 return (0);
3220
3221 delay(1000);
3222 }
3223
3224 return (ETIMEDOUT);
3225 }
3226
3227 static uint32_t
3228 mcx_mix_u64(uint32_t xor, uint64_t u64)
3229 {
3230 xor ^= u64 >> 32;
3231 xor ^= u64;
3232
3233 return (xor);
3234 }
3235
3236 static uint32_t
3237 mcx_mix_u32(uint32_t xor, uint32_t u32)
3238 {
3239 xor ^= u32;
3240
3241 return (xor);
3242 }
3243
3244 static uint32_t
3245 mcx_mix_u8(uint32_t xor, uint8_t u8)
3246 {
3247 xor ^= u8;
3248
3249 return (xor);
3250 }
3251
3252 static uint8_t
3253 mcx_mix_done(uint32_t xor)
3254 {
3255 xor ^= xor >> 16;
3256 xor ^= xor >> 8;
3257
3258 return (xor);
3259 }
3260
3261 static uint8_t
3262 mcx_xor(const void *buf, size_t len)
3263 {
3264 const uint32_t *dwords = buf;
3265 uint32_t xor = 0xff;
3266 size_t i;
3267
3268 len /= sizeof(*dwords);
3269
3270 for (i = 0; i < len; i++)
3271 xor ^= dwords[i];
3272
3273 return (mcx_mix_done(xor));
3274 }
3275
3276 static uint8_t
3277 mcx_cmdq_token(struct mcx_softc *sc)
3278 {
3279 uint8_t token;
3280
3281 do {
3282 token = ++sc->sc_cmdq_token;
3283 } while (token == 0);
3284
3285 return (token);
3286 }
3287
3288 static void
3289 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3290 uint32_t ilen, uint32_t olen, uint8_t token)
3291 {
3292 memset(cqe, 0, sc->sc_cmdq_size);
3293
3294 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3295 be32enc(&cqe->cq_input_length, ilen);
3296 be32enc(&cqe->cq_output_length, olen);
3297 cqe->cq_token = token;
3298 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3299 }
3300
3301 static void
3302 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3303 {
3304 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3305 }
3306
3307 static int
3308 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3309 {
3310 /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
3311 return (0);
3312 }
3313
3314 static void *
3315 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3316 {
3317 return (&cqe->cq_input_data);
3318 }
3319
3320 static void *
3321 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3322 {
3323 return (&cqe->cq_output_data);
3324 }
3325
3326 static void
3327 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3328 unsigned int slot)
3329 {
3330 mcx_cmdq_sign(cqe);
3331
3332 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3333 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3334
3335 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3336 mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3337 BUS_SPACE_BARRIER_WRITE);
3338 }
3339
3340 static int
3341 mcx_enable_hca(struct mcx_softc *sc)
3342 {
3343 struct mcx_cmdq_entry *cqe;
3344 struct mcx_cmd_enable_hca_in *in;
3345 struct mcx_cmd_enable_hca_out *out;
3346 int error;
3347 uint8_t status;
3348
3349 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3350 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3351
3352 in = mcx_cmdq_in(cqe);
3353 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3354 in->cmd_op_mod = htobe16(0);
3355 in->cmd_function_id = htobe16(0);
3356
3357 mcx_cmdq_post(sc, cqe, 0);
3358
3359 error = mcx_cmdq_poll(sc, cqe, 1000);
3360 if (error != 0) {
3361 printf(", hca enable timeout\n");
3362 return (-1);
3363 }
3364 if (mcx_cmdq_verify(cqe) != 0) {
3365 printf(", hca enable command corrupt\n");
3366 return (-1);
3367 }
3368
3369 status = cqe->cq_output_data[0];
3370 if (status != MCX_CQ_STATUS_OK) {
3371 printf(", hca enable failed (%x)\n", status);
3372 return (-1);
3373 }
3374
3375 return (0);
3376 }
3377
3378 static int
3379 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3380 {
3381 struct mcx_cmdq_entry *cqe;
3382 struct mcx_cmd_teardown_hca_in *in;
3383 struct mcx_cmd_teardown_hca_out *out;
3384 int error;
3385 uint8_t status;
3386
3387 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3388 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3389
3390 in = mcx_cmdq_in(cqe);
3391 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3392 in->cmd_op_mod = htobe16(0);
3393 in->cmd_profile = profile;
3394
3395 mcx_cmdq_post(sc, cqe, 0);
3396
3397 error = mcx_cmdq_poll(sc, cqe, 1000);
3398 if (error != 0) {
3399 printf(", hca teardown timeout\n");
3400 return (-1);
3401 }
3402 if (mcx_cmdq_verify(cqe) != 0) {
3403 printf(", hca teardown command corrupt\n");
3404 return (-1);
3405 }
3406
3407 status = cqe->cq_output_data[0];
3408 if (status != MCX_CQ_STATUS_OK) {
3409 printf(", hca teardown failed (%x)\n", status);
3410 return (-1);
3411 }
3412
3413 return (0);
3414 }
3415
3416 static int
3417 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3418 unsigned int nmb, uint64_t *ptr, uint8_t token)
3419 {
3420 uint8_t *kva;
3421 uint64_t dva;
3422 int i;
3423 int error;
3424
3425 error = mcx_dmamem_alloc(sc, mxm,
3426 nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3427 if (error != 0)
3428 return (error);
3429
3430 mcx_dmamem_zero(mxm);
3431
3432 dva = MCX_DMA_DVA(mxm);
3433 kva = MCX_DMA_KVA(mxm);
3434 for (i = 0; i < nmb; i++) {
3435 struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3436
3437 /* patch the cqe or mbox pointing at this one */
3438 be64enc(ptr, dva);
3439
3440 /* fill in this mbox */
3441 be32enc(&mbox->mb_block_number, i);
3442 mbox->mb_token = token;
3443
3444 /* move to the next one */
3445 ptr = &mbox->mb_next_ptr;
3446
3447 dva += MCX_CMDQ_MAILBOX_SIZE;
3448 kva += MCX_CMDQ_MAILBOX_SIZE;
3449 }
3450
3451 return (0);
3452 }
3453
3454 static uint32_t
3455 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3456 {
3457 uint32_t xor = 0xff;
3458
3459 /* only 3 fields get set, so mix them directly */
3460 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3461 xor = mcx_mix_u32(xor, mb->mb_block_number);
3462 xor = mcx_mix_u8(xor, mb->mb_token);
3463
3464 return (mcx_mix_done(xor));
3465 }
3466
3467 static void
3468 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3469 {
3470 uint8_t *kva;
3471 int i;
3472
3473 kva = MCX_DMA_KVA(mxm);
3474
3475 for (i = 0; i < nmb; i++) {
3476 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3477 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3478 mb->mb_ctrl_signature = sig;
3479 mb->mb_signature = sig ^
3480 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3481
3482 kva += MCX_CMDQ_MAILBOX_SIZE;
3483 }
3484 }
3485
3486 static void
3487 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3488 {
3489 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3490 0, MCX_DMA_LEN(mxm), ops);
3491 }
3492
3493 static struct mcx_cmdq_mailbox *
3494 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3495 {
3496 uint8_t *kva;
3497
3498 kva = MCX_DMA_KVA(mxm);
3499 kva += i * MCX_CMDQ_MAILBOX_SIZE;
3500
3501 return ((struct mcx_cmdq_mailbox *)kva);
3502 }
3503
3504 static inline void *
3505 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3506 {
3507 return (&mb->mb_data);
3508 }
3509
3510 static void
3511 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3512 void *b, size_t len)
3513 {
3514 uint8_t *buf = b;
3515 struct mcx_cmdq_mailbox *mb;
3516 int i;
3517
3518 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3519 for (i = 0; i < nmb; i++) {
3520
3521 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
3522
3523 if (sizeof(mb->mb_data) >= len)
3524 break;
3525
3526 buf += sizeof(mb->mb_data);
3527 len -= sizeof(mb->mb_data);
3528 mb++;
3529 }
3530 }
3531
3532 static void
3533 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3534 struct mcx_dmamem *buf)
3535 {
3536 uint64_t *pas;
3537 int mbox, mbox_pages, i;
3538
3539 mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3540 offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3541
3542 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3543 pas += (offset / sizeof(*pas));
3544 mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3545 for (i = 0; i < npages; i++) {
3546 if (i == mbox_pages) {
3547 mbox++;
3548 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3549 mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3550 }
3551 *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3552 pas++;
3553 }
3554 }
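
/*
 * Worked example for mcx_cmdq_mboxes_pas() (assuming, for illustration,
 * MCX_CMDQ_MAILBOX_DATASIZE is 512 bytes, i.e. 64 physical addresses
 * per mailbox): a call with offset 16 and npages 100 starts writing at
 * byte 16 of mailbox 0, so mailbox 0 takes PAs 0-61 (mbox_pages == 62)
 * and PAs 62-99 continue from byte 0 of mailbox 1.
 */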
3555
3556 static void
3557 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3558 {
3559 uint8_t *buf = b;
3560 struct mcx_cmdq_mailbox *mb;
3561 int i;
3562
3563 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3564 for (i = 0; i < nmb; i++) {
3565 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
3566
3567 if (sizeof(mb->mb_data) >= len)
3568 break;
3569
3570 buf += sizeof(mb->mb_data);
3571 len -= sizeof(mb->mb_data);
3572 mb++;
3573 }
3574 }
3575
3576 static void
3577 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3578 {
3579 mcx_dmamem_free(sc, mxm);
3580 }
3581
3582 #if 0
3583 static void
3584 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3585 {
3586 unsigned int i;
3587
3588 printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3589 be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
3590
3591 printf(", idata ");
3592 for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3593 printf("%02x", cqe->cq_input_data[i]);
3594
3595 printf(", odata ");
3596 for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3597 printf("%02x", cqe->cq_output_data[i]);
3598
3599 printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3600 be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
3601 cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3602 }
3603
3604 static void
3605 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3606 {
3607 int i, j;
3608 uint8_t *d;
3609
3610 for (i = 0; i < num; i++) {
3611 struct mcx_cmdq_mailbox *mbox;
3612 mbox = mcx_cq_mbox(mboxes, i);
3613
3614 d = mcx_cq_mbox_data(mbox);
3615 for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3616 if (j != 0 && (j % 16 == 0))
3617 printf("\n");
3618 printf("%.2x ", d[j]);
3619 }
3620 }
3621 }
3622 #endif
3623
3624 static int
3625 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3626 int len)
3627 {
3628 struct mcx_dmamem mxm;
3629 struct mcx_cmdq_entry *cqe;
3630 struct mcx_cmd_access_reg_in *in;
3631 struct mcx_cmd_access_reg_out *out;
3632 uint8_t token = mcx_cmdq_token(sc);
3633 int error, nmb;
3634
3635 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3636 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3637 token);
3638
3639 in = mcx_cmdq_in(cqe);
3640 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3641 in->cmd_op_mod = htobe16(op);
3642 in->cmd_register_id = htobe16(reg);
3643
3644 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3645 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3646 &cqe->cq_output_ptr, token) != 0) {
3647 printf(", unable to allocate access reg mailboxen\n");
3648 return (-1);
3649 }
3650 cqe->cq_input_ptr = cqe->cq_output_ptr;
3651 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3652 mcx_cmdq_mboxes_sign(&mxm, nmb);
3653 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3654
3655 mcx_cmdq_post(sc, cqe, 0);
3656 error = mcx_cmdq_poll(sc, cqe, 1000);
3657 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3658
3659 if (error != 0) {
3660 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3661 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3662 goto free;
3663 }
3664 error = mcx_cmdq_verify(cqe);
3665 if (error != 0) {
3666 printf("%s: access reg (%s %x) reply corrupt\n",
3667 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
3668 reg);
3669 goto free;
3670 }
3671
3672 out = mcx_cmdq_out(cqe);
3673 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3674 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3675 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3676 reg, out->cmd_status, be32toh(out->cmd_syndrome));
3677 error = -1;
3678 goto free;
3679 }
3680
3681 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3682 free:
3683 mcx_dmamem_free(sc, &mxm);
3684
3685 return (error);
3686 }
3687
3688 static int
3689 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3690 unsigned int slot)
3691 {
3692 struct mcx_cmd_set_issi_in *in;
3693 struct mcx_cmd_set_issi_out *out;
3694 uint8_t status;
3695
3696 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3697
3698 in = mcx_cmdq_in(cqe);
3699 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3700 in->cmd_op_mod = htobe16(0);
3701 in->cmd_current_issi = htobe16(MCX_ISSI);
3702
3703 mcx_cmdq_post(sc, cqe, slot);
3704 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3705 return (-1);
3706 if (mcx_cmdq_verify(cqe) != 0)
3707 return (-1);
3708
3709 status = cqe->cq_output_data[0];
3710 if (status != MCX_CQ_STATUS_OK)
3711 return (-1);
3712
3713 return (0);
3714 }
3715
3716 static int
3717 mcx_issi(struct mcx_softc *sc)
3718 {
3719 struct mcx_dmamem mxm;
3720 struct mcx_cmdq_entry *cqe;
3721 struct mcx_cmd_query_issi_in *in;
3722 struct mcx_cmd_query_issi_il_out *out;
3723 struct mcx_cmd_query_issi_mb_out *mb;
3724 uint8_t token = mcx_cmdq_token(sc);
3725 uint8_t status;
3726 int error;
3727
3728 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3729 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3730
3731 in = mcx_cmdq_in(cqe);
3732 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3733 in->cmd_op_mod = htobe16(0);
3734
3735 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3736 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3737 &cqe->cq_output_ptr, token) != 0) {
3738 printf(", unable to allocate query issi mailbox\n");
3739 return (-1);
3740 }
3741 mcx_cmdq_mboxes_sign(&mxm, 1);
3742
3743 mcx_cmdq_post(sc, cqe, 0);
3744 error = mcx_cmdq_poll(sc, cqe, 1000);
3745 if (error != 0) {
3746 printf(", query issi timeout\n");
3747 goto free;
3748 }
3749 error = mcx_cmdq_verify(cqe);
3750 if (error != 0) {
3751 printf(", query issi reply corrupt\n");
3752 goto free;
3753 }
3754
3755 status = cqe->cq_output_data[0];
3756 switch (status) {
3757 case MCX_CQ_STATUS_OK:
3758 break;
3759 case MCX_CQ_STATUS_BAD_OPCODE:
3760 /* use ISSI 0 */
3761 goto free;
3762 default:
3763 printf(", query issi failed (%x)\n", status);
3764 error = -1;
3765 goto free;
3766 }
3767
3768 out = mcx_cmdq_out(cqe);
3769 if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3770 /* use ISSI 1 */
3771 goto free;
3772 }
3773
3774 /* don't need to read cqe anymore, can be used for SET ISSI */
3775
3776 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3777 CTASSERT(MCX_ISSI < NBBY);
	/*
	 * the supported_issi field is a big-endian bitmask, so ISSIs 0-7
	 * (including ours) are found in its last byte.
	 */
3779 if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3780 /* use ISSI 0 */
3781 goto free;
3782 }
3783
	/* on success we are now on ISSI 1; if SET_ISSI fails, stay on ISSI 0 */
	(void)mcx_set_issi(sc, cqe, 0);
3789
3790 free:
3791 mcx_cq_mboxes_free(sc, &mxm);
3792 return (error);
3793 }
3794
3795 static int
3796 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3797 int32_t *npages, uint16_t *func_id)
3798 {
3799 struct mcx_cmdq_entry *cqe;
3800 struct mcx_cmd_query_pages_in *in;
3801 struct mcx_cmd_query_pages_out *out;
3802
3803 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3804 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3805
3806 in = mcx_cmdq_in(cqe);
3807 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3808 in->cmd_op_mod = type;
3809
3810 mcx_cmdq_post(sc, cqe, 0);
3811 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3812 printf(", query pages timeout\n");
3813 return (-1);
3814 }
3815 if (mcx_cmdq_verify(cqe) != 0) {
3816 printf(", query pages reply corrupt\n");
3817 return (-1);
3818 }
3819
3820 out = mcx_cmdq_out(cqe);
3821 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3822 printf(", query pages failed (%x)\n", out->cmd_status);
3823 return (-1);
3824 }
3825
3826 *func_id = out->cmd_func_id;
3827 *npages = be32dec(&out->cmd_num_pages);
3828
3829 return (0);
3830 }
3831
3832 struct bus_dma_iter {
3833 bus_dmamap_t i_map;
3834 bus_size_t i_offset;
3835 unsigned int i_index;
3836 };
3837
3838 static void
3839 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3840 {
3841 i->i_map = map;
3842 i->i_offset = 0;
3843 i->i_index = 0;
3844 }
3845
3846 static bus_addr_t
3847 bus_dma_iter_addr(struct bus_dma_iter *i)
3848 {
3849 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3850 }
3851
3852 static void
3853 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3854 {
3855 bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3856 bus_size_t diff;
3857
3858 do {
3859 diff = seg->ds_len - i->i_offset;
3860 if (size < diff)
3861 break;
3862
3863 size -= diff;
3864
3865 seg++;
3866
3867 i->i_offset = 0;
3868 i->i_index++;
3869 } while (size > 0);
3870
3871 i->i_offset += size;
3872 }
3873
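/*
 * A minimal usage sketch for the iterator above (names illustrative):
 * walk a loaded DMA map one device page at a time, collecting the bus
 * address of each page.  mcx_add_pages() below does exactly this for
 * each mailbox it fills.
 *
 *	struct bus_dma_iter iter;
 *
 *	bus_dma_iter_init(&iter, map);
 *	for (i = 0; i < npages; i++) {
 *		pa = bus_dma_iter_addr(&iter);
 *		bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
 *	}
 */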
3874 static int
3875 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3876 {
3877 struct mcx_dmamem mxm;
3878 struct mcx_cmdq_entry *cqe;
3879 struct mcx_cmd_manage_pages_in *in;
3880 struct mcx_cmd_manage_pages_out *out;
3881 unsigned int paslen, nmb, i, j, npages;
3882 struct bus_dma_iter iter;
3883 uint64_t *pas;
3884 uint8_t status;
3885 uint8_t token = mcx_cmdq_token(sc);
3886 int error;
3887
3888 npages = mhm->mhm_npages;
3889
3890 paslen = sizeof(*pas) * npages;
3891 nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3892
3893 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3894 mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3895
3896 in = mcx_cmdq_in(cqe);
3897 in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3898 in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3899 in->cmd_func_id = func_id;
3900 be32enc(&in->cmd_input_num_entries, npages);
3901
3902 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3903 &cqe->cq_input_ptr, token) != 0) {
3904 printf(", unable to allocate manage pages mailboxen\n");
3905 return (-1);
3906 }
3907
3908 bus_dma_iter_init(&iter, mhm->mhm_map);
3909 for (i = 0; i < nmb; i++) {
3910 unsigned int lim;
3911
3912 pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3913 lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3914
3915 for (j = 0; j < lim; j++) {
3916 be64enc(&pas[j], bus_dma_iter_addr(&iter));
3917 bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3918 }
3919
3920 npages -= lim;
3921 }
3922
3923 mcx_cmdq_mboxes_sign(&mxm, nmb);
3924
3925 mcx_cmdq_post(sc, cqe, 0);
3926 error = mcx_cmdq_poll(sc, cqe, 1000);
3927 if (error != 0) {
3928 printf(", manage pages timeout\n");
3929 goto free;
3930 }
3931 error = mcx_cmdq_verify(cqe);
3932 if (error != 0) {
3933 printf(", manage pages reply corrupt\n");
3934 goto free;
3935 }
3936
3937 status = cqe->cq_output_data[0];
3938 if (status != MCX_CQ_STATUS_OK) {
3939 printf(", manage pages failed (%x)\n", status);
3940 error = -1;
3941 goto free;
3942 }
3943
3944 free:
3945 mcx_dmamem_free(sc, &mxm);
3946
3947 return (error);
3948 }
3949
3950 static int
3951 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3952 {
3953 int32_t npages;
3954 uint16_t func_id;
3955
3956 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3957 /* error printed by mcx_query_pages */
3958 return (-1);
3959 }
3960
3961 if (npages < 1)
3962 return (0);
3963
3964 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3965 printf(", unable to allocate hwmem\n");
3966 return (-1);
3967 }
3968
3969 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3970 printf(", unable to add hwmem\n");
3971 goto free;
3972 }
3973
3974 return (0);
3975
3976 free:
3977 mcx_hwmem_free(sc, mhm);
3978
3979 return (-1);
3980 }
3981
3982 static int
3983 mcx_hca_max_caps(struct mcx_softc *sc)
3984 {
3985 struct mcx_dmamem mxm;
3986 struct mcx_cmdq_entry *cqe;
3987 struct mcx_cmd_query_hca_cap_in *in;
3988 struct mcx_cmd_query_hca_cap_out *out;
3989 struct mcx_cmdq_mailbox *mb;
3990 struct mcx_cap_device *hca;
3991 uint8_t status;
3992 uint8_t token = mcx_cmdq_token(sc);
3993 int error;
3994
3995 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3996 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3997 token);
3998
3999 in = mcx_cmdq_in(cqe);
4000 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4001 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
4002 MCX_CMD_QUERY_HCA_CAP_DEVICE);
4003
4004 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4005 &cqe->cq_output_ptr, token) != 0) {
4006 printf(", unable to allocate query hca caps mailboxen\n");
4007 return (-1);
4008 }
4009 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4010 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4011
4012 mcx_cmdq_post(sc, cqe, 0);
4013 error = mcx_cmdq_poll(sc, cqe, 1000);
4014 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4015
4016 if (error != 0) {
4017 printf(", query hca caps timeout\n");
4018 goto free;
4019 }
4020 error = mcx_cmdq_verify(cqe);
4021 if (error != 0) {
4022 printf(", query hca caps reply corrupt\n");
4023 goto free;
4024 }
4025
4026 status = cqe->cq_output_data[0];
4027 if (status != MCX_CQ_STATUS_OK) {
4028 printf(", query hca caps failed (%x)\n", status);
4029 error = -1;
4030 goto free;
4031 }
4032
4033 mb = mcx_cq_mbox(&mxm, 0);
4034 hca = mcx_cq_mbox_data(mb);
4035
4036 if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
4037 != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
4038 printf(", not in ethernet mode\n");
4039 error = -1;
4040 goto free;
4041 }
4042 if (hca->log_pg_sz > PAGE_SHIFT) {
		printf(", device minimum page shift %u exceeds the system "
		    "page shift\n", hca->log_pg_sz);
4045 error = -1;
4046 goto free;
4047 }
4048 /*
4049 * blueflame register is split into two buffers, and we must alternate
4050 * between the two of them.
4051 */
4052 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
4053 sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
4054
4055 if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4056 sc->sc_mcam_reg = 1;
4057
4058 sc->sc_mhz = be32dec(&hca->device_frequency_mhz);
4059 sc->sc_khz = be32dec(&hca->device_frequency_khz);
4060
4061 free:
4062 mcx_dmamem_free(sc, &mxm);
4063
4064 return (error);
4065 }
4066
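/*
 * Unlike mcx_hca_max_caps() above, this queries the *current* device
 * capabilities (MCX_CMD_QUERY_HCA_CAP_CURRENT rather than _MAX).
 */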
4067 static int
4068 mcx_hca_set_caps(struct mcx_softc *sc)
4069 {
4070 struct mcx_dmamem mxm;
4071 struct mcx_cmdq_entry *cqe;
4072 struct mcx_cmd_query_hca_cap_in *in;
4073 struct mcx_cmd_query_hca_cap_out *out;
4074 struct mcx_cmdq_mailbox *mb;
4075 struct mcx_cap_device *hca;
4076 uint8_t status;
4077 uint8_t token = mcx_cmdq_token(sc);
4078 int error;
4079
4080 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4081 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4082 token);
4083
4084 in = mcx_cmdq_in(cqe);
4085 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4086 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4087 MCX_CMD_QUERY_HCA_CAP_DEVICE);
4088
4089 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4090 &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query hca caps mailboxen\n");
4092 return (-1);
4093 }
4094 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4095 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4096
4097 mcx_cmdq_post(sc, cqe, 0);
4098 error = mcx_cmdq_poll(sc, cqe, 1000);
4099 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4100
4101 if (error != 0) {
4102 printf(", query hca caps timeout\n");
4103 goto free;
4104 }
4105 error = mcx_cmdq_verify(cqe);
4106 if (error != 0) {
4107 printf(", query hca caps reply corrupt\n");
4108 goto free;
4109 }
4110
4111 status = cqe->cq_output_data[0];
4112 if (status != MCX_CQ_STATUS_OK) {
4113 printf(", query hca caps failed (%x)\n", status);
4114 error = -1;
4115 goto free;
4116 }
4117
4118 mb = mcx_cq_mbox(&mxm, 0);
4119 hca = mcx_cq_mbox_data(mb);
4120
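	/*
	 * XXX this updates log_pg_sz only in our local copy of the
	 * capability mailbox; no SET_HCA_CAP command is posted, so the
	 * new value never reaches the device.
	 */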
4121 hca->log_pg_sz = PAGE_SHIFT;
4122
4123 free:
4124 mcx_dmamem_free(sc, &mxm);
4125
4126 return (error);
4127 }
4128
4130 static int
4131 mcx_init_hca(struct mcx_softc *sc)
4132 {
4133 struct mcx_cmdq_entry *cqe;
4134 struct mcx_cmd_init_hca_in *in;
4135 struct mcx_cmd_init_hca_out *out;
4136 int error;
4137 uint8_t status;
4138
4139 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4140 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4141
4142 in = mcx_cmdq_in(cqe);
4143 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4144 in->cmd_op_mod = htobe16(0);
4145
4146 mcx_cmdq_post(sc, cqe, 0);
4147
4148 error = mcx_cmdq_poll(sc, cqe, 1000);
4149 if (error != 0) {
4150 printf(", hca init timeout\n");
4151 return (-1);
4152 }
4153 if (mcx_cmdq_verify(cqe) != 0) {
4154 printf(", hca init command corrupt\n");
4155 return (-1);
4156 }
4157
4158 status = cqe->cq_output_data[0];
4159 if (status != MCX_CQ_STATUS_OK) {
4160 printf(", hca init failed (%x)\n", status);
4161 return (-1);
4162 }
4163
4164 return (0);
4165 }
4166
4167 static int
4168 mcx_set_driver_version(struct mcx_softc *sc)
4169 {
4170 struct mcx_dmamem mxm;
4171 struct mcx_cmdq_entry *cqe;
4172 struct mcx_cmd_set_driver_version_in *in;
4173 struct mcx_cmd_set_driver_version_out *out;
4174 int error;
4175 int token;
4176 uint8_t status;
4177
4178 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4179 token = mcx_cmdq_token(sc);
4180 mcx_cmdq_init(sc, cqe, sizeof(*in) +
4181 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4182
4183 in = mcx_cmdq_in(cqe);
4184 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4185 in->cmd_op_mod = htobe16(0);
4186
4187 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4188 &cqe->cq_input_ptr, token) != 0) {
4189 printf(", unable to allocate set driver version mailboxen\n");
4190 return (-1);
4191 }
4192 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4193 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4194
4195 mcx_cmdq_mboxes_sign(&mxm, 1);
4196 mcx_cmdq_post(sc, cqe, 0);
4197
4198 error = mcx_cmdq_poll(sc, cqe, 1000);
4199 if (error != 0) {
4200 printf(", set driver version timeout\n");
4201 goto free;
4202 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", set driver version command corrupt\n");
		error = -1;
		goto free;
	}
4207
4208 status = cqe->cq_output_data[0];
4209 if (status != MCX_CQ_STATUS_OK) {
4210 printf(", set driver version failed (%x)\n", status);
4211 error = -1;
4212 goto free;
4213 }
4214
4215 free:
4216 mcx_dmamem_free(sc, &mxm);
4217
4218 return (error);
4219 }
4220
4221 static int
4222 mcx_iff(struct mcx_softc *sc)
4223 {
4224 struct ifnet *ifp = &sc->sc_ec.ec_if;
4225 struct mcx_dmamem mxm;
4226 struct mcx_cmdq_entry *cqe;
4227 struct mcx_cmd_modify_nic_vport_context_in *in;
4228 struct mcx_cmd_modify_nic_vport_context_out *out;
4229 struct mcx_nic_vport_ctx *ctx;
4230 int error;
4231 int token;
4232 int insize;
4233 uint32_t dest;
4234
4235 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4236 sc->sc_rss_flow_table_id;
4237
4238 /* enable or disable the promisc flow */
4239 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4240 if (sc->sc_promisc_flow_enabled == 0) {
4241 mcx_set_flow_table_entry_mac(sc,
4242 MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4243 sc->sc_promisc_flow_enabled = 1;
4244 }
4245 } else if (sc->sc_promisc_flow_enabled != 0) {
4246 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4247 sc->sc_promisc_flow_enabled = 0;
4248 }
4249
4250 /* enable or disable the all-multicast flow */
4251 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4252 if (sc->sc_allmulti_flow_enabled == 0) {
4253 uint8_t mcast[ETHER_ADDR_LEN];
4254
4255 memset(mcast, 0, sizeof(mcast));
4256 mcast[0] = 0x01;
4257 mcx_set_flow_table_entry_mac(sc,
4258 MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4259 sc->sc_allmulti_flow_enabled = 1;
4260 }
4261 } else if (sc->sc_allmulti_flow_enabled != 0) {
4262 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4263 sc->sc_allmulti_flow_enabled = 0;
4264 }
4265
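	/*
	 * the vport context sits 240 bytes into the mailbox data for
	 * this command, so reserve that much extra input space.
	 */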
4266 insize = sizeof(struct mcx_nic_vport_ctx) + 240;
4267
4268 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4269 token = mcx_cmdq_token(sc);
4270 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4271
4272 in = mcx_cmdq_in(cqe);
4273 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4274 in->cmd_op_mod = htobe16(0);
4275 in->cmd_field_select = htobe32(
4276 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4277 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4278
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_input_ptr, token) != 0) {
4280 printf(", unable to allocate modify "
4281 "nic vport context mailboxen\n");
4282 return (-1);
4283 }
4284 ctx = (struct mcx_nic_vport_ctx *)
4285 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4286 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4287 /*
4288 * always leave promisc-all enabled on the vport since we
4289 * can't give it a vlan list, and we're already doing multicast
4290 * filtering in the flow table.
4291 */
4292 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4293
4294 mcx_cmdq_mboxes_sign(&mxm, 1);
4295 mcx_cmdq_post(sc, cqe, 0);
4296
4297 error = mcx_cmdq_poll(sc, cqe, 1000);
4298 if (error != 0) {
4299 printf(", modify nic vport context timeout\n");
4300 goto free;
4301 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", modify nic vport context command corrupt\n");
		error = -1;
		goto free;
	}
4306
4307 out = mcx_cmdq_out(cqe);
4308 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4309 printf(", modify nic vport context failed (%x, %x)\n",
4310 out->cmd_status, be32toh(out->cmd_syndrome));
4311 error = -1;
4312 goto free;
4313 }
4314
4315 free:
4316 mcx_dmamem_free(sc, &mxm);
4317
4318 return (error);
4319 }
4320
4321 static int
4322 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4323 {
4324 struct mcx_cmdq_entry *cqe;
4325 struct mcx_cmd_alloc_uar_in *in;
4326 struct mcx_cmd_alloc_uar_out *out;
4327 int error;
4328
4329 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4330 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4331
4332 in = mcx_cmdq_in(cqe);
4333 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4334 in->cmd_op_mod = htobe16(0);
4335
4336 mcx_cmdq_post(sc, cqe, 0);
4337
4338 error = mcx_cmdq_poll(sc, cqe, 1000);
4339 if (error != 0) {
4340 printf(", alloc uar timeout\n");
4341 return (-1);
4342 }
4343 if (mcx_cmdq_verify(cqe) != 0) {
4344 printf(", alloc uar command corrupt\n");
4345 return (-1);
4346 }
4347
4348 out = mcx_cmdq_out(cqe);
4349 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4350 printf(", alloc uar failed (%x)\n", out->cmd_status);
4351 return (-1);
4352 }
4353
4354 *uar = mcx_get_id(out->cmd_uar);
4355 return (0);
4356 }
4357
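/*
 * Queue creation follows a common pattern: allocate page-aligned DMA
 * memory for the ring, initialise the entry ownership bits, and pass
 * the page addresses to the firmware in the mailbox area following the
 * queue context.
 */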
4358 static int
4359 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4360 uint64_t events, int vector)
4361 {
4362 struct mcx_cmdq_entry *cqe;
4363 struct mcx_dmamem mxm;
4364 struct mcx_cmd_create_eq_in *in;
4365 struct mcx_cmd_create_eq_mb_in *mbin;
4366 struct mcx_cmd_create_eq_out *out;
4367 struct mcx_eq_entry *eqe;
4368 int error;
4369 uint64_t *pas;
4370 int insize, npages, paslen, i, token;
4371
4372 eq->eq_cons = 0;
4373
4374 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4375 MCX_PAGE_SIZE);
4376 paslen = npages * sizeof(*pas);
4377 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4378
4379 if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4380 MCX_PAGE_SIZE) != 0) {
4381 printf(", unable to allocate event queue memory\n");
4382 return (-1);
4383 }
4384
4385 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4386 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4387 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4388 }
4389
4390 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4391 token = mcx_cmdq_token(sc);
4392 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4393
4394 in = mcx_cmdq_in(cqe);
4395 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4396 in->cmd_op_mod = htobe16(0);
4397
4398 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4399 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4400 &cqe->cq_input_ptr, token) != 0) {
4401 printf(", unable to allocate create eq mailboxen\n");
4402 goto free_eq;
4403 }
4404 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4405 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4406 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4407 mbin->cmd_eq_ctx.eq_intr = vector;
4408 mbin->cmd_event_bitmask = htobe64(events);
4409
4410 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4411 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4412
4413 /* physical addresses follow the mailbox in data */
4414 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4415 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4416 mcx_cmdq_post(sc, cqe, 0);
4417
4418 error = mcx_cmdq_poll(sc, cqe, 1000);
4419 if (error != 0) {
4420 printf(", create eq timeout\n");
4421 goto free_mxm;
4422 }
4423 if (mcx_cmdq_verify(cqe) != 0) {
4424 printf(", create eq command corrupt\n");
4425 goto free_mxm;
4426 }
4427
4428 out = mcx_cmdq_out(cqe);
4429 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4430 printf(", create eq failed (%x, %x)\n", out->cmd_status,
4431 be32toh(out->cmd_syndrome));
4432 goto free_mxm;
4433 }
4434
4435 eq->eq_n = mcx_get_id(out->cmd_eqn);
4436
4437 mcx_dmamem_free(sc, &mxm);
4438
4439 mcx_arm_eq(sc, eq, uar);
4440
4441 return (0);
4442
4443 free_mxm:
4444 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4445 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4446 mcx_dmamem_free(sc, &mxm);
4447 free_eq:
4448 mcx_dmamem_free(sc, &eq->eq_mem);
4449 return (-1);
4450 }
4451
4452 static int
4453 mcx_alloc_pd(struct mcx_softc *sc)
4454 {
4455 struct mcx_cmdq_entry *cqe;
4456 struct mcx_cmd_alloc_pd_in *in;
4457 struct mcx_cmd_alloc_pd_out *out;
4458 int error;
4459
4460 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4461 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4462
4463 in = mcx_cmdq_in(cqe);
4464 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4465 in->cmd_op_mod = htobe16(0);
4466
4467 mcx_cmdq_post(sc, cqe, 0);
4468
4469 error = mcx_cmdq_poll(sc, cqe, 1000);
4470 if (error != 0) {
4471 printf(", alloc pd timeout\n");
4472 return (-1);
4473 }
4474 if (mcx_cmdq_verify(cqe) != 0) {
4475 printf(", alloc pd command corrupt\n");
4476 return (-1);
4477 }
4478
4479 out = mcx_cmdq_out(cqe);
4480 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4481 printf(", alloc pd failed (%x)\n", out->cmd_status);
4482 return (-1);
4483 }
4484
4485 sc->sc_pd = mcx_get_id(out->cmd_pd);
4486 return (0);
4487 }
4488
4489 static int
4490 mcx_alloc_tdomain(struct mcx_softc *sc)
4491 {
4492 struct mcx_cmdq_entry *cqe;
4493 struct mcx_cmd_alloc_td_in *in;
4494 struct mcx_cmd_alloc_td_out *out;
4495 int error;
4496
4497 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4498 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4499
4500 in = mcx_cmdq_in(cqe);
4501 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4502 in->cmd_op_mod = htobe16(0);
4503
4504 mcx_cmdq_post(sc, cqe, 0);
4505
4506 error = mcx_cmdq_poll(sc, cqe, 1000);
4507 if (error != 0) {
4508 printf(", alloc transport domain timeout\n");
4509 return (-1);
4510 }
4511 if (mcx_cmdq_verify(cqe) != 0) {
4512 printf(", alloc transport domain command corrupt\n");
4513 return (-1);
4514 }
4515
4516 out = mcx_cmdq_out(cqe);
4517 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4518 printf(", alloc transport domain failed (%x)\n",
4519 out->cmd_status);
4520 return (-1);
4521 }
4522
4523 sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4524 return (0);
4525 }
4526
4527 static int
4528 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
4529 {
4530 struct mcx_dmamem mxm;
4531 struct mcx_cmdq_entry *cqe;
4532 struct mcx_cmd_query_nic_vport_context_in *in;
4533 struct mcx_cmd_query_nic_vport_context_out *out;
4534 struct mcx_nic_vport_ctx *ctx;
4535 uint8_t *addr;
4536 int error, token, i;
4537
4538 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4539 token = mcx_cmdq_token(sc);
4540 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4541
4542 in = mcx_cmdq_in(cqe);
4543 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4544 in->cmd_op_mod = htobe16(0);
4545 in->cmd_allowed_list_type = 0;
4546
4547 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4548 &cqe->cq_output_ptr, token) != 0) {
4549 printf(", unable to allocate "
4550 "query nic vport context mailboxen\n");
4551 return (-1);
4552 }
4553 mcx_cmdq_mboxes_sign(&mxm, 1);
4554 mcx_cmdq_post(sc, cqe, 0);
4555
4556 error = mcx_cmdq_poll(sc, cqe, 1000);
4557 if (error != 0) {
4558 printf(", query nic vport context timeout\n");
4559 goto free;
4560 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", query nic vport context command corrupt\n");
		error = -1;
		goto free;
	}
4565
4566 out = mcx_cmdq_out(cqe);
4567 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4568 printf(", query nic vport context failed (%x, %x)\n",
4569 out->cmd_status, be32toh(out->cmd_syndrome));
4570 error = -1;
4571 goto free;
4572 }
4573
4574 ctx = (struct mcx_nic_vport_ctx *)
4575 mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4576 addr = (uint8_t *)&ctx->vp_perm_addr;
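	/* the MAC is in the low six bytes of vp_perm_addr, hence the offset */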
4577 for (i = 0; i < ETHER_ADDR_LEN; i++) {
4578 enaddr[i] = addr[i + 2];
4579 }
4580 free:
4581 mcx_dmamem_free(sc, &mxm);
4582
4583 return (error);
4584 }
4585
4586 static int
4587 mcx_query_special_contexts(struct mcx_softc *sc)
4588 {
4589 struct mcx_cmdq_entry *cqe;
4590 struct mcx_cmd_query_special_ctx_in *in;
4591 struct mcx_cmd_query_special_ctx_out *out;
4592 int error;
4593
4594 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4595 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4596
4597 in = mcx_cmdq_in(cqe);
4598 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4599 in->cmd_op_mod = htobe16(0);
4600
4601 mcx_cmdq_post(sc, cqe, 0);
4602
4603 error = mcx_cmdq_poll(sc, cqe, 1000);
4604 if (error != 0) {
4605 printf(", query special contexts timeout\n");
4606 return (-1);
4607 }
4608 if (mcx_cmdq_verify(cqe) != 0) {
4609 printf(", query special contexts command corrupt\n");
4610 return (-1);
4611 }
4612
4613 out = mcx_cmdq_out(cqe);
4614 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4615 printf(", query special contexts failed (%x)\n",
4616 out->cmd_status);
4617 return (-1);
4618 }
4619
4620 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
4621 return (0);
4622 }
4623
4624 static int
4625 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4626 {
4627 struct mcx_reg_pmtu pmtu;
4628 int error;
4629
4630 /* read max mtu */
4631 memset(&pmtu, 0, sizeof(pmtu));
4632 pmtu.rp_local_port = 1;
4633 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4634 sizeof(pmtu));
4635 if (error != 0) {
4636 printf(", unable to get port MTU\n");
4637 return error;
4638 }
4639
4640 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
4641 pmtu.rp_admin_mtu = htobe16(mtu);
4642 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4643 sizeof(pmtu));
4644 if (error != 0) {
4645 printf(", unable to set port MTU\n");
4646 return error;
4647 }
4648
4649 sc->sc_hardmtu = mtu;
4650 sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4651 return 0;
4652 }
4653
4654 static int
4655 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4656 {
4657 struct mcx_cmdq_entry *cmde;
4658 struct mcx_cq_entry *cqe;
4659 struct mcx_dmamem mxm;
4660 struct mcx_cmd_create_cq_in *in;
4661 struct mcx_cmd_create_cq_mb_in *mbin;
4662 struct mcx_cmd_create_cq_out *out;
4663 int error;
4664 uint64_t *pas;
4665 int insize, npages, paslen, i, token;
4666
4667 cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4668
4669 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4670 MCX_PAGE_SIZE);
4671 paslen = npages * sizeof(*pas);
4672 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4673
4674 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4675 MCX_PAGE_SIZE) != 0) {
4676 printf("%s: unable to allocate completion queue memory\n",
4677 DEVNAME(sc));
4678 return (-1);
4679 }
4680 cqe = MCX_DMA_KVA(&cq->cq_mem);
4681 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4682 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4683 }
4684
4685 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4686 token = mcx_cmdq_token(sc);
4687 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4688
4689 in = mcx_cmdq_in(cmde);
4690 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4691 in->cmd_op_mod = htobe16(0);
4692
4693 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4694 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4695 &cmde->cq_input_ptr, token) != 0) {
4696 printf("%s: unable to allocate create cq mailboxen\n",
4697 DEVNAME(sc));
4698 goto free_cq;
4699 }
4700 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4701 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4702 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4703 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4704 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4705 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4706 MCX_CQ_MOD_COUNTER);
4707 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4708 MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4709
4710 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4711 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4712
4713 /* physical addresses follow the mailbox in data */
4714 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4715 mcx_cmdq_post(sc, cmde, 0);
4716
4717 error = mcx_cmdq_poll(sc, cmde, 1000);
4718 if (error != 0) {
4719 printf("%s: create cq timeout\n", DEVNAME(sc));
4720 goto free_mxm;
4721 }
4722 if (mcx_cmdq_verify(cmde) != 0) {
4723 printf("%s: create cq command corrupt\n", DEVNAME(sc));
4724 goto free_mxm;
4725 }
4726
4727 out = mcx_cmdq_out(cmde);
4728 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4729 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4730 out->cmd_status, be32toh(out->cmd_syndrome));
4731 goto free_mxm;
4732 }
4733
4734 cq->cq_n = mcx_get_id(out->cmd_cqn);
4735 cq->cq_cons = 0;
4736 cq->cq_count = 0;
4737
4738 mcx_dmamem_free(sc, &mxm);
4739
4740 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4741 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4742 BUS_DMASYNC_PREWRITE);
4743
4744 mcx_arm_cq(sc, cq, uar);
4745
4746 return (0);
4747
4748 free_mxm:
4749 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4750 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4751 mcx_dmamem_free(sc, &mxm);
4752 free_cq:
4753 mcx_dmamem_free(sc, &cq->cq_mem);
4754 return (-1);
4755 }
4756
4757 static int
4758 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4759 {
4760 struct mcx_cmdq_entry *cqe;
4761 struct mcx_cmd_destroy_cq_in *in;
4762 struct mcx_cmd_destroy_cq_out *out;
4763 int error;
4764 int token;
4765
4766 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4767 token = mcx_cmdq_token(sc);
4768 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4769
4770 in = mcx_cmdq_in(cqe);
4771 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4772 in->cmd_op_mod = htobe16(0);
4773 in->cmd_cqn = htobe32(cq->cq_n);
4774
4775 mcx_cmdq_post(sc, cqe, 0);
4776 error = mcx_cmdq_poll(sc, cqe, 1000);
4777 if (error != 0) {
4778 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4779 return error;
4780 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
		return -1;
	}
4785
4786 out = mcx_cmdq_out(cqe);
4787 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4788 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4789 out->cmd_status, be32toh(out->cmd_syndrome));
4790 return -1;
4791 }
4792
4793 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4794 cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4795 BUS_DMASYNC_POSTWRITE);
4796
4797 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4798 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4799 mcx_dmamem_free(sc, &cq->cq_mem);
4800
4801 cq->cq_n = 0;
4802 cq->cq_cons = 0;
4803 cq->cq_count = 0;
4804 return 0;
4805 }
4806
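/*
 * Each RQ/SQ pair shares a doorbell slot: the receive doorbell lives at
 * MCX_WQ_DOORBELL_BASE + db * MCX_WQ_DOORBELL_STRIDE and the matching
 * send doorbell sits 4 bytes after it (see mcx_create_sq()).
 */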
4807 static int
4808 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4809 {
4810 struct mcx_cmdq_entry *cqe;
4811 struct mcx_dmamem mxm;
4812 struct mcx_cmd_create_rq_in *in;
4813 struct mcx_cmd_create_rq_out *out;
4814 struct mcx_rq_ctx *mbin;
4815 int error;
4816 uint64_t *pas;
4817 uint32_t rq_flags;
4818 int insize, npages, paslen, token;
4819
4820 rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4821 (db * MCX_WQ_DOORBELL_STRIDE);
4822
4823 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4824 MCX_PAGE_SIZE);
4825 paslen = npages * sizeof(*pas);
4826 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4827
4828 if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4829 MCX_PAGE_SIZE) != 0) {
4830 printf("%s: unable to allocate receive queue memory\n",
4831 DEVNAME(sc));
4832 return (-1);
4833 }
4834
4835 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4836 token = mcx_cmdq_token(sc);
4837 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4838
4839 in = mcx_cmdq_in(cqe);
4840 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4841 in->cmd_op_mod = htobe16(0);
4842
4843 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4844 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4845 &cqe->cq_input_ptr, token) != 0) {
4846 printf("%s: unable to allocate create rq mailboxen\n",
4847 DEVNAME(sc));
4848 goto free_rq;
4849 }
4850 mbin = (struct mcx_rq_ctx *)
4851 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4852 rq_flags = MCX_RQ_CTX_RLKEY;
4853 mbin->rq_flags = htobe32(rq_flags);
4854 mbin->rq_cqn = htobe32(cqn);
4855 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4856 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4857 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4858 rx->rx_doorbell);
4859 mbin->rq_wq.wq_log_stride = htobe16(4);
4860 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4861
4862 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4863 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4864
4865 /* physical addresses follow the mailbox in data */
4866 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4867 mcx_cmdq_post(sc, cqe, 0);
4868
4869 error = mcx_cmdq_poll(sc, cqe, 1000);
4870 if (error != 0) {
4871 printf("%s: create rq timeout\n", DEVNAME(sc));
4872 goto free_mxm;
4873 }
4874 if (mcx_cmdq_verify(cqe) != 0) {
4875 printf("%s: create rq command corrupt\n", DEVNAME(sc));
4876 goto free_mxm;
4877 }
4878
4879 out = mcx_cmdq_out(cqe);
4880 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4881 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4882 out->cmd_status, be32toh(out->cmd_syndrome));
4883 goto free_mxm;
4884 }
4885
4886 rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4887
4888 mcx_dmamem_free(sc, &mxm);
4889
4890 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4891 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4892
4893 return (0);
4894
4895 free_mxm:
4896 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4897 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4898 mcx_dmamem_free(sc, &mxm);
4899 free_rq:
4900 mcx_dmamem_free(sc, &rx->rx_rq_mem);
4901 return (-1);
4902 }
4903
4904 static int
4905 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4906 {
4907 struct mcx_cmdq_entry *cqe;
4908 struct mcx_dmamem mxm;
4909 struct mcx_cmd_modify_rq_in *in;
4910 struct mcx_cmd_modify_rq_mb_in *mbin;
4911 struct mcx_cmd_modify_rq_out *out;
4912 int error;
4913 int token;
4914
4915 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4916 token = mcx_cmdq_token(sc);
4917 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4918 sizeof(*out), token);
4919
4920 in = mcx_cmdq_in(cqe);
4921 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4922 in->cmd_op_mod = htobe16(0);
4923 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4924
4925 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4926 &cqe->cq_input_ptr, token) != 0) {
4927 printf("%s: unable to allocate modify rq mailbox\n",
4928 DEVNAME(sc));
4929 return (-1);
4930 }
4931 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4932 mbin->cmd_rq_ctx.rq_flags = htobe32(
4933 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4934
4935 mcx_cmdq_mboxes_sign(&mxm, 1);
4936 mcx_cmdq_post(sc, cqe, 0);
4937 error = mcx_cmdq_poll(sc, cqe, 1000);
4938 if (error != 0) {
4939 printf("%s: modify rq timeout\n", DEVNAME(sc));
4940 goto free;
4941 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
4946
4947 out = mcx_cmdq_out(cqe);
4948 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4949 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4950 out->cmd_status, be32toh(out->cmd_syndrome));
4951 error = -1;
4952 goto free;
4953 }
4954
4955 free:
4956 mcx_dmamem_free(sc, &mxm);
4957 return (error);
4958 }
4959
4960 static int
4961 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4962 {
4963 struct mcx_cmdq_entry *cqe;
4964 struct mcx_cmd_destroy_rq_in *in;
4965 struct mcx_cmd_destroy_rq_out *out;
4966 int error;
4967 int token;
4968
4969 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4970 token = mcx_cmdq_token(sc);
4971 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4972
4973 in = mcx_cmdq_in(cqe);
4974 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4975 in->cmd_op_mod = htobe16(0);
4976 in->cmd_rqn = htobe32(rx->rx_rqn);
4977
4978 mcx_cmdq_post(sc, cqe, 0);
4979 error = mcx_cmdq_poll(sc, cqe, 1000);
4980 if (error != 0) {
4981 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4982 return error;
4983 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
		return -1;
	}
4988
4989 out = mcx_cmdq_out(cqe);
4990 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4991 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4992 out->cmd_status, be32toh(out->cmd_syndrome));
4993 return -1;
4994 }
4995
4996 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4997 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4998
4999 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
5000 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
5001 mcx_dmamem_free(sc, &rx->rx_rq_mem);
5002
5003 rx->rx_rqn = 0;
5004 return 0;
5005 }
5006
5007 static int
5008 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
5009 {
5010 struct mcx_cmdq_entry *cqe;
5011 struct mcx_dmamem mxm;
5012 struct mcx_cmd_create_tir_in *in;
5013 struct mcx_cmd_create_tir_mb_in *mbin;
5014 struct mcx_cmd_create_tir_out *out;
5015 int error;
5016 int token;
5017
5018 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5019 token = mcx_cmdq_token(sc);
5020 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5021 sizeof(*out), token);
5022
5023 in = mcx_cmdq_in(cqe);
5024 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5025 in->cmd_op_mod = htobe16(0);
5026
5027 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5028 &cqe->cq_input_ptr, token) != 0) {
5029 printf("%s: unable to allocate create tir mailbox\n",
5030 DEVNAME(sc));
5031 return (-1);
5032 }
5033 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5034 /* leave disp_type = 0, so packets get sent to the inline rqn */
5035 mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
5036 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5037
5038 mcx_cmdq_post(sc, cqe, 0);
5039 error = mcx_cmdq_poll(sc, cqe, 1000);
5040 if (error != 0) {
5041 printf("%s: create tir timeout\n", DEVNAME(sc));
5042 goto free;
5043 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5048
5049 out = mcx_cmdq_out(cqe);
5050 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5051 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5052 out->cmd_status, be32toh(out->cmd_syndrome));
5053 error = -1;
5054 goto free;
5055 }
5056
5057 *tirn = mcx_get_id(out->cmd_tirn);
5058 free:
5059 mcx_dmamem_free(sc, &mxm);
5060 return (error);
5061 }
5062
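/*
 * An indirect TIR spreads packets across an RQ table (RQT) using a
 * Toeplitz hash over the fields selected by hash_sel, whereas the
 * direct TIR above forwards everything to a single inline RQN.
 */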
5063 static int
5064 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
5065 int *tirn)
5066 {
5067 struct mcx_cmdq_entry *cqe;
5068 struct mcx_dmamem mxm;
5069 struct mcx_cmd_create_tir_in *in;
5070 struct mcx_cmd_create_tir_mb_in *mbin;
5071 struct mcx_cmd_create_tir_out *out;
5072 int error;
5073 int token;
5074
5075 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5076 token = mcx_cmdq_token(sc);
5077 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5078 sizeof(*out), token);
5079
5080 in = mcx_cmdq_in(cqe);
5081 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5082 in->cmd_op_mod = htobe16(0);
5083
5084 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5085 &cqe->cq_input_ptr, token) != 0) {
5086 printf("%s: unable to allocate create tir mailbox\n",
5087 DEVNAME(sc));
5088 return (-1);
5089 }
5090 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5091 mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5092 << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5093 mbin->cmd_indir_table = htobe32(rqtn);
5094 mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5095 MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5096 mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5097 stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5098 sizeof(mbin->cmd_rx_hash_key));
5099
5100 mcx_cmdq_post(sc, cqe, 0);
5101 error = mcx_cmdq_poll(sc, cqe, 1000);
5102 if (error != 0) {
5103 printf("%s: create tir timeout\n", DEVNAME(sc));
5104 goto free;
5105 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5110
5111 out = mcx_cmdq_out(cqe);
5112 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5113 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5114 out->cmd_status, be32toh(out->cmd_syndrome));
5115 error = -1;
5116 goto free;
5117 }
5118
5119 *tirn = mcx_get_id(out->cmd_tirn);
5120 free:
5121 mcx_dmamem_free(sc, &mxm);
5122 return (error);
5123 }
5124
5125 static int
5126 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5127 {
5128 struct mcx_cmdq_entry *cqe;
5129 struct mcx_cmd_destroy_tir_in *in;
5130 struct mcx_cmd_destroy_tir_out *out;
5131 int error;
5132 int token;
5133
5134 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5135 token = mcx_cmdq_token(sc);
5136 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5137
5138 in = mcx_cmdq_in(cqe);
5139 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5140 in->cmd_op_mod = htobe16(0);
5141 in->cmd_tirn = htobe32(tirn);
5142
5143 mcx_cmdq_post(sc, cqe, 0);
5144 error = mcx_cmdq_poll(sc, cqe, 1000);
5145 if (error != 0) {
5146 printf("%s: destroy tir timeout\n", DEVNAME(sc));
5147 return error;
5148 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
		return -1;
	}
5153
5154 out = mcx_cmdq_out(cqe);
5155 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5156 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5157 out->cmd_status, be32toh(out->cmd_syndrome));
5158 return -1;
5159 }
5160
5161 return (0);
5162 }
5163
5164 static int
5165 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5166 int cqn)
5167 {
5168 struct mcx_cmdq_entry *cqe;
5169 struct mcx_dmamem mxm;
5170 struct mcx_cmd_create_sq_in *in;
5171 struct mcx_sq_ctx *mbin;
5172 struct mcx_cmd_create_sq_out *out;
5173 int error;
5174 uint64_t *pas;
5175 int insize, npages, paslen, token;
5176
5177 tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5178 (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5179
5180 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5181 MCX_PAGE_SIZE);
5182 paslen = npages * sizeof(*pas);
	/* the sq context sits 0x10 into the mailbox; account for that too */
	insize = 0x10 + sizeof(struct mcx_sq_ctx) + paslen;
5184
5185 if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5186 MCX_PAGE_SIZE) != 0) {
5187 printf("%s: unable to allocate send queue memory\n",
5188 DEVNAME(sc));
5189 return (-1);
5190 }
5191
5192 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5193 token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out),
	    token);
5196
5197 in = mcx_cmdq_in(cqe);
5198 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5199 in->cmd_op_mod = htobe16(0);
5200
5201 if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5202 howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5203 &cqe->cq_input_ptr, token) != 0) {
5204 printf("%s: unable to allocate create sq mailboxen\n",
5205 DEVNAME(sc));
5206 goto free_sq;
5207 }
5208 mbin = (struct mcx_sq_ctx *)
5209 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5210 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5211 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5212 mbin->sq_cqn = htobe32(cqn);
5213 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5214 mbin->sq_tis_num = htobe32(sc->sc_tis);
5215 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5216 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5217 mbin->sq_wq.wq_uar_page = htobe32(uar);
5218 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5219 tx->tx_doorbell);
5220 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5221 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5222
5223 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5224 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5225
5226 /* physical addresses follow the mailbox in data */
5227 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5228 npages, &tx->tx_sq_mem);
5229 mcx_cmdq_post(sc, cqe, 0);
5230
5231 error = mcx_cmdq_poll(sc, cqe, 1000);
5232 if (error != 0) {
5233 printf("%s: create sq timeout\n", DEVNAME(sc));
5234 goto free_mxm;
5235 }
5236 if (mcx_cmdq_verify(cqe) != 0) {
5237 printf("%s: create sq command corrupt\n", DEVNAME(sc));
5238 goto free_mxm;
5239 }
5240
5241 out = mcx_cmdq_out(cqe);
5242 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5243 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5244 out->cmd_status, be32toh(out->cmd_syndrome));
5245 goto free_mxm;
5246 }
5247
5248 tx->tx_uar = uar;
5249 tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5250
5251 mcx_dmamem_free(sc, &mxm);
5252
5253 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5254 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5255
5256 return (0);
5257
5258 free_mxm:
5259 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5260 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5261 mcx_dmamem_free(sc, &mxm);
5262 free_sq:
5263 mcx_dmamem_free(sc, &tx->tx_sq_mem);
5264 return (-1);
5265 }
5266
5267 static int
5268 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5269 {
5270 struct mcx_cmdq_entry *cqe;
5271 struct mcx_cmd_destroy_sq_in *in;
5272 struct mcx_cmd_destroy_sq_out *out;
5273 int error;
5274 int token;
5275
5276 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5277 token = mcx_cmdq_token(sc);
5278 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5279
5280 in = mcx_cmdq_in(cqe);
5281 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5282 in->cmd_op_mod = htobe16(0);
5283 in->cmd_sqn = htobe32(tx->tx_sqn);
5284
5285 mcx_cmdq_post(sc, cqe, 0);
5286 error = mcx_cmdq_poll(sc, cqe, 1000);
5287 if (error != 0) {
5288 printf("%s: destroy sq timeout\n", DEVNAME(sc));
5289 return error;
5290 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
		return -1;
	}
5295
5296 out = mcx_cmdq_out(cqe);
5297 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5298 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5299 out->cmd_status, be32toh(out->cmd_syndrome));
5300 return -1;
5301 }
5302
5303 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5304 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5305
5306 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5307 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5308 mcx_dmamem_free(sc, &tx->tx_sq_mem);
5309
5310 tx->tx_sqn = 0;
5311 return 0;
5312 }
5313
5314 static int
5315 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5316 {
5317 struct mcx_cmdq_entry *cqe;
5318 struct mcx_dmamem mxm;
5319 struct mcx_cmd_modify_sq_in *in;
5320 struct mcx_cmd_modify_sq_mb_in *mbin;
5321 struct mcx_cmd_modify_sq_out *out;
5322 int error;
5323 int token;
5324
5325 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5326 token = mcx_cmdq_token(sc);
5327 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5328 sizeof(*out), token);
5329
5330 in = mcx_cmdq_in(cqe);
5331 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5332 in->cmd_op_mod = htobe16(0);
5333 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5334
5335 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5336 &cqe->cq_input_ptr, token) != 0) {
5337 printf("%s: unable to allocate modify sq mailbox\n",
5338 DEVNAME(sc));
5339 return (-1);
5340 }
5341 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5342 mbin->cmd_sq_ctx.sq_flags = htobe32(
5343 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5344
5345 mcx_cmdq_mboxes_sign(&mxm, 1);
5346 mcx_cmdq_post(sc, cqe, 0);
5347 error = mcx_cmdq_poll(sc, cqe, 1000);
5348 if (error != 0) {
5349 printf("%s: modify sq timeout\n", DEVNAME(sc));
5350 goto free;
5351 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5356
5357 out = mcx_cmdq_out(cqe);
5358 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5359 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5360 out->cmd_status, be32toh(out->cmd_syndrome));
5361 error = -1;
5362 goto free;
5363 }
5364
5365 free:
5366 mcx_dmamem_free(sc, &mxm);
5367 return (error);
5368 }
5369
5370 static int
5371 mcx_create_tis(struct mcx_softc *sc, int *tis)
5372 {
5373 struct mcx_cmdq_entry *cqe;
5374 struct mcx_dmamem mxm;
5375 struct mcx_cmd_create_tis_in *in;
5376 struct mcx_cmd_create_tis_mb_in *mbin;
5377 struct mcx_cmd_create_tis_out *out;
5378 int error;
5379 int token;
5380
5381 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5382 token = mcx_cmdq_token(sc);
5383 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5384 sizeof(*out), token);
5385
5386 in = mcx_cmdq_in(cqe);
5387 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5388 in->cmd_op_mod = htobe16(0);
5389
5390 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5391 &cqe->cq_input_ptr, token) != 0) {
5392 printf("%s: unable to allocate create tis mailbox\n",
5393 DEVNAME(sc));
5394 return (-1);
5395 }
5396 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5397 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5398
5399 mcx_cmdq_mboxes_sign(&mxm, 1);
5400 mcx_cmdq_post(sc, cqe, 0);
5401 error = mcx_cmdq_poll(sc, cqe, 1000);
5402 if (error != 0) {
5403 printf("%s: create tis timeout\n", DEVNAME(sc));
5404 goto free;
5405 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tis command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5410
5411 out = mcx_cmdq_out(cqe);
5412 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5413 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5414 out->cmd_status, be32toh(out->cmd_syndrome));
5415 error = -1;
5416 goto free;
5417 }
5418
5419 *tis = mcx_get_id(out->cmd_tisn);
5420 free:
5421 mcx_dmamem_free(sc, &mxm);
5422 return (error);
5423 }
5424
5425 static int
5426 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5427 {
5428 struct mcx_cmdq_entry *cqe;
5429 struct mcx_cmd_destroy_tis_in *in;
5430 struct mcx_cmd_destroy_tis_out *out;
5431 int error;
5432 int token;
5433
5434 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5435 token = mcx_cmdq_token(sc);
5436 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5437
5438 in = mcx_cmdq_in(cqe);
5439 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5440 in->cmd_op_mod = htobe16(0);
5441 in->cmd_tisn = htobe32(tis);
5442
5443 mcx_cmdq_post(sc, cqe, 0);
5444 error = mcx_cmdq_poll(sc, cqe, 1000);
5445 if (error != 0) {
5446 printf("%s: destroy tis timeout\n", DEVNAME(sc));
5447 return error;
5448 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
		return -1;
	}
5453
5454 out = mcx_cmdq_out(cqe);
5455 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5456 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5457 out->cmd_status, be32toh(out->cmd_syndrome));
5458 return -1;
5459 }
5460
5461 return 0;
5462 }
5463
5464 static int
5465 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5466 {
5467 struct mcx_cmdq_entry *cqe;
5468 struct mcx_dmamem mxm;
5469 struct mcx_cmd_create_rqt_in *in;
5470 struct mcx_cmd_create_rqt_mb_in *mbin;
5471 struct mcx_cmd_create_rqt_out *out;
5472 struct mcx_rqt_ctx *rqt_ctx;
5473 int *rqtn;
5474 int error;
5475 int token;
5476 int i;
5477
5478 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5479 token = mcx_cmdq_token(sc);
5480 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5481 (size * sizeof(int)), sizeof(*out), token);
5482
5483 in = mcx_cmdq_in(cqe);
5484 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5485 in->cmd_op_mod = htobe16(0);
5486
5487 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5488 &cqe->cq_input_ptr, token) != 0) {
5489 printf("%s: unable to allocate create rqt mailbox\n",
5490 DEVNAME(sc));
5491 return (-1);
5492 }
5493 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5494 rqt_ctx = &mbin->cmd_rqt;
5495 rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5496 rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5497
5498 /* rqt list follows the rqt context */
5499 rqtn = (int *)(rqt_ctx + 1);
5500 for (i = 0; i < size; i++) {
5501 rqtn[i] = htobe32(rqns[i]);
5502 }
5503
5504 mcx_cmdq_mboxes_sign(&mxm, 1);
5505 mcx_cmdq_post(sc, cqe, 0);
5506 error = mcx_cmdq_poll(sc, cqe, 1000);
5507 if (error != 0) {
5508 printf("%s: create rqt timeout\n", DEVNAME(sc));
5509 goto free;
5510 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5515
5516 out = mcx_cmdq_out(cqe);
5517 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5518 printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5519 out->cmd_status, be32toh(out->cmd_syndrome));
5520 error = -1;
5521 goto free;
5522 }
5523
	*rqt = mcx_get_id(out->cmd_rqtn);
5526 free:
5527 mcx_dmamem_free(sc, &mxm);
5528 return (error);
5529 }
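
/*
 * The RQT created above is what mcx_create_tir_indirect() points at:
 * the caller passes the per-queue RQNs and hands the resulting table
 * id to the indirect TIR for RSS distribution.
 */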
5530
5531 static int
5532 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5533 {
5534 struct mcx_cmdq_entry *cqe;
5535 struct mcx_cmd_destroy_rqt_in *in;
5536 struct mcx_cmd_destroy_rqt_out *out;
5537 int error;
5538 int token;
5539
5540 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5541 token = mcx_cmdq_token(sc);
5542 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5543
5544 in = mcx_cmdq_in(cqe);
5545 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5546 in->cmd_op_mod = htobe16(0);
5547 in->cmd_rqtn = htobe32(rqt);
5548
5549 mcx_cmdq_post(sc, cqe, 0);
5550 error = mcx_cmdq_poll(sc, cqe, 1000);
5551 if (error != 0) {
5552 printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5553 return error;
5554 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
		return -1;
	}
5559
5560 out = mcx_cmdq_out(cqe);
5561 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5562 printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5563 out->cmd_status, be32toh(out->cmd_syndrome));
5564 return -1;
5565 }
5566
5567 return 0;
5568 }
5569
5570 #if 0
5571 static int
5572 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5573 {
5574 struct mcx_cmdq_entry *cqe;
5575 struct mcx_cmd_alloc_flow_counter_in *in;
5576 struct mcx_cmd_alloc_flow_counter_out *out;
5577 int error;
5578
5579 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5580 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5581
5582 in = mcx_cmdq_in(cqe);
5583 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5584 in->cmd_op_mod = htobe16(0);
5585
5586 mcx_cmdq_post(sc, cqe, 0);
5587
5588 error = mcx_cmdq_poll(sc, cqe, 1000);
5589 if (error != 0) {
5590 printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5591 return (-1);
5592 }
5593 if (mcx_cmdq_verify(cqe) != 0) {
5594 printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5595 return (-1);
5596 }
5597
5598 out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5599 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5600 printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5601 out->cmd_status);
5602 return (-1);
5603 }
5604
5605 sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
5606 printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5607
5608 return (0);
5609 }
5610 #endif
5611
5612 static int
5613 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5614 int *flow_table_id)
5615 {
5616 struct mcx_cmdq_entry *cqe;
5617 struct mcx_dmamem mxm;
5618 struct mcx_cmd_create_flow_table_in *in;
5619 struct mcx_cmd_create_flow_table_mb_in *mbin;
5620 struct mcx_cmd_create_flow_table_out *out;
5621 int error;
5622 int token;
5623
5624 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5625 token = mcx_cmdq_token(sc);
5626 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5627 sizeof(*out), token);
5628
5629 in = mcx_cmdq_in(cqe);
5630 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5631 in->cmd_op_mod = htobe16(0);
5632
5633 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5634 &cqe->cq_input_ptr, token) != 0) {
5635 printf("%s: unable to allocate create flow table mailbox\n",
5636 DEVNAME(sc));
5637 return (-1);
5638 }
5639 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5640 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5641 mbin->cmd_ctx.ft_log_size = log_size;
5642 mbin->cmd_ctx.ft_level = level;
5643
5644 mcx_cmdq_mboxes_sign(&mxm, 1);
5645 mcx_cmdq_post(sc, cqe, 0);
5646 error = mcx_cmdq_poll(sc, cqe, 1000);
5647 if (error != 0) {
5648 printf("%s: create flow table timeout\n", DEVNAME(sc));
5649 goto free;
5650 }
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
5655
5656 out = mcx_cmdq_out(cqe);
5657 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5658 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5659 out->cmd_status, be32toh(out->cmd_syndrome));
5660 error = -1;
5661 goto free;
5662 }
5663
5664 *flow_table_id = mcx_get_id(out->cmd_table_id);
5665 free:
5666 mcx_dmamem_free(sc, &mxm);
5667 return (error);
5668 }
5669
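/*
 * Flow tables are chained by level, and a table only sees traffic once
 * it is reachable from the root; setting the flow table root makes the
 * given table the entry point for the RX domain.
 */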
5670 static int
5671 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5672 {
5673 struct mcx_cmdq_entry *cqe;
5674 struct mcx_dmamem mxm;
5675 struct mcx_cmd_set_flow_table_root_in *in;
5676 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5677 struct mcx_cmd_set_flow_table_root_out *out;
5678 int error;
5679 int token;
5680
5681 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5682 token = mcx_cmdq_token(sc);
5683 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5684 sizeof(*out), token);
5685
5686 in = mcx_cmdq_in(cqe);
5687 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5688 in->cmd_op_mod = htobe16(0);
5689
5690 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5691 &cqe->cq_input_ptr, token) != 0) {
5692 printf("%s: unable to allocate set flow table root mailbox\n",
5693 DEVNAME(sc));
5694 return (-1);
5695 }
5696 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5697 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5698 mbin->cmd_table_id = htobe32(flow_table_id);
5699
5700 mcx_cmdq_mboxes_sign(&mxm, 1);
5701 mcx_cmdq_post(sc, cqe, 0);
5702 error = mcx_cmdq_poll(sc, cqe, 1000);
5703 if (error != 0) {
5704 printf("%s: set flow table root timeout\n", DEVNAME(sc));
5705 goto free;
5706 }
5707 if (mcx_cmdq_verify(cqe) != 0) {
5708 printf("%s: set flow table root command corrupt\n",
5709 DEVNAME(sc));
5710 goto free;
5711 }
5712
5713 out = mcx_cmdq_out(cqe);
5714 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5715 printf("%s: set flow table root failed (%x, %x)\n",
5716 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5717 error = -1;
5718 goto free;
5719 }
5720
5721 free:
5722 mcx_dmamem_free(sc, &mxm);
5723 return (error);
5724 }
5725
5726 static int
5727 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5728 {
5729 struct mcx_cmdq_entry *cqe;
5730 struct mcx_dmamem mxm;
5731 struct mcx_cmd_destroy_flow_table_in *in;
5732 struct mcx_cmd_destroy_flow_table_mb_in *mb;
5733 struct mcx_cmd_destroy_flow_table_out *out;
5734 int error;
5735 int token;
5736
5737 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5738 token = mcx_cmdq_token(sc);
5739 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5740
5741 in = mcx_cmdq_in(cqe);
5742 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5743 in->cmd_op_mod = htobe16(0);
5744
5745 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5746 &cqe->cq_input_ptr, token) != 0) {
5747 printf("%s: unable to allocate destroy flow table mailbox\n",
5748 DEVNAME(sc));
5749 return (-1);
5750 }
5751 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5752 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5753 mb->cmd_table_id = htobe32(flow_table_id);
5754
5755 mcx_cmdq_mboxes_sign(&mxm, 1);
5756 mcx_cmdq_post(sc, cqe, 0);
5757 error = mcx_cmdq_poll(sc, cqe, 1000);
5758 if (error != 0) {
5759 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5760 goto free;
5761 }
5762 if (mcx_cmdq_verify(cqe) != 0) {
5763 printf("%s: destroy flow table command corrupt\n",
5764 DEVNAME(sc));
5765 goto free;
5766 }
5767
5768 out = mcx_cmdq_out(cqe);
5769 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5770 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5771 out->cmd_status, be32toh(out->cmd_syndrome));
5772 error = -1;
5773 goto free;
5774 }
5775
5776 free:
5777 mcx_dmamem_free(sc, &mxm);
5778 return (error);
5779 }
5780
5781
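/*
 * mcx_create_flow_group carves entries [start, start + size - 1] of a
 * flow table into a group that shares one match criteria mask. the
 * mailbox input doesn't fit in a single mailbox, so two are allocated
 * and signed. the group id and range are recorded in
 * sc->sc_flow_group[group] for later entry and teardown operations.
 */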
5782 static int
5783 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5784 int start, int size, int match_enable, struct mcx_flow_match *match)
5785 {
5786 struct mcx_cmdq_entry *cqe;
5787 struct mcx_dmamem mxm;
5788 struct mcx_cmd_create_flow_group_in *in;
5789 struct mcx_cmd_create_flow_group_mb_in *mbin;
5790 struct mcx_cmd_create_flow_group_out *out;
5791 struct mcx_flow_group *mfg;
5792 int error;
5793 int token;
5794
5795 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5796 token = mcx_cmdq_token(sc);
5797 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5798 token);
5799
5800 in = mcx_cmdq_in(cqe);
5801 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5802 in->cmd_op_mod = htobe16(0);
5803
5804 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5805 != 0) {
5806 printf("%s: unable to allocate create flow group mailbox\n",
5807 DEVNAME(sc));
5808 return (-1);
5809 }
5810 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5811 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5812 mbin->cmd_table_id = htobe32(flow_table_id);
5813 mbin->cmd_start_flow_index = htobe32(start);
5814 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5815
5816 mbin->cmd_match_criteria_enable = match_enable;
5817 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5818
5819 mcx_cmdq_mboxes_sign(&mxm, 2);
5820 mcx_cmdq_post(sc, cqe, 0);
5821 error = mcx_cmdq_poll(sc, cqe, 1000);
5822 if (error != 0) {
5823 printf("%s: create flow group timeout\n", DEVNAME(sc));
5824 goto free;
5825 }
5826 if (mcx_cmdq_verify(cqe) != 0) {
5827 printf("%s: create flow group command corrupt\n", DEVNAME(sc));
5828 goto free;
5829 }
5830
5831 out = mcx_cmdq_out(cqe);
5832 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5833 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5834 out->cmd_status, be32toh(out->cmd_syndrome));
5835 error = -1;
5836 goto free;
5837 }
5838
5839 mfg = &sc->sc_flow_group[group];
5840 mfg->g_id = mcx_get_id(out->cmd_group_id);
5841 mfg->g_table = flow_table_id;
5842 mfg->g_start = start;
5843 mfg->g_size = size;
5844
5845 free:
5846 mcx_dmamem_free(sc, &mxm);
5847 return (error);
5848 }
5849
5850 static int
5851 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5852 {
5853 struct mcx_cmdq_entry *cqe;
5854 struct mcx_dmamem mxm;
5855 struct mcx_cmd_destroy_flow_group_in *in;
5856 struct mcx_cmd_destroy_flow_group_mb_in *mb;
5857 struct mcx_cmd_destroy_flow_group_out *out;
5858 struct mcx_flow_group *mfg;
5859 int error;
5860 int token;
5861
5862 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5863 token = mcx_cmdq_token(sc);
5864 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5865
5866 in = mcx_cmdq_in(cqe);
5867 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5868 in->cmd_op_mod = htobe16(0);
5869
5870 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5871 &cqe->cq_input_ptr, token) != 0) {
5872 printf("%s: unable to allocate destroy flow group mailbox\n",
5873 DEVNAME(sc));
5874 return (-1);
5875 }
5876 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5877 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5878 mfg = &sc->sc_flow_group[group];
5879 mb->cmd_table_id = htobe32(mfg->g_table);
5880 mb->cmd_group_id = htobe32(mfg->g_id);
5881
5882 mcx_cmdq_mboxes_sign(&mxm, 2);
5883 mcx_cmdq_post(sc, cqe, 0);
5884 error = mcx_cmdq_poll(sc, cqe, 1000);
5885 if (error != 0) {
5886 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5887 goto free;
5888 }
5889 if (mcx_cmdq_verify(cqe) != 0) {
5890 printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
5891 goto free;
5892 }
5893
5894 out = mcx_cmdq_out(cqe);
5895 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5896 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5897 out->cmd_status, be32toh(out->cmd_syndrome));
5898 error = -1;
5899 goto free;
5900 }
5901
5902 mfg->g_id = -1;
5903 mfg->g_table = -1;
5904 mfg->g_size = 0;
5905 mfg->g_start = 0;
5906 free:
5907 mcx_dmamem_free(sc, &mxm);
5908 return (error);
5909 }
5910
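/*
 * mcx_set_flow_table_entry_mac installs a rule in one of the mac flow
 * table groups, forwarding matching packets to dest, which encodes a
 * destination type and id (flow table or tir). the destination list
 * sits at the end of the flow context, 0x130 bytes into the second
 * mailbox, hence the pdest pointer arithmetic below.
 */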
5911 static int
5912 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5913 const uint8_t *macaddr, uint32_t dest)
5914 {
5915 struct mcx_cmdq_entry *cqe;
5916 struct mcx_dmamem mxm;
5917 struct mcx_cmd_set_flow_table_entry_in *in;
5918 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5919 struct mcx_cmd_set_flow_table_entry_out *out;
5920 struct mcx_flow_group *mfg;
5921 uint32_t *pdest;
5922 int error;
5923 int token;
5924
5925 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5926 token = mcx_cmdq_token(sc);
5927 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5928 sizeof(*out), token);
5929
5930 in = mcx_cmdq_in(cqe);
5931 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5932 in->cmd_op_mod = htobe16(0);
5933
5934 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5935 != 0) {
5936 printf("%s: unable to allocate set flow table entry mailbox\n",
5937 DEVNAME(sc));
5938 return (-1);
5939 }
5940
5941 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5942 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5943
5944 mfg = &sc->sc_flow_group[group];
5945 mbin->cmd_table_id = htobe32(mfg->g_table);
5946 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5947 mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5948
5949 /* flow context ends at offset 0x330, 0x130 into the second mbox */
5950 pdest = (uint32_t *)
5951 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5952 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5953 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5954 *pdest = htobe32(dest);
5955
5956 /* the only thing we match on at the moment is the dest mac address */
5957 if (macaddr != NULL) {
5958 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5959 ETHER_ADDR_LEN);
5960 }
5961
5962 mcx_cmdq_mboxes_sign(&mxm, 2);
5963 mcx_cmdq_post(sc, cqe, 0);
5964 error = mcx_cmdq_poll(sc, cqe, 1000);
5965 if (error != 0) {
5966 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5967 goto free;
5968 }
5969 if (mcx_cmdq_verify(cqe) != 0) {
5970 printf("%s: set flow table entry command corrupt\n",
5971 DEVNAME(sc));
5972 goto free;
5973 }
5974
5975 out = mcx_cmdq_out(cqe);
5976 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5977 printf("%s: set flow table entry failed (%x, %x)\n",
5978 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5979 error = -1;
5980 goto free;
5981 }
5982
5983 free:
5984 mcx_dmamem_free(sc, &mxm);
5985 return (error);
5986 }
5987
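/*
 * mcx_set_flow_table_entry_proto is the rss table variant: instead of
 * a mac address it matches on ethertype and ip protocol, steering each
 * traffic class to the tir chosen for it in mcx_init().
 */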
5988 static int
5989 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5990 int ethertype, int ip_proto, uint32_t dest)
5991 {
5992 struct mcx_cmdq_entry *cqe;
5993 struct mcx_dmamem mxm;
5994 struct mcx_cmd_set_flow_table_entry_in *in;
5995 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5996 struct mcx_cmd_set_flow_table_entry_out *out;
5997 struct mcx_flow_group *mfg;
5998 uint32_t *pdest;
5999 int error;
6000 int token;
6001
6002 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6003 token = mcx_cmdq_token(sc);
6004 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
6005 sizeof(*out), token);
6006
6007 in = mcx_cmdq_in(cqe);
6008 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
6009 in->cmd_op_mod = htobe16(0);
6010
6011 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
6012 != 0) {
6013 printf("%s: unable to allocate set flow table entry mailbox\n",
6014 DEVNAME(sc));
6015 return (-1);
6016 }
6017
6018 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6019 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6020
6021 mfg = &sc->sc_flow_group[group];
6022 mbin->cmd_table_id = htobe32(mfg->g_table);
6023 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6024 mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
6025
6026 /* flow context ends at offset 0x330, 0x130 into the second mbox */
6027 pdest = (uint32_t *)
6028 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
6029 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
6030 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
6031 *pdest = htobe32(dest);
6032
6033 mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
6034 mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
6035
6036 mcx_cmdq_mboxes_sign(&mxm, 2);
6037 mcx_cmdq_post(sc, cqe, 0);
6038 error = mcx_cmdq_poll(sc, cqe, 1000);
6039 if (error != 0) {
6040 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
6041 goto free;
6042 }
6043 if (mcx_cmdq_verify(cqe) != 0) {
6044 printf("%s: set flow table entry command corrupt\n",
6045 DEVNAME(sc));
6046 goto free;
6047 }
6048
6049 out = mcx_cmdq_out(cqe);
6050 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6051 printf("%s: set flow table entry failed (%x, %x)\n",
6052 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6053 error = -1;
6054 goto free;
6055 }
6056
6057 free:
6058 mcx_dmamem_free(sc, &mxm);
6059 return (error);
6060 }
6061
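/*
 * mcx_delete_flow_table_entry removes the rule at a group-relative
 * index; the absolute flow index is recovered from the group's start.
 */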
6062 static int
6063 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
6064 {
6065 struct mcx_cmdq_entry *cqe;
6066 struct mcx_dmamem mxm;
6067 struct mcx_cmd_delete_flow_table_entry_in *in;
6068 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
6069 struct mcx_cmd_delete_flow_table_entry_out *out;
6070 struct mcx_flow_group *mfg;
6071 int error;
6072 int token;
6073
6074 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6075 token = mcx_cmdq_token(sc);
6076 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
6077 token);
6078
6079 in = mcx_cmdq_in(cqe);
6080 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
6081 in->cmd_op_mod = htobe16(0);
6082
6083 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6084 &cqe->cq_input_ptr, token) != 0) {
6085 printf("%s: unable to allocate "
6086 "delete flow table entry mailbox\n", DEVNAME(sc));
6087 return (-1);
6088 }
6089 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6090 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6091
6092 mfg = &sc->sc_flow_group[group];
6093 mbin->cmd_table_id = htobe32(mfg->g_table);
6094 mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6095
6096 mcx_cmdq_mboxes_sign(&mxm, 2);
6097 mcx_cmdq_post(sc, cqe, 0);
6098 error = mcx_cmdq_poll(sc, cqe, 1000);
6099 if (error != 0) {
6100 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
6101 goto free;
6102 }
6103 if (mcx_cmdq_verify(cqe) != 0) {
6104 printf("%s: delete flow table entry command corrupt\n",
6105 DEVNAME(sc));
6106 goto free;
6107 }
6108
6109 out = mcx_cmdq_out(cqe);
6110 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6111 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
6112 DEVNAME(sc), group, index, out->cmd_status,
6113 be32toh(out->cmd_syndrome));
6114 error = -1;
6115 goto free;
6116 }
6117
6118 free:
6119 mcx_dmamem_free(sc, &mxm);
6120 return (error);
6121 }
6122
6123 #if 0
6124 int
6125 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6126 {
6127 struct mcx_dmamem mxm;
6128 struct mcx_cmdq_entry *cqe;
6129 struct mcx_cmd_query_flow_table_in *in;
6130 struct mcx_cmd_query_flow_table_mb_in *mbin;
6131 struct mcx_cmd_query_flow_table_out *out;
6132 struct mcx_cmd_query_flow_table_mb_out *mbout;
6133 uint8_t token = mcx_cmdq_token(sc);
6134 int error;
6135 int i;
6136 uint8_t *dump;
6137
6138 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6139 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6140 sizeof(*out) + sizeof(*mbout) + 16, token);
6141
6142 in = mcx_cmdq_in(cqe);
6143 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6144 in->cmd_op_mod = htobe16(0);
6145
6146 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6147 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6148 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6149 &cqe->cq_output_ptr, token) != 0) {
6150 printf(", unable to allocate query flow table mailboxes\n");
6151 return (-1);
6152 }
6153 cqe->cq_input_ptr = cqe->cq_output_ptr;
6154
6155 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6156 mbin->cmd_table_type = 0;
6157 mbin->cmd_table_id = htobe32(flow_table_id);
6158
6159 mcx_cmdq_mboxes_sign(&mxm, 1);
6160
6161 mcx_cmdq_post(sc, cqe, 0);
6162 error = mcx_cmdq_poll(sc, cqe, 1000);
6163 if (error != 0) {
6164 printf("%s: query flow table timeout\n", DEVNAME(sc));
6165 goto free;
6166 }
6167 error = mcx_cmdq_verify(cqe);
6168 if (error != 0) {
6169 printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6170 goto free;
6171 }
6172
6173 out = mcx_cmdq_out(cqe);
6174 switch (out->cmd_status) {
6175 case MCX_CQ_STATUS_OK:
6176 break;
6177 default:
6178 printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6179 out->cmd_status, be32toh(out->cmd_syndrome));
6180 error = -1;
6181 goto free;
6182 }
6183
6184 mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6185 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6186 dump = (uint8_t *)mbout + 8;
6187 for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6188 printf("%.2x ", dump[i]);
6189 if (i % 16 == 15)
6190 printf("\n");
6191 }
6192 free:
6193 mcx_cq_mboxes_free(sc, &mxm);
6194 return (error);
6195 }
6196 int
6197 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6198 {
6199 struct mcx_dmamem mxm;
6200 struct mcx_cmdq_entry *cqe;
6201 struct mcx_cmd_query_flow_table_entry_in *in;
6202 struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6203 struct mcx_cmd_query_flow_table_entry_out *out;
6204 struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6205 uint8_t token = mcx_cmdq_token(sc);
6206 int error;
6207 int i;
6208 uint8_t *dump;
6209
6210 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6211 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6212 sizeof(*out) + sizeof(*mbout) + 16, token);
6213
6214 in = mcx_cmdq_in(cqe);
6215 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6216 in->cmd_op_mod = htobe16(0);
6217
6218 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6219 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6220 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6221 &cqe->cq_output_ptr, token) != 0) {
6222 printf(", unable to allocate "
6223 "query flow table entry mailboxes\n");
6224 return (-1);
6225 }
6226 cqe->cq_input_ptr = cqe->cq_output_ptr;
6227
6228 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6229 mbin->cmd_table_type = 0;
6230 mbin->cmd_table_id = htobe32(flow_table_id);
6231 mbin->cmd_flow_index = htobe32(index);
6232
6233 mcx_cmdq_mboxes_sign(&mxm, 1);
6234
6235 mcx_cmdq_post(sc, cqe, 0);
6236 error = mcx_cmdq_poll(sc, cqe, 1000);
6237 if (error != 0) {
6238 printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6239 goto free;
6240 }
6241 error = mcx_cmdq_verify(cqe);
6242 if (error != 0) {
6243 printf("%s: query flow table entry reply corrupt\n",
6244 DEVNAME(sc));
6245 goto free;
6246 }
6247
6248 out = mcx_cmdq_out(cqe);
6249 switch (out->cmd_status) {
6250 case MCX_CQ_STATUS_OK:
6251 break;
6252 default:
6253 printf("%s: query flow table entry failed (%x/%x)\n",
6254 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6255 error = -1;
6256 goto free;
6257 }
6258
6259 mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6260 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6261 dump = (uint8_t *)mbout;
6262 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6263 printf("%.2x ", dump[i]);
6264 if (i % 16 == 15)
6265 printf("\n");
6266 }
6267
6268 free:
6269 mcx_cq_mboxes_free(sc, &mxm);
6270 return (error);
6271 }
6272
6273 int
6274 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6275 {
6276 struct mcx_dmamem mxm;
6277 struct mcx_cmdq_entry *cqe;
6278 struct mcx_cmd_query_flow_group_in *in;
6279 struct mcx_cmd_query_flow_group_mb_in *mbin;
6280 struct mcx_cmd_query_flow_group_out *out;
6281 struct mcx_cmd_query_flow_group_mb_out *mbout;
6282 uint8_t token = mcx_cmdq_token(sc);
6283 int error;
6284 int i;
6285 uint8_t *dump;
6286
6287 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6288 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6289 sizeof(*out) + sizeof(*mbout) + 16, token);
6290
6291 in = mcx_cmdq_in(cqe);
6292 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6293 in->cmd_op_mod = htobe16(0);
6294
6295 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6296 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6297 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6298 &cqe->cq_output_ptr, token) != 0) {
6299 printf(", unable to allocate query flow group mailboxes\n");
6300 return (-1);
6301 }
6302 cqe->cq_input_ptr = cqe->cq_output_ptr;
6303
6304 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6305 mbin->cmd_table_type = 0;
6306 mbin->cmd_table_id = htobe32(flow_table_id);
6307 mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6308
6309 mcx_cmdq_mboxes_sign(&mxm, 1);
6310
6311 mcx_cmdq_post(sc, cqe, 0);
6312 error = mcx_cmdq_poll(sc, cqe, 1000);
6313 if (error != 0) {
6314 printf("%s: query flow group timeout\n", DEVNAME(sc));
6315 goto free;
6316 }
6317 error = mcx_cmdq_verify(cqe);
6318 if (error != 0) {
6319 printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6320 goto free;
6321 }
6322
6323 out = mcx_cmdq_out(cqe);
6324 switch (out->cmd_status) {
6325 case MCX_CQ_STATUS_OK:
6326 break;
6327 default:
6328 printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6329 out->cmd_status, be32toh(out->cmd_syndrome));
6330 error = -1;
6331 goto free;
6332 }
6333
6334 mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6335 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6336 dump = (uint8_t *)mbout;
6337 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6338 printf("%.2x ", dump[i]);
6339 if (i % 16 == 15)
6340 printf("\n");
6341 }
6342 dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6343 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6344 printf("%.2x ", dump[i]);
6345 if (i % 16 == 15)
6346 printf("\n");
6347 }
6348
6349 free:
6350 mcx_cq_mboxes_free(sc, &mxm);
6351 return (error);
6352 }
6353
6354 static int
6355 mcx_dump_counters(struct mcx_softc *sc)
6356 {
6357 struct mcx_dmamem mxm;
6358 struct mcx_cmdq_entry *cqe;
6359 struct mcx_cmd_query_vport_counters_in *in;
6360 struct mcx_cmd_query_vport_counters_mb_in *mbin;
6361 struct mcx_cmd_query_vport_counters_out *out;
6362 struct mcx_nic_vport_counters *counters;
6363 int error, token;
6364
6365 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6366 token = mcx_cmdq_token(sc);
6367 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6368 sizeof(*out) + sizeof(*counters), token);
6369
6370 in = mcx_cmdq_in(cqe);
6371 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6372 in->cmd_op_mod = htobe16(0);
6373
6374 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6375 &cqe->cq_output_ptr, token) != 0) {
6376 printf(", unable to allocate "
6377 "query nic vport counters mailboxen\n");
6378 return (-1);
6379 }
6380 cqe->cq_input_ptr = cqe->cq_output_ptr;
6381
6382 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6383 mbin->cmd_clear = 0x80;
6384
6385 mcx_cmdq_mboxes_sign(&mxm, 1);
6386 mcx_cmdq_post(sc, cqe, 0);
6387
6388 error = mcx_cmdq_poll(sc, cqe, 1000);
6389 if (error != 0) {
6390 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6391 goto free;
6392 }
6393 if (mcx_cmdq_verify(cqe) != 0) {
6394 printf("%s: query nic vport counters command corrupt\n",
6395 DEVNAME(sc));
6396 goto free;
6397 }
6398
6399 out = mcx_cmdq_out(cqe);
6400 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6401 printf("%s: query nic vport counters failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
6403 error = -1;
6404 goto free;
6405 }
6406
6407 counters = (struct mcx_nic_vport_counters *)
6408 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6409 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6410 counters->rx_ucast.packets + counters->tx_ucast.packets +
6411 counters->rx_err.packets + counters->tx_err.packets)
6412 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6413 DEVNAME(sc),
		    be64toh(counters->tx_err.packets),
		    be64toh(counters->rx_err.packets),
		    be64toh(counters->tx_ucast.packets),
		    be64toh(counters->rx_ucast.packets),
		    be64toh(counters->tx_bcast.packets),
		    be64toh(counters->rx_bcast.packets));
6420 free:
6421 mcx_dmamem_free(sc, &mxm);
6422
6423 return (error);
6424 }
6425
6426 static int
6427 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6428 {
6429 struct mcx_dmamem mxm;
6430 struct mcx_cmdq_entry *cqe;
6431 struct mcx_cmd_query_flow_counter_in *in;
6432 struct mcx_cmd_query_flow_counter_mb_in *mbin;
6433 struct mcx_cmd_query_flow_counter_out *out;
6434 struct mcx_counter *counters;
6435 int error, token;
6436
6437 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6438 token = mcx_cmdq_token(sc);
6439 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6440 sizeof(*counters), token);
6441
6442 in = mcx_cmdq_in(cqe);
6443 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6444 in->cmd_op_mod = htobe16(0);
6445
6446 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6447 &cqe->cq_output_ptr, token) != 0) {
6448 printf(", unable to allocate query flow counter mailboxen\n");
6449 return (-1);
6450 }
6451 cqe->cq_input_ptr = cqe->cq_output_ptr;
6452 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6453 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6454 mbin->cmd_clear = 0x80;
6455
6456 mcx_cmdq_mboxes_sign(&mxm, 1);
6457 mcx_cmdq_post(sc, cqe, 0);
6458
6459 error = mcx_cmdq_poll(sc, cqe, 1000);
6460 if (error != 0) {
6461 printf("%s: query flow counter timeout\n", DEVNAME(sc));
6462 goto free;
6463 }
6464 if (mcx_cmdq_verify(cqe) != 0) {
6465 printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
6466 goto free;
6467 }
6468
6469 out = mcx_cmdq_out(cqe);
6470 if (out->cmd_status != MCX_CQ_STATUS_OK) {
6471 printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
6473 error = -1;
6474 goto free;
6475 }
6476
6477 counters = (struct mcx_counter *)
6478 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6479 if (counters->packets)
6480 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
		    be64toh(counters->packets));
6482 free:
6483 mcx_dmamem_free(sc, &mxm);
6484
6485 return (error);
6486 }
6487
6488 #endif
6489
6490 #if NKSTAT > 0
6491
6492 int
6493 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6494 {
6495 struct mcx_dmamem mxm;
6496 struct mcx_cmdq_entry *cqe;
6497 struct mcx_cmd_query_rq_in *in;
6498 struct mcx_cmd_query_rq_out *out;
6499 struct mcx_cmd_query_rq_mb_out *mbout;
6500 uint8_t token = mcx_cmdq_token(sc);
6501 int error;
6502
6503 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6504 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6505 token);
6506
6507 in = mcx_cmdq_in(cqe);
6508 in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6509 in->cmd_op_mod = htobe16(0);
6510 in->cmd_rqn = htobe32(rx->rx_rqn);
6511
6512 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6513 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6514 &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query rq mailboxes\n",
		    DEVNAME(sc));
6516 return (-1);
6517 }
6518
6519 mcx_cmdq_mboxes_sign(&mxm, 1);
6520
6521 mcx_cmdq_post(sc, cqe, 0);
6522 error = mcx_cmdq_poll(sc, cqe, 1000);
6523 if (error != 0) {
6524 printf("%s: query rq timeout\n", DEVNAME(sc));
6525 goto free;
6526 }
6527 error = mcx_cmdq_verify(cqe);
6528 if (error != 0) {
6529 printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6530 goto free;
6531 }
6532
6533 out = mcx_cmdq_out(cqe);
6534 switch (out->cmd_status) {
6535 case MCX_CQ_STATUS_OK:
6536 break;
6537 default:
6538 printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6539 out->cmd_status, be32toh(out->cmd_syndrome));
6540 error = -1;
6541 goto free;
6542 }
6543
6544 mbout = (struct mcx_cmd_query_rq_mb_out *)
6545 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6546 memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6547
6548 free:
6549 mcx_cq_mboxes_free(sc, &mxm);
6550 return (error);
6551 }
6552
6553 int
6554 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6555 {
6556 struct mcx_dmamem mxm;
6557 struct mcx_cmdq_entry *cqe;
6558 struct mcx_cmd_query_sq_in *in;
6559 struct mcx_cmd_query_sq_out *out;
6560 struct mcx_cmd_query_sq_mb_out *mbout;
6561 uint8_t token = mcx_cmdq_token(sc);
6562 int error;
6563
6564 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6565 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6566 token);
6567
6568 in = mcx_cmdq_in(cqe);
6569 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6570 in->cmd_op_mod = htobe16(0);
6571 in->cmd_sqn = htobe32(tx->tx_sqn);
6572
6573 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6574 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6575 &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query sq mailboxes\n",
		    DEVNAME(sc));
6577 return (-1);
6578 }
6579
6580 mcx_cmdq_mboxes_sign(&mxm, 1);
6581
6582 mcx_cmdq_post(sc, cqe, 0);
6583 error = mcx_cmdq_poll(sc, cqe, 1000);
6584 if (error != 0) {
6585 printf("%s: query sq timeout\n", DEVNAME(sc));
6586 goto free;
6587 }
6588 error = mcx_cmdq_verify(cqe);
6589 if (error != 0) {
6590 printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6591 goto free;
6592 }
6593
6594 out = mcx_cmdq_out(cqe);
6595 switch (out->cmd_status) {
6596 case MCX_CQ_STATUS_OK:
6597 break;
6598 default:
6599 printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6600 out->cmd_status, be32toh(out->cmd_syndrome));
6601 error = -1;
6602 goto free;
6603 }
6604
6605 mbout = (struct mcx_cmd_query_sq_mb_out *)
6606 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6607 memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6608
6609 free:
6610 mcx_cq_mboxes_free(sc, &mxm);
6611 return (error);
6612 }
6613
6614 int
6615 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6616 {
6617 struct mcx_dmamem mxm;
6618 struct mcx_cmdq_entry *cqe;
6619 struct mcx_cmd_query_cq_in *in;
6620 struct mcx_cmd_query_cq_out *out;
6621 struct mcx_cq_ctx *ctx;
6622 uint8_t token = mcx_cmdq_token(sc);
6623 int error;
6624
6625 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6626 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6627 token);
6628
6629 in = mcx_cmdq_in(cqe);
6630 in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6631 in->cmd_op_mod = htobe16(0);
6632 in->cmd_cqn = htobe32(cq->cq_n);
6633
6634 CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6635 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6636 &cqe->cq_output_ptr, token) != 0) {
6637 printf("%s: unable to allocate query cq mailboxes\n",
6638 DEVNAME(sc));
6639 return (-1);
6640 }
6641
6642 mcx_cmdq_mboxes_sign(&mxm, 1);
6643
6644 mcx_cmdq_post(sc, cqe, 0);
6645 error = mcx_cmdq_poll(sc, cqe, 1000);
6646 if (error != 0) {
6647 printf("%s: query cq timeout\n", DEVNAME(sc));
6648 goto free;
6649 }
6650 if (mcx_cmdq_verify(cqe) != 0) {
6651 printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6652 goto free;
6653 }
6654
6655 out = mcx_cmdq_out(cqe);
6656 switch (out->cmd_status) {
6657 case MCX_CQ_STATUS_OK:
6658 break;
6659 default:
		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
6661 out->cmd_status, be32toh(out->cmd_syndrome));
6662 error = -1;
6663 goto free;
6664 }
6665
6666 ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6667 memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6668 free:
6669 mcx_dmamem_free(sc, &mxm);
6670 return (error);
6671 }
6672
6673 int
6674 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6675 {
6676 struct mcx_dmamem mxm;
6677 struct mcx_cmdq_entry *cqe;
6678 struct mcx_cmd_query_eq_in *in;
6679 struct mcx_cmd_query_eq_out *out;
	struct mcx_eq_ctx *ctx;
6681 uint8_t token = mcx_cmdq_token(sc);
6682 int error;
6683
6684 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6685 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6686 token);
6687
6688 in = mcx_cmdq_in(cqe);
6689 in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6690 in->cmd_op_mod = htobe16(0);
6691 in->cmd_eqn = htobe32(eq->eq_n);
6692
6693 CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6694 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6695 &cqe->cq_output_ptr, token) != 0) {
6696 printf("%s: unable to allocate query eq mailboxes\n",
6697 DEVNAME(sc));
6698 return (-1);
6699 }
6700
6701 mcx_cmdq_mboxes_sign(&mxm, 1);
6702
6703 mcx_cmdq_post(sc, cqe, 0);
6704 error = mcx_cmdq_poll(sc, cqe, 1000);
6705 if (error != 0) {
6706 printf("%s: query eq timeout\n", DEVNAME(sc));
6707 goto free;
6708 }
6709 if (mcx_cmdq_verify(cqe) != 0) {
6710 printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6711 goto free;
6712 }
6713
6714 out = mcx_cmdq_out(cqe);
6715 switch (out->cmd_status) {
6716 case MCX_CQ_STATUS_OK:
6717 break;
6718 default:
6719 printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6720 out->cmd_status, be32toh(out->cmd_syndrome));
6721 error = -1;
6722 goto free;
6723 }
6724
6725 ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6726 memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6727 free:
6728 mcx_dmamem_free(sc, &mxm);
6729 return (error);
6730 }
6731
6732 #endif /* NKSTAT > 0 */
6733
6734
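/*
 * mcx_rx_fill_slots hands the nic up to nslots fresh receive buffers:
 * for each free ring entry it allocates an mbuf cluster, loads it into
 * the slot's dma map, and fills in the ring entry (byte count, address,
 * lkey), then publishes the new producer counter through the doorbell
 * record. it returns the number of slots it could not fill so the
 * caller can hand them back to the rxr accounting.
 */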
6735 static inline unsigned int
6736 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6737 {
6738 struct mcx_rq_entry *ring, *rqe;
6739 struct mcx_slot *ms;
6740 struct mbuf *m;
6741 uint slot, p, fills;
6742
6743 ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6744 p = rx->rx_prod;
6745
6746 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6747 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6748
6751 for (fills = 0; fills < nslots; fills++) {
6752 slot = p % (1 << MCX_LOG_RQ_SIZE);
6753
6754 ms = &rx->rx_slots[slot];
6755 rqe = &ring[slot];
6756
6757 m = NULL;
6758 MGETHDR(m, M_DONTWAIT, MT_DATA);
6759 if (m == NULL)
6760 break;
6761
6762 MCLGET(m, M_DONTWAIT);
6763 if ((m->m_flags & M_EXT) == 0) {
6764 m_freem(m);
6765 break;
6766 }
6767
6768 m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6769 m_adj(m, m->m_ext.ext_size - sc->sc_rxbufsz);
6770 m_adj(m, ETHER_ALIGN);
6771
6772 if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6773 BUS_DMA_NOWAIT) != 0) {
6774 m_freem(m);
6775 break;
6776 }
		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
6778 ms->ms_m = m;
6779
6780 be32enc(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6781 be64enc(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6782 be32enc(&rqe->rqe_lkey, sc->sc_lkey);
6783
6784 p++;
6785 }
6786
6787 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6788 0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6789
6790 rx->rx_prod = p;
6791
6792 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6793 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6794 be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6795 p & MCX_WQ_DOORBELL_MASK);
6796 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6797 rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6798
6799 return (nslots - fills);
6800 }
6801
6802 static int
6803 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6804 {
6805 u_int slots;
6806
6807 slots = mcx_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6808 if (slots == 0)
6809 return (1);
6810
6811 slots = mcx_rx_fill_slots(sc, rx, slots);
6812 mcx_rxr_put(&rx->rx_rxr, slots);
6813 return (0);
6814 }
6815
6816 void
6817 mcx_refill(void *xrx)
6818 {
6819 struct mcx_rx *rx = xrx;
6820 struct mcx_softc *sc = rx->rx_softc;
6821
6822 mcx_rx_fill(sc, rx);
6823
6824 if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
6825 callout_schedule(&rx->rx_refill, 1);
6826 }
6827
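/*
 * mcx_process_txeof completes a transmitted packet: the tx slot is
 * identified by the completion's wqe counter, the mbuf is unloaded and
 * freed, and the number of sq slots the packet occupied is returned so
 * that space can be reclaimed for the producer.
 */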
6828 static int
6829 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6830 struct mcx_cq_entry *cqe)
6831 {
6832 struct mcx_slot *ms;
6833 bus_dmamap_t map;
6834 int slot, slots;
6835
6836 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6837
6838 ms = &tx->tx_slots[slot];
6839 map = ms->ms_map;
6840 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6841 BUS_DMASYNC_POSTWRITE);
6842
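	/*
	 * the first slot appears to hold the ctrl and eth segments plus
	 * the first data segment; the remaining dm_nsegs - 1 segments
	 * round up into whole extra slots, and with four segs per slot
	 * howmany(dm_nsegs - 1, MCX_SQ_SEGS_PER_SLOT) works out to
	 * (dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT.
	 */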
6843 slots = 1;
6844 if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT;
6846
6847 bus_dmamap_unload(sc->sc_dmat, map);
6848 m_freem(ms->ms_m);
6849 ms->ms_m = NULL;
6850
6851 return (slots);
6852 }
6853
6854 static uint64_t
6855 mcx_uptime(void)
6856 {
6857 struct timespec ts;
6858
6859 nanouptime(&ts);
6860
6861 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
6862 }
6863
6864 static void
6865 mcx_calibrate_first(struct mcx_softc *sc)
6866 {
6867 struct mcx_calibration *c = &sc->sc_calibration[0];
6868 int s;
6869
6870 sc->sc_calibration_gen = 0;
6871
6872 s = splhigh(); /* crit_enter? */
6873 c->c_ubase = mcx_uptime();
6874 c->c_tbase = mcx_timer(sc);
6875 splx(s);
6876 c->c_ratio = 0;
6877
6878 #if notyet
6879 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
6880 #endif
6881 }
6882
6883 #define MCX_TIMESTAMP_SHIFT 24
6884
6885 static void
6886 mcx_calibrate(void *arg)
6887 {
6888 struct mcx_softc *sc = arg;
6889 struct mcx_calibration *nc, *pc;
6890 uint64_t udiff, tdiff;
6891 unsigned int gen;
6892 int s;
6893
6894 if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
6895 return;
6896
6897 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
6898
6899 gen = sc->sc_calibration_gen;
6900 pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6901 gen++;
6902 nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
6903
6904 nc->c_uptime = pc->c_ubase;
6905 nc->c_timestamp = pc->c_tbase;
6906
6907 s = splhigh(); /* crit_enter? */
6908 nc->c_ubase = mcx_uptime();
6909 nc->c_tbase = mcx_timer(sc);
6910 splx(s);
6911
6912 udiff = nc->c_ubase - nc->c_uptime;
6913 tdiff = nc->c_tbase - nc->c_timestamp;
6914
	/*
	 * udiff is the wall clock time between calibration ticks,
	 * which should be 32 seconds or 32 billion nanoseconds. if
	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
	 * number, so 32 billion should still have a lot of high bits
	 * spare: 32e9 is about 2^35, so shifted up 24 bits it is
	 * still only about 2^59 and fits a 64 bit number easily. we
	 * use this space by shifting the nanoseconds up 24 bits so
	 * we have a nice big number to divide by the number of mcx
	 * timer ticks.
	 */
6924 nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6925
6926 membar_producer();
6927 sc->sc_calibration_gen = gen;
6928 }
6929
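/*
 * mcx_process_rx turns a receive completion into an mbuf: the rx slot
 * is identified by the completion's wqe counter, its dma map is synced
 * and unloaded, checksum and vlan state are translated out of the
 * completion flags, and the packet is queued on mq for delivery once
 * the cq sweep finishes.
 */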
6930 static int
6931 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6932 struct mcx_cq_entry *cqe, struct mcx_mbufq *mq,
6933 const struct mcx_calibration *c)
6934 {
6935 struct ifnet *ifp = &sc->sc_ec.ec_if;
6936 struct mcx_slot *ms;
6937 struct mbuf *m;
6938 uint32_t flags, len;
6939 int slot;
6940
6941 len = be32dec(&cqe->cq_byte_cnt);
6942 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6943
6944 ms = &rx->rx_slots[slot];
6945 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
6946 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6947
6948 m = ms->ms_m;
6949 ms->ms_m = NULL;
6950
6951 m_set_rcvif(m, &sc->sc_ec.ec_if);
6952 m->m_pkthdr.len = m->m_len = len;
6953
6954 #if 0
6955 if (cqe->cq_rx_hash_type) {
6956 m->m_pkthdr.ph_flowid = be32toh(cqe->cq_rx_hash);
6957 m->m_pkthdr.csum_flags |= M_FLOWID;
6958 }
6959 #endif
6960
6961 flags = be32dec(&cqe->cq_flags);
6962 if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK) {
6963 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6964 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
6965 }
6966 if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK) {
6967 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)
6968 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
6969 if (ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx)
6970 m->m_pkthdr.csum_flags |= M_CSUM_TCPv6;
6971 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)
6972 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
6973 if (ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx)
6974 m->m_pkthdr.csum_flags |= M_CSUM_UDPv6;
6975 }
6976 if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6977 vlan_set_tag(m, flags & MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6978 }
6979
6980 #if notyet
6981 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_LINK0) && c->c_ratio) {
6982 uint64_t t = be64dec(&cqe->cq_timestamp);
6983 t -= c->c_timestamp;
6984 t *= c->c_ratio;
6985 t >>= MCX_TIMESTAMP_SHIFT;
6986 t += c->c_uptime;
6987
6988 m->m_pkthdr.ph_timestamp = t;
6989 SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6990 }
6991 #endif
6992
6993 MBUFQ_ENQUEUE(mq, m);
6994
6995 return (1);
6996 }
6997
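/*
 * the nic toggles the ownership bit in each cq entry every time it
 * wraps around the ring, so an entry is valid for software when its
 * owner bit matches the current wrap parity of cq_cons (bit
 * MCX_LOG_CQ_SIZE): clear on the first pass, set on the second, and
 * so on.
 */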
6998 static struct mcx_cq_entry *
6999 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
7000 {
7001 struct mcx_cq_entry *cqe;
7002 int next;
7003
7004 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
7005 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
7006
7007 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
7008 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
7009 return (&cqe[next]);
7010 }
7011
7012 return (NULL);
7013 }
7014
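/*
 * mcx_arm_cq asks for another completion event: the doorbell record in
 * host memory is updated with the consumer index and arm sequence
 * number, and the same word plus the cq number is written to the uar
 * doorbell page so the nic knows to generate an event for the next
 * completion.
 */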
7015 static void
7016 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
7017 {
7018 struct mcx_cq_doorbell *db;
7019 bus_size_t offset;
7020 uint32_t val;
7021 uint64_t uval;
7022
7023 val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
7024 val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
7025
7026 db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
7027
7028 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7029 cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
7030
7031 be32enc(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
7032 be32enc(&db->db_arm_ci, val);
7033
7034 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7035 cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
7036
7037 offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
7038
7039 uval = (uint64_t)val << 32;
7040 uval |= cq->cq_n;
7041
7042 bus_space_write_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
7043 mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
7044 }
7045
7046 void
7047 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
7048 {
7049 struct mcx_rx *rx = &q->q_rx;
7050 struct mcx_tx *tx = &q->q_tx;
7051 struct ifnet *ifp = &sc->sc_ec.ec_if;
7052 const struct mcx_calibration *c;
7053 unsigned int gen;
7054 struct mcx_cq_entry *cqe;
7055 struct mcx_mbufq mq;
7056 struct mbuf *m;
7057 int rxfree, txfree;
7058
7059 MBUFQ_INIT(&mq);
7060
7061 gen = sc->sc_calibration_gen;
7062 membar_consumer();
7063 c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
7064
7065 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7066 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
7067
7068 rxfree = 0;
7069 txfree = 0;
7070 while ((cqe = mcx_next_cq_entry(sc, cq))) {
7071 uint8_t opcode;
7072 opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
7073 switch (opcode) {
7074 case MCX_CQ_ENTRY_OPCODE_REQ:
7075 txfree += mcx_process_txeof(sc, tx, cqe);
7076 break;
7077 case MCX_CQ_ENTRY_OPCODE_SEND:
7078 rxfree += mcx_process_rx(sc, rx, cqe, &mq, c);
7079 break;
7080 case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
7081 case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
7082 /* uint8_t *cqp = (uint8_t *)cqe; */
7083 /* printf("%s: cq completion error: %x\n",
7084 DEVNAME(sc), cqp[0x37]); */
7085 break;
7086
7087 default:
7088 /* printf("%s: cq completion opcode %x??\n",
7089 DEVNAME(sc), opcode); */
7090 break;
7091 }
7092
7093 cq->cq_cons++;
7094 }
7095
7096 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7097 0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
7098
7099 if (rxfree > 0) {
7100 mcx_rxr_put(&rx->rx_rxr, rxfree);
7101 while (MBUFQ_FIRST(&mq) != NULL) {
7102 MBUFQ_DEQUEUE(&mq, m);
7103 if_percpuq_enqueue(ifp->if_percpuq, m);
7104 }
7105
7106 mcx_rx_fill(sc, rx);
7107 if (mcx_rxr_inuse(&rx->rx_rxr) == 0)
7108 callout_schedule(&rx->rx_refill, 1);
7109 }
7110
7111 cq->cq_count++;
7112 mcx_arm_cq(sc, cq, q->q_uar);
7113
7114 if (txfree > 0) {
7115 tx->tx_cons += txfree;
7116 if_schedule_deferred_start(ifp);
7117 }
7118 }
7119
7120
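/*
 * eq doorbells are simpler: a single 32 bit write of the eq number and
 * consumer index to the uar page acknowledges the entries consumed so
 * far and re-arms the event queue.
 */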
7121 static void
7122 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
7123 {
7124 bus_size_t offset;
7125 uint32_t val;
7126
7127 offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
7128 val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
7129
7130 mcx_wr(sc, offset, val);
7131 mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
7132 }
7133
7134 static struct mcx_eq_entry *
7135 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
7136 {
7137 struct mcx_eq_entry *eqe;
7138 int next;
7139
7140 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
7141 next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
7142 if ((eqe[next].eq_owner & 1) ==
7143 ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
7144 eq->eq_cons++;
7145 return (&eqe[next]);
7146 }
7147 return (NULL);
7148 }
7149
7150 int
7151 mcx_admin_intr(void *xsc)
7152 {
7153 struct mcx_softc *sc = (struct mcx_softc *)xsc;
7154 struct mcx_eq *eq = &sc->sc_admin_eq;
7155 struct mcx_eq_entry *eqe;
7156
7157 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7158 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7159
7160 while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7161 switch (eqe->eq_event_type) {
7162 case MCX_EVENT_TYPE_LAST_WQE:
7163 /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7164 break;
7165
7166 case MCX_EVENT_TYPE_CQ_ERROR:
7167 /* printf("%s: cq error\n", DEVNAME(sc)); */
7168 break;
7169
7170 case MCX_EVENT_TYPE_CMD_COMPLETION:
7171 /* wakeup probably */
7172 break;
7173
7174 case MCX_EVENT_TYPE_PORT_CHANGE:
7175 workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
7176 break;
7177
7178 default:
7179 /* printf("%s: something happened\n", DEVNAME(sc)); */
7180 break;
7181 }
7182 }
7183
7184 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7185 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7186
7187 mcx_arm_eq(sc, eq, sc->sc_uar);
7188
7189 return (1);
7190 }
7191
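/*
 * mcx_cq_intr is the per-queue interrupt handler. each queue pair has
 * its own event queue, so a completion event here can only be for that
 * queue's cq, which gets swept and re-armed.
 */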
7192 int
7193 mcx_cq_intr(void *xq)
7194 {
7195 struct mcx_queues *q = (struct mcx_queues *)xq;
7196 struct mcx_softc *sc = q->q_sc;
7197 struct mcx_eq *eq = &q->q_eq;
7198 struct mcx_eq_entry *eqe;
7199 int cqn;
7200
7201 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7202 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7203
7204 while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7205 switch (eqe->eq_event_type) {
7206 case MCX_EVENT_TYPE_COMPLETION:
7207 cqn = be32toh(eqe->eq_event_data[6]);
7208 if (cqn == q->q_cq.cq_n)
7209 mcx_process_cq(sc, q, &q->q_cq);
7210 break;
7211 }
7212 }
7213
7214 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7215 0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7216
7217 mcx_arm_eq(sc, eq, q->q_uar);
7218
7219 return (1);
7220 }
7221
7222 static void
7223 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7224 int total)
7225 {
7226 struct mcx_slot *ms;
7227
7228 int i = allocated;
7229 while (i-- > 0) {
7230 ms = &slots[i];
7231 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7232 m_freem(ms->ms_m);
7233 }
7234 kmem_free(slots, total * sizeof(*ms));
7235 }
7236
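/*
 * mcx_queue_up brings up one rx/tx queue pair: slot arrays with dma
 * maps for both rings, then the completion queue, the send queue, and
 * the receive queue, unwinding in reverse order on failure.
 */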
7237 static int
7238 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7239 {
7240 struct mcx_rx *rx;
7241 struct mcx_tx *tx;
7242 struct mcx_slot *ms;
7243 int i;
7244
7245 rx = &q->q_rx;
7246 rx->rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
7247 KM_SLEEP);
7248
7249 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7250 ms = &rx->rx_slots[i];
7251 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7252 sc->sc_hardmtu, 0,
7253 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7254 &ms->ms_map) != 0) {
7255 printf("%s: failed to allocate rx dma maps\n",
7256 DEVNAME(sc));
7257 goto destroy_rx_slots;
7258 }
7259 }
7260
7261 tx = &q->q_tx;
7262 tx->tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
7263 KM_SLEEP);
7264
7265 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7266 ms = &tx->tx_slots[i];
7267 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7268 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7269 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
7270 &ms->ms_map) != 0) {
7271 printf("%s: failed to allocate tx dma maps\n",
7272 DEVNAME(sc));
7273 goto destroy_tx_slots;
7274 }
7275 }
7276
7277 if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7278 q->q_eq.eq_n) != 0)
7279 goto destroy_tx_slots;
7280
7281 if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7282 != 0)
7283 goto destroy_cq;
7284
7285 if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7286 goto destroy_sq;
7287
7288 return 0;
7289
7290 destroy_sq:
7291 mcx_destroy_sq(sc, tx);
7292 destroy_cq:
7293 mcx_destroy_cq(sc, &q->q_cq);
7294 destroy_tx_slots:
7295 mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7296 tx->tx_slots = NULL;
7297
7298 i = (1 << MCX_LOG_RQ_SIZE);
7299 destroy_rx_slots:
7300 mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7301 rx->rx_slots = NULL;
7302 return ENOMEM;
7303 }
7304
7305 static int
7306 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7307 {
7308 int i;
7309 int count;
7310
7311 count = 0;
7312 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7313 if (mcx_rss_config[i].flow_group == group)
7314 count++;
7315 }
7316
7317 return count;
7318 }
7319
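/*
 * mcx_init assembles the whole datapath: a tis and the per-queue
 * cq/sq/rq sets, the rss flow table with its l4, l3 and catch-all
 * groups (each entry steering one traffic class to a tir), the root
 * mac flow table whose promisc/allmulti/mac groups forward into the
 * rss table, and the rqt spreading receive work over the queues.
 * only then are the queues marked ready and the interface started.
 */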
7320 static int
7321 mcx_init(struct ifnet *ifp)
7322 {
7323 struct mcx_softc *sc = ifp->if_softc;
7324 struct mcx_rx *rx;
7325 struct mcx_tx *tx;
7326 int i, start, count, flow_group, flow_index;
7327 struct mcx_flow_match match_crit;
7328 struct mcx_rss_rule *rss;
7329 uint32_t dest;
7330 int rqns[MCX_MAX_QUEUES] = { 0 };
7331
7332 if (ISSET(ifp->if_flags, IFF_RUNNING))
7333 mcx_stop(ifp, 0);
7334
7335 if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7336 goto down;
7337
7338 for (i = 0; i < sc->sc_nqueues; i++) {
7339 if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7340 goto down;
7341 }
7342 }
7343
7344 /* RSS flow table and flow groups */
7345 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7346 &sc->sc_rss_flow_table_id) != 0)
7347 goto down;
7348
7349 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7350 sc->sc_rss_flow_table_id;
7351
7352 /* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7353 memset(&match_crit, 0, sizeof(match_crit));
7354 match_crit.mc_ethertype = 0xffff;
7355 match_crit.mc_ip_proto = 0xff;
7356 match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7357 start = 0;
7358 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7359 if (count != 0) {
7360 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7361 MCX_FLOW_GROUP_RSS_L4, start, count,
7362 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7363 goto down;
7364 start += count;
7365 }
7366
7367 /* L3 RSS flow group (v4/v6, including fragments) */
7368 memset(&match_crit, 0, sizeof(match_crit));
7369 match_crit.mc_ethertype = 0xffff;
7370 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7371 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7372 MCX_FLOW_GROUP_RSS_L3, start, count,
7373 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7374 goto down;
7375 start += count;
7376
7377 /* non-RSS flow group */
7378 count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7379 memset(&match_crit, 0, sizeof(match_crit));
7380 if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7381 MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7382 goto down;
7383
7384 /* Root flow table, matching packets based on mac address */
7385 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7386 &sc->sc_mac_flow_table_id) != 0)
7387 goto down;
7388
7389 /* promisc flow group */
7390 start = 0;
7391 memset(&match_crit, 0, sizeof(match_crit));
7392 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7393 MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7394 goto down;
7395 sc->sc_promisc_flow_enabled = 0;
7396 start++;
7397
7398 /* all multicast flow group */
7399 match_crit.mc_dest_mac[0] = 0x01;
7400 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7401 MCX_FLOW_GROUP_ALLMULTI, start, 1,
7402 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7403 goto down;
7404 sc->sc_allmulti_flow_enabled = 0;
7405 start++;
7406
7407 /* mac address matching flow group */
7408 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7409 if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7410 MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7411 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7412 goto down;
7413
7414 /* flow table entries for unicast and broadcast */
7415 start = 0;
7416 if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7417 LLADDR(satosdl(ifp->if_dl->ifa_addr)), dest) != 0)
7418 goto down;
7419 start++;
7420
7421 if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7422 etherbroadcastaddr, dest) != 0)
7423 goto down;
7424 start++;
7425
7426 /* multicast entries go after that */
7427 sc->sc_mcast_flow_base = start;
7428
7429 /* re-add any existing multicast flows */
7430 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7431 if (sc->sc_mcast_flows[i][0] != 0) {
7432 mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7433 sc->sc_mcast_flow_base + i,
7434 sc->sc_mcast_flows[i], dest);
7435 }
7436 }
7437
7438 if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7439 goto down;
7440
7441 /*
7442 * the RQT can be any size as long as it's a power of two.
7443 * since we also restrict the number of queues to a power of two,
7444 * we can just put each rx queue in once.
7445 */
7446 for (i = 0; i < sc->sc_nqueues; i++)
7447 rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7448
7449 if (mcx_create_rqt(sc, sc->sc_nqueues, rqns, &sc->sc_rqt) != 0)
7450 goto down;
7451
7452 start = 0;
7453 flow_index = 0;
7454 flow_group = -1;
7455 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7456 rss = &mcx_rss_config[i];
7457 if (rss->flow_group != flow_group) {
7458 flow_group = rss->flow_group;
7459 flow_index = 0;
7460 }
7461
7462 if (rss->hash_sel == 0) {
7463 if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7464 &sc->sc_tir[i]) != 0)
7465 goto down;
7466 } else {
7467 if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7468 rss->hash_sel, &sc->sc_tir[i]) != 0)
7469 goto down;
7470 }
7471
7472 if (mcx_set_flow_table_entry_proto(sc, flow_group,
7473 flow_index, rss->ethertype, rss->ip_proto,
7474 MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7475 goto down;
7476 flow_index++;
7477 }
7478
7479 for (i = 0; i < sc->sc_nqueues; i++) {
7480 struct mcx_queues *q = &sc->sc_queues[i];
7481 rx = &q->q_rx;
7482 tx = &q->q_tx;
7483
7484 /* start the queues */
7485 if (mcx_ready_sq(sc, tx) != 0)
7486 goto down;
7487
7488 if (mcx_ready_rq(sc, rx) != 0)
7489 goto down;
7490
7491 mcx_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7492 rx->rx_prod = 0;
7493 mcx_rx_fill(sc, rx);
7494
7495 tx->tx_cons = 0;
7496 tx->tx_prod = 0;
7497 }
7498
7499 mcx_calibrate_first(sc);
7500
7501 SET(ifp->if_flags, IFF_RUNNING);
7502 CLR(ifp->if_flags, IFF_OACTIVE);
7503 if_schedule_deferred_start(ifp);
7504
7505 return 0;
7506 down:
7507 mcx_stop(ifp, 0);
7508 return EIO;
7509 }
7510
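/*
 * mcx_stop undoes mcx_init in roughly reverse order: flow table
 * entries go first so no more packets can be steered at the rings,
 * then the tirs, flow groups, flow tables and rqt, and finally the
 * queues, their slots, and the tis.
 */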
7511 static void
7512 mcx_stop(struct ifnet *ifp, int disable)
7513 {
7514 struct mcx_softc *sc = ifp->if_softc;
7515 struct mcx_rss_rule *rss;
7516 int group, i, flow_group, flow_index;
7517
7518 CLR(ifp->if_flags, IFF_RUNNING);
7519
7520 /*
7521 * delete flow table entries first, so no packets can arrive
7522 * after the barriers
7523 */
7524 if (sc->sc_promisc_flow_enabled)
7525 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7526 if (sc->sc_allmulti_flow_enabled)
7527 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7528 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7529 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7530 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7531 if (sc->sc_mcast_flows[i][0] != 0) {
7532 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7533 sc->sc_mcast_flow_base + i);
7534 }
7535 }
7536
7537 flow_group = -1;
7538 flow_index = 0;
7539 for (i = 0; i < __arraycount(mcx_rss_config); i++) {
7540 rss = &mcx_rss_config[i];
7541 if (rss->flow_group != flow_group) {
7542 flow_group = rss->flow_group;
7543 flow_index = 0;
7544 }
7545
7546 mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7547
7548 mcx_destroy_tir(sc, sc->sc_tir[i]);
7549 sc->sc_tir[i] = 0;
7550
7551 flow_index++;
7552 }
7553
7554 for (i = 0; i < sc->sc_nqueues; i++) {
7555 callout_halt(&sc->sc_queues[i].q_rx.rx_refill, NULL);
7556 }
7557
7558 callout_halt(&sc->sc_calibrate, NULL);
7559
7560 for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7561 if (sc->sc_flow_group[group].g_id != -1)
7562 mcx_destroy_flow_group(sc, group);
7563 }
7564
7565 if (sc->sc_mac_flow_table_id != -1) {
7566 mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7567 sc->sc_mac_flow_table_id = -1;
7568 }
7569 if (sc->sc_rss_flow_table_id != -1) {
7570 mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7571 sc->sc_rss_flow_table_id = -1;
7572 }
7573 if (sc->sc_rqt != -1) {
7574 mcx_destroy_rqt(sc, sc->sc_rqt);
7575 sc->sc_rqt = -1;
7576 }
7577
7578 for (i = 0; i < sc->sc_nqueues; i++) {
7579 struct mcx_queues *q = &sc->sc_queues[i];
7580 struct mcx_rx *rx = &q->q_rx;
7581 struct mcx_tx *tx = &q->q_tx;
7582 struct mcx_cq *cq = &q->q_cq;
7583
7584 if (rx->rx_rqn != 0)
7585 mcx_destroy_rq(sc, rx);
7586
7587 if (tx->tx_sqn != 0)
7588 mcx_destroy_sq(sc, tx);
7589
7590 if (tx->tx_slots != NULL) {
7591 mcx_free_slots(sc, tx->tx_slots,
7592 (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7593 tx->tx_slots = NULL;
7594 }
7595 if (rx->rx_slots != NULL) {
7596 mcx_free_slots(sc, rx->rx_slots,
7597 (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7598 rx->rx_slots = NULL;
7599 }
7600
7601 if (cq->cq_n != 0)
7602 mcx_destroy_cq(sc, cq);
7603 }
7604 if (sc->sc_tis != 0) {
7605 mcx_destroy_tis(sc, sc->sc_tis);
7606 sc->sc_tis = 0;
7607 }
7608 }
7609
7610 static int
7611 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
7612 {
7613 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7614 struct ifreq *ifr = (struct ifreq *)data;
7615 struct ethercom *ec = &sc->sc_ec;
7616 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7617 struct ether_multi *enm;
7618 struct ether_multistep step;
7619 int s, i, flags, error = 0;
7620 uint32_t dest;
7621
7622 s = splnet();
7623 switch (cmd) {
7624
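	/*
	 * multicast addresses get individual entries in the mac flow
	 * group, at most MCX_NUM_MCAST_FLOWS of them; if the table
	 * overflows, or a multicast range is added, fall back to
	 * IFF_ALLMULTI instead.
	 */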
7625 case SIOCADDMULTI:
		if (ether_addmulti(ifreq_getaddr(cmd, ifr),
		    &sc->sc_ec) == ENETRESET) {
7627 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7628 if (error != 0) {
7629 splx(s);
7630 return (error);
7631 }
7632
7633 dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7634 sc->sc_rss_flow_table_id;
7635
7636 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7637 if (sc->sc_mcast_flows[i][0] == 0) {
7638 memcpy(sc->sc_mcast_flows[i], addrlo,
7639 ETHER_ADDR_LEN);
7640 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7641 mcx_set_flow_table_entry_mac(sc,
7642 MCX_FLOW_GROUP_MAC,
7643 sc->sc_mcast_flow_base + i,
7644 sc->sc_mcast_flows[i], dest);
7645 }
7646 break;
7647 }
7648 }
7649
7650 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7651 if (i == MCX_NUM_MCAST_FLOWS) {
7652 SET(ifp->if_flags, IFF_ALLMULTI);
7653 sc->sc_extra_mcast++;
7654 error = ENETRESET;
7655 }
7656
7657 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
7658 SET(ifp->if_flags, IFF_ALLMULTI);
7659 error = ENETRESET;
7660 }
7661 }
7662 }
7663 break;
7664
7665 case SIOCDELMULTI:
7666 if (ether_delmulti(ifreq_getaddr(cmd, ifr), ec) == ENETRESET) {
7667 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7668 if (error != 0) {
7669 splx(s);
7670 return (error);
7671 }
7672
7673 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7674 if (memcmp(sc->sc_mcast_flows[i], addrlo,
7675 ETHER_ADDR_LEN) == 0) {
7676 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7677 mcx_delete_flow_table_entry(sc,
7678 MCX_FLOW_GROUP_MAC,
7679 sc->sc_mcast_flow_base + i);
7680 }
7681 sc->sc_mcast_flows[i][0] = 0;
7682 break;
7683 }
7684 }
7685
7686 if (i == MCX_NUM_MCAST_FLOWS)
7687 sc->sc_extra_mcast--;
7688
7689 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7690 sc->sc_extra_mcast == 0) {
7691 flags = 0;
7692 ETHER_LOCK(ec);
7693 ETHER_FIRST_MULTI(step, ec, enm);
7694 while (enm != NULL) {
7695 if (memcmp(enm->enm_addrlo,
7696 enm->enm_addrhi, ETHER_ADDR_LEN)) {
7697 SET(flags, IFF_ALLMULTI);
7698 break;
7699 }
7700 ETHER_NEXT_MULTI(step, enm);
7701 }
7702 ETHER_UNLOCK(ec);
7703 if (!ISSET(flags, IFF_ALLMULTI)) {
7704 CLR(ifp->if_flags, IFF_ALLMULTI);
7705 error = ENETRESET;
7706 }
7707 }
7708 }
7709 break;
7710
7711 default:
7712 error = ether_ioctl(ifp, cmd, data);
7713 }
7714
7715 if (error == ENETRESET) {
7716 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7717 (IFF_UP | IFF_RUNNING))
7718 mcx_iff(sc);
7719 error = 0;
7720 }
7721 splx(s);
7722
7723 return (error);
7724 }
7725
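/*
 * transceiver EEPROM access via the PMLP/MCIA registers, kept from
 * OpenBSD but compiled out: presumably awaiting a NetBSD equivalent
 * of OpenBSD's SIOCGSFFPAGE/struct if_sffpage interface.
 */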
7726 #if 0
7727 static int
7728 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7729 {
7730 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7731 struct mcx_reg_mcia mcia;
7732 struct mcx_reg_pmlp pmlp;
7733 int offset, error;
7734
7735 /* get module number */
7736 memset(&pmlp, 0, sizeof(pmlp));
7737 pmlp.rp_local_port = 1;
7738 error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7739 sizeof(pmlp));
7740 if (error != 0) {
7741 printf("%s: unable to get eeprom module number\n",
7742 DEVNAME(sc));
7743 return error;
7744 }
7745
7746 for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7747 memset(&mcia, 0, sizeof(mcia));
7748 mcia.rm_l = 0;
7749 mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
7750 MCX_PMLP_MODULE_NUM_MASK;
7751 mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */
7752 mcia.rm_page_num = sff->sff_page;
7753 mcia.rm_dev_addr = htobe16(offset);
7754 mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7755
7756 error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7757 &mcia, sizeof(mcia));
7758 if (error != 0) {
7759 printf("%s: unable to read eeprom at %x\n",
7760 DEVNAME(sc), offset);
7761 return error;
7762 }
7763
7764 memcpy(sff->sff_data + offset, mcia.rm_data,
7765 MCX_MCIA_EEPROM_BYTES);
7766 }
7767
7768 return 0;
7769 }
7770 #endif
7771
7772 static int
7773 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7774 {
7775 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7776 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7777 case 0:
7778 break;
7779
7780 case EFBIG:
7781 if (m_defrag(m, M_DONTWAIT) != NULL &&
7782 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7783 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7784 break;
7785
7786 /* FALLTHROUGH */
7787 default:
7788 return (1);
7789 }
7790
7791 ms->ms_m = m;
7792 return (0);
7793 }
7794
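/*
 * transmit path: each send WQE is built from 16-byte units - a ctrl
 * segment, an eth segment carrying MCX_SQ_INLINE_SIZE bytes of inline
 * packet headers, and one data segment per DMA segment of the mbuf.
 * a WQE needing more data segments than fit in one slot spills into
 * the following slot(s), up to MCX_SQ_ENTRY_MAX_SLOTS per packet.
 */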
7795 static void
7796 mcx_send_common_locked(struct ifnet *ifp, struct mcx_tx *tx, bool is_transmit)
7797 {
7798 struct mcx_softc *sc = ifp->if_softc;
7799 struct mcx_sq_entry *sq, *sqe;
7800 struct mcx_sq_entry_seg *sqs;
7801 struct mcx_slot *ms;
7802 bus_dmamap_t map;
7803 struct mbuf *m;
7804 u_int idx, free, used;
7805 uint64_t *bf;
7806 uint32_t csum;
7807 size_t bf_base;
7808 int i, seg, nseg;
7809
7810 KASSERT(mutex_owned(&tx->tx_lock));
7811
7812 if ((ifp->if_flags & IFF_RUNNING) == 0)
7813 return;
7814
7815 bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7816
7817 idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7818 free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7819
7820 used = 0;
7821 bf = NULL;
7822
7823 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7824 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7825
7826 sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7827
7828 for (;;) {
7829 if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7830 SET(ifp->if_flags, IFF_OACTIVE);
7831 break;
7832 }
7833
7834 if (is_transmit) {
7835 m = pcq_get(tx->tx_pcq);
7836 } else {
7837 IFQ_DEQUEUE(&ifp->if_snd, m);
7838 }
7839 if (m == NULL) {
7840 break;
7841 }
7842
7843 sqe = sq + idx;
7844 ms = &tx->tx_slots[idx];
7845 memset(sqe, 0, sizeof(*sqe));
7846
7847 /* ctrl segment */
7848 sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7849 ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7850 /* always generate a completion event */
7851 sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7852
7853 /* eth segment */
7854 csum = 0;
7855 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
7856 csum |= MCX_SQE_L3_CSUM;
7857 if (m->m_pkthdr.csum_flags &
7858 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
7859 csum |= MCX_SQE_L4_CSUM;
7860 sqe->sqe_mss_csum = htobe32(csum);
7861 sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7862 if (vlan_has_tag(m)) {
7863 struct ether_vlan_header *evh;
7864 evh = (struct ether_vlan_header *)
7865 &sqe->sqe_inline_headers;
7866
7867 m_copydata(m, 0, ETHER_HDR_LEN, evh);
7868 evh->evl_proto = evh->evl_encap_proto;
7869 evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7870 evh->evl_tag = htons(vlan_get_tag(m));
7871 m_adj(m, ETHER_HDR_LEN);
7872 } else {
7873 m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7874 sqe->sqe_inline_headers);
7875 m_adj(m, MCX_SQ_INLINE_SIZE);
7876 }
7877
7878 if (mcx_load_mbuf(sc, ms, m) != 0) {
7879 m_freem(m);
7880 if_statinc(ifp, if_oerrors);
7881 continue;
7882 }
7883 bf = (uint64_t *)sqe;
7884
7885 if (ifp->if_bpf != NULL)
7886 bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
7887 MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
7888
7889 map = ms->ms_map;
7890 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7891 BUS_DMASYNC_PREWRITE);
7892
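/*
 * the descriptor count is in 16-byte units and covers the whole
 * WQE: the ctrl and eth segments (with the inline headers) account
 * for 3 units, plus one unit per DMA segment
 */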
7893 sqe->sqe_ds_sq_num =
7894 htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7895 (map->dm_nsegs + 3));
7896
7897 /* data segments - the first WQE slot only has room for one */
7898 sqs = sqe->sqe_segs;
7899 seg = 0;
7900 nseg = 1;
7901 for (i = 0; i < map->dm_nsegs; i++) {
7902 if (seg == nseg) {
7903 /* next slot */
7904 idx++;
7905 if (idx == (1 << MCX_LOG_SQ_SIZE))
7906 idx = 0;
7907 tx->tx_prod++;
7908 used++;
7909
7910 sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7911 seg = 0;
7912 nseg = MCX_SQ_SEGS_PER_SLOT;
7913 }
7914 sqs[seg].sqs_byte_count =
7915 htobe32(map->dm_segs[i].ds_len);
7916 sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7917 sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7918 seg++;
7919 }
7920
7921 idx++;
7922 if (idx == (1 << MCX_LOG_SQ_SIZE))
7923 idx = 0;
7924 tx->tx_prod++;
7925 used++;
7926 }
7927
7928 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7929 0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7930
7931 if (used) {
7932 bus_size_t blueflame;
7933
7934 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7935 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7936 be32enc(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7937 tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7938 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7939 tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7940
7941 /*
7942 * write the first 64 bits of the last sqe we produced
7943 * to the blue flame buffer
7944 */
7945
7946 blueflame = bf_base + tx->tx_bf_offset;
7947 bus_space_write_8(sc->sc_memt, sc->sc_memh,
7948 blueflame, *bf);
7949 mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7950
7951 /* next write goes to the other buffer */
7952 tx->tx_bf_offset ^= sc->sc_bf_size;
7953 }
7954 }
7955
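/*
 * three ways into the transmit path: mcx_start() drains the
 * traditional if_snd queue onto ring 0; mcx_transmit() picks a ring
 * based on the current CPU and queues onto its pcq; and
 * mcx_deferred_transmit() is the softint that drains a pcq whose
 * ring lock was contended in mcx_transmit().
 */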
7956 static void
7957 mcx_start(struct ifnet *ifp)
7958 {
7959 struct mcx_softc *sc = ifp->if_softc;
7960 /* mcx_start() always uses TX ring[0] */
7961 struct mcx_tx *tx = &sc->sc_queues[0].q_tx;
7962
7963 mutex_enter(&tx->tx_lock);
7964 if (!ISSET(ifp->if_flags, IFF_OACTIVE)) {
7965 mcx_send_common_locked(ifp, tx, false);
7966 }
7967 mutex_exit(&tx->tx_lock);
7968 }
7969
7970 static int
7971 mcx_transmit(struct ifnet *ifp, struct mbuf *m)
7972 {
7973 struct mcx_softc *sc = ifp->if_softc;
7974 struct mcx_tx *tx;
7975
7976 tx = &sc->sc_queues[cpu_index(curcpu()) % sc->sc_nqueues].q_tx;
7977 if (__predict_false(!pcq_put(tx->tx_pcq, m))) {
7978 m_freem(m);
7979 return ENOBUFS;
7980 }
7981
7982 if (mutex_tryenter(&tx->tx_lock)) {
7983 mcx_send_common_locked(ifp, tx, true);
7984 mutex_exit(&tx->tx_lock);
7985 } else {
7986 softint_schedule(tx->tx_softint);
7987 }
7988
7989 return 0;
7990 }
7991
7992 static void
7993 mcx_deferred_transmit(void *arg)
7994 {
7995 struct mcx_tx *tx = arg;
7996 struct mcx_softc *sc = tx->tx_softc;
7997 struct ifnet *ifp = &sc->sc_ec.ec_if;
7998
7999 mutex_enter(&tx->tx_lock);
8000 if (pcq_peek(tx->tx_pcq) != NULL) {
8001 mcx_send_common_locked(ifp, tx, true);
8002 }
8003 mutex_exit(&tx->tx_lock);
8004 }
8005
8006
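/*
 * media handling: the PTYS register reports capability, admin and
 * operational protocols as bitmasks whose bit positions index
 * mcx_eth_cap_map, which maps each protocol bit to an ifmedia
 * subtype and a baudrate.
 */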
8007 static void
8008 mcx_media_add_types(struct mcx_softc *sc)
8009 {
8010 struct mcx_reg_ptys ptys;
8011 int i;
8012 uint32_t proto_cap;
8013
8014 memset(&ptys, 0, sizeof(ptys));
8015 ptys.rp_local_port = 1;
8016 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8017 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8018 sizeof(ptys)) != 0) {
8019 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
8020 return;
8021 }
8022
8023 proto_cap = be32toh(ptys.rp_eth_proto_cap);
8024 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8025 const struct mcx_eth_proto_capability *cap;
8026 if (!ISSET(proto_cap, 1U << i))
8027 continue;
8028
8029 cap = &mcx_eth_cap_map[i];
8030 if (cap->cap_media == 0)
8031 continue;
8032
8033 ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
8034 }
8035 }
8036
8037 static void
8038 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
8039 {
8040 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8041 struct mcx_reg_ptys ptys;
8042 int i;
8043 uint32_t proto_oper;
8044 uint64_t media_oper;
8045
8046 memset(&ptys, 0, sizeof(ptys));
8047 ptys.rp_local_port = 1;
8048 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8049
8050 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8051 sizeof(ptys)) != 0) {
8052 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
8053 return;
8054 }
8055
8056 proto_oper = be32toh(ptys.rp_eth_proto_oper);
8057
8058 media_oper = 0;
8059
8060 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8061 const struct mcx_eth_proto_capability *cap;
8062 if (!ISSET(proto_oper, 1U << i))
8063 continue;
8064
8065 cap = &mcx_eth_cap_map[i];
8066
8067 if (cap->cap_media != 0)
8068 media_oper = cap->cap_media;
8069 }
8070
8071 ifmr->ifm_status = IFM_AVALID;
8072 if (proto_oper != 0) {
8073 ifmr->ifm_status |= IFM_ACTIVE;
8074 ifmr->ifm_active = IFM_ETHER | IFM_FDX | IFM_AUTO | media_oper;
8075 /* txpause, rxpause, duplex? */
8076 }
8077 }
8078
8079 static int
8080 mcx_media_change(struct ifnet *ifp)
8081 {
8082 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8083 struct mcx_reg_ptys ptys;
8084 struct mcx_reg_paos paos;
8085 uint32_t media;
8086 int i, error;
8087
8088 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
8089 return EINVAL;
8090
8091 error = 0;
8092
8093 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
8094 /* read ptys to get supported media */
8095 memset(&ptys, 0, sizeof(ptys));
8096 ptys.rp_local_port = 1;
8097 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8098 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
8099 &ptys, sizeof(ptys)) != 0) {
8100 printf("%s: unable to read port type/speed\n",
8101 DEVNAME(sc));
8102 return EIO;
8103 }
8104
8105 media = be32toh(ptys.rp_eth_proto_cap);
8106 } else {
8107 /* map media type */
8108 media = 0;
8109 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8110 const struct mcx_eth_proto_capability *cap;
8111
8112 cap = &mcx_eth_cap_map[i];
8113 if (cap->cap_media ==
8114 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8115 media = (1U << i);
8116 break;
8117 }
8118 }
8119 }
8120
8121 /* disable the port */
8122 memset(&paos, 0, sizeof(paos));
8123 paos.rp_local_port = 1;
8124 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
8125 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8126 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8127 sizeof(paos)) != 0) {
8128 printf("%s: unable to set port state to down\n", DEVNAME(sc));
8129 return EIO;
8130 }
8131
8132 memset(&ptys, 0, sizeof(ptys));
8133 ptys.rp_local_port = 1;
8134 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8135 ptys.rp_eth_proto_admin = htobe32(media);
8136 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
8137 sizeof(ptys)) != 0) {
8138 printf("%s: unable to set port media type/speed\n",
8139 DEVNAME(sc));
8140 error = EIO;
8141 }
8142
8143 /* re-enable the port to start negotiation */
8144 memset(&paos, 0, sizeof(paos));
8145 paos.rp_local_port = 1;
8146 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
8147 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8148 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8149 sizeof(paos)) != 0) {
8150 printf("%s: unable to set port state to up\n", DEVNAME(sc));
8151 error = EIO;
8152 }
8153
8154 return error;
8155 }
8156
8157 static void
8158 mcx_port_change(struct work *wk, void *xsc)
8159 {
8160 struct mcx_softc *sc = xsc;
8161 struct ifnet *ifp = &sc->sc_ec.ec_if;
8162 struct mcx_reg_ptys ptys = {
8163 .rp_local_port = 1,
8164 .rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8165 };
8166 int link_state = LINK_STATE_DOWN;
8167
8168 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8169 sizeof(ptys)) == 0) {
8170 uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
8171 uint64_t baudrate = 0;
8172 unsigned int i;
8173
8174 if (proto_oper != 0)
8175 link_state = LINK_STATE_UP;
8176
8177 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
8178 const struct mcx_eth_proto_capability *cap;
8179 if (!ISSET(proto_oper, 1U << i))
8180 continue;
8181
8182 cap = &mcx_eth_cap_map[i];
8183 if (cap->cap_baudrate == 0)
8184 continue;
8185
8186 baudrate = cap->cap_baudrate;
8187 break;
8188 }
8189
8190 ifp->if_baudrate = baudrate;
8191 }
8192
8193 if (link_state != ifp->if_link_state) {
8194 if_link_state_change(ifp, link_state);
8195 }
8196 }
8197
8198
8199 static inline uint32_t
8200 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8201 {
8202 uint32_t word;
8203
8204 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
8205
8206 return (be32toh(word));
8207 }
8208
8209 static inline void
8210 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8211 {
8212 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8213 }
8214
8215 static inline void
8216 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8217 {
8218 #ifndef __NetBSD__
8219 bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8220 #endif
8221 }
8222
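/*
 * read the 64-bit free-running internal timer.  the halves live in
 * separate 32-bit registers, so re-read the high word until it is
 * stable to avoid a torn value when the low word wraps between the
 * two reads.
 */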
8223 static uint64_t
8224 mcx_timer(struct mcx_softc *sc)
8225 {
8226 uint32_t hi, lo, ni;
8227
8228 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8229 for (;;) {
8230 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8231 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8232 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8233
8234 if (ni == hi)
8235 break;
8236
8237 hi = ni;
8238 }
8239
8240 return (((uint64_t)hi << 32) | (uint64_t)lo);
8241 }
8242
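/*
 * mcx_dmamem: coherent, single-segment DMA memory, mapped and loaded
 * once and kept for the lifetime of the object (command mailboxes,
 * queues, doorbells).
 */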
8243 static int
8244 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8245 bus_size_t size, u_int align)
8246 {
8247 mxm->mxm_size = size;
8248
8249 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8250 mxm->mxm_size, 0,
8251 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
8252 &mxm->mxm_map) != 0)
8253 return (1);
8254 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8255 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8256 BUS_DMA_WAITOK) != 0)
8257 goto destroy;
8258 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8259 mxm->mxm_size, &mxm->mxm_kva,
8260 BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
8261 goto free;
8262 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8263 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8264 goto unmap;
8265
8266 mcx_dmamem_zero(mxm);
8267
8268 return (0);
8269 unmap:
8270 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8271 free:
8272 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8273 destroy:
8274 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8275 return (1);
8276 }
8277
8278 static void
8279 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8280 {
8281 memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8282 }
8283
8284 static void
8285 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8286 {
8287 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8288 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8289 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8290 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8291 }
8292
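/*
 * mcx_hwmem: pages handed over to the device firmware.  unlike
 * mcx_dmamem this may be built from many discontiguous MCX_PAGE_SIZE
 * segments, so the segment list is kept for bus_dmamap_load_raw()
 * and for freeing later.
 */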
8293 static int
8294 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8295 {
8296 bus_dma_segment_t *segs;
8297 bus_size_t len = pages * MCX_PAGE_SIZE;
8298 size_t seglen;
8299
8300 segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
8301 seglen = sizeof(*segs) * pages;
8302
8303 if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8304 segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
8305 goto free_segs;
8306
8307 if (mhm->mhm_seg_count < pages) {
8308 size_t nseglen;
8309
8310 mhm->mhm_segs = kmem_alloc(
8311 sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
8312
8313 nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8314
8315 memcpy(mhm->mhm_segs, segs, nseglen);
8316
8317 kmem_free(segs, seglen);
8318
8319 segs = mhm->mhm_segs;
8320 seglen = nseglen;
8321 } else
8322 mhm->mhm_segs = segs;
8323
8324 if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8325 MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
8326 &mhm->mhm_map) != 0)
8327 goto free_dmamem;
8328
8329 if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8330 mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8331 goto destroy;
8332
8333 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8334 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8335
8336 mhm->mhm_npages = pages;
8337
8338 return (0);
8339
8340 destroy:
8341 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8342 free_dmamem:
8343 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8344 free_segs:
8345 kmem_free(segs, seglen);
8346 mhm->mhm_segs = NULL;
8347
8348 return (-1);
8349 }
8350
8351 static void
8352 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8353 {
8354 if (mhm->mhm_npages == 0)
8355 return;
8356
8357 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8358 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8359
8360 bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8361 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8362 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8363 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8364
8365 mhm->mhm_npages = 0;
8366 }
8367
8368 #if NKSTAT > 0
8369 struct mcx_ppcnt {
8370 char name[KSTAT_KV_NAMELEN];
8371 enum kstat_kv_unit unit;
8372 };
8373
8374 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8375 { "Good Tx", KSTAT_KV_U_PACKETS, },
8376 { "Good Rx", KSTAT_KV_U_PACKETS, },
8377 { "FCS errs", KSTAT_KV_U_PACKETS, },
8378 { "Alignment Errs", KSTAT_KV_U_PACKETS, },
8379 { "Good Tx", KSTAT_KV_U_BYTES, },
8380 { "Good Rx", KSTAT_KV_U_BYTES, },
8381 { "Multicast Tx", KSTAT_KV_U_PACKETS, },
8382 { "Broadcast Tx", KSTAT_KV_U_PACKETS, },
8383 { "Multicast Rx", KSTAT_KV_U_PACKETS, },
8384 { "Broadcast Rx", KSTAT_KV_U_PACKETS, },
8385 { "In Range Len", KSTAT_KV_U_PACKETS, },
8386 { "Out Of Range Len", KSTAT_KV_U_PACKETS, },
8387 { "Frame Too Long", KSTAT_KV_U_PACKETS, },
8388 { "Symbol Errs", KSTAT_KV_U_PACKETS, },
8389 { "MAC Ctrl Tx", KSTAT_KV_U_PACKETS, },
8390 { "MAC Ctrl Rx", KSTAT_KV_U_PACKETS, },
8391 { "MAC Ctrl Unsup", KSTAT_KV_U_PACKETS, },
8392 { "Pause Rx", KSTAT_KV_U_PACKETS, },
8393 { "Pause Tx", KSTAT_KV_U_PACKETS, },
8394 };
8395 CTASSERT(__arraycount(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8396
8397 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8398 { "Rx Bytes", KSTAT_KV_U_BYTES, },
8399 { "Rx Unicast", KSTAT_KV_U_PACKETS, },
8400 { "Rx Discards", KSTAT_KV_U_PACKETS, },
8401 { "Rx Errors", KSTAT_KV_U_PACKETS, },
8402 { "Rx Unknown Proto", KSTAT_KV_U_PACKETS, },
8403 { "Tx Bytes", KSTAT_KV_U_BYTES, },
8404 { "Tx Unicast", KSTAT_KV_U_PACKETS, },
8405 { "Tx Discards", KSTAT_KV_U_PACKETS, },
8406 { "Tx Errors", KSTAT_KV_U_PACKETS, },
8407 { "Rx Multicast", KSTAT_KV_U_PACKETS, },
8408 { "Rx Broadcast", KSTAT_KV_U_PACKETS, },
8409 { "Tx Multicast", KSTAT_KV_U_PACKETS, },
8410 { "Tx Broadcast", KSTAT_KV_U_PACKETS, },
8411 };
8412 CTASSERT(__arraycount(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8413
8414 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8415 { "Drop Events", KSTAT_KV_U_PACKETS, },
8416 { "Octets", KSTAT_KV_U_BYTES, },
8417 { "Packets", KSTAT_KV_U_PACKETS, },
8418 { "Broadcasts", KSTAT_KV_U_PACKETS, },
8419 { "Multicasts", KSTAT_KV_U_PACKETS, },
8420 { "CRC Align Errs", KSTAT_KV_U_PACKETS, },
8421 { "Undersize", KSTAT_KV_U_PACKETS, },
8422 { "Oversize", KSTAT_KV_U_PACKETS, },
8423 { "Fragments", KSTAT_KV_U_PACKETS, },
8424 { "Jabbers", KSTAT_KV_U_PACKETS, },
8425 { "Collisions", KSTAT_KV_U_NONE, },
8426 { "64B", KSTAT_KV_U_PACKETS, },
8427 { "65-127B", KSTAT_KV_U_PACKETS, },
8428 { "128-255B", KSTAT_KV_U_PACKETS, },
8429 { "256-511B", KSTAT_KV_U_PACKETS, },
8430 { "512-1023B", KSTAT_KV_U_PACKETS, },
8431 { "1024-1518B", KSTAT_KV_U_PACKETS, },
8432 { "1519-2047B", KSTAT_KV_U_PACKETS, },
8433 { "2048-4095B", KSTAT_KV_U_PACKETS, },
8434 { "4096-8191B", KSTAT_KV_U_PACKETS, },
8435 { "8192-10239B", KSTAT_KV_U_PACKETS, },
8436 };
8437 CTASSERT(__arraycount(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8438
8439 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8440 { "Alignment Errs", KSTAT_KV_U_PACKETS, },
8441 { "FCS Errs", KSTAT_KV_U_PACKETS, },
8442 { "Single Colls", KSTAT_KV_U_PACKETS, },
8443 { "Multiple Colls", KSTAT_KV_U_PACKETS, },
8444 { "SQE Test Errs", KSTAT_KV_U_NONE, },
8445 { "Deferred Tx", KSTAT_KV_U_PACKETS, },
8446 { "Late Colls", KSTAT_KV_U_NONE, },
8447 { "Exess Colls", KSTAT_KV_U_NONE, },
8448 { "Int MAC Tx Errs", KSTAT_KV_U_PACKETS, },
8449 { "CSM Sense Errs", KSTAT_KV_U_NONE, },
8450 { "Too Long", KSTAT_KV_U_PACKETS, },
8451 { "Int MAC Rx Errs", KSTAT_KV_U_PACKETS, },
8452 { "Symbol Errs", KSTAT_KV_U_NONE, },
8453 { "Unknown Control", KSTAT_KV_U_PACKETS, },
8454 { "Pause Rx", KSTAT_KV_U_PACKETS, },
8455 { "Pause Tx", KSTAT_KV_U_PACKETS, },
8456 };
8457 CTASSERT(__arraycount(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8458
8459 struct mcx_kstat_ppcnt {
8460 const char *ksp_name;
8461 const struct mcx_ppcnt *ksp_tpl;
8462 unsigned int ksp_n;
8463 uint8_t ksp_grp;
8464 };
8465
8466 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8467 .ksp_name = "ieee802.3",
8468 .ksp_tpl = mcx_ppcnt_ieee8023_tpl,
8469 .ksp_n = __arraycount(mcx_ppcnt_ieee8023_tpl),
8470 .ksp_grp = MCX_REG_PPCNT_GRP_IEEE8023,
8471 };
8472
8473 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8474 .ksp_name = "rfc2863",
8475 .ksp_tpl = mcx_ppcnt_rfc2863_tpl,
8476 .ksp_n = __arraycount(mcx_ppcnt_rfc2863_tpl),
8477 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2863,
8478 };
8479
8480 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8481 .ksp_name = "rfc2819",
8482 .ksp_tpl = mcx_ppcnt_rfc2819_tpl,
8483 .ksp_n = __arraycount(mcx_ppcnt_rfc2819_tpl),
8484 .ksp_grp = MCX_REG_PPCNT_GRP_RFC2819,
8485 };
8486
8487 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8488 .ksp_name = "rfc3635",
8489 .ksp_tpl = mcx_ppcnt_rfc3635_tpl,
8490 .ksp_n = __arraycount(mcx_ppcnt_rfc3635_tpl),
8491 .ksp_grp = MCX_REG_PPCNT_GRP_RFC3635,
8492 };
8493
8494 static int mcx_kstat_ppcnt_read(struct kstat *);
8495
8496 static void mcx_kstat_attach_tmps(struct mcx_softc *sc);
8497 static void mcx_kstat_attach_queues(struct mcx_softc *sc);
8498
8499 static struct kstat *
8500 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8501 const struct mcx_kstat_ppcnt *ksp)
8502 {
8503 struct kstat *ks;
8504 struct kstat_kv *kvs;
8505 unsigned int i;
8506
8507 ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8508 if (ks == NULL)
8509 return (NULL);
8510
8511 kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8512 M_DEVBUF, M_WAITOK);
8513
8514 for (i = 0; i < ksp->ksp_n; i++) {
8515 const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8516
8517 kstat_kv_unit_init(&kvs[i], tpl->name,
8518 KSTAT_KV_T_COUNTER64, tpl->unit);
8519 }
8520
8521 ks->ks_softc = sc;
8522 ks->ks_ptr = (void *)ksp;
8523 ks->ks_data = kvs;
8524 ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8525 ks->ks_read = mcx_kstat_ppcnt_read;
8526
8527 kstat_install(ks);
8528
8529 return (ks);
8530 }
8531
8532 static void
8533 mcx_kstat_attach(struct mcx_softc *sc)
8534 {
8535 sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8536 &mcx_kstat_ppcnt_ieee8023);
8537 sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8538 &mcx_kstat_ppcnt_rfc2863);
8539 sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8540 &mcx_kstat_ppcnt_rfc2819);
8541 sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8542 &mcx_kstat_ppcnt_rfc3635);
8543
8544 mcx_kstat_attach_tmps(sc);
8545 mcx_kstat_attach_queues(sc);
8546 }
8547
8548 static int
8549 mcx_kstat_ppcnt_read(struct kstat *ks)
8550 {
8551 struct mcx_softc *sc = ks->ks_softc;
8552 struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8553 struct mcx_reg_ppcnt ppcnt = {
8554 .ppcnt_grp = ksp->ksp_grp,
8555 .ppcnt_local_port = 1,
8556 };
8557 struct kstat_kv *kvs = ks->ks_data;
8558 uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8559 unsigned int i;
8560 int rv;
8561
8562 KERNEL_LOCK(); /* XXX */
8563 rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8564 &ppcnt, sizeof(ppcnt));
8565 KERNEL_UNLOCK();
8566 if (rv != 0)
8567 return (EIO);
8568
8569 nanouptime(&ks->ks_updated);
8570
8571 for (i = 0; i < ksp->ksp_n; i++)
8572 kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8573
8574 return (0);
8575 }
8576
8577 struct mcx_kstat_mtmp {
8578 struct kstat_kv ktmp_name;
8579 struct kstat_kv ktmp_temperature;
8580 struct kstat_kv ktmp_threshold_lo;
8581 struct kstat_kv ktmp_threshold_hi;
8582 };
8583
8584 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8585 KSTAT_KV_INITIALIZER("name", KSTAT_KV_T_ISTR),
8586 KSTAT_KV_INITIALIZER("temperature", KSTAT_KV_T_TEMP),
8587 KSTAT_KV_INITIALIZER("lo threshold", KSTAT_KV_T_TEMP),
8588 KSTAT_KV_INITIALIZER("hi threshold", KSTAT_KV_T_TEMP),
8589 };
8590
8591 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8592
8593 static int mcx_kstat_mtmp_read(struct kstat *);
8594
8595 static void
8596 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8597 {
8598 struct kstat *ks;
8599 struct mcx_reg_mcam mcam;
8600 struct mcx_reg_mtcap mtcap;
8601 struct mcx_kstat_mtmp *ktmp;
8602 uint64_t map;
8603 unsigned int i, n;
8604
8605 memset(&mtcap, 0, sizeof(mtcap));
8606 memset(&mcam, 0, sizeof(mcam));
8607
8608 if (sc->sc_mcam_reg == 0) {
8609 /* no management capabilities */
8610 return;
8611 }
8612
8613 if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8614 &mcam, sizeof(mcam)) != 0) {
8615 /* unable to check management capabilities? */
8616 return;
8617 }
8618
8619 if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8620 MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8621 /* no sensor map */
8622 return;
8623 }
8624
8625 if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8626 &mtcap, sizeof(mtcap)) != 0) {
8627 /* unable to find temperature sensors */
8628 return;
8629 }
8630
8631 sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8632 sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8633 sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8634
8635 n = 0;
8636 map = bemtoh64(&mtcap.mtcap_sensor_map);
8637 for (i = 0; i < sizeof(map) * NBBY; i++) {
8638 if (!ISSET(map, (1ULL << i)))
8639 continue;
8640
8641 ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8642 KSTAT_T_KV, 0);
8643 if (ks == NULL) {
8644 /* unable to attach temperature sensor %u, i */
8645 continue;
8646 }
8647
8648 ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8649 *ktmp = mcx_kstat_mtmp_tpl;
8650
8651 ks->ks_data = ktmp;
8652 ks->ks_datalen = sizeof(*ktmp);
8653 TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8654 ks->ks_read = mcx_kstat_mtmp_read;
8655
8656 ks->ks_softc = sc;
8657 kstat_install(ks);
8658
8659 sc->sc_kstat_mtmp[n++] = ks;
8660 if (n >= sc->sc_kstat_mtmp_count)
8661 break;
8662 }
8663 }
8664
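/*
 * MTMP reports temperatures as signed 16-bit values in 0.125 degC
 * units; convert to microkelvin for kstat.  e.g. a raw reading of
 * 496 is 496 * 0.125 = 62 degC = 62000000 uC, which becomes
 * 62000000 + 273150000 = 335150000 uK.
 */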
8665 static uint64_t
8666 mcx_tmp_to_uK(uint16_t *t)
8667 {
8668 int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8669 mt *= 1000000 / 8; /* convert to uC */
8670 mt += 273150000; /* convert to uK */
8671
8672 return (mt);
8673 }
8674
8675 static int
8676 mcx_kstat_mtmp_read(struct kstat *ks)
8677 {
8678 struct mcx_softc *sc = ks->ks_softc;
8679 struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8680 struct mcx_reg_mtmp mtmp;
8681 int rv;
8682 struct timeval updated;
8683
8684 TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8685
8686 if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8687 return (0);
8688
8689 memset(&mtmp, 0, sizeof(mtmp));
8690 htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8691
8692 KERNEL_LOCK(); /* XXX */
8693 rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8694 &mtmp, sizeof(mtmp));
8695 KERNEL_UNLOCK();
8696 if (rv != 0)
8697 return (EIO);
8698
8699 memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8700 sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8701 memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8702 mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8703 kstat_kv_temp(&ktmp->ktmp_temperature) =
8704 mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8705 kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8706 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8707 kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8708 mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8709
8710 TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8711
8712 return (0);
8713 }
8714
8715 struct mcx_queuestat {
8716 char name[KSTAT_KV_NAMELEN];
8717 enum kstat_kv_type type;
8718 };
8719
8720 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8721 { "RQ SW prod", KSTAT_KV_T_COUNTER64 },
8722 { "RQ HW prod", KSTAT_KV_T_COUNTER64 },
8723 { "RQ HW cons", KSTAT_KV_T_COUNTER64 },
8724 { "RQ HW state", KSTAT_KV_T_ISTR },
8725
8726 { "SQ SW prod", KSTAT_KV_T_COUNTER64 },
8727 { "SQ SW cons", KSTAT_KV_T_COUNTER64 },
8728 { "SQ HW prod", KSTAT_KV_T_COUNTER64 },
8729 { "SQ HW cons", KSTAT_KV_T_COUNTER64 },
8730 { "SQ HW state", KSTAT_KV_T_ISTR },
8731
8732 { "CQ SW cons", KSTAT_KV_T_COUNTER64 },
8733 { "CQ HW prod", KSTAT_KV_T_COUNTER64 },
8734 { "CQ HW cons", KSTAT_KV_T_COUNTER64 },
8735 { "CQ HW notify", KSTAT_KV_T_COUNTER64 },
8736 { "CQ HW solicit", KSTAT_KV_T_COUNTER64 },
8737 { "CQ HW status", KSTAT_KV_T_ISTR },
8738 { "CQ HW state", KSTAT_KV_T_ISTR },
8739
8740 { "EQ SW cons", KSTAT_KV_T_COUNTER64 },
8741 { "EQ HW prod", KSTAT_KV_T_COUNTER64 },
8742 { "EQ HW cons", KSTAT_KV_T_COUNTER64 },
8743 { "EQ HW status", KSTAT_KV_T_ISTR },
8744 { "EQ HW state", KSTAT_KV_T_ISTR },
8745 };
8746
8747 static int mcx_kstat_queue_read(struct kstat *);
8748
8749 static void
8750 mcx_kstat_attach_queues(struct mcx_softc *sc)
8751 {
8752 struct kstat *ks;
8753 struct kstat_kv *kvs;
8754 int q, i;
8755
8756 for (q = 0; q < sc->sc_nqueues; q++) {
8757 ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8758 KSTAT_T_KV, 0);
8759 if (ks == NULL) {
8760 /* unable to attach queue stats %u, q */
8761 continue;
8762 }
8763
8764 kvs = mallocarray(nitems(mcx_queue_kstat_tpl),
8765 sizeof(*kvs), M_DEVBUF, M_WAITOK);
8766
8767 for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) {
8768 const struct mcx_queuestat *tpl =
8769 &mcx_queue_kstat_tpl[i];
8770
8771 kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8772 }
8773
8774 ks->ks_softc = &sc->sc_queues[q];
8775 ks->ks_data = kvs;
8776 ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs);
8777 ks->ks_read = mcx_kstat_queue_read;
8778
8779 sc->sc_queues[q].q_kstat = ks;
8780 kstat_install(ks);
8781 }
8782 }
8783
8784 static int
8785 mcx_kstat_queue_read(struct kstat *ks)
8786 {
8787 struct mcx_queues *q = ks->ks_softc;
8788 struct mcx_softc *sc = q->q_sc;
8789 struct kstat_kv *kvs = ks->ks_data;
8790 union {
8791 struct mcx_rq_ctx rq;
8792 struct mcx_sq_ctx sq;
8793 struct mcx_cq_ctx cq;
8794 struct mcx_eq_ctx eq;
8795 } u;
8796 const char *text;
8797 int error = 0;
8798
8799 KERNEL_LOCK();
8800
8801 if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
8802 error = EIO;
8803 goto out;
8804 }
8805
8806 kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
8807 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
8808 kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
8809 switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
8810 MCX_RQ_CTX_STATE_SHIFT) {
8811 case MCX_RQ_CTX_STATE_RST:
8812 text = "RST";
8813 break;
8814 case MCX_RQ_CTX_STATE_RDY:
8815 text = "RDY";
8816 break;
8817 case MCX_RQ_CTX_STATE_ERR:
8818 text = "ERR";
8819 break;
8820 default:
8821 text = "unknown";
8822 break;
8823 }
8824 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8825 kvs++;
8826
8827 if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
8828 error = EIO;
8829 goto out;
8830 }
8831
8832 kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
8833 kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
8834 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
8835 kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
8836 switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
8837 MCX_SQ_CTX_STATE_SHIFT) {
8838 case MCX_SQ_CTX_STATE_RST:
8839 text = "RST";
8840 break;
8841 case MCX_SQ_CTX_STATE_RDY:
8842 text = "RDY";
8843 break;
8844 case MCX_SQ_CTX_STATE_ERR:
8845 text = "ERR";
8846 break;
8847 default:
8848 text = "unknown";
8849 break;
8850 }
8851 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8852 kvs++;
8853
8854 if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
8855 error = EIO;
8856 goto out;
8857 }
8858
8859 kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
8860 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
8861 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
8862 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
8863 kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);
8864
8865 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
8866 MCX_CQ_CTX_STATUS_SHIFT) {
8867 case MCX_CQ_CTX_STATUS_OK:
8868 text = "OK";
8869 break;
8870 case MCX_CQ_CTX_STATUS_OVERFLOW:
8871 text = "overflow";
8872 break;
8873 case MCX_CQ_CTX_STATUS_WRITE_FAIL:
8874 text = "write fail";
8875 break;
8876 default:
8877 text = "unknown";
8878 break;
8879 }
8880 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8881 kvs++;
8882
8883 switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
8884 MCX_CQ_CTX_STATE_SHIFT) {
8885 case MCX_CQ_CTX_STATE_SOLICITED:
8886 text = "solicited";
8887 break;
8888 case MCX_CQ_CTX_STATE_ARMED:
8889 text = "armed";
8890 break;
8891 case MCX_CQ_CTX_STATE_FIRED:
8892 text = "fired";
8893 break;
8894 default:
8895 text = "unknown";
8896 break;
8897 }
8898 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8899 kvs++;
8900
8901 if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
8902 error = EIO;
8903 goto out;
8904 }
8905
8906 kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
8907 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
8908 kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);
8909
8910 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
8911 MCX_EQ_CTX_STATUS_SHIFT) {
8912 case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
8913 text = "write fail";
8914 break;
8915 case MCX_EQ_CTX_STATUS_OK:
8916 text = "OK";
8917 break;
8918 default:
8919 text = "unknown";
8920 break;
8921 }
8922 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8923 kvs++;
8924
8925 switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
8926 MCX_EQ_CTX_STATE_SHIFT) {
8927 case MCX_EQ_CTX_STATE_ARMED:
8928 text = "armed";
8929 break;
8930 case MCX_EQ_CTX_STATE_FIRED:
8931 text = "fired";
8932 break;
8933 default:
8934 text = "unknown";
8935 break;
8936 }
8937 strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8938 kvs++;
8939
8940 nanouptime(&ks->ks_updated);
8941 out:
8942 KERNEL_UNLOCK();
8943 return (error);
8944 }
8945
8946 #endif /* NKSTAT > 0 */
8947
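/*
 * timecounter glue: expose the low 32 bits of the device's internal
 * timer, which ticks at sc_khz kHz, as a low-quality system
 * timecounter.
 */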
8948 static unsigned int
8949 mcx_timecounter_read(struct timecounter *tc)
8950 {
8951 struct mcx_softc *sc = tc->tc_priv;
8952
8953 return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
8954 }
8955
8956 static void
8957 mcx_timecounter_attach(struct mcx_softc *sc)
8958 {
8959 struct timecounter *tc = &sc->sc_timecounter;
8960
8961 tc->tc_get_timecount = mcx_timecounter_read;
8962 tc->tc_counter_mask = ~0U;
8963 tc->tc_frequency = sc->sc_khz * 1000;
8964 tc->tc_name = device_xname(sc->sc_dev);
8965 tc->tc_quality = -100;
8966 tc->tc_priv = sc;
8967
8968 tc_init(tc);
8969 }
8970