/*	$NetBSD: if_mcx.c,v 1.13 2020/04/24 12:58:42 jmcneill Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.44 2020/04/24 07:28:37 mestre Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE	1 */
/* #define CALLOUT_FLAGS	CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS	0
/* #endif */

#define MCX_MAX_NINTR		1

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR		PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER		0x0000
#define MCX_FW_VER_MAJOR(_v)	((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v)	((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER	0x0004
#define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff)
#define MCX_CMDIF(_v)		((_v) >> 16)
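
/*
 * Illustrative sketch (not a function in this file): the firmware
 * version is read from the init segment, with the major revision in
 * the low 16 bits of MCX_FW_VER and the minor in the high 16 bits;
 * the second word carries the subminor and the command interface
 * revision:
 *
 *	uint32_t fw = bus_space_read_4(sc->sc_memt, sc->sc_memh,
 *	    MCX_FW_VER);
 *	uint32_t sub = bus_space_read_4(sc->sc_memt, sc->sc_memh,
 *	    MCX_CMDIF_FW_SUBVER);
 *	printf("firmware %u.%u.%u, cmdif %u\n", MCX_FW_VER_MAJOR(fw),
 *	    MCX_FW_VER_MINOR(fw), MCX_FW_VER_SUBMINOR(sub),
 *	    MCX_CMDIF(sub));
 */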

#define MCX_ISSI		1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED	5

#define MCX_HARDMTU		9500

#define MCX_MAX_CQS		2	/* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE		6	/* one page */
#define MCX_LOG_CQ_SIZE		12
#define MCX_LOG_RQ_SIZE		10
#define MCX_LOG_SQ_SIZE		11

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD	50
#define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
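
/*
 * With MCX_LOG_CQ_SIZE 12, MCX_CQ_MOD_COUNTER works out to
 * ((1 << 11) * 9) / 10 = 1843 completions.
 */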

#define MCX_LOG_SQ_ENTRY_SIZE	6
#define MCX_SQ_ENTRY_MAX_SLOTS	4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS \
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
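
/*
 * A "slot" is one 64-byte send queue entry, so MCX_SQ_SEGS_PER_SLOT
 * is 64 / 16 = 4 and MCX_SQ_MAX_SEGMENTS is 1 + 3 * 4 = 13: the first
 * slot carries the control and ethernet segments plus one data
 * segment, and each additional slot carries four data segments.
 */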

#define MCX_LOG_FLOW_TABLE_SIZE	5
#define MCX_NUM_STATIC_FLOWS	4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE	18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET	0
#define MCX_CQ_DOORBELL_SIZE	16
#define MCX_RQ_DOORBELL_OFFSET	64
#define MCX_SQ_DOORBELL_OFFSET	64

#define MCX_WQ_DOORBELL_MASK	0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL	0x20
#define MCX_UAR_EQ_DOORBELL_ARM	0x40
#define MCX_UAR_EQ_DOORBELL	0x48
#define MCX_UAR_BF		0x800

#define MCX_CMDQ_ADDR_HI	0x0010
#define MCX_CMDQ_ADDR_LO	0x0014
#define MCX_CMDQ_ADDR_NMASK	0xfff
#define MCX_CMDQ_LOG_SIZE(_v)	((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)	((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK	(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL	0x0018

#define MCX_STATE		0x01fc
#define MCX_STATE_MASK		(1U << 31)
#define MCX_STATE_INITIALIZING	(1U << 31)
#define MCX_STATE_READY		(0 << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)

#define MCX_INTERNAL_TIMER	0x1000
#define MCX_INTERNAL_TIMER_H	0x1000
#define MCX_INTERNAL_TIMER_L	0x1004

#define MCX_CLEAR_INT		0x100c

#define MCX_REG_OP_WRITE	0
#define MCX_REG_OP_READ		1

#define MCX_REG_PMLP		0x5002
#define MCX_REG_PMTU		0x5003
#define MCX_REG_PTYS		0x5004
#define MCX_REG_PAOS		0x5006
#define MCX_REG_PFCC		0x5007
#define MCX_REG_PPCNT		0x5008
#define MCX_REG_MCIA		0x9014

#define MCX_ETHER_CAP_SGMII	0
#define MCX_ETHER_CAP_1000_KX	1
#define MCX_ETHER_CAP_10G_CX4	2
#define MCX_ETHER_CAP_10G_KX4	3
#define MCX_ETHER_CAP_10G_KR	4
#define MCX_ETHER_CAP_20G_KR2	5
#define MCX_ETHER_CAP_40G_CR4	6
#define MCX_ETHER_CAP_40G_KR4	7
#define MCX_ETHER_CAP_56G_R4	8
#define MCX_ETHER_CAP_10G_CR	12
#define MCX_ETHER_CAP_10G_SR	13
#define MCX_ETHER_CAP_10G_LR	14
#define MCX_ETHER_CAP_40G_SR4	15
#define MCX_ETHER_CAP_40G_LR4	16
#define MCX_ETHER_CAP_50G_SR2	18
#define MCX_ETHER_CAP_100G_CR4	20
#define MCX_ETHER_CAP_100G_SR4	21
#define MCX_ETHER_CAP_100G_KR4	22
#define MCX_ETHER_CAP_100G_LR4	23
#define MCX_ETHER_CAP_100_TX	24
#define MCX_ETHER_CAP_1000_T	25
#define MCX_ETHER_CAP_10G_T	26
#define MCX_ETHER_CAP_25G_CR	27
#define MCX_ETHER_CAP_25G_KR	28
#define MCX_ETHER_CAP_25G_SR	29
#define MCX_ETHER_CAP_50G_CR2	30
#define MCX_ETHER_CAP_50G_KR2	31

#define MCX_PAGE_SHIFT		12
#define MCX_PAGE_SIZE		(1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE		32

#define MCX_CMD_QUERY_HCA_CAP	0x100
#define MCX_CMD_QUERY_ADAPTER	0x101
#define MCX_CMD_INIT_HCA	0x102
#define MCX_CMD_TEARDOWN_HCA	0x103
#define MCX_CMD_ENABLE_HCA	0x104
#define MCX_CMD_DISABLE_HCA	0x105
#define MCX_CMD_QUERY_PAGES	0x107
#define MCX_CMD_MANAGE_PAGES	0x108
#define MCX_CMD_SET_HCA_CAP	0x109
#define MCX_CMD_QUERY_ISSI	0x10a
#define MCX_CMD_SET_ISSI	0x10b
#define MCX_CMD_SET_DRIVER_VERSION \
				0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
				0x203
#define MCX_CMD_CREATE_EQ	0x301
#define MCX_CMD_DESTROY_EQ	0x302
#define MCX_CMD_CREATE_CQ	0x400
#define MCX_CMD_DESTROY_CQ	0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
				0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
				0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS \
				0x770
#define MCX_CMD_ALLOC_PD	0x800
#define MCX_CMD_ALLOC_UAR	0x802
#define MCX_CMD_ACCESS_REG	0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
				0x816
#define MCX_CMD_CREATE_TIR	0x900
#define MCX_CMD_DESTROY_TIR	0x902
#define MCX_CMD_CREATE_SQ	0x904
#define MCX_CMD_MODIFY_SQ	0x905
#define MCX_CMD_DESTROY_SQ	0x906
#define MCX_CMD_QUERY_SQ	0x907
#define MCX_CMD_CREATE_RQ	0x908
#define MCX_CMD_MODIFY_RQ	0x909
#define MCX_CMD_DESTROY_RQ	0x90a
#define MCX_CMD_QUERY_RQ	0x90b
#define MCX_CMD_CREATE_TIS	0x912
#define MCX_CMD_DESTROY_TIS	0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT \
				0x92f
#define MCX_CMD_CREATE_FLOW_TABLE \
				0x930
#define MCX_CMD_DESTROY_FLOW_TABLE \
				0x931
#define MCX_CMD_QUERY_FLOW_TABLE \
				0x932
#define MCX_CMD_CREATE_FLOW_GROUP \
				0x933
#define MCX_CMD_DESTROY_FLOW_GROUP \
				0x934
#define MCX_CMD_QUERY_FLOW_GROUP \
				0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY \
				0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
				0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
				0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER \
				0x939
#define MCX_CMD_QUERY_FLOW_COUNTER \
				0x93b

#define MCX_QUEUE_STATE_RST	0
#define MCX_QUEUE_STATE_RDY	1
#define MCX_QUEUE_STATE_ERR	3

#define MCX_FLOW_TABLE_TYPE_RX	0
#define MCX_FLOW_TABLE_TYPE_TX	1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
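
/*
 * Illustrative sketch of how the ownership bit is intended to be
 * used (the polling loop here is an assumption, not a copy of the
 * driver's poll routine): software posts an entry, rings
 * MCX_CMDQ_DOORBELL, and waits for the hardware to hand the entry
 * back:
 *
 *	while ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
 *	    MCX_CQ_STATUS_OWN_HW)
 *		delay(1000);
 *
 * after which (cqe->cq_status & MCX_CQ_STATUS_MASK) holds the result.
 */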

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
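
/*
 * sizeof(struct mcx_cmdq_mailbox) is 576 bytes (512 bytes of data
 * plus 64 bytes of chaining metadata), so MCX_CMDQ_MAILBOX_SIZE
 * rounds each mailbox up to a whole 1k-aligned block.
 */
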
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH	(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN		0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
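
/*
 * 0x1000 / 512 = 8 mailboxes are needed to carry a full HCA
 * capability structure.
 */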

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
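
/*
 * Each use of __reserved__ below expands to a uniquely named member
 * (__reserved0, __reserved1, ...), so mcx_cap_device can declare its
 * many reserved fields without naming each one by hand.
 */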

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT		1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER		(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE		(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT	2
#define MCX_CQ_ENTRY_OPCODE_SHIFT	4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE	0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32	1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64	2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED	3

#define MCX_CQ_ENTRY_OPCODE_REQ		0
#define MCX_CQ_ENTRY_OPCODE_SEND	2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR	13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR	14
#define MCX_CQ_ENTRY_OPCODE_INVALID	15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t		db_update_ci;
	uint32_t		db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
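
/*
 * Illustrative sketch (the variable names sn and ci are made up):
 * arming the completion queue combines the arm sequence number, the
 * arm command and the current consumer index into db_arm_ci:
 *
 *	db->db_arm_ci = htobe32(
 *	    (sn << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
 *	    MCX_CQ_DOORBELL_ARM_CMD |
 *	    (ci & MCX_CQ_DOORBELL_ARM_CI_MASK));
 */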

struct mcx_wq_ctx {
	uint8_t			wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC		(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE	(1 << 3)
	uint8_t			wq_reserved0[5];
	uint16_t		wq_lwm;
	uint32_t		wq_pd;
	uint32_t		wq_uar_page;
	uint64_t		wq_doorbell;
	uint32_t		wq_hw_counter;
	uint32_t		wq_sw_counter;
	uint16_t		wq_log_stride;
	uint8_t			wq_log_page_sz;
	uint8_t			wq_log_size;
	uint8_t			wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY		(1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT		(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR	(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT	24
#define MCX_SQ_CTX_STATE_SHIFT		20
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT	16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT		8
#define MCX_SQE_WQE_OPCODE_NOP		0x00
#define MCX_SQE_WQE_OPCODE_SEND		0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT		8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT		24
#define MCX_SQE_SOLICITED_EVENT		0x02
#define MCX_SQE_CE_CQE_ON_ERR		0x00
#define MCX_SQE_CE_CQE_FIRST_ERR	0x04
#define MCX_SQE_CE_CQE_ALWAYS		0x08
#define MCX_SQE_CE_CQE_SOLICIT		0x0C
#define MCX_SQE_FM_NO_FENCE		0x00
#define MCX_SQE_FM_SMALL_FENCE		0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM			(1U << 31)
#define MCX_SQE_L3_CSUM			(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
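
/*
 * The 18 bytes of sqe_inline_headers match MCX_SQ_INLINE_SIZE:
 * enough, presumably, for a 14-byte ethernet header plus a 4-byte
 * VLAN tag to be copied into the entry itself.
 */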

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

1442 struct mcx_rq_ctx {
1443 uint32_t rq_flags;
1444 #define MCX_RQ_CTX_RLKEY (1U << 31)
1445 #define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
1446 #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
1447 #define MCX_RQ_CTX_STATE_SHIFT 20
1448 #define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
1449 uint32_t rq_user_index;
1450 uint32_t rq_cqn;
1451 uint32_t rq_reserved1;
1452 uint32_t rq_rmpn;
1453 uint32_t rq_reserved2[7];
1454 struct mcx_wq_ctx rq_wq;
1455 } __packed __aligned(4);
1456
1457 struct mcx_rq_entry {
1458 uint32_t rqe_byte_count;
1459 uint32_t rqe_lkey;
1460 uint64_t rqe_addr;
1461 } __packed __aligned(16);
1462
1463 struct mcx_cmd_create_rq_in {
1464 uint16_t cmd_opcode;
1465 uint8_t cmd_reserved0[4];
1466 uint16_t cmd_op_mod;
1467 uint8_t cmd_reserved1[8];
1468 } __packed __aligned(4);
1469
1470 struct mcx_cmd_create_rq_out {
1471 uint8_t cmd_status;
1472 uint8_t cmd_reserved0[3];
1473 uint32_t cmd_syndrome;
1474 uint32_t cmd_rqn;
1475 uint8_t cmd_reserved1[4];
1476 } __packed __aligned(4);
1477
1478 struct mcx_cmd_modify_rq_in {
1479 uint16_t cmd_opcode;
1480 uint8_t cmd_reserved0[4];
1481 uint16_t cmd_op_mod;
1482 uint32_t cmd_rq_state;
1483 uint8_t cmd_reserved1[4];
1484 } __packed __aligned(4);
1485
1486 struct mcx_cmd_modify_rq_mb_in {
1487 uint32_t cmd_modify_hi;
1488 uint32_t cmd_modify_lo;
1489 uint8_t cmd_reserved0[8];
1490 struct mcx_rq_ctx cmd_rq_ctx;
1491 } __packed __aligned(4);
1492
1493 struct mcx_cmd_modify_rq_out {
1494 uint8_t cmd_status;
1495 uint8_t cmd_reserved0[3];
1496 uint32_t cmd_syndrome;
1497 uint8_t cmd_reserved1[8];
1498 } __packed __aligned(4);
1499
1500 struct mcx_cmd_destroy_rq_in {
1501 uint16_t cmd_opcode;
1502 uint8_t cmd_reserved0[4];
1503 uint16_t cmd_op_mod;
1504 uint32_t cmd_rqn;
1505 uint8_t cmd_reserved1[4];
1506 } __packed __aligned(4);
1507
1508 struct mcx_cmd_destroy_rq_out {
1509 uint8_t cmd_status;
1510 uint8_t cmd_reserved0[3];
1511 uint32_t cmd_syndrome;
1512 uint8_t cmd_reserved1[8];
1513 } __packed __aligned(4);
1514
1515 struct mcx_cmd_create_flow_table_in {
1516 uint16_t cmd_opcode;
1517 uint8_t cmd_reserved0[4];
1518 uint16_t cmd_op_mod;
1519 uint8_t cmd_reserved1[8];
1520 } __packed __aligned(4);
1521
1522 struct mcx_flow_table_ctx {
1523 uint8_t ft_miss_action;
1524 uint8_t ft_level;
1525 uint8_t ft_reserved0;
1526 uint8_t ft_log_size;
1527 uint32_t ft_table_miss_id;
1528 uint8_t ft_reserved1[28];
1529 } __packed __aligned(4);
1530
1531 struct mcx_cmd_create_flow_table_mb_in {
1532 uint8_t cmd_table_type;
1533 uint8_t cmd_reserved0[7];
1534 struct mcx_flow_table_ctx cmd_ctx;
1535 } __packed __aligned(4);
1536
1537 struct mcx_cmd_create_flow_table_out {
1538 uint8_t cmd_status;
1539 uint8_t cmd_reserved0[3];
1540 uint32_t cmd_syndrome;
1541 uint32_t cmd_table_id;
1542 uint8_t cmd_reserved1[4];
1543 } __packed __aligned(4);
1544
1545 struct mcx_cmd_destroy_flow_table_in {
1546 uint16_t cmd_opcode;
1547 uint8_t cmd_reserved0[4];
1548 uint16_t cmd_op_mod;
1549 uint8_t cmd_reserved1[8];
1550 } __packed __aligned(4);
1551
1552 struct mcx_cmd_destroy_flow_table_mb_in {
1553 uint8_t cmd_table_type;
1554 uint8_t cmd_reserved0[3];
1555 uint32_t cmd_table_id;
1556 uint8_t cmd_reserved1[40];
1557 } __packed __aligned(4);
1558
1559 struct mcx_cmd_destroy_flow_table_out {
1560 uint8_t cmd_status;
1561 uint8_t cmd_reserved0[3];
1562 uint32_t cmd_syndrome;
1563 uint8_t cmd_reserved1[8];
1564 } __packed __aligned(4);
1565
1566 struct mcx_cmd_set_flow_table_root_in {
1567 uint16_t cmd_opcode;
1568 uint8_t cmd_reserved0[4];
1569 uint16_t cmd_op_mod;
1570 uint8_t cmd_reserved1[8];
1571 } __packed __aligned(4);
1572
1573 struct mcx_cmd_set_flow_table_root_mb_in {
1574 uint8_t cmd_table_type;
1575 uint8_t cmd_reserved0[3];
1576 uint32_t cmd_table_id;
1577 uint8_t cmd_reserved1[56];
1578 } __packed __aligned(4);
1579
1580 struct mcx_cmd_set_flow_table_root_out {
1581 uint8_t cmd_status;
1582 uint8_t cmd_reserved0[3];
1583 uint32_t cmd_syndrome;
1584 uint8_t cmd_reserved1[8];
1585 } __packed __aligned(4);
1586
1587 struct mcx_flow_match {
1588 /* outer headers */
1589 uint8_t mc_src_mac[6];
1590 uint16_t mc_ethertype;
1591 uint8_t mc_dest_mac[6];
1592 uint16_t mc_first_vlan;
1593 uint8_t mc_ip_proto;
1594 uint8_t mc_ip_dscp_ecn;
1595 uint8_t mc_vlan_flags;
1596 uint8_t mc_tcp_flags;
1597 uint16_t mc_tcp_sport;
1598 uint16_t mc_tcp_dport;
1599 uint32_t mc_reserved0;
1600 uint16_t mc_udp_sport;
1601 uint16_t mc_udp_dport;
1602 uint8_t mc_src_ip[16];
1603 uint8_t mc_dest_ip[16];
1604
1605 /* misc parameters */
1606 uint8_t mc_reserved1[8];
1607 uint16_t mc_second_vlan;
1608 uint8_t mc_reserved2[2];
1609 uint8_t mc_second_vlan_flags;
1610 uint8_t mc_reserved3[15];
1611 uint32_t mc_outer_ipv6_flow_label;
1612 uint8_t mc_reserved4[32];
1613
1614 uint8_t mc_reserved[384];
1615 } __packed __aligned(4);
1616
1617 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1618
1619 struct mcx_cmd_create_flow_group_in {
1620 uint16_t cmd_opcode;
1621 uint8_t cmd_reserved0[4];
1622 uint16_t cmd_op_mod;
1623 uint8_t cmd_reserved1[8];
1624 } __packed __aligned(4);
1625
1626 struct mcx_cmd_create_flow_group_mb_in {
1627 uint8_t cmd_table_type;
1628 uint8_t cmd_reserved0[3];
1629 uint32_t cmd_table_id;
1630 uint8_t cmd_reserved1[4];
1631 uint32_t cmd_start_flow_index;
1632 uint8_t cmd_reserved2[4];
1633 uint32_t cmd_end_flow_index;
1634 uint8_t cmd_reserved3[23];
1635 uint8_t cmd_match_criteria_enable;
1636 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1637 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1638 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
1639 struct mcx_flow_match cmd_match_criteria;
1640 uint8_t cmd_reserved4[448];
1641 } __packed __aligned(4);
1642
1643 struct mcx_cmd_create_flow_group_out {
1644 uint8_t cmd_status;
1645 uint8_t cmd_reserved0[3];
1646 uint32_t cmd_syndrome;
1647 uint32_t cmd_group_id;
1648 uint8_t cmd_reserved1[4];
1649 } __packed __aligned(4);
1650
1651 struct mcx_flow_ctx {
1652 uint8_t fc_reserved0[4];
1653 uint32_t fc_group_id;
1654 uint32_t fc_flow_tag;
1655 uint32_t fc_action;
1656 #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
1657 #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
1658 #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
1659 #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
1660 uint32_t fc_dest_list_size;
1661 uint32_t fc_counter_list_size;
1662 uint8_t fc_reserved1[40];
1663 struct mcx_flow_match fc_match_value;
1664 uint8_t fc_reserved2[192];
1665 } __packed __aligned(4);
1666
1667 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
1668 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
1669
1670 struct mcx_cmd_destroy_flow_group_in {
1671 uint16_t cmd_opcode;
1672 uint8_t cmd_reserved0[4];
1673 uint16_t cmd_op_mod;
1674 uint8_t cmd_reserved1[8];
1675 } __packed __aligned(4);
1676
1677 struct mcx_cmd_destroy_flow_group_mb_in {
1678 uint8_t cmd_table_type;
1679 uint8_t cmd_reserved0[3];
1680 uint32_t cmd_table_id;
1681 uint32_t cmd_group_id;
1682 uint8_t cmd_reserved1[36];
1683 } __packed __aligned(4);
1684
1685 struct mcx_cmd_destroy_flow_group_out {
1686 uint8_t cmd_status;
1687 uint8_t cmd_reserved0[3];
1688 uint32_t cmd_syndrome;
1689 uint8_t cmd_reserved1[8];
1690 } __packed __aligned(4);
1691
1692 struct mcx_cmd_set_flow_table_entry_in {
1693 uint16_t cmd_opcode;
1694 uint8_t cmd_reserved0[4];
1695 uint16_t cmd_op_mod;
1696 uint8_t cmd_reserved1[8];
1697 } __packed __aligned(4);
1698
1699 struct mcx_cmd_set_flow_table_entry_mb_in {
1700 uint8_t cmd_table_type;
1701 uint8_t cmd_reserved0[3];
1702 uint32_t cmd_table_id;
1703 uint32_t cmd_modify_enable_mask;
1704 uint8_t cmd_reserved1[4];
1705 uint32_t cmd_flow_index;
1706 uint8_t cmd_reserved2[28];
1707 struct mcx_flow_ctx cmd_flow_ctx;
1708 } __packed __aligned(4);
1709
1710 struct mcx_cmd_set_flow_table_entry_out {
1711 uint8_t cmd_status;
1712 uint8_t cmd_reserved0[3];
1713 uint32_t cmd_syndrome;
1714 uint8_t cmd_reserved1[8];
1715 } __packed __aligned(4);
1716
1717 struct mcx_cmd_query_flow_table_entry_in {
1718 uint16_t cmd_opcode;
1719 uint8_t cmd_reserved0[4];
1720 uint16_t cmd_op_mod;
1721 uint8_t cmd_reserved1[8];
1722 } __packed __aligned(4);
1723
1724 struct mcx_cmd_query_flow_table_entry_mb_in {
1725 uint8_t cmd_table_type;
1726 uint8_t cmd_reserved0[3];
1727 uint32_t cmd_table_id;
1728 uint8_t cmd_reserved1[8];
1729 uint32_t cmd_flow_index;
1730 uint8_t cmd_reserved2[28];
1731 } __packed __aligned(4);
1732
1733 struct mcx_cmd_query_flow_table_entry_out {
1734 uint8_t cmd_status;
1735 uint8_t cmd_reserved0[3];
1736 uint32_t cmd_syndrome;
1737 uint8_t cmd_reserved1[8];
1738 } __packed __aligned(4);
1739
1740 struct mcx_cmd_query_flow_table_entry_mb_out {
1741 uint8_t cmd_reserved0[48];
1742 struct mcx_flow_ctx cmd_flow_ctx;
1743 } __packed __aligned(4);
1744
1745 struct mcx_cmd_delete_flow_table_entry_in {
1746 uint16_t cmd_opcode;
1747 uint8_t cmd_reserved0[4];
1748 uint16_t cmd_op_mod;
1749 uint8_t cmd_reserved1[8];
1750 } __packed __aligned(4);
1751
1752 struct mcx_cmd_delete_flow_table_entry_mb_in {
1753 uint8_t cmd_table_type;
1754 uint8_t cmd_reserved0[3];
1755 uint32_t cmd_table_id;
1756 uint8_t cmd_reserved1[8];
1757 uint32_t cmd_flow_index;
1758 uint8_t cmd_reserved2[28];
1759 } __packed __aligned(4);
1760
1761 struct mcx_cmd_delete_flow_table_entry_out {
1762 uint8_t cmd_status;
1763 uint8_t cmd_reserved0[3];
1764 uint32_t cmd_syndrome;
1765 uint8_t cmd_reserved1[8];
1766 } __packed __aligned(4);
1767
1768 struct mcx_cmd_query_flow_group_in {
1769 uint16_t cmd_opcode;
1770 uint8_t cmd_reserved0[4];
1771 uint16_t cmd_op_mod;
1772 uint8_t cmd_reserved1[8];
1773 } __packed __aligned(4);
1774
1775 struct mcx_cmd_query_flow_group_mb_in {
1776 uint8_t cmd_table_type;
1777 uint8_t cmd_reserved0[3];
1778 uint32_t cmd_table_id;
1779 uint32_t cmd_group_id;
1780 uint8_t cmd_reserved1[36];
1781 } __packed __aligned(4);
1782
1783 struct mcx_cmd_query_flow_group_out {
1784 uint8_t cmd_status;
1785 uint8_t cmd_reserved0[3];
1786 uint32_t cmd_syndrome;
1787 uint8_t cmd_reserved1[8];
1788 } __packed __aligned(4);
1789
1790 struct mcx_cmd_query_flow_group_mb_out {
1791 uint8_t cmd_reserved0[12];
1792 uint32_t cmd_start_flow_index;
1793 uint8_t cmd_reserved1[4];
1794 uint32_t cmd_end_flow_index;
1795 uint8_t cmd_reserved2[20];
1796 uint32_t cmd_match_criteria_enable;
1797 uint8_t cmd_match_criteria[512];
1798 uint8_t cmd_reserved4[448];
1799 } __packed __aligned(4);
1800
1801 struct mcx_cmd_query_flow_table_in {
1802 uint16_t cmd_opcode;
1803 uint8_t cmd_reserved0[4];
1804 uint16_t cmd_op_mod;
1805 uint8_t cmd_reserved1[8];
1806 } __packed __aligned(4);
1807
1808 struct mcx_cmd_query_flow_table_mb_in {
1809 uint8_t cmd_table_type;
1810 uint8_t cmd_reserved0[3];
1811 uint32_t cmd_table_id;
1812 uint8_t cmd_reserved1[40];
1813 } __packed __aligned(4);
1814
1815 struct mcx_cmd_query_flow_table_out {
1816 uint8_t cmd_status;
1817 uint8_t cmd_reserved0[3];
1818 uint32_t cmd_syndrome;
1819 uint8_t cmd_reserved1[8];
1820 } __packed __aligned(4);
1821
1822 struct mcx_cmd_query_flow_table_mb_out {
1823 uint8_t cmd_reserved0[4];
1824 struct mcx_flow_table_ctx cmd_ctx;
1825 } __packed __aligned(4);
1826
1827 struct mcx_cmd_alloc_flow_counter_in {
1828 uint16_t cmd_opcode;
1829 uint8_t cmd_reserved0[4];
1830 uint16_t cmd_op_mod;
1831 uint8_t cmd_reserved1[8];
1832 } __packed __aligned(4);
1833
1834 struct mcx_cmd_query_rq_in {
1835 uint16_t cmd_opcode;
1836 uint8_t cmd_reserved0[4];
1837 uint16_t cmd_op_mod;
1838 uint32_t cmd_rqn;
1839 uint8_t cmd_reserved1[4];
1840 } __packed __aligned(4);
1841
1842 struct mcx_cmd_query_rq_out {
1843 uint8_t cmd_status;
1844 uint8_t cmd_reserved0[3];
1845 uint32_t cmd_syndrome;
1846 uint8_t cmd_reserved1[8];
1847 } __packed __aligned(4);
1848
1849 struct mcx_cmd_query_rq_mb_out {
1850 uint8_t cmd_reserved0[16];
1851 struct mcx_rq_ctx cmd_ctx;
1852 };
1853
1854 struct mcx_cmd_query_sq_in {
1855 uint16_t cmd_opcode;
1856 uint8_t cmd_reserved0[4];
1857 uint16_t cmd_op_mod;
1858 uint32_t cmd_sqn;
1859 uint8_t cmd_reserved1[4];
1860 } __packed __aligned(4);
1861
1862 struct mcx_cmd_query_sq_out {
1863 uint8_t cmd_status;
1864 uint8_t cmd_reserved0[3];
1865 uint32_t cmd_syndrome;
1866 uint8_t cmd_reserved1[8];
1867 } __packed __aligned(4);
1868
1869 struct mcx_cmd_query_sq_mb_out {
1870 uint8_t cmd_reserved0[16];
1871 struct mcx_sq_ctx cmd_ctx;
1872 };
1873
1874 struct mcx_cmd_alloc_flow_counter_out {
1875 uint8_t cmd_status;
1876 uint8_t cmd_reserved0[3];
1877 uint32_t cmd_syndrome;
1878 uint8_t cmd_reserved1[2];
1879 uint16_t cmd_flow_counter_id;
1880 uint8_t cmd_reserved2[4];
1881 } __packed __aligned(4);
1882
1883 struct mcx_wq_doorbell {
1884 uint32_t db_recv_counter;
1885 uint32_t db_send_counter;
1886 } __packed __aligned(8);
1887
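/*
 * a single physically contiguous DMA allocation, accessed through the
 * MCX_DMA_* macros below.
 */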
1888 struct mcx_dmamem {
1889 bus_dmamap_t mxm_map;
1890 bus_dma_segment_t mxm_seg;
1891 int mxm_nsegs;
1892 size_t mxm_size;
1893 void *mxm_kva;
1894 };
1895 #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
1896 #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
1897 #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
1898 #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
1899
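/*
 * pages of memory handed to the firmware with the MANAGE_PAGES command;
 * unlike mcx_dmamem, this may be scattered across many segments.
 */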
1900 struct mcx_hwmem {
1901 bus_dmamap_t mhm_map;
1902 bus_dma_segment_t *mhm_segs;
1903 unsigned int mhm_seg_count;
1904 unsigned int mhm_npages;
1905 };
1906
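/* per-descriptor state for the rx and tx rings: a DMA map and its mbuf */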
1907 struct mcx_slot {
1908 bus_dmamap_t ms_map;
1909 struct mbuf *ms_m;
1910 };
1911
1912 struct mcx_cq {
1913 int cq_n;
1914 struct mcx_dmamem cq_mem;
1915 uint32_t *cq_doorbell;
1916 uint32_t cq_cons;
1917 uint32_t cq_count;
1918 };
1919
1920 struct mcx_calibration {
1921 uint64_t c_timestamp; /* previous mcx chip time */
1922 uint64_t c_uptime; /* previous kernel nanouptime */
1923 uint64_t c_tbase; /* mcx chip time */
1924 uint64_t c_ubase; /* kernel nanouptime */
1925 uint64_t c_tdiff;
1926 uint64_t c_udiff;
1927 };
1928
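/* calibration callout intervals, in seconds: resync quickly at first, then settle */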
1929 #define MCX_CALIBRATE_FIRST 2
1930 #define MCX_CALIBRATE_NORMAL 30
1931
1932 struct mcx_rxring {
1933 u_int rxr_total;
1934 u_int rxr_inuse;
1935 };
1936
1937 MBUFQ_HEAD(mcx_mbufq);
1938
1939 struct mcx_softc {
1940 device_t sc_dev;
1941 struct ethercom sc_ec;
1942 struct ifmedia sc_media;
1943 uint64_t sc_media_status;
1944 uint64_t sc_media_active;
1945 kmutex_t sc_media_mutex;
1946
1947 pci_chipset_tag_t sc_pc;
1948 pci_intr_handle_t *sc_intrs;
1949 void *sc_ihs[MCX_MAX_NINTR];
1950 pcitag_t sc_tag;
1951
1952 bus_dma_tag_t sc_dmat;
1953 bus_space_tag_t sc_memt;
1954 bus_space_handle_t sc_memh;
1955 bus_size_t sc_mems;
1956
1957 struct mcx_dmamem sc_cmdq_mem;
1958 unsigned int sc_cmdq_mask;
1959 unsigned int sc_cmdq_size;
1960
1961 unsigned int sc_cmdq_token;
1962
1963 struct mcx_hwmem sc_boot_pages;
1964 struct mcx_hwmem sc_init_pages;
1965 struct mcx_hwmem sc_regular_pages;
1966
1967 int sc_uar;
1968 int sc_pd;
1969 int sc_tdomain;
1970 uint32_t sc_lkey;
1971
1972 struct mcx_dmamem sc_doorbell_mem;
1973
1974 int sc_eqn;
1975 uint32_t sc_eq_cons;
1976 struct mcx_dmamem sc_eq_mem;
1977 int sc_hardmtu;
1978
1979 struct workqueue *sc_workq;
1980 struct work sc_port_change;
1981
1982 int sc_flow_table_id;
1983 #define MCX_FLOW_GROUP_PROMISC 0
1984 #define MCX_FLOW_GROUP_ALLMULTI 1
1985 #define MCX_FLOW_GROUP_MAC 2
1986 #define MCX_NUM_FLOW_GROUPS 3
1987 int sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
1988 int sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
1989 int sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
1990 int sc_promisc_flow_enabled;
1991 int sc_allmulti_flow_enabled;
1992 int sc_mcast_flow_base;
1993 int sc_extra_mcast;
1994 uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
1995
1996 struct mcx_calibration sc_calibration[2];
1997 unsigned int sc_calibration_gen;
1998 callout_t sc_calibrate;
1999
2000 struct mcx_cq sc_cq[MCX_MAX_CQS];
2001 int sc_num_cq;
2002
2003 /* rx */
2004 int sc_tirn;
2005 int sc_rqn;
2006 struct mcx_dmamem sc_rq_mem;
2007 struct mcx_slot *sc_rx_slots;
2008 uint32_t *sc_rx_doorbell;
2009
2010 uint32_t sc_rx_prod;
2011 callout_t sc_rx_refill;
2012 struct mcx_rxring sc_rxr;
2013
2014 /* tx */
2015 int sc_tisn;
2016 int sc_sqn;
2017 struct mcx_dmamem sc_sq_mem;
2018 struct mcx_slot *sc_tx_slots;
2019 uint32_t *sc_tx_doorbell;
2020 int sc_bf_size;
2021 int sc_bf_offset;
2022
2023 uint32_t sc_tx_cons;
2024 uint32_t sc_tx_prod;
2025
2026 uint64_t sc_last_cq_db;
2027 uint64_t sc_last_srq_db;
2028 };
2029 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2030
2031 static int mcx_match(device_t, cfdata_t, void *);
2032 static void mcx_attach(device_t, device_t, void *);
2033
2034 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2035 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2036 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2037 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2038
2039 static int mcx_version(struct mcx_softc *);
2040 static int mcx_init_wait(struct mcx_softc *);
2041 static int mcx_enable_hca(struct mcx_softc *);
2042 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2043 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2044 int);
2045 static int mcx_issi(struct mcx_softc *);
2046 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2047 static int mcx_hca_max_caps(struct mcx_softc *);
2048 static int mcx_hca_set_caps(struct mcx_softc *);
2049 static int mcx_init_hca(struct mcx_softc *);
2050 static int mcx_set_driver_version(struct mcx_softc *);
2051 static int mcx_iff(struct mcx_softc *);
2052 static int mcx_alloc_uar(struct mcx_softc *);
2053 static int mcx_alloc_pd(struct mcx_softc *);
2054 static int mcx_alloc_tdomain(struct mcx_softc *);
2055 static int mcx_create_eq(struct mcx_softc *);
2056 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2057 static int mcx_query_special_contexts(struct mcx_softc *);
2058 static int mcx_set_port_mtu(struct mcx_softc *, int);
2059 static int mcx_create_cq(struct mcx_softc *, int);
2060 static int mcx_destroy_cq(struct mcx_softc *, int);
2061 static int mcx_create_sq(struct mcx_softc *, int);
2062 static int mcx_destroy_sq(struct mcx_softc *);
2063 static int mcx_ready_sq(struct mcx_softc *);
2064 static int mcx_create_rq(struct mcx_softc *, int);
2065 static int mcx_destroy_rq(struct mcx_softc *);
2066 static int mcx_ready_rq(struct mcx_softc *);
2067 static int mcx_create_tir(struct mcx_softc *);
2068 static int mcx_destroy_tir(struct mcx_softc *);
2069 static int mcx_create_tis(struct mcx_softc *);
2070 static int mcx_destroy_tis(struct mcx_softc *);
2071 static int mcx_create_flow_table(struct mcx_softc *, int);
2072 static int mcx_set_flow_table_root(struct mcx_softc *);
2073 static int mcx_destroy_flow_table(struct mcx_softc *);
2074 static int mcx_create_flow_group(struct mcx_softc *, int, int,
2075 int, int, struct mcx_flow_match *);
2076 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2077 static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2078 const uint8_t *);
2079 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2080
2081 #if 0
2082 static int mcx_dump_flow_table(struct mcx_softc *);
2083 static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2084 static int mcx_dump_flow_group(struct mcx_softc *);
2085 static int mcx_dump_rq(struct mcx_softc *);
2086 static int mcx_dump_sq(struct mcx_softc *);
2087 #endif
2088
2090 #if 0
2091 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2092 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2093 #endif
2094 static void mcx_refill(void *);
2095 static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2096 struct mcx_mbufq *, const struct mcx_calibration *);
2097 static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2098 int *);
2099 static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2100
2101 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2102 static void mcx_arm_eq(struct mcx_softc *);
2103 static int mcx_intr(void *);
2104
2105 static int mcx_init(struct ifnet *);
2106 static void mcx_stop(struct ifnet *, int);
2107 static int mcx_ioctl(struct ifnet *, u_long, void *);
2108 static void mcx_start(struct ifnet *);
2109 static void mcx_watchdog(struct ifnet *);
2110 static void mcx_media_add_types(struct mcx_softc *);
2111 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2112 static int mcx_media_change(struct ifnet *);
2113 #if 0
2114 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2115 #endif
2116 static void mcx_port_change(struct work *, void *);
2117
2118 static void mcx_calibrate_first(struct mcx_softc *);
2119 static void mcx_calibrate(void *);
2120
2121 static inline uint32_t
2122 mcx_rd(struct mcx_softc *, bus_size_t);
2123 static inline void
2124 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2125 static inline void
2126 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2127
2128 static uint64_t mcx_timer(struct mcx_softc *);
2129
2130 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2131 bus_size_t, u_int align);
2132 static void mcx_dmamem_zero(struct mcx_dmamem *);
2133 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2134
2135 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2136 unsigned int);
2137 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2138
2139 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2140
2141 static const struct {
2142 pci_vendor_id_t vendor;
2143 pci_product_id_t product;
2144 } mcx_devices[] = {
2145 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2146 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2147 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2148 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2149 };
2150
2151 struct mcx_eth_proto_capability {
2152 uint64_t cap_media;
2153 uint64_t cap_baudrate;
2154 };
2155
2156 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2157 [MCX_ETHER_CAP_SGMII] = { IFM_1000_SGMII, IF_Gbps(1) },
2158 [MCX_ETHER_CAP_1000_KX] = { IFM_1000_KX, IF_Gbps(1) },
2159 [MCX_ETHER_CAP_10G_CX4] = { IFM_10G_CX4, IF_Gbps(10) },
2160 [MCX_ETHER_CAP_10G_KX4] = { IFM_10G_KX4, IF_Gbps(10) },
2161 [MCX_ETHER_CAP_10G_KR] = { IFM_10G_KR, IF_Gbps(10) },
2162 [MCX_ETHER_CAP_20G_KR2] = { IFM_20G_KR2, IF_Gbps(20) },
2163 [MCX_ETHER_CAP_40G_CR4] = { IFM_40G_CR4, IF_Gbps(40) },
2164 [MCX_ETHER_CAP_40G_KR4] = { IFM_40G_KR4, IF_Gbps(40) },
2165 [MCX_ETHER_CAP_56G_R4] = { IFM_56G_R4, IF_Gbps(56) },
2166 [MCX_ETHER_CAP_10G_CR] = { IFM_10G_CR1, IF_Gbps(10) },
2167 [MCX_ETHER_CAP_10G_SR] = { IFM_10G_SR, IF_Gbps(10) },
2168 [MCX_ETHER_CAP_10G_LR] = { IFM_10G_LR, IF_Gbps(10) },
2169 [MCX_ETHER_CAP_40G_SR4] = { IFM_40G_SR4, IF_Gbps(40) },
2170 [MCX_ETHER_CAP_40G_LR4] = { IFM_40G_LR4, IF_Gbps(40) },
2171 [MCX_ETHER_CAP_50G_SR2] = { IFM_50G_SR2, IF_Gbps(50) },
2172 [MCX_ETHER_CAP_100G_CR4] = { IFM_100G_CR4, IF_Gbps(100) },
2173 [MCX_ETHER_CAP_100G_SR4] = { IFM_100G_SR4, IF_Gbps(100) },
2174 [MCX_ETHER_CAP_100G_KR4] = { IFM_100G_KR4, IF_Gbps(100) },
2175 [MCX_ETHER_CAP_100G_LR4] = { IFM_100G_LR4, IF_Gbps(100) },
2176 [MCX_ETHER_CAP_100_TX] = { IFM_100_TX, IF_Mbps(100) },
2177 [MCX_ETHER_CAP_1000_T] = { IFM_1000_T, IF_Gbps(1) },
2178 [MCX_ETHER_CAP_10G_T] = { IFM_10G_T, IF_Gbps(10) },
2179 [MCX_ETHER_CAP_25G_CR] = { IFM_25G_CR, IF_Gbps(25) },
2180 [MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) },
2181 [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) },
2182 [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) },
2183 [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) },
2184 };
2185
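/* object ids returned by the firmware are 24 bits wide */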
2186 static int
2187 mcx_get_id(uint32_t val)
2188 {
2189 return be32toh(val) & 0x00ffffff;
2190 }
2191
2192 static int
2193 mcx_match(device_t parent, cfdata_t cf, void *aux)
2194 {
2195 struct pci_attach_args *pa = aux;
2196 int n;
2197
2198 for (n = 0; n < __arraycount(mcx_devices); n++) {
2199 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2200 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2201 return 1;
2202 }
2203
2204 return 0;
2205 }
2206
2207 static void
2208 mcx_attach(device_t parent, device_t self, void *aux)
2209 {
2210 struct mcx_softc *sc = device_private(self);
2211 struct ifnet *ifp = &sc->sc_ec.ec_if;
2212 struct pci_attach_args *pa = aux;
2213 uint8_t enaddr[ETHER_ADDR_LEN];
2214 int counts[PCI_INTR_TYPE_SIZE];
2215 char intrbuf[PCI_INTRSTR_LEN];
2216 pcireg_t memtype;
2217 uint32_t r;
2218 unsigned int cq_stride;
2219 unsigned int cq_size;
2220 const char *intrstr;
2221 int i;
2222
2223 sc->sc_dev = self;
2224 sc->sc_pc = pa->pa_pc;
2225 sc->sc_tag = pa->pa_tag;
2226 if (pci_dma64_available(pa))
2227 sc->sc_dmat = pa->pa_dmat64;
2228 else
2229 sc->sc_dmat = pa->pa_dmat;
2230
2231 /* Map the PCI memory space */
2232 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2233 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2234 0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2235 NULL, &sc->sc_mems)) {
2236 aprint_error(": unable to map register memory\n");
2237 return;
2238 }
2239
2240 pci_aprint_devinfo(pa, "Ethernet controller");
2241
2242 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2243
2244 if (mcx_version(sc) != 0) {
2245 /* error printed by mcx_version */
2246 goto unmap;
2247 }
2248
2249 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2250 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2251 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2252 if (cq_size > MCX_MAX_CQE) {
2253 aprint_error_dev(self,
2254 "command queue size overflow %u\n", cq_size);
2255 goto unmap;
2256 }
2257 if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2258 aprint_error_dev(self,
2259 "command queue entry size underflow %u\n", cq_stride);
2260 goto unmap;
2261 }
2262 if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2263 aprint_error_dev(self, "command queue page overflow\n");
2264 goto unmap;
2265 }
2266
2267 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2268 MCX_PAGE_SIZE) != 0) {
2269 aprint_error_dev(self, "unable to allocate doorbell memory\n");
2270 goto unmap;
2271 }
2272
2273 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2274 MCX_PAGE_SIZE) != 0) {
2275 aprint_error_dev(self, "unable to allocate command queue\n");
2276 goto dbfree;
2277 }
2278
2279 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2280 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2281 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2282 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2283
2284 if (mcx_init_wait(sc) != 0) {
2285 aprint_error_dev(self, "timeout waiting for init\n");
2286 goto cqfree;
2287 }
2288
2289 sc->sc_cmdq_mask = cq_size - 1;
2290 sc->sc_cmdq_size = cq_stride;
2291
2292 if (mcx_enable_hca(sc) != 0) {
2293 /* error printed by mcx_enable_hca */
2294 goto cqfree;
2295 }
2296
2297 if (mcx_issi(sc) != 0) {
2298 /* error printed by mcx_issi */
2299 goto teardown;
2300 }
2301
2302 if (mcx_pages(sc, &sc->sc_boot_pages,
2303 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2304 /* error printed by mcx_pages */
2305 goto teardown;
2306 }
2307
2308 if (mcx_hca_max_caps(sc) != 0) {
2309 /* error printed by mcx_hca_max_caps */
2310 goto teardown;
2311 }
2312
2313 if (mcx_hca_set_caps(sc) != 0) {
2314 /* error printed by mcx_hca_set_caps */
2315 goto teardown;
2316 }
2317
2318 if (mcx_pages(sc, &sc->sc_init_pages,
2319 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2320 /* error printed by mcx_pages */
2321 goto teardown;
2322 }
2323
2324 if (mcx_init_hca(sc) != 0) {
2325 /* error printed by mcx_init_hca */
2326 goto teardown;
2327 }
2328
2329 if (mcx_pages(sc, &sc->sc_regular_pages,
2330 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2331 /* error printed by mcx_pages */
2332 goto teardown;
2333 }
2334
2335 /* apparently not necessary? */
2336 if (mcx_set_driver_version(sc) != 0) {
2337 /* error printed by mcx_set_driver_version */
2338 goto teardown;
2339 }
2340
2341 if (mcx_iff(sc) != 0) { /* modify nic vport context */
2342 /* error printed by mcx_iff? */
2343 goto teardown;
2344 }
2345
2346 if (mcx_alloc_uar(sc) != 0) {
2347 /* error printed by mcx_alloc_uar */
2348 goto teardown;
2349 }
2350
2351 if (mcx_alloc_pd(sc) != 0) {
2352 /* error printed by mcx_alloc_pd */
2353 goto teardown;
2354 }
2355
2356 if (mcx_alloc_tdomain(sc) != 0) {
2357 /* error printed by mcx_alloc_tdomain */
2358 goto teardown;
2359 }
2360
2361 /*
2362 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2363 * mellanox support tells me legacy interrupts are not supported,
2364 * so we're stuck with just msi-x.
2365 */
2366 counts[PCI_INTR_TYPE_MSIX] = 1;
2367 counts[PCI_INTR_TYPE_MSI] = 0;
2368 counts[PCI_INTR_TYPE_INTX] = 0;
2369 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2370 aprint_error_dev(self, "unable to allocate interrupt\n");
2371 goto teardown;
2372 }
2373 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2374
2375 #ifdef MCX_MPSAFE
2376 pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
2377 #endif
2378
2379 intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
2380 sizeof(intrbuf));
2381 sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
2382 IPL_NET, mcx_intr, sc, DEVNAME(sc));
2383 if (sc->sc_ihs[0] == NULL) {
2384 aprint_error_dev(self, "unable to establish interrupt%s%s\n",
2385 intrstr ? " at " : "",
2386 intrstr ? intrstr : "");
2387 goto teardown;
2388 }
2389
2390 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2391
2392 if (mcx_create_eq(sc) != 0) {
2393 /* error printed by mcx_create_eq */
2394 goto teardown;
2395 }
2396
2397 if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2398 /* error printed by mcx_query_nic_vport_context */
2399 goto teardown;
2400 }
2401
2402 if (mcx_query_special_contexts(sc) != 0) {
2403 /* error printed by mcx_query_special_contexts */
2404 goto teardown;
2405 }
2406
2407 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2408 /* error printed by mcx_set_port_mtu */
2409 goto teardown;
2410 }
2411
2412 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2413 ether_sprintf(enaddr));
2414
2415 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2416 ifp->if_softc = sc;
2417 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2418 #ifdef MCX_MPSAFE
2419 ifp->if_extflags = IFEF_MPSAFE;
2420 #endif
2421 ifp->if_init = mcx_init;
2422 ifp->if_stop = mcx_stop;
2423 ifp->if_ioctl = mcx_ioctl;
2424 ifp->if_start = mcx_start;
2425 ifp->if_watchdog = mcx_watchdog;
2426 ifp->if_mtu = sc->sc_hardmtu;
2427 IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2428 IFQ_SET_READY(&ifp->if_snd);
2429
2430 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
2431
2432 sc->sc_ec.ec_ifmedia = &sc->sc_media;
2433 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2434 mcx_media_status, &sc->sc_media_mutex);
2435 mcx_media_add_types(sc);
2436 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2437 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2438
2439 if_attach(ifp);
2440 if_deferred_start_init(ifp, NULL);
2441
2442 ether_ifattach(ifp, enaddr);
2443
2444 callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
2445 callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
2446 callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
2447 callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
2448
2449 if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
2450 PRI_NONE, IPL_NET, 0) != 0) {
2451 aprint_error_dev(self, "couldn't create port change workq\n");
2452 goto teardown;
2453 }
2454
2455 mcx_port_change(&sc->sc_port_change, sc);
2456
2457 sc->sc_flow_table_id = -1;
2458 for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2459 sc->sc_flow_group_id[i] = -1;
2460 sc->sc_flow_group_size[i] = 0;
2461 sc->sc_flow_group_start[i] = 0;
2462 }
2463 sc->sc_extra_mcast = 0;
2464 memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2465 return;
2466
2467 teardown:
2468 mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2469 /* error printed by mcx_teardown_hca, and we're already unwinding */
2470 cqfree:
2471 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2472 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2473 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2474 MCX_CMDQ_INTERFACE_DISABLED);
2475 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2476
2477 mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2478 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2479 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2480
2481 mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2482 dbfree:
2483 mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2484 unmap:
2485 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2486 sc->sc_mems = 0;
2487 }
2488
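/*
 * minimal stand-in for OpenBSD's if_rxr(9) interface: track how many rx
 * descriptors are in use so refills never exceed the ring size.
 */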
2489 static void
2490 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2491 {
2492 rxr->rxr_total = hwm;
2493 rxr->rxr_inuse = 0;
2494 }
2495
2496 static u_int
2497 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2498 {
2499 const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2500
2501 rxr->rxr_inuse += taken;
2502
2503 return taken;
2504 }
2505
2506 static void
2507 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2508 {
2509 rxr->rxr_inuse -= n;
2510 }
2511
2512 static u_int
2513 mcx_rxr_inuse(struct mcx_rxring *rxr)
2514 {
2515 return rxr->rxr_inuse;
2516 }
2517
2518 static int
2519 mcx_version(struct mcx_softc *sc)
2520 {
2521 uint32_t fw0, fw1;
2522 uint16_t cmdif;
2523
2524 fw0 = mcx_rd(sc, MCX_FW_VER);
2525 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2526
2527 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2528 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2529
2530 cmdif = MCX_CMDIF(fw1);
2531 if (cmdif != MCX_CMD_IF_SUPPORTED) {
2532 aprint_error_dev(sc->sc_dev,
2533 "unsupported command interface %u\n", cmdif);
2534 return (-1);
2535 }
2536
2537 return (0);
2538 }
2539
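/* poll for up to two seconds (2000 * 1ms) for the firmware to come ready */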
2540 static int
2541 mcx_init_wait(struct mcx_softc *sc)
2542 {
2543 unsigned int i;
2544 uint32_t r;
2545
2546 for (i = 0; i < 2000; i++) {
2547 r = mcx_rd(sc, MCX_STATE);
2548 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2549 return (0);
2550
2551 delay(1000);
2552 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2553 BUS_SPACE_BARRIER_READ);
2554 }
2555
2556 return (-1);
2557 }
2558
2559 static uint8_t
2560 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2561 unsigned int msec)
2562 {
2563 unsigned int i;
2564
2565 for (i = 0; i < msec; i++) {
2566 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2567 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2568
2569 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2570 MCX_CQ_STATUS_OWN_SW)
2571 return (0);
2572
2573 delay(1000);
2574 }
2575
2576 return (ETIMEDOUT);
2577 }
2578
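/*
 * command queue entries and mailboxes carry an 8-bit xor checksum.
 * these helpers fold 64-, 32- and 8-bit fields into a running 32-bit
 * xor, which mcx_mix_done() collapses into the final byte.
 */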
2579 static uint32_t
2580 mcx_mix_u64(uint32_t xor, uint64_t u64)
2581 {
2582 xor ^= u64 >> 32;
2583 xor ^= u64;
2584
2585 return (xor);
2586 }
2587
2588 static uint32_t
2589 mcx_mix_u32(uint32_t xor, uint32_t u32)
2590 {
2591 xor ^= u32;
2592
2593 return (xor);
2594 }
2595
2596 static uint32_t
2597 mcx_mix_u8(uint32_t xor, uint8_t u8)
2598 {
2599 xor ^= u8;
2600
2601 return (xor);
2602 }
2603
2604 static uint8_t
2605 mcx_mix_done(uint32_t xor)
2606 {
2607 xor ^= xor >> 16;
2608 xor ^= xor >> 8;
2609
2610 return (xor);
2611 }
2612
2613 static uint8_t
2614 mcx_xor(const void *buf, size_t len)
2615 {
2616 const uint32_t *dwords = buf;
2617 uint32_t xor = 0xff;
2618 size_t i;
2619
2620 len /= sizeof(*dwords);
2621
2622 for (i = 0; i < len; i++)
2623 xor ^= dwords[i];
2624
2625 return (mcx_mix_done(xor));
2626 }
2627
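/* command tokens tag each request and its mailboxes; 0 is skipped on wrap */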
2628 static uint8_t
2629 mcx_cmdq_token(struct mcx_softc *sc)
2630 {
2631 uint8_t token;
2632
2633 do {
2634 token = ++sc->sc_cmdq_token;
2635 } while (token == 0);
2636
2637 return (token);
2638 }
2639
2640 static void
2641 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2642 uint32_t ilen, uint32_t olen, uint8_t token)
2643 {
2644 memset(cqe, 0, sc->sc_cmdq_size);
2645
2646 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2647 be32enc(&cqe->cq_input_length, ilen);
2648 be32enc(&cqe->cq_output_length, olen);
2649 cqe->cq_token = token;
2650 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2651 }
2652
2653 static void
2654 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2655 {
2656 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2657 }
2658
2659 static int
2660 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
2661 {
2662 /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
2663 return (0);
2664 }
2665
2666 static void *
2667 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2668 {
2669 return (&cqe->cq_input_data);
2670 }
2671
2672 static void *
2673 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2674 {
2675 return (&cqe->cq_output_data);
2676 }
2677
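/*
 * sign the entry, flush it to memory, then ring the command doorbell for
 * the given slot.  a minimal sketch of the polled command flow used
 * throughout this driver:
 *
 *	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
 *	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
 *	in = mcx_cmdq_in(cqe);
 *	in->cmd_opcode = htobe16(MCX_CMD_...);
 *	mcx_cmdq_post(sc, cqe, 0);
 *	error = mcx_cmdq_poll(sc, cqe, 1000);
 */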
2678 static void
2679 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2680 unsigned int slot)
2681 {
2682 mcx_cmdq_sign(cqe);
2683
2684 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2685 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2686
2687 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2688 }
2689
2690 static int
2691 mcx_enable_hca(struct mcx_softc *sc)
2692 {
2693 struct mcx_cmdq_entry *cqe;
2694 struct mcx_cmd_enable_hca_in *in;
2695 struct mcx_cmd_enable_hca_out *out;
2696 int error;
2697 uint8_t status;
2698
2699 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2700 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2701
2702 in = mcx_cmdq_in(cqe);
2703 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2704 in->cmd_op_mod = htobe16(0);
2705 in->cmd_function_id = htobe16(0);
2706
2707 mcx_cmdq_post(sc, cqe, 0);
2708
2709 error = mcx_cmdq_poll(sc, cqe, 1000);
2710 if (error != 0) {
2711 printf(", hca enable timeout\n");
2712 return (-1);
2713 }
2714 if (mcx_cmdq_verify(cqe) != 0) {
2715 printf(", hca enable command corrupt\n");
2716 return (-1);
2717 }
2718
2719 status = cqe->cq_output_data[0];
2720 if (status != MCX_CQ_STATUS_OK) {
2721 printf(", hca enable failed (%x)\n", status);
2722 return (-1);
2723 }
2724
2725 return (0);
2726 }
2727
2728 static int
2729 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2730 {
2731 struct mcx_cmdq_entry *cqe;
2732 struct mcx_cmd_teardown_hca_in *in;
2733 struct mcx_cmd_teardown_hca_out *out;
2734 int error;
2735 uint8_t status;
2736
2737 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2738 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2739
2740 in = mcx_cmdq_in(cqe);
2741 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2742 in->cmd_op_mod = htobe16(0);
2743 in->cmd_profile = profile;
2744
2745 mcx_cmdq_post(sc, cqe, 0);
2746
2747 error = mcx_cmdq_poll(sc, cqe, 1000);
2748 if (error != 0) {
2749 printf(", hca teardown timeout\n");
2750 return (-1);
2751 }
2752 if (mcx_cmdq_verify(cqe) != 0) {
2753 printf(", hca teardown command corrupt\n");
2754 return (-1);
2755 }
2756
2757 status = cqe->cq_output_data[0];
2758 if (status != MCX_CQ_STATUS_OK) {
2759 printf(", hca teardown failed (%x)\n", status);
2760 return (-1);
2761 }
2762
2763 return (0);
2764 }
2765
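/*
 * commands whose input or output exceeds the space inline in the queue
 * entry spill into a chain of mailboxes, linked through mb_next_ptr and
 * tagged with the command's token.
 */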
2766 static int
2767 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2768 unsigned int nmb, uint64_t *ptr, uint8_t token)
2769 {
2770 uint8_t *kva;
2771 uint64_t dva;
2772 int i;
2773 int error;
2774
2775 error = mcx_dmamem_alloc(sc, mxm,
2776 nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2777 if (error != 0)
2778 return (error);
2779
2780 mcx_dmamem_zero(mxm);
2781
2782 dva = MCX_DMA_DVA(mxm);
2783 kva = MCX_DMA_KVA(mxm);
2784 for (i = 0; i < nmb; i++) {
2785 struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2786
2787 /* patch the cqe or mbox pointing at this one */
2788 be64enc(ptr, dva);
2789
2790 /* fill in this mbox */
2791 be32enc(&mbox->mb_block_number, i);
2792 mbox->mb_token = token;
2793
2794 /* move to the next one */
2795 ptr = &mbox->mb_next_ptr;
2796
2797 dva += MCX_CMDQ_MAILBOX_SIZE;
2798 kva += MCX_CMDQ_MAILBOX_SIZE;
2799 }
2800
2801 return (0);
2802 }
2803
2804 static uint32_t
2805 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2806 {
2807 uint32_t xor = 0xff;
2808
2809 /* only 3 fields get set, so mix them directly */
2810 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2811 xor = mcx_mix_u32(xor, mb->mb_block_number);
2812 xor = mcx_mix_u8(xor, mb->mb_token);
2813
2814 return (mcx_mix_done(xor));
2815 }
2816
2817 static void
2818 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2819 {
2820 uint8_t *kva;
2821 int i;
2822
2823 kva = MCX_DMA_KVA(mxm);
2824
2825 for (i = 0; i < nmb; i++) {
2826 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2827 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2828 mb->mb_ctrl_signature = sig;
2829 mb->mb_signature = sig ^
2830 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2831
2832 kva += MCX_CMDQ_MAILBOX_SIZE;
2833 }
2834 }
2835
2836 static void
2837 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
2838 {
2839 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
2840 0, MCX_DMA_LEN(mxm), ops);
2841 }
2842
2843 static struct mcx_cmdq_mailbox *
2844 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2845 {
2846 uint8_t *kva;
2847
2848 kva = MCX_DMA_KVA(mxm);
2849 kva += i * MCX_CMDQ_MAILBOX_SIZE;
2850
2851 return ((struct mcx_cmdq_mailbox *)kva);
2852 }
2853
2854 static inline void *
2855 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2856 {
2857 return (&mb->mb_data);
2858 }
2859
2860 static void
2861 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2862 void *b, size_t len)
2863 {
2864 uint8_t *buf = b;
2865 struct mcx_cmdq_mailbox *mb;
2866 int i;
2867
2868 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2869 for (i = 0; i < nmb; i++) {
2870
2871 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2872
2873 if (sizeof(mb->mb_data) >= len)
2874 break;
2875
2876 buf += sizeof(mb->mb_data);
2877 len -= sizeof(mb->mb_data);
2878 mb++;
2879 }
2880 }
2881
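/*
 * write the physical page addresses of a buffer into mailbox data,
 * starting 'offset' bytes in and crossing mailbox boundaries as needed.
 */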
2882 static void
2883 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
2884 struct mcx_dmamem *buf)
2885 {
2886 uint64_t *pas;
2887 int mbox, mbox_pages, i;
2888
2889 mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
2890 offset %= MCX_CMDQ_MAILBOX_DATASIZE;
2891
2892 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
2893 pas += (offset / sizeof(*pas));
2894 mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
2895 for (i = 0; i < npages; i++) {
2896 if (i == mbox_pages) {
2897 mbox++;
2898 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
2899 mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
2900 }
2901 *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
2902 pas++;
2903 }
2904 }
2905
2906 static void
2907 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2908 {
2909 uint8_t *buf = b;
2910 struct mcx_cmdq_mailbox *mb;
2911 int i;
2912
2913 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2914 for (i = 0; i < nmb; i++) {
2915 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2916
2917 if (sizeof(mb->mb_data) >= len)
2918 break;
2919
2920 buf += sizeof(mb->mb_data);
2921 len -= sizeof(mb->mb_data);
2922 mb++;
2923 }
2924 }
2925
2926 static void
2927 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
2928 {
2929 mcx_dmamem_free(sc, mxm);
2930 }
2931
2932 #if 0
2933 static void
2934 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
2935 {
2936 unsigned int i;
2937
2938 printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
2939 be32dec(&cqe->cq_input_length), (unsigned long long)be64dec(&cqe->cq_input_ptr));
2940
2941 printf(", idata ");
2942 for (i = 0; i < sizeof(cqe->cq_input_data); i++)
2943 printf("%02x", cqe->cq_input_data[i]);
2944
2945 printf(", odata ");
2946 for (i = 0; i < sizeof(cqe->cq_output_data); i++)
2947 printf("%02x", cqe->cq_output_data[i]);
2948
2949 printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
2950 (unsigned long long)be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
2951 cqe->cq_token, cqe->cq_signature, cqe->cq_status);
2952 }
2953
2954 static void
2955 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
2956 {
2957 int i, j;
2958 uint8_t *d;
2959
2960 for (i = 0; i < num; i++) {
2961 struct mcx_cmdq_mailbox *mbox;
2962 mbox = mcx_cq_mbox(mboxes, i);
2963
2964 d = mcx_cq_mbox_data(mbox);
2965 for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
2966 if (j != 0 && (j % 16 == 0))
2967 printf("\n");
2968 printf("%.2x ", d[j]);
2969 }
2970 }
2971 }
2972 #endif
2973
2974 static int
2975 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2976 int len)
2977 {
2978 struct mcx_dmamem mxm;
2979 struct mcx_cmdq_entry *cqe;
2980 struct mcx_cmd_access_reg_in *in;
2981 struct mcx_cmd_access_reg_out *out;
2982 uint8_t token = mcx_cmdq_token(sc);
2983 int error, nmb;
2984
2985 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2986 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2987 token);
2988
2989 in = mcx_cmdq_in(cqe);
2990 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2991 in->cmd_op_mod = htobe16(op);
2992 in->cmd_register_id = htobe16(reg);
2993
2994 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2995 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2996 printf(", unable to allocate access reg mailboxen\n");
2997 return (-1);
2998 }
2999 cqe->cq_input_ptr = cqe->cq_output_ptr;
3000 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3001 mcx_cmdq_mboxes_sign(&mxm, nmb);
3002 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3003
3004 mcx_cmdq_post(sc, cqe, 0);
3005 error = mcx_cmdq_poll(sc, cqe, 1000);
3006 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3007
3008 if (error != 0) {
3009 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3010 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3011 goto free;
3012 }
3013 error = mcx_cmdq_verify(cqe);
3014 if (error != 0) {
3015 printf("%s: access reg (%s %x) reply corrupt\n",
3016 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3017 reg);
3018 goto free;
3019 }
3020
3021 out = mcx_cmdq_out(cqe);
3022 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3023 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3024 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3025 reg, out->cmd_status, be32toh(out->cmd_syndrome));
3026 error = -1;
3027 goto free;
3028 }
3029
3030 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3031 free:
3032 mcx_dmamem_free(sc, &mxm);
3033
3034 return (error);
3035 }
3036
3037 static int
3038 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
3039 {
3040 struct mcx_cmd_set_issi_in *in;
3041 struct mcx_cmd_set_issi_out *out;
3042 uint8_t status;
3043
3044 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3045
3046 in = mcx_cmdq_in(cqe);
3047 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3048 in->cmd_op_mod = htobe16(0);
3049 in->cmd_current_issi = htobe16(MCX_ISSI);
3050
3051 mcx_cmdq_post(sc, cqe, slot);
3052 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3053 return (-1);
3054 if (mcx_cmdq_verify(cqe) != 0)
3055 return (-1);
3056
3057 status = cqe->cq_output_data[0];
3058 if (status != MCX_CQ_STATUS_OK)
3059 return (-1);
3060
3061 return (0);
3062 }
3063
3064 static int
3065 mcx_issi(struct mcx_softc *sc)
3066 {
3067 struct mcx_dmamem mxm;
3068 struct mcx_cmdq_entry *cqe;
3069 struct mcx_cmd_query_issi_in *in;
3070 struct mcx_cmd_query_issi_il_out *out;
3071 struct mcx_cmd_query_issi_mb_out *mb;
3072 uint8_t token = mcx_cmdq_token(sc);
3073 uint8_t status;
3074 int error;
3075
3076 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3077 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3078
3079 in = mcx_cmdq_in(cqe);
3080 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3081 in->cmd_op_mod = htobe16(0);
3082
3083 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3084 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3085 &cqe->cq_output_ptr, token) != 0) {
3086 printf(", unable to allocate query issi mailbox\n");
3087 return (-1);
3088 }
3089 mcx_cmdq_mboxes_sign(&mxm, 1);
3090
3091 mcx_cmdq_post(sc, cqe, 0);
3092 error = mcx_cmdq_poll(sc, cqe, 1000);
3093 if (error != 0) {
3094 printf(", query issi timeout\n");
3095 goto free;
3096 }
3097 error = mcx_cmdq_verify(cqe);
3098 if (error != 0) {
3099 printf(", query issi reply corrupt\n");
3100 goto free;
3101 }
3102
3103 status = cqe->cq_output_data[0];
3104 switch (status) {
3105 case MCX_CQ_STATUS_OK:
3106 break;
3107 case MCX_CQ_STATUS_BAD_OPCODE:
3108 /* use ISSI 0 */
3109 goto free;
3110 default:
3111 printf(", query issi failed (%x)\n", status);
3112 error = -1;
3113 goto free;
3114 }
3115
3116 out = mcx_cmdq_out(cqe);
3117 if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3118 /* use ISSI 1 */
3119 goto free;
3120 }
3121
3122 /* don't need to read cqe anymore, can be used for SET ISSI */
3123
3124 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3125 CTASSERT(MCX_ISSI < NBBY);
3126 /* the supported-ISSI bitmask is big-endian, so bits 0-7 land in the last byte */
3127 if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3128 /* use ISSI 0 */
3129 goto free;
3130 }
3131
3132 if (mcx_set_issi(sc, cqe, 0) != 0) {
3133 /* ignore the error, just use ISSI 0 */
3134 } else {
3135 /* use ISSI 1 */
3136 }
3137
3138 free:
3139 mcx_cq_mboxes_free(sc, &mxm);
3140 return (error);
3141 }
3142
3143 static int
3144 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3145 uint32_t *npages, uint16_t *func_id)
3146 {
3147 struct mcx_cmdq_entry *cqe;
3148 struct mcx_cmd_query_pages_in *in;
3149 struct mcx_cmd_query_pages_out *out;
3150
3151 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3152 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3153
3154 in = mcx_cmdq_in(cqe);
3155 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3156 in->cmd_op_mod = type;
3157
3158 mcx_cmdq_post(sc, cqe, 0);
3159 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3160 printf(", query pages timeout\n");
3161 return (-1);
3162 }
3163 if (mcx_cmdq_verify(cqe) != 0) {
3164 printf(", query pages reply corrupt\n");
3165 return (-1);
3166 }
3167
3168 out = mcx_cmdq_out(cqe);
3169 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3170 printf(", query pages failed (%x)\n", out->cmd_status);
3171 return (-1);
3172 }
3173
3174 *func_id = out->cmd_func_id;
3175 *npages = be32dec(&out->cmd_num_pages);
3176
3177 return (0);
3178 }
3179
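/*
 * helper to walk a bus_dmamap_t's segment list in fixed-size (here,
 * page-sized) steps.
 */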
3180 struct bus_dma_iter {
3181 bus_dmamap_t i_map;
3182 bus_size_t i_offset;
3183 unsigned int i_index;
3184 };
3185
3186 static void
3187 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3188 {
3189 i->i_map = map;
3190 i->i_offset = 0;
3191 i->i_index = 0;
3192 }
3193
3194 static bus_addr_t
3195 bus_dma_iter_addr(struct bus_dma_iter *i)
3196 {
3197 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3198 }
3199
3200 static void
3201 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3202 {
3203 bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3204 bus_size_t diff;
3205
3206 do {
3207 diff = seg->ds_len - i->i_offset;
3208 if (size < diff)
3209 break;
3210
3211 size -= diff;
3212
3213 seg++;
3214
3215 i->i_offset = 0;
3216 i->i_index++;
3217 } while (size > 0);
3218
3219 i->i_offset += size;
3220 }
3221
3222 static int
3223 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3224 {
3225 struct mcx_dmamem mxm;
3226 struct mcx_cmdq_entry *cqe;
3227 struct mcx_cmd_manage_pages_in *in;
3228 struct mcx_cmd_manage_pages_out *out;
3229 unsigned int paslen, nmb, i, j, npages;
3230 struct bus_dma_iter iter;
3231 uint64_t *pas;
3232 uint8_t status;
3233 uint8_t token = mcx_cmdq_token(sc);
3234 int error;
3235
3236 npages = mhm->mhm_npages;
3237
3238 paslen = sizeof(*pas) * npages;
3239 nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3240
3241 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3242 mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3243
3244 in = mcx_cmdq_in(cqe);
3245 in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3246 in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3247 in->cmd_func_id = func_id;
3248 be32enc(&in->cmd_input_num_entries, npages);
3249
3250 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3251 &cqe->cq_input_ptr, token) != 0) {
3252 printf(", unable to allocate manage pages mailboxen\n");
3253 return (-1);
3254 }
3255
3256 bus_dma_iter_init(&iter, mhm->mhm_map);
3257 for (i = 0; i < nmb; i++) {
3258 unsigned int lim;
3259
3260 pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3261 lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3262
3263 for (j = 0; j < lim; j++) {
3264 be64enc(&pas[j], bus_dma_iter_addr(&iter));
3265 bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3266 }
3267
3268 npages -= lim;
3269 }
3270
3271 mcx_cmdq_mboxes_sign(&mxm, nmb);
3272
3273 mcx_cmdq_post(sc, cqe, 0);
3274 error = mcx_cmdq_poll(sc, cqe, 1000);
3275 if (error != 0) {
3276 printf(", manage pages timeout\n");
3277 goto free;
3278 }
3279 error = mcx_cmdq_verify(cqe);
3280 if (error != 0) {
3281 printf(", manage pages reply corrupt\n");
3282 goto free;
3283 }
3284
3285 status = cqe->cq_output_data[0];
3286 if (status != MCX_CQ_STATUS_OK) {
3287 printf(", manage pages failed (%x)\n", status);
3288 error = -1;
3289 goto free;
3290 }
3291
3292 free:
3293 mcx_dmamem_free(sc, &mxm);
3294
3295 return (error);
3296 }
3297
3298 static int
3299 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3300 {
3301 uint32_t npages;
3302 uint16_t func_id;
3303
3304 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3305 /* error printed by mcx_query_pages */
3306 return (-1);
3307 }
3308
3309 if (npages == 0)
3310 return (0);
3311
3312 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3313 printf(", unable to allocate hwmem\n");
3314 return (-1);
3315 }
3316
3317 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3318 printf(", unable to add hwmem\n");
3319 goto free;
3320 }
3321
3322 return (0);
3323
3324 free:
3325 mcx_hwmem_free(sc, mhm);
3326
3327 return (-1);
3328 }
3329
3330 static int
3331 mcx_hca_max_caps(struct mcx_softc *sc)
3332 {
3333 struct mcx_dmamem mxm;
3334 struct mcx_cmdq_entry *cqe;
3335 struct mcx_cmd_query_hca_cap_in *in;
3336 struct mcx_cmd_query_hca_cap_out *out;
3337 struct mcx_cmdq_mailbox *mb;
3338 struct mcx_cap_device *hca;
3339 uint8_t status;
3340 uint8_t token = mcx_cmdq_token(sc);
3341 int error;
3342
3343 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3344 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3345 token);
3346
3347 in = mcx_cmdq_in(cqe);
3348 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3349 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3350 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3351
3352 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3353 &cqe->cq_output_ptr, token) != 0) {
3354 printf(", unable to allocate query hca caps mailboxen\n");
3355 return (-1);
3356 }
3357 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3358 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3359
3360 mcx_cmdq_post(sc, cqe, 0);
3361 error = mcx_cmdq_poll(sc, cqe, 1000);
3362 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3363
3364 if (error != 0) {
3365 printf(", query hca caps timeout\n");
3366 goto free;
3367 }
3368 error = mcx_cmdq_verify(cqe);
3369 if (error != 0) {
3370 printf(", query hca caps reply corrupt\n");
3371 goto free;
3372 }
3373
3374 status = cqe->cq_output_data[0];
3375 if (status != MCX_CQ_STATUS_OK) {
3376 printf(", query hca caps failed (%x)\n", status);
3377 error = -1;
3378 goto free;
3379 }
3380
3381 mb = mcx_cq_mbox(&mxm, 0);
3382 hca = mcx_cq_mbox_data(mb);
3383
3384 if (hca->log_pg_sz > PAGE_SHIFT) {
3385 printf(", minimum system page shift %u is too large\n",
3386 hca->log_pg_sz);
3387 error = -1;
3388 goto free;
3389 }
3390 /*
3391 * blueflame register is split into two buffers, and we must alternate
3392 * between the two of them.
3393 */
3394 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3395
3396 free:
3397 mcx_dmamem_free(sc, &mxm);
3398
3399 return (error);
3400 }
3401
3402 static int
3403 mcx_hca_set_caps(struct mcx_softc *sc)
3404 {
3405 struct mcx_dmamem mxm;
3406 struct mcx_cmdq_entry *cqe;
3407 struct mcx_cmd_query_hca_cap_in *in;
3408 struct mcx_cmd_query_hca_cap_out *out;
3409 struct mcx_cmdq_mailbox *mb;
3410 struct mcx_cap_device *hca;
3411 uint8_t status;
3412 uint8_t token = mcx_cmdq_token(sc);
3413 int error;
3414
3415 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3416 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3417 token);
3418
3419 in = mcx_cmdq_in(cqe);
3420 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3421 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3422 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3423
3424 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3425 &cqe->cq_output_ptr, token) != 0) {
3426 printf(", unable to allocate manage pages mailboxen\n");
3427 return (-1);
3428 }
3429 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3430 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3431
3432 mcx_cmdq_post(sc, cqe, 0);
3433 error = mcx_cmdq_poll(sc, cqe, 1000);
3434 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3435
3436 if (error != 0) {
3437 printf(", query hca caps timeout\n");
3438 goto free;
3439 }
3440 error = mcx_cmdq_verify(cqe);
3441 if (error != 0) {
3442 printf(", query hca caps reply corrupt\n");
3443 goto free;
3444 }
3445
3446 status = cqe->cq_output_data[0];
3447 if (status != MCX_CQ_STATUS_OK) {
3448 printf(", query hca caps failed (%x)\n", status);
3449 error = -1;
3450 goto free;
3451 }
3452
3453 mb = mcx_cq_mbox(&mxm, 0);
3454 hca = mcx_cq_mbox_data(mb);
3455
3456 hca->log_pg_sz = PAGE_SHIFT;
3457
3458 free:
3459 mcx_dmamem_free(sc, &mxm);
3460
3461 return (error);
3462 }
3463
3465 static int
3466 mcx_init_hca(struct mcx_softc *sc)
3467 {
3468 struct mcx_cmdq_entry *cqe;
3469 struct mcx_cmd_init_hca_in *in;
3470 struct mcx_cmd_init_hca_out *out;
3471 int error;
3472 uint8_t status;
3473
3474 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3475 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3476
3477 in = mcx_cmdq_in(cqe);
3478 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3479 in->cmd_op_mod = htobe16(0);
3480
3481 mcx_cmdq_post(sc, cqe, 0);
3482
3483 error = mcx_cmdq_poll(sc, cqe, 1000);
3484 if (error != 0) {
3485 printf(", hca init timeout\n");
3486 return (-1);
3487 }
3488 if (mcx_cmdq_verify(cqe) != 0) {
3489 printf(", hca init command corrupt\n");
3490 return (-1);
3491 }
3492
3493 status = cqe->cq_output_data[0];
3494 if (status != MCX_CQ_STATUS_OK) {
3495 printf(", hca init failed (%x)\n", status);
3496 return (-1);
3497 }
3498
3499 return (0);
3500 }
3501
3502 static int
3503 mcx_set_driver_version(struct mcx_softc *sc)
3504 {
3505 struct mcx_dmamem mxm;
3506 struct mcx_cmdq_entry *cqe;
3507 struct mcx_cmd_set_driver_version_in *in;
3508 struct mcx_cmd_set_driver_version_out *out;
3509 int error;
3510 int token;
3511 uint8_t status;
3512
3513 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3514 token = mcx_cmdq_token(sc);
3515 mcx_cmdq_init(sc, cqe, sizeof(*in) +
3516 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3517
3518 in = mcx_cmdq_in(cqe);
3519 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3520 in->cmd_op_mod = htobe16(0);
3521
3522 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3523 &cqe->cq_input_ptr, token) != 0) {
3524 printf(", unable to allocate set driver version mailboxen\n");
3525 return (-1);
3526 }
3527 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3528 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3529
3530 mcx_cmdq_mboxes_sign(&mxm, 1);
3531 mcx_cmdq_post(sc, cqe, 0);
3532
3533 error = mcx_cmdq_poll(sc, cqe, 1000);
3534 if (error != 0) {
3535 printf(", set driver version timeout\n");
3536 goto free;
3537 }
3538 if (mcx_cmdq_verify(cqe) != 0) {
3539 printf(", set driver version command corrupt\n");
3540 goto free;
3541 }
3542
3543 status = cqe->cq_output_data[0];
3544 if (status != MCX_CQ_STATUS_OK) {
3545 printf(", set driver version failed (%x)\n", status);
3546 error = -1;
3547 goto free;
3548 }
3549
3550 free:
3551 mcx_dmamem_free(sc, &mxm);
3552
3553 return (error);
3554 }
3555
3556 static int
3557 mcx_iff(struct mcx_softc *sc)
3558 {
3559 struct ifnet *ifp = &sc->sc_ec.ec_if;
3560 struct mcx_dmamem mxm;
3561 struct mcx_cmdq_entry *cqe;
3562 struct mcx_cmd_modify_nic_vport_context_in *in;
3563 struct mcx_cmd_modify_nic_vport_context_out *out;
3564 struct mcx_nic_vport_ctx *ctx;
3565 int error;
3566 int token;
3567 int insize;
3568
3569 /* enable or disable the promisc flow */
3570 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3571 if (sc->sc_promisc_flow_enabled == 0) {
3572 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3573 0, NULL);
3574 sc->sc_promisc_flow_enabled = 1;
3575 }
3576 } else if (sc->sc_promisc_flow_enabled != 0) {
3577 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3578 sc->sc_promisc_flow_enabled = 0;
3579 }
3580
3581 /* enable or disable the all-multicast flow */
3582 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3583 if (sc->sc_allmulti_flow_enabled == 0) {
3584 uint8_t mcast[ETHER_ADDR_LEN];
3585
3586 memset(mcast, 0, sizeof(mcast));
3587 mcast[0] = 0x01;
3588 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3589 0, mcast);
3590 sc->sc_allmulti_flow_enabled = 1;
3591 }
3592 } else if (sc->sc_allmulti_flow_enabled != 0) {
3593 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3594 sc->sc_allmulti_flow_enabled = 0;
3595 }
3596
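/* the nic vport context sits 240 bytes into the mailbox input data */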
3597 insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3598
3599 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3600 token = mcx_cmdq_token(sc);
3601 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3602
3603 in = mcx_cmdq_in(cqe);
3604 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3605 in->cmd_op_mod = htobe16(0);
3606 in->cmd_field_select = htobe32(
3607 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3608 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3609
3610 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3611 printf(", unable to allocate modify nic vport context mailboxen\n");
3612 return (-1);
3613 }
3614 ctx = (struct mcx_nic_vport_ctx *)
3615 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3616 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3617 /*
3618 * always leave promisc-all enabled on the vport since we can't give it
3619 * a vlan list, and we're already doing multicast filtering in the flow
3620 * table.
3621 */
3622 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3623
3624 mcx_cmdq_mboxes_sign(&mxm, 1);
3625 mcx_cmdq_post(sc, cqe, 0);
3626
3627 error = mcx_cmdq_poll(sc, cqe, 1000);
3628 if (error != 0) {
3629 printf(", modify nic vport context timeout\n");
3630 goto free;
3631 }
3632 if (mcx_cmdq_verify(cqe) != 0) {
3633 printf(", modify nic vport context command corrupt\n");
3634 goto free;
3635 }
3636
3637 out = mcx_cmdq_out(cqe);
3638 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3639 printf(", modify nic vport context failed (%x, %x)\n",
3640 out->cmd_status, be32toh(out->cmd_syndrome));
3641 error = -1;
3642 goto free;
3643 }
3644
3645 free:
3646 mcx_dmamem_free(sc, &mxm);
3647
3648 return (error);
3649 }
3650
3651 static int
3652 mcx_alloc_uar(struct mcx_softc *sc)
3653 {
3654 struct mcx_cmdq_entry *cqe;
3655 struct mcx_cmd_alloc_uar_in *in;
3656 struct mcx_cmd_alloc_uar_out *out;
3657 int error;
3658
3659 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3660 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3661
3662 in = mcx_cmdq_in(cqe);
3663 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3664 in->cmd_op_mod = htobe16(0);
3665
3666 mcx_cmdq_post(sc, cqe, 0);
3667
3668 error = mcx_cmdq_poll(sc, cqe, 1000);
3669 if (error != 0) {
3670 printf(", alloc uar timeout\n");
3671 return (-1);
3672 }
3673 if (mcx_cmdq_verify(cqe) != 0) {
3674 printf(", alloc uar command corrupt\n");
3675 return (-1);
3676 }
3677
3678 out = mcx_cmdq_out(cqe);
3679 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3680 printf(", alloc uar failed (%x)\n", out->cmd_status);
3681 return (-1);
3682 }
3683
3684 sc->sc_uar = mcx_get_id(out->cmd_uar);
3685
3686 return (0);
3687 }
3688
3689 static int
3690 mcx_create_eq(struct mcx_softc *sc)
3691 {
3692 struct mcx_cmdq_entry *cqe;
3693 struct mcx_dmamem mxm;
3694 struct mcx_cmd_create_eq_in *in;
3695 struct mcx_cmd_create_eq_mb_in *mbin;
3696 struct mcx_cmd_create_eq_out *out;
3697 struct mcx_eq_entry *eqe;
3698 int error;
3699 uint64_t *pas;
3700 int insize, npages, paslen, i, token;
3701
3702 sc->sc_eq_cons = 0;
3703
3704 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3705 MCX_PAGE_SIZE);
3706 paslen = npages * sizeof(*pas);
3707 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3708
3709 if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3710 MCX_PAGE_SIZE) != 0) {
3711 printf(", unable to allocate event queue memory\n");
3712 return (-1);
3713 }
3714
3715 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3716 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3717 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3718 }
3719
3720 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3721 token = mcx_cmdq_token(sc);
3722 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3723
3724 in = mcx_cmdq_in(cqe);
3725 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3726 in->cmd_op_mod = htobe16(0);
3727
3728 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3729 &cqe->cq_input_ptr, token) != 0) {
3730 printf(", unable to allocate create eq mailboxen\n");
3731 return (-1);
3732 }
3733 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3734 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3735 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3736 mbin->cmd_event_bitmask = htobe64(
3737 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3738 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3739 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3740 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3741
3742 /* physical addresses follow the mailbox in data */
3743 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &sc->sc_eq_mem);
3744 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3745 mcx_cmdq_post(sc, cqe, 0);
3746
3747 error = mcx_cmdq_poll(sc, cqe, 1000);
3748 if (error != 0) {
3749 printf(", create eq timeout\n");
3750 goto free;
3751 }
3752 if (mcx_cmdq_verify(cqe) != 0) {
3753 printf(", create eq command corrupt\n");
3754 goto free;
3755 }
3756
3757 out = mcx_cmdq_out(cqe);
3758 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3759 printf(", create eq failed (%x, %x)\n", out->cmd_status,
3760 be32toh(out->cmd_syndrome));
3761 error = -1;
3762 goto free;
3763 }
3764
3765 sc->sc_eqn = mcx_get_id(out->cmd_eqn);
3766 mcx_arm_eq(sc);
3767 free:
3768 mcx_dmamem_free(sc, &mxm);
3769 return (error);
3770 }
3771
3772 static int
3773 mcx_alloc_pd(struct mcx_softc *sc)
3774 {
3775 struct mcx_cmdq_entry *cqe;
3776 struct mcx_cmd_alloc_pd_in *in;
3777 struct mcx_cmd_alloc_pd_out *out;
3778 int error;
3779
3780 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3781 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3782
3783 in = mcx_cmdq_in(cqe);
3784 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3785 in->cmd_op_mod = htobe16(0);
3786
3787 mcx_cmdq_post(sc, cqe, 0);
3788
3789 error = mcx_cmdq_poll(sc, cqe, 1000);
3790 if (error != 0) {
3791 printf(", alloc pd timeout\n");
3792 return (-1);
3793 }
3794 if (mcx_cmdq_verify(cqe) != 0) {
3795 printf(", alloc pd command corrupt\n");
3796 return (-1);
3797 }
3798
3799 out = mcx_cmdq_out(cqe);
3800 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3801 printf(", alloc pd failed (%x)\n", out->cmd_status);
3802 return (-1);
3803 }
3804
3805 sc->sc_pd = mcx_get_id(out->cmd_pd);
3806 return (0);
3807 }
3808
3809 static int
3810 mcx_alloc_tdomain(struct mcx_softc *sc)
3811 {
3812 struct mcx_cmdq_entry *cqe;
3813 struct mcx_cmd_alloc_td_in *in;
3814 struct mcx_cmd_alloc_td_out *out;
3815 int error;
3816
3817 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3818 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3819
3820 in = mcx_cmdq_in(cqe);
3821 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3822 in->cmd_op_mod = htobe16(0);
3823
3824 mcx_cmdq_post(sc, cqe, 0);
3825
3826 error = mcx_cmdq_poll(sc, cqe, 1000);
3827 if (error != 0) {
3828 printf(", alloc transport domain timeout\n");
3829 return (-1);
3830 }
3831 if (mcx_cmdq_verify(cqe) != 0) {
3832 printf(", alloc transport domain command corrupt\n");
3833 return (-1);
3834 }
3835
3836 out = mcx_cmdq_out(cqe);
3837 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3838 printf(", alloc transport domain failed (%x)\n",
3839 out->cmd_status);
3840 return (-1);
3841 }
3842
3843 sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
3844 return (0);
3845 }
3846
3847 static int
3848 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3849 {
3850 struct mcx_dmamem mxm;
3851 struct mcx_cmdq_entry *cqe;
3852 struct mcx_cmd_query_nic_vport_context_in *in;
3853 struct mcx_cmd_query_nic_vport_context_out *out;
3854 struct mcx_nic_vport_ctx *ctx;
3855 uint8_t *addr;
3856 int error, token, i;
3857
3858 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3859 token = mcx_cmdq_token(sc);
3860 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3861
3862 in = mcx_cmdq_in(cqe);
3863 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3864 in->cmd_op_mod = htobe16(0);
3865 in->cmd_allowed_list_type = 0;
3866
3867 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3868 printf(", unable to allocate query nic vport context mailboxen\n");
3869 return (-1);
3870 }
3871 mcx_cmdq_mboxes_sign(&mxm, 1);
3872 mcx_cmdq_post(sc, cqe, 0);
3873
3874 error = mcx_cmdq_poll(sc, cqe, 1000);
3875 if (error != 0) {
3876 printf(", query nic vport context timeout\n");
3877 goto free;
3878 }
3879 if (mcx_cmdq_verify(cqe) != 0) {
3880 printf(", query nic vport context command corrupt\n");
error = -1;
goto free;
3882 }
3883
3884 out = mcx_cmdq_out(cqe);
3885 if (out->cmd_status != MCX_CQ_STATUS_OK) {
printf(", query nic vport context failed (%x, %x)\n",
out->cmd_status, be32toh(out->cmd_syndrome));
3888 error = -1;
3889 goto free;
3890 }
3891
3892 ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3893 addr = (uint8_t *)&ctx->vp_perm_addr;
3894 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3895 enaddr[i] = addr[i + 2];
3896 }
3897 free:
3898 mcx_dmamem_free(sc, &mxm);
3899
3900 return (error);
3901 }
3902
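/*
 * fetch the reserved lkey; it's used as the memory key in rq and sq
 * entries (sc_lkey).
 */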
3903 static int
3904 mcx_query_special_contexts(struct mcx_softc *sc)
3905 {
3906 struct mcx_cmdq_entry *cqe;
3907 struct mcx_cmd_query_special_ctx_in *in;
3908 struct mcx_cmd_query_special_ctx_out *out;
3909 int error;
3910
3911 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3912 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3913
3914 in = mcx_cmdq_in(cqe);
3915 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3916 in->cmd_op_mod = htobe16(0);
3917
3918 mcx_cmdq_post(sc, cqe, 0);
3919
3920 error = mcx_cmdq_poll(sc, cqe, 1000);
3921 if (error != 0) {
3922 printf(", query special contexts timeout\n");
3923 return (-1);
3924 }
3925 if (mcx_cmdq_verify(cqe) != 0) {
3926 printf(", query special contexts command corrupt\n");
3927 return (-1);
3928 }
3929
3930 out = mcx_cmdq_out(cqe);
3931 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3932 printf(", query special contexts failed (%x)\n",
3933 out->cmd_status);
3934 return (-1);
3935 }
3936
3937 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3938 return (0);
3939 }
3940
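/*
 * clamp the requested mtu to the port's maximum and write it back as
 * the admin mtu via the PMTU register; the value actually set is
 * remembered in sc_hardmtu.
 */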
3941 static int
3942 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3943 {
3944 struct mcx_reg_pmtu pmtu;
3945 int error;
3946
3947 /* read max mtu */
3948 memset(&pmtu, 0, sizeof(pmtu));
3949 pmtu.rp_local_port = 1;
3950 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3951 sizeof(pmtu));
3952 if (error != 0) {
3953 printf(", unable to get port MTU\n");
3954 return error;
3955 }
3956
3957 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3958 pmtu.rp_admin_mtu = htobe16(mtu);
3959 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3960 sizeof(pmtu));
3961 if (error != 0) {
3962 printf(", unable to set port MTU\n");
3963 return error;
3964 }
3965
3966 sc->sc_hardmtu = mtu;
3967 return 0;
3968 }
3969
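/*
 * create a completion queue attached to event queue eqn.  the cq
 * entries are initialised with the ownership flag set so they read
 * as invalid until the hardware writes them, each cq gets its own
 * slot in the shared doorbell page, and completion moderation is set
 * from MCX_CQ_MOD_PERIOD/MCX_CQ_MOD_COUNTER.
 */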
3970 static int
3971 mcx_create_cq(struct mcx_softc *sc, int eqn)
3972 {
3973 struct mcx_cmdq_entry *cmde;
3974 struct mcx_cq_entry *cqe;
3975 struct mcx_cq *cq;
3976 struct mcx_dmamem mxm;
3977 struct mcx_cmd_create_cq_in *in;
3978 struct mcx_cmd_create_cq_mb_in *mbin;
3979 struct mcx_cmd_create_cq_out *out;
3980 int error;
3981 uint64_t *pas;
3982 int insize, npages, paslen, i, token;
3983
3984 if (sc->sc_num_cq >= MCX_MAX_CQS) {
3985 printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3986 return (-1);
3987 }
3988 cq = &sc->sc_cq[sc->sc_num_cq];
3989
3990 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3991 MCX_PAGE_SIZE);
3992 paslen = npages * sizeof(*pas);
3993 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3994
3995 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3996 MCX_PAGE_SIZE) != 0) {
3997 printf("%s: unable to allocate completion queue memory\n",
3998 DEVNAME(sc));
3999 return (-1);
4000 }
4001 cqe = MCX_DMA_KVA(&cq->cq_mem);
4002 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4003 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4004 }
4005
4006 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4007 token = mcx_cmdq_token(sc);
4008 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4009
4010 in = mcx_cmdq_in(cmde);
4011 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4012 in->cmd_op_mod = htobe16(0);
4013
4014 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4015 &cmde->cq_input_ptr, token) != 0) {
4016 printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
4017 error = -1;
4018 goto free;
4019 }
4020 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4021 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4022 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
4023 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4024 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4025 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4026 MCX_CQ_MOD_COUNTER);
4027 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4028 MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4029 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4030
4031 /* physical addresses follow the mailbox in data */
4032 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4033 mcx_cmdq_post(sc, cmde, 0);
4034
4035 error = mcx_cmdq_poll(sc, cmde, 1000);
4036 if (error != 0) {
4037 printf("%s: create cq timeout\n", DEVNAME(sc));
4038 goto free;
4039 }
4040 if (mcx_cmdq_verify(cmde) != 0) {
4041 printf("%s: create cq command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4043 }
4044
4045 out = mcx_cmdq_out(cmde);
4046 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4047 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4048 out->cmd_status, be32toh(out->cmd_syndrome));
4049 error = -1;
4050 goto free;
4051 }
4052
4053 cq->cq_n = mcx_get_id(out->cmd_cqn);
4054 cq->cq_cons = 0;
4055 cq->cq_count = 0;
4056 cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4057 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4058 mcx_arm_cq(sc, cq);
4059 sc->sc_num_cq++;
4060
4061 free:
4062 mcx_dmamem_free(sc, &mxm);
4063 return (error);
4064 }
4065
4066 static int
4067 mcx_destroy_cq(struct mcx_softc *sc, int index)
4068 {
4069 struct mcx_cmdq_entry *cqe;
4070 struct mcx_cmd_destroy_cq_in *in;
4071 struct mcx_cmd_destroy_cq_out *out;
4072 int error;
4073 int token;
4074
4075 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4076 token = mcx_cmdq_token(sc);
4077 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4078
4079 in = mcx_cmdq_in(cqe);
4080 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4081 in->cmd_op_mod = htobe16(0);
4082 in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4083
4084 mcx_cmdq_post(sc, cqe, 0);
4085 error = mcx_cmdq_poll(sc, cqe, 1000);
4086 if (error != 0) {
4087 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4088 return error;
4089 }
4090 if (mcx_cmdq_verify(cqe) != 0) {
4091 printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
return -1;
4093 }
4094
4095 out = mcx_cmdq_out(cqe);
4096 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4097 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4098 out->cmd_status, be32toh(out->cmd_syndrome));
4099 return -1;
4100 }
4101
4102 sc->sc_cq[index].cq_n = 0;
4103 mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4104 sc->sc_cq[index].cq_cons = 0;
4105 sc->sc_cq[index].cq_count = 0;
4106 return 0;
4107 }
4108
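/*
 * create the receive queue.  the rq context is written 0x10 bytes
 * into the mailbox data, followed by the physical addresses of the
 * queue pages; the rq number and the rx doorbell location are
 * remembered for the fill path.
 */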
4109 static int
4110 mcx_create_rq(struct mcx_softc *sc, int cqn)
4111 {
4112 struct mcx_cmdq_entry *cqe;
4113 struct mcx_dmamem mxm;
4114 struct mcx_cmd_create_rq_in *in;
4115 struct mcx_cmd_create_rq_out *out;
4116 struct mcx_rq_ctx *mbin;
4117 int error;
4118 uint64_t *pas;
4119 uint8_t *doorbell;
4120 int insize, npages, paslen, token;
4121
4122 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4123 MCX_PAGE_SIZE);
4124 paslen = npages * sizeof(*pas);
4125 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4126
4127 if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4128 MCX_PAGE_SIZE) != 0) {
4129 printf("%s: unable to allocate receive queue memory\n",
4130 DEVNAME(sc));
4131 return (-1);
4132 }
4133
4134 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4135 token = mcx_cmdq_token(sc);
4136 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4137
4138 in = mcx_cmdq_in(cqe);
4139 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4140 in->cmd_op_mod = htobe16(0);
4141
4142 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4143 &cqe->cq_input_ptr, token) != 0) {
4144 printf("%s: unable to allocate create rq mailboxen\n",
4145 DEVNAME(sc));
4146 error = -1;
4147 goto free;
4148 }
mbin = (struct mcx_rq_ctx *)
(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4150 mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4151 mbin->rq_cqn = htobe32(cqn);
4152 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4153 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4154 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4155 MCX_RQ_DOORBELL_OFFSET);
4156 mbin->rq_wq.wq_log_stride = htobe16(4);
4157 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4158
4159 /* physical addresses follow the mailbox in data */
4160 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &sc->sc_rq_mem);
4161 mcx_cmdq_post(sc, cqe, 0);
4162
4163 error = mcx_cmdq_poll(sc, cqe, 1000);
4164 if (error != 0) {
4165 printf("%s: create rq timeout\n", DEVNAME(sc));
4166 goto free;
4167 }
4168 if (mcx_cmdq_verify(cqe) != 0) {
4169 printf("%s: create rq command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4171 }
4172
4173 out = mcx_cmdq_out(cqe);
4174 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4175 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4176 out->cmd_status, be32toh(out->cmd_syndrome));
4177 error = -1;
4178 goto free;
4179 }
4180
4181 sc->sc_rqn = mcx_get_id(out->cmd_rqn);
4182
4183 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4184 sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4185
4186 free:
4187 mcx_dmamem_free(sc, &mxm);
4188 return (error);
4189 }
4190
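/*
 * move the rq from reset to ready state so the hardware starts
 * consuming the buffers posted to it.
 */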
4191 static int
4192 mcx_ready_rq(struct mcx_softc *sc)
4193 {
4194 struct mcx_cmdq_entry *cqe;
4195 struct mcx_dmamem mxm;
4196 struct mcx_cmd_modify_rq_in *in;
4197 struct mcx_cmd_modify_rq_mb_in *mbin;
4198 struct mcx_cmd_modify_rq_out *out;
4199 int error;
4200 int token;
4201
4202 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4203 token = mcx_cmdq_token(sc);
4204 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4205
4206 in = mcx_cmdq_in(cqe);
4207 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4208 in->cmd_op_mod = htobe16(0);
4209 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4210
4211 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4212 printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4213 return (-1);
4214 }
4215 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4216 mbin->cmd_rq_ctx.rq_flags = htobe32(
4217 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4218
4219 mcx_cmdq_mboxes_sign(&mxm, 1);
4220 mcx_cmdq_post(sc, cqe, 0);
4221 error = mcx_cmdq_poll(sc, cqe, 1000);
4222 if (error != 0) {
4223 printf("%s: modify rq timeout\n", DEVNAME(sc));
4224 goto free;
4225 }
4226 if (mcx_cmdq_verify(cqe) != 0) {
4227 printf("%s: modify rq command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4229 }
4230
4231 out = mcx_cmdq_out(cqe);
4232 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4233 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4234 out->cmd_status, be32toh(out->cmd_syndrome));
4235 error = -1;
4236 goto free;
4237 }
4238
4239 free:
4240 mcx_dmamem_free(sc, &mxm);
4241 return (error);
4242 }
4243
4244 static int
4245 mcx_destroy_rq(struct mcx_softc *sc)
4246 {
4247 struct mcx_cmdq_entry *cqe;
4248 struct mcx_cmd_destroy_rq_in *in;
4249 struct mcx_cmd_destroy_rq_out *out;
4250 int error;
4251 int token;
4252
4253 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4254 token = mcx_cmdq_token(sc);
4255 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4256
4257 in = mcx_cmdq_in(cqe);
4258 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4259 in->cmd_op_mod = htobe16(0);
4260 in->cmd_rqn = htobe32(sc->sc_rqn);
4261
4262 mcx_cmdq_post(sc, cqe, 0);
4263 error = mcx_cmdq_poll(sc, cqe, 1000);
4264 if (error != 0) {
4265 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4266 return error;
4267 }
4268 if (mcx_cmdq_verify(cqe) != 0) {
4269 printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
return -1;
4271 }
4272
4273 out = mcx_cmdq_out(cqe);
4274 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4275 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4276 out->cmd_status, be32toh(out->cmd_syndrome));
4277 return -1;
4278 }
4279
4280 sc->sc_rqn = 0;
4281 return 0;
4282 }
4283
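/*
 * create a transport interface receive (tir) object; flow table
 * entries forward packets to this, and it in turn delivers them to
 * the inline rq.
 */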
4284 static int
4285 mcx_create_tir(struct mcx_softc *sc)
4286 {
4287 struct mcx_cmdq_entry *cqe;
4288 struct mcx_dmamem mxm;
4289 struct mcx_cmd_create_tir_in *in;
4290 struct mcx_cmd_create_tir_mb_in *mbin;
4291 struct mcx_cmd_create_tir_out *out;
4292 int error;
4293 int token;
4294
4295 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4296 token = mcx_cmdq_token(sc);
4297 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4298
4299 in = mcx_cmdq_in(cqe);
4300 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4301 in->cmd_op_mod = htobe16(0);
4302
4303 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4304 printf("%s: unable to allocate create tir mailbox\n",
4305 DEVNAME(sc));
4306 return (-1);
4307 }
4308 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4309 /* leave disp_type = 0, so packets get sent to the inline rqn */
4310 mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4311 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4312
4313 mcx_cmdq_post(sc, cqe, 0);
4314 error = mcx_cmdq_poll(sc, cqe, 1000);
4315 if (error != 0) {
4316 printf("%s: create tir timeout\n", DEVNAME(sc));
4317 goto free;
4318 }
4319 if (mcx_cmdq_verify(cqe) != 0) {
4320 printf("%s: create tir command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4322 }
4323
4324 out = mcx_cmdq_out(cqe);
4325 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4326 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4327 out->cmd_status, be32toh(out->cmd_syndrome));
4328 error = -1;
4329 goto free;
4330 }
4331
4332 sc->sc_tirn = mcx_get_id(out->cmd_tirn);
4333 free:
4334 mcx_dmamem_free(sc, &mxm);
4335 return (error);
4336 }
4337
4338 static int
4339 mcx_destroy_tir(struct mcx_softc *sc)
4340 {
4341 struct mcx_cmdq_entry *cqe;
4342 struct mcx_cmd_destroy_tir_in *in;
4343 struct mcx_cmd_destroy_tir_out *out;
4344 int error;
4345 int token;
4346
4347 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4348 token = mcx_cmdq_token(sc);
4349 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4350
4351 in = mcx_cmdq_in(cqe);
4352 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4353 in->cmd_op_mod = htobe16(0);
4354 in->cmd_tirn = htobe32(sc->sc_tirn);
4355
4356 mcx_cmdq_post(sc, cqe, 0);
4357 error = mcx_cmdq_poll(sc, cqe, 1000);
4358 if (error != 0) {
4359 printf("%s: destroy tir timeout\n", DEVNAME(sc));
4360 return error;
4361 }
4362 if (mcx_cmdq_verify(cqe) != 0) {
4363 printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
return -1;
4365 }
4366
4367 out = mcx_cmdq_out(cqe);
4368 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4369 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4370 out->cmd_status, be32toh(out->cmd_syndrome));
4371 return -1;
4372 }
4373
4374 sc->sc_tirn = 0;
4375 return 0;
4376 }
4377
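/*
 * create the send queue, attached to the tis and to completion queue
 * cqn.  the tx doorbell is the second word of the sq doorbell pair,
 * hence the +4.
 */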
4378 static int
4379 mcx_create_sq(struct mcx_softc *sc, int cqn)
4380 {
4381 struct mcx_cmdq_entry *cqe;
4382 struct mcx_dmamem mxm;
4383 struct mcx_cmd_create_sq_in *in;
4384 struct mcx_sq_ctx *mbin;
4385 struct mcx_cmd_create_sq_out *out;
4386 int error;
4387 uint64_t *pas;
4388 uint8_t *doorbell;
4389 int insize, npages, paslen, token;
4390
4391 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4392 MCX_PAGE_SIZE);
4393 paslen = npages * sizeof(*pas);
insize = 0x10 + sizeof(struct mcx_sq_ctx) + paslen;
4395
4396 if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4397 MCX_PAGE_SIZE) != 0) {
4398 printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4399 return (-1);
4400 }
4401
4402 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4403 token = mcx_cmdq_token(sc);
mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4406
4407 in = mcx_cmdq_in(cqe);
4408 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4409 in->cmd_op_mod = htobe16(0);
4410
4411 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4412 &cqe->cq_input_ptr, token) != 0) {
4413 printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4414 error = -1;
4415 goto free;
4416 }
mbin = (struct mcx_sq_ctx *)
(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4418 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4419 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4420 mbin->sq_cqn = htobe32(cqn);
4421 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4422 mbin->sq_tis_num = htobe32(sc->sc_tisn);
4423 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4424 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4425 mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4426 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4427 MCX_SQ_DOORBELL_OFFSET);
4428 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4429 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4430
4431 /* physical addresses follow the mailbox in data */
4432 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &sc->sc_sq_mem);
4433 mcx_cmdq_post(sc, cqe, 0);
4434
4435 error = mcx_cmdq_poll(sc, cqe, 1000);
4436 if (error != 0) {
4437 printf("%s: create sq timeout\n", DEVNAME(sc));
4438 goto free;
4439 }
4440 if (mcx_cmdq_verify(cqe) != 0) {
4441 printf("%s: create sq command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4443 }
4444
4445 out = mcx_cmdq_out(cqe);
4446 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4447 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4448 out->cmd_status, be32toh(out->cmd_syndrome));
4449 error = -1;
4450 goto free;
4451 }
4452
4453 sc->sc_sqn = mcx_get_id(out->cmd_sqn);
4454
4455 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4456 sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4457 free:
4458 mcx_dmamem_free(sc, &mxm);
4459 return (error);
4460 }
4461
4462 static int
4463 mcx_destroy_sq(struct mcx_softc *sc)
4464 {
4465 struct mcx_cmdq_entry *cqe;
4466 struct mcx_cmd_destroy_sq_in *in;
4467 struct mcx_cmd_destroy_sq_out *out;
4468 int error;
4469 int token;
4470
4471 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4472 token = mcx_cmdq_token(sc);
4473 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4474
4475 in = mcx_cmdq_in(cqe);
4476 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4477 in->cmd_op_mod = htobe16(0);
4478 in->cmd_sqn = htobe32(sc->sc_sqn);
4479
4480 mcx_cmdq_post(sc, cqe, 0);
4481 error = mcx_cmdq_poll(sc, cqe, 1000);
4482 if (error != 0) {
4483 printf("%s: destroy sq timeout\n", DEVNAME(sc));
4484 return error;
4485 }
4486 if (mcx_cmdq_verify(cqe) != 0) {
4487 printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
return -1;
4489 }
4490
4491 out = mcx_cmdq_out(cqe);
4492 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4493 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4494 out->cmd_status, be32toh(out->cmd_syndrome));
4495 return -1;
4496 }
4497
4498 sc->sc_sqn = 0;
4499 return 0;
4500 }
4501
4502 static int
4503 mcx_ready_sq(struct mcx_softc *sc)
4504 {
4505 struct mcx_cmdq_entry *cqe;
4506 struct mcx_dmamem mxm;
4507 struct mcx_cmd_modify_sq_in *in;
4508 struct mcx_cmd_modify_sq_mb_in *mbin;
4509 struct mcx_cmd_modify_sq_out *out;
4510 int error;
4511 int token;
4512
4513 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4514 token = mcx_cmdq_token(sc);
4515 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4516
4517 in = mcx_cmdq_in(cqe);
4518 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4519 in->cmd_op_mod = htobe16(0);
4520 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4521
4522 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4523 printf("%s: unable to allocate modify sq mailbox\n",
4524 DEVNAME(sc));
4525 return (-1);
4526 }
4527 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4528 mbin->cmd_sq_ctx.sq_flags = htobe32(
4529 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4530
4531 mcx_cmdq_mboxes_sign(&mxm, 1);
4532 mcx_cmdq_post(sc, cqe, 0);
4533 error = mcx_cmdq_poll(sc, cqe, 1000);
4534 if (error != 0) {
4535 printf("%s: modify sq timeout\n", DEVNAME(sc));
4536 goto free;
4537 }
4538 if (mcx_cmdq_verify(cqe) != 0) {
4539 printf("%s: modify sq command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4541 }
4542
4543 out = mcx_cmdq_out(cqe);
4544 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4545 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4546 out->cmd_status, be32toh(out->cmd_syndrome));
4547 error = -1;
4548 goto free;
4549 }
4550
4551 free:
4552 mcx_dmamem_free(sc, &mxm);
4553 return (error);
4554 }
4555
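/*
 * create a transport interface send (tis) object in sc_tdomain; the
 * sq is attached to it.
 */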
4556 static int
4557 mcx_create_tis(struct mcx_softc *sc)
4558 {
4559 struct mcx_cmdq_entry *cqe;
4560 struct mcx_dmamem mxm;
4561 struct mcx_cmd_create_tis_in *in;
4562 struct mcx_cmd_create_tis_mb_in *mbin;
4563 struct mcx_cmd_create_tis_out *out;
4564 int error;
4565 int token;
4566
4567 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4568 token = mcx_cmdq_token(sc);
4569 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4570
4571 in = mcx_cmdq_in(cqe);
4572 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4573 in->cmd_op_mod = htobe16(0);
4574
4575 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4576 printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4577 return (-1);
4578 }
4579 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4580 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4581
4582 mcx_cmdq_mboxes_sign(&mxm, 1);
4583 mcx_cmdq_post(sc, cqe, 0);
4584 error = mcx_cmdq_poll(sc, cqe, 1000);
4585 if (error != 0) {
4586 printf("%s: create tis timeout\n", DEVNAME(sc));
4587 goto free;
4588 }
4589 if (mcx_cmdq_verify(cqe) != 0) {
4590 printf("%s: create tis command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4592 }
4593
4594 out = mcx_cmdq_out(cqe);
4595 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4596 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4597 out->cmd_status, be32toh(out->cmd_syndrome));
4598 error = -1;
4599 goto free;
4600 }
4601
4602 sc->sc_tisn = mcx_get_id(out->cmd_tisn);
4603 free:
4604 mcx_dmamem_free(sc, &mxm);
4605 return (error);
4606 }
4607
4608 static int
4609 mcx_destroy_tis(struct mcx_softc *sc)
4610 {
4611 struct mcx_cmdq_entry *cqe;
4612 struct mcx_cmd_destroy_tis_in *in;
4613 struct mcx_cmd_destroy_tis_out *out;
4614 int error;
4615 int token;
4616
4617 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4618 token = mcx_cmdq_token(sc);
4619 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4620
4621 in = mcx_cmdq_in(cqe);
4622 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4623 in->cmd_op_mod = htobe16(0);
4624 in->cmd_tisn = htobe32(sc->sc_tisn);
4625
4626 mcx_cmdq_post(sc, cqe, 0);
4627 error = mcx_cmdq_poll(sc, cqe, 1000);
4628 if (error != 0) {
4629 printf("%s: destroy tis timeout\n", DEVNAME(sc));
4630 return error;
4631 }
4632 if (mcx_cmdq_verify(cqe) != 0) {
4633 printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
return -1;
4635 }
4636
4637 out = mcx_cmdq_out(cqe);
4638 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4639 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4640 out->cmd_status, be32toh(out->cmd_syndrome));
4641 return -1;
4642 }
4643
sc->sc_tisn = 0;
4645 return 0;
4646 }
4647
4648 #if 0
4649 static int
4650 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
4651 {
4652 struct mcx_cmdq_entry *cqe;
4653 struct mcx_cmd_alloc_flow_counter_in *in;
4654 struct mcx_cmd_alloc_flow_counter_out *out;
4655 int error;
4656
4657 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4658 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4659
4660 in = mcx_cmdq_in(cqe);
4661 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
4662 in->cmd_op_mod = htobe16(0);
4663
4664 mcx_cmdq_post(sc, cqe, 0);
4665
4666 error = mcx_cmdq_poll(sc, cqe, 1000);
4667 if (error != 0) {
4668 printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
4669 return (-1);
4670 }
4671 if (mcx_cmdq_verify(cqe) != 0) {
4672 printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
4673 return (-1);
4674 }
4675
4676 out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
4677 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4678 printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
4679 out->cmd_status);
4680 return (-1);
4681 }
4682
4683 sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
4684 printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
4685
4686 return (0);
4687 }
4688 #endif
4689
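/*
 * create the receive flow table used for destination mac filtering;
 * it is later installed as the root table and populated with flow
 * groups and entries.
 */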
4690 static int
4691 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4692 {
4693 struct mcx_cmdq_entry *cqe;
4694 struct mcx_dmamem mxm;
4695 struct mcx_cmd_create_flow_table_in *in;
4696 struct mcx_cmd_create_flow_table_mb_in *mbin;
4697 struct mcx_cmd_create_flow_table_out *out;
4698 int error;
4699 int token;
4700
4701 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4702 token = mcx_cmdq_token(sc);
4703 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4704
4705 in = mcx_cmdq_in(cqe);
4706 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4707 in->cmd_op_mod = htobe16(0);
4708
4709 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4710 printf("%s: unable to allocate create flow table mailbox\n",
4711 DEVNAME(sc));
4712 return (-1);
4713 }
4714 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4715 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4716 mbin->cmd_ctx.ft_log_size = log_size;
4717
4718 mcx_cmdq_mboxes_sign(&mxm, 1);
4719 mcx_cmdq_post(sc, cqe, 0);
4720 error = mcx_cmdq_poll(sc, cqe, 1000);
4721 if (error != 0) {
4722 printf("%s: create flow table timeout\n", DEVNAME(sc));
4723 goto free;
4724 }
4725 if (mcx_cmdq_verify(cqe) != 0) {
4726 printf("%s: create flow table command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4728 }
4729
4730 out = mcx_cmdq_out(cqe);
4731 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4732 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4733 out->cmd_status, be32toh(out->cmd_syndrome));
4734 error = -1;
4735 goto free;
4736 }
4737
4738 sc->sc_flow_table_id = mcx_get_id(out->cmd_table_id);
4739 free:
4740 mcx_dmamem_free(sc, &mxm);
4741 return (error);
4742 }
4743
4744 static int
4745 mcx_set_flow_table_root(struct mcx_softc *sc)
4746 {
4747 struct mcx_cmdq_entry *cqe;
4748 struct mcx_dmamem mxm;
4749 struct mcx_cmd_set_flow_table_root_in *in;
4750 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4751 struct mcx_cmd_set_flow_table_root_out *out;
4752 int error;
4753 int token;
4754
4755 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4756 token = mcx_cmdq_token(sc);
4757 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4758
4759 in = mcx_cmdq_in(cqe);
4760 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4761 in->cmd_op_mod = htobe16(0);
4762
4763 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4764 printf("%s: unable to allocate set flow table root mailbox\n",
4765 DEVNAME(sc));
4766 return (-1);
4767 }
4768 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4769 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4770 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4771
4772 mcx_cmdq_mboxes_sign(&mxm, 1);
4773 mcx_cmdq_post(sc, cqe, 0);
4774 error = mcx_cmdq_poll(sc, cqe, 1000);
4775 if (error != 0) {
4776 printf("%s: set flow table root timeout\n", DEVNAME(sc));
4777 goto free;
4778 }
4779 if (mcx_cmdq_verify(cqe) != 0) {
4780 printf("%s: set flow table root command corrupt\n",
4781 DEVNAME(sc));
error = -1;
goto free;
4783 }
4784
4785 out = mcx_cmdq_out(cqe);
4786 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4787 printf("%s: set flow table root failed (%x, %x)\n",
4788 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4789 error = -1;
4790 goto free;
4791 }
4792
4793 free:
4794 mcx_dmamem_free(sc, &mxm);
4795 return (error);
4796 }
4797
4798 static int
4799 mcx_destroy_flow_table(struct mcx_softc *sc)
4800 {
4801 struct mcx_cmdq_entry *cqe;
4802 struct mcx_dmamem mxm;
4803 struct mcx_cmd_destroy_flow_table_in *in;
4804 struct mcx_cmd_destroy_flow_table_mb_in *mb;
4805 struct mcx_cmd_destroy_flow_table_out *out;
4806 int error;
4807 int token;
4808
4809 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4810 token = mcx_cmdq_token(sc);
4811 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4812
4813 in = mcx_cmdq_in(cqe);
4814 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4815 in->cmd_op_mod = htobe16(0);
4816
4817 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4818 printf("%s: unable to allocate destroy flow table mailbox\n",
4819 DEVNAME(sc));
4820 return (-1);
4821 }
4822 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4823 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4824 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4825
4826 mcx_cmdq_mboxes_sign(&mxm, 1);
4827 mcx_cmdq_post(sc, cqe, 0);
4828 error = mcx_cmdq_poll(sc, cqe, 1000);
4829 if (error != 0) {
4830 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4831 goto free;
4832 }
4833 if (mcx_cmdq_verify(cqe) != 0) {
4834 printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4836 }
4837
4838 out = mcx_cmdq_out(cqe);
4839 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4840 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4841 out->cmd_status, be32toh(out->cmd_syndrome));
4842 error = -1;
4843 goto free;
4844 }
4845
4846 sc->sc_flow_table_id = -1;
4847 free:
4848 mcx_dmamem_free(sc, &mxm);
4849 return (error);
4850 }
4851
4852
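/*
 * create a flow group covering entries [start, start + size) of the
 * flow table; entries in a group share the match criteria given
 * here.  the mailbox input spills into a second mailbox, so two are
 * allocated and signed.
 */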
4853 static int
4854 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4855 int match_enable, struct mcx_flow_match *match)
4856 {
4857 struct mcx_cmdq_entry *cqe;
4858 struct mcx_dmamem mxm;
4859 struct mcx_cmd_create_flow_group_in *in;
4860 struct mcx_cmd_create_flow_group_mb_in *mbin;
4861 struct mcx_cmd_create_flow_group_out *out;
4862 int error;
4863 int token;
4864
4865 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4866 token = mcx_cmdq_token(sc);
4867 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4868 token);
4869
4870 in = mcx_cmdq_in(cqe);
4871 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4872 in->cmd_op_mod = htobe16(0);
4873
4874 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4875 != 0) {
4876 printf("%s: unable to allocate create flow group mailbox\n",
4877 DEVNAME(sc));
4878 return (-1);
4879 }
4880 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4881 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4882 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4883 mbin->cmd_start_flow_index = htobe32(start);
4884 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4885
4886 mbin->cmd_match_criteria_enable = match_enable;
4887 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4888
4889 mcx_cmdq_mboxes_sign(&mxm, 2);
4890 mcx_cmdq_post(sc, cqe, 0);
4891 error = mcx_cmdq_poll(sc, cqe, 1000);
4892 if (error != 0) {
4893 printf("%s: create flow group timeout\n", DEVNAME(sc));
4894 goto free;
4895 }
4896 if (mcx_cmdq_verify(cqe) != 0) {
4897 printf("%s: create flow group command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4899 }
4900
4901 out = mcx_cmdq_out(cqe);
4902 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4903 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4904 out->cmd_status, be32toh(out->cmd_syndrome));
4905 error = -1;
4906 goto free;
4907 }
4908
4909 sc->sc_flow_group_id[group] = mcx_get_id(out->cmd_group_id);
4910 sc->sc_flow_group_size[group] = size;
4911 sc->sc_flow_group_start[group] = start;
4912
4913 free:
4914 mcx_dmamem_free(sc, &mxm);
4915 return (error);
4916 }
4917
4918 static int
4919 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4920 {
4921 struct mcx_cmdq_entry *cqe;
4922 struct mcx_dmamem mxm;
4923 struct mcx_cmd_destroy_flow_group_in *in;
4924 struct mcx_cmd_destroy_flow_group_mb_in *mb;
4925 struct mcx_cmd_destroy_flow_group_out *out;
4926 int error;
4927 int token;
4928
4929 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4930 token = mcx_cmdq_token(sc);
4931 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4932
4933 in = mcx_cmdq_in(cqe);
4934 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4935 in->cmd_op_mod = htobe16(0);
4936
4937 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4938 printf("%s: unable to allocate destroy flow group mailbox\n",
4939 DEVNAME(sc));
4940 return (-1);
4941 }
4942 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4943 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4944 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4945 mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4946
4947 mcx_cmdq_mboxes_sign(&mxm, 2);
4948 mcx_cmdq_post(sc, cqe, 0);
4949 error = mcx_cmdq_poll(sc, cqe, 1000);
4950 if (error != 0) {
4951 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4952 goto free;
4953 }
4954 if (mcx_cmdq_verify(cqe) != 0) {
4955 printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
4957 }
4958
4959 out = mcx_cmdq_out(cqe);
4960 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4961 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4962 out->cmd_status, be32toh(out->cmd_syndrome));
4963 error = -1;
4964 goto free;
4965 }
4966
4967 sc->sc_flow_group_id[group] = -1;
4968 sc->sc_flow_group_size[group] = 0;
4969 free:
4970 mcx_dmamem_free(sc, &mxm);
4971 return (error);
4972 }
4973
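/*
 * install a flow table entry that forwards matching packets to the
 * tir.  the destination list lives at a fixed offset in the second
 * mailbox, past the end of the mb_in structure.
 */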
4974 static int
4975 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4976 const uint8_t *macaddr)
4977 {
4978 struct mcx_cmdq_entry *cqe;
4979 struct mcx_dmamem mxm;
4980 struct mcx_cmd_set_flow_table_entry_in *in;
4981 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4982 struct mcx_cmd_set_flow_table_entry_out *out;
4983 uint32_t *dest;
4984 int error;
4985 int token;
4986
4987 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4988 token = mcx_cmdq_token(sc);
4989 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4990 sizeof(*out), token);
4991
4992 in = mcx_cmdq_in(cqe);
4993 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4994 in->cmd_op_mod = htobe16(0);
4995
4996 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4997 != 0) {
4998 printf("%s: unable to allocate set flow table entry mailbox\n",
4999 DEVNAME(sc));
5000 return (-1);
5001 }
5002 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5003 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5004 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5005 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5006 mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
5007
5008 /* flow context ends at offset 0x330, 0x130 into the second mbox */
5009 dest = (uint32_t *)
5010 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5011 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5012 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5013 *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
5014
5015 /* the only thing we match on at the moment is the dest mac address */
5016 if (macaddr != NULL) {
5017 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5018 ETHER_ADDR_LEN);
5019 }
5020
5021 mcx_cmdq_mboxes_sign(&mxm, 2);
5022 mcx_cmdq_post(sc, cqe, 0);
5023 error = mcx_cmdq_poll(sc, cqe, 1000);
5024 if (error != 0) {
5025 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5026 goto free;
5027 }
5028 if (mcx_cmdq_verify(cqe) != 0) {
5029 printf("%s: set flow table entry command corrupt\n",
5030 DEVNAME(sc));
error = -1;
goto free;
5032 }
5033
5034 out = mcx_cmdq_out(cqe);
5035 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5036 printf("%s: set flow table entry failed (%x, %x)\n",
5037 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5038 error = -1;
5039 goto free;
5040 }
5041
5042 free:
5043 mcx_dmamem_free(sc, &mxm);
5044 return (error);
5045 }
5046
5047 static int
5048 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5049 {
5050 struct mcx_cmdq_entry *cqe;
5051 struct mcx_dmamem mxm;
5052 struct mcx_cmd_delete_flow_table_entry_in *in;
5053 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5054 struct mcx_cmd_delete_flow_table_entry_out *out;
5055 int error;
5056 int token;
5057
5058 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5059 token = mcx_cmdq_token(sc);
5060 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5061 token);
5062
5063 in = mcx_cmdq_in(cqe);
5064 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5065 in->cmd_op_mod = htobe16(0);
5066
5067 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5068 printf("%s: unable to allocate delete flow table entry mailbox\n",
5069 DEVNAME(sc));
5070 return (-1);
5071 }
5072 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5073 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5074 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5075 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5076
5077 mcx_cmdq_mboxes_sign(&mxm, 2);
5078 mcx_cmdq_post(sc, cqe, 0);
5079 error = mcx_cmdq_poll(sc, cqe, 1000);
5080 if (error != 0) {
5081 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5082 goto free;
5083 }
5084 if (mcx_cmdq_verify(cqe) != 0) {
5085 printf("%s: delete flow table entry command corrupt\n",
5086 DEVNAME(sc));
error = -1;
goto free;
5088 }
5089
5090 out = mcx_cmdq_out(cqe);
5091 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5092 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5093 DEVNAME(sc), group, index, out->cmd_status,
5094 be32toh(out->cmd_syndrome));
5095 error = -1;
5096 goto free;
5097 }
5098
5099 free:
5100 mcx_dmamem_free(sc, &mxm);
5101 return (error);
5102 }
5103
5104 #if 0
5105 int
5106 mcx_dump_flow_table(struct mcx_softc *sc)
5107 {
5108 struct mcx_dmamem mxm;
5109 struct mcx_cmdq_entry *cqe;
5110 struct mcx_cmd_query_flow_table_in *in;
5111 struct mcx_cmd_query_flow_table_mb_in *mbin;
5112 struct mcx_cmd_query_flow_table_out *out;
5113 struct mcx_cmd_query_flow_table_mb_out *mbout;
5114 uint8_t token = mcx_cmdq_token(sc);
5115 int error;
5116 int i;
5117 uint8_t *dump;
5118
5119 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5120 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5121 sizeof(*out) + sizeof(*mbout) + 16, token);
5122
5123 in = mcx_cmdq_in(cqe);
5124 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
5125 in->cmd_op_mod = htobe16(0);
5126
5127 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5128 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
5129 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5130 &cqe->cq_output_ptr, token) != 0) {
5131 printf(", unable to allocate query flow table mailboxes\n");
5132 return (-1);
5133 }
5134 cqe->cq_input_ptr = cqe->cq_output_ptr;
5135
5136 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5137 mbin->cmd_table_type = 0;
5138 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5139
5140 mcx_cmdq_mboxes_sign(&mxm, 1);
5141
5142 mcx_cmdq_post(sc, cqe, 0);
5143 error = mcx_cmdq_poll(sc, cqe, 1000);
5144 if (error != 0) {
5145 printf("%s: query flow table timeout\n", DEVNAME(sc));
5146 goto free;
5147 }
5148 error = mcx_cmdq_verify(cqe);
5149 if (error != 0) {
5150 printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
5151 goto free;
5152 }
5153
5154 out = mcx_cmdq_out(cqe);
5155 switch (out->cmd_status) {
5156 case MCX_CQ_STATUS_OK:
5157 break;
5158 default:
5159 printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
5160 out->cmd_status, be32toh(out->cmd_syndrome));
5161 error = -1;
5162 goto free;
5163 }
5164
5165 mbout = (struct mcx_cmd_query_flow_table_mb_out *)
5166 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5167 dump = (uint8_t *)mbout + 8;
5168 for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
5169 printf("%.2x ", dump[i]);
5170 if (i % 16 == 15)
5171 printf("\n");
5172 }
5173 free:
5174 mcx_cq_mboxes_free(sc, &mxm);
5175 return (error);
5176 }
5177 int
5178 mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
5179 {
5180 struct mcx_dmamem mxm;
5181 struct mcx_cmdq_entry *cqe;
5182 struct mcx_cmd_query_flow_table_entry_in *in;
5183 struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
5184 struct mcx_cmd_query_flow_table_entry_out *out;
5185 struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
5186 uint8_t token = mcx_cmdq_token(sc);
5187 int error;
5188 int i;
5189 uint8_t *dump;
5190
5191 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5192 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5193 sizeof(*out) + sizeof(*mbout) + 16, token);
5194
5195 in = mcx_cmdq_in(cqe);
5196 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
5197 in->cmd_op_mod = htobe16(0);
5198
5199 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5200 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5201 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5202 &cqe->cq_output_ptr, token) != 0) {
5203 printf(", unable to allocate query flow table entry mailboxes\n");
5204 return (-1);
5205 }
5206 cqe->cq_input_ptr = cqe->cq_output_ptr;
5207
5208 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5209 mbin->cmd_table_type = 0;
5210 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5211 mbin->cmd_flow_index = htobe32(index);
5212
5213 mcx_cmdq_mboxes_sign(&mxm, 1);
5214
5215 mcx_cmdq_post(sc, cqe, 0);
5216 error = mcx_cmdq_poll(sc, cqe, 1000);
5217 if (error != 0) {
5218 printf("%s: query flow table entry timeout\n", DEVNAME(sc));
5219 goto free;
5220 }
5221 error = mcx_cmdq_verify(cqe);
5222 if (error != 0) {
5223 printf("%s: query flow table entry reply corrupt\n",
5224 DEVNAME(sc));
5225 goto free;
5226 }
5227
5228 out = mcx_cmdq_out(cqe);
5229 switch (out->cmd_status) {
5230 case MCX_CQ_STATUS_OK:
5231 break;
5232 default:
5233 printf("%s: query flow table entry failed (%x/%x)\n",
5234 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5235 error = -1;
5236 goto free;
5237 }
5238
5239 mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
5240 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5241 dump = (uint8_t *)mbout;
5242 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5243 printf("%.2x ", dump[i]);
5244 if (i % 16 == 15)
5245 printf("\n");
5246 }
5247
5248 free:
5249 mcx_cq_mboxes_free(sc, &mxm);
5250 return (error);
5251 }
5252
5253 int
mcx_dump_flow_group(struct mcx_softc *sc, int group)
5255 {
5256 struct mcx_dmamem mxm;
5257 struct mcx_cmdq_entry *cqe;
5258 struct mcx_cmd_query_flow_group_in *in;
5259 struct mcx_cmd_query_flow_group_mb_in *mbin;
5260 struct mcx_cmd_query_flow_group_out *out;
5261 struct mcx_cmd_query_flow_group_mb_out *mbout;
5262 uint8_t token = mcx_cmdq_token(sc);
5263 int error;
5264 int i;
5265 uint8_t *dump;
5266
5267 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5268 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5269 sizeof(*out) + sizeof(*mbout) + 16, token);
5270
5271 in = mcx_cmdq_in(cqe);
5272 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
5273 in->cmd_op_mod = htobe16(0);
5274
5275 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5276 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5277 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5278 &cqe->cq_output_ptr, token) != 0) {
5279 printf(", unable to allocate query flow group mailboxes\n");
5280 return (-1);
5281 }
5282 cqe->cq_input_ptr = cqe->cq_output_ptr;
5283
5284 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5285 mbin->cmd_table_type = 0;
5286 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
mbin->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
5288
5289 mcx_cmdq_mboxes_sign(&mxm, 1);
5290
5291 mcx_cmdq_post(sc, cqe, 0);
5292 error = mcx_cmdq_poll(sc, cqe, 1000);
5293 if (error != 0) {
5294 printf("%s: query flow group timeout\n", DEVNAME(sc));
5295 goto free;
5296 }
5297 error = mcx_cmdq_verify(cqe);
5298 if (error != 0) {
5299 printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
5300 goto free;
5301 }
5302
5303 out = mcx_cmdq_out(cqe);
5304 switch (out->cmd_status) {
5305 case MCX_CQ_STATUS_OK:
5306 break;
5307 default:
5308 printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
5309 out->cmd_status, be32toh(out->cmd_syndrome));
5310 error = -1;
5311 goto free;
5312 }
5313
5314 mbout = (struct mcx_cmd_query_flow_group_mb_out *)
5315 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5316 dump = (uint8_t *)mbout;
5317 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5318 printf("%.2x ", dump[i]);
5319 if (i % 16 == 15)
5320 printf("\n");
5321 }
5322 dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
5323 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5324 printf("%.2x ", dump[i]);
5325 if (i % 16 == 15)
5326 printf("\n");
5327 }
5328
5329 free:
5330 mcx_cq_mboxes_free(sc, &mxm);
5331 return (error);
5332 }
5333
5334 int
5335 mcx_dump_rq(struct mcx_softc *sc)
5336 {
5337 struct mcx_dmamem mxm;
5338 struct mcx_cmdq_entry *cqe;
5339 struct mcx_cmd_query_rq_in *in;
5340 struct mcx_cmd_query_rq_out *out;
5341 struct mcx_cmd_query_rq_mb_out *mbout;
5342 uint8_t token = mcx_cmdq_token(sc);
5343 int error;
5344
5345 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5346 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5347 token);
5348
5349 in = mcx_cmdq_in(cqe);
5350 in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5351 in->cmd_op_mod = htobe16(0);
5352 in->cmd_rqn = htobe32(sc->sc_rqn);
5353
5354 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5355 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5356 &cqe->cq_output_ptr, token) != 0) {
printf(", unable to allocate query rq mailboxes\n");
5358 return (-1);
5359 }
5360
5361 mcx_cmdq_mboxes_sign(&mxm, 1);
5362
5363 mcx_cmdq_post(sc, cqe, 0);
5364 error = mcx_cmdq_poll(sc, cqe, 1000);
5365 if (error != 0) {
5366 printf("%s: query rq timeout\n", DEVNAME(sc));
5367 goto free;
5368 }
5369 error = mcx_cmdq_verify(cqe);
5370 if (error != 0) {
5371 printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5372 goto free;
5373 }
5374
5375 out = mcx_cmdq_out(cqe);
5376 switch (out->cmd_status) {
5377 case MCX_CQ_STATUS_OK:
5378 break;
5379 default:
5380 printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5381 out->cmd_status, be32toh(out->cmd_syndrome));
5382 error = -1;
5383 goto free;
5384 }
5385
5386 mbout = (struct mcx_cmd_query_rq_mb_out *)
5387 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5388 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5389 DEVNAME(sc),
5390 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5391 be32toh(mbout->cmd_ctx.rq_user_index),
5392 be32toh(mbout->cmd_ctx.rq_cqn),
5393 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5394 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5395 mbout->cmd_ctx.rq_wq.wq_log_size,
5396 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5397 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5398
5399 free:
5400 mcx_cq_mboxes_free(sc, &mxm);
5401 return (error);
5402 }
5403
5404 int
5405 mcx_dump_sq(struct mcx_softc *sc)
5406 {
5407 struct mcx_dmamem mxm;
5408 struct mcx_cmdq_entry *cqe;
5409 struct mcx_cmd_query_sq_in *in;
5410 struct mcx_cmd_query_sq_out *out;
5411 struct mcx_cmd_query_sq_mb_out *mbout;
5412 uint8_t token = mcx_cmdq_token(sc);
5413 int error;
5414 int i;
5415 uint8_t *dump;
5416
5417 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5418 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5419 token);
5420
5421 in = mcx_cmdq_in(cqe);
5422 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
5423 in->cmd_op_mod = htobe16(0);
5424 in->cmd_sqn = htobe32(sc->sc_sqn);
5425
5426 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5427 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5428 &cqe->cq_output_ptr, token) != 0) {
5429 printf(", unable to allocate query sq mailboxes\n");
5430 return (-1);
5431 }
5432
5433 mcx_cmdq_mboxes_sign(&mxm, 1);
5434
5435 mcx_cmdq_post(sc, cqe, 0);
5436 error = mcx_cmdq_poll(sc, cqe, 1000);
5437 if (error != 0) {
5438 printf("%s: query sq timeout\n", DEVNAME(sc));
5439 goto free;
5440 }
5441 error = mcx_cmdq_verify(cqe);
5442 if (error != 0) {
5443 printf("%s: query sq reply corrupt\n", DEVNAME(sc));
5444 goto free;
5445 }
5446
5447 out = mcx_cmdq_out(cqe);
5448 switch (out->cmd_status) {
5449 case MCX_CQ_STATUS_OK:
5450 break;
5451 default:
5452 printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
5453 out->cmd_status, be32toh(out->cmd_syndrome));
5454 error = -1;
5455 goto free;
5456 }
5457
5458 mbout = (struct mcx_cmd_query_sq_mb_out *)
5459 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5460 /*
5461 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5462 DEVNAME(sc),
5463 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5464 be32toh(mbout->cmd_ctx.rq_user_index),
5465 be32toh(mbout->cmd_ctx.rq_cqn),
5466 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5467 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5468 mbout->cmd_ctx.rq_wq.wq_log_size,
5469 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5470 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5471 */
5472 dump = (uint8_t *)mbout;
5473 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5474 printf("%.2x ", dump[i]);
5475 if (i % 16 == 15)
5476 printf("\n");
5477 }
5478
5479 free:
5480 mcx_cq_mboxes_free(sc, &mxm);
5481 return (error);
5482 }
5483
5484 static int
5485 mcx_dump_counters(struct mcx_softc *sc)
5486 {
5487 struct mcx_dmamem mxm;
5488 struct mcx_cmdq_entry *cqe;
5489 struct mcx_cmd_query_vport_counters_in *in;
5490 struct mcx_cmd_query_vport_counters_mb_in *mbin;
5491 struct mcx_cmd_query_vport_counters_out *out;
5492 struct mcx_nic_vport_counters *counters;
5493 int error, token;
5494
5495 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5496 token = mcx_cmdq_token(sc);
5497 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5498 sizeof(*out) + sizeof(*counters), token);
5499
5500 in = mcx_cmdq_in(cqe);
5501 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5502 in->cmd_op_mod = htobe16(0);
5503
5504 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5505 printf(", unable to allocate query nic vport counters mailboxen\n");
5506 return (-1);
5507 }
5508 cqe->cq_input_ptr = cqe->cq_output_ptr;
5509
5510 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5511 mbin->cmd_clear = 0x80;
5512
5513 mcx_cmdq_mboxes_sign(&mxm, 1);
5514 mcx_cmdq_post(sc, cqe, 0);
5515
5516 error = mcx_cmdq_poll(sc, cqe, 1000);
5517 if (error != 0) {
5518 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5519 goto free;
5520 }
5521 if (mcx_cmdq_verify(cqe) != 0) {
5522 printf("%s: query nic vport counters command corrupt\n",
5523 DEVNAME(sc));
error = -1;
goto free;
5525 }
5526
5527 out = mcx_cmdq_out(cqe);
5528 if (out->cmd_status != MCX_CQ_STATUS_OK) {
printf("%s: query nic vport counters failed (%x, %x)\n",
DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5531 error = -1;
5532 goto free;
5533 }
5534
5535 counters = (struct mcx_nic_vport_counters *)
5536 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5537 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5538 counters->rx_ucast.packets + counters->tx_ucast.packets +
5539 counters->rx_err.packets + counters->tx_err.packets)
5540 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5541 DEVNAME(sc),
5542 be64toh(counters->tx_err.packets),
5543 be64toh(counters->rx_err.packets),
5544 be64toh(counters->tx_ucast.packets),
5545 be64toh(counters->rx_ucast.packets),
5546 be64toh(counters->tx_bcast.packets),
5547 be64toh(counters->rx_bcast.packets));
5548 free:
5549 mcx_dmamem_free(sc, &mxm);
5550
5551 return (error);
5552 }
5553
5554 static int
5555 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5556 {
5557 struct mcx_dmamem mxm;
5558 struct mcx_cmdq_entry *cqe;
5559 struct mcx_cmd_query_flow_counter_in *in;
5560 struct mcx_cmd_query_flow_counter_mb_in *mbin;
5561 struct mcx_cmd_query_flow_counter_out *out;
5562 struct mcx_counter *counters;
5563 int error, token;
5564
5565 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5566 token = mcx_cmdq_token(sc);
5567 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5568 sizeof(*counters), token);
5569
5570 in = mcx_cmdq_in(cqe);
5571 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5572 in->cmd_op_mod = htobe16(0);
5573
5574 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5575 printf(", unable to allocate query flow counter mailboxen\n");
5576 return (-1);
5577 }
5578 cqe->cq_input_ptr = cqe->cq_output_ptr;
5579 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5580 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5581 mbin->cmd_clear = 0x80;
5582
5583 mcx_cmdq_mboxes_sign(&mxm, 1);
5584 mcx_cmdq_post(sc, cqe, 0);
5585
5586 error = mcx_cmdq_poll(sc, cqe, 1000);
5587 if (error != 0) {
5588 printf("%s: query flow counter timeout\n", DEVNAME(sc));
5589 goto free;
5590 }
5591 if (mcx_cmdq_verify(cqe) != 0) {
5592 printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
error = -1;
goto free;
5594 }
5595
5596 out = mcx_cmdq_out(cqe);
5597 if (out->cmd_status != MCX_CQ_STATUS_OK) {
printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
out->cmd_status, be32toh(out->cmd_syndrome));
5600 error = -1;
5601 goto free;
5602 }
5603
5604 counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5605 if (counters->packets)
5606 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5607 be64toh(counters->packets));
5608 free:
5609 mcx_dmamem_free(sc, &mxm);
5610
5611 return (error);
5612 }
5613
5614 #endif
5615
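/*
 * attach mbuf clusters to up to nslots rx slots, write the matching
 * rq entries, and ring the rx doorbell with the new producer
 * counter.  returns the number of slots left unfilled.
 */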
5616 static int
5617 mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
5618 uint *prod, int bufsize, uint nslots)
5619 {
5620 struct mcx_rq_entry *rqe;
5621 struct mcx_slot *ms;
5622 struct mbuf *m;
5623 uint slot, p, fills;
5624
5625 p = *prod;
5626 slot = (p % (1 << MCX_LOG_RQ_SIZE));
5627 rqe = ring;
5628 for (fills = 0; fills < nslots; fills++) {
5629 ms = &slots[slot];
5630 #if 0
5631 m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
5632 if (m == NULL)
5633 break;
5634 #else
5635 m = NULL;
5636 MGETHDR(m, M_DONTWAIT, MT_DATA);
5637 if (m == NULL)
5638 break;
5639
5640 MCLGET(m, M_DONTWAIT);
5641 if ((m->m_flags & M_EXT) == 0) {
5642 m_freem(m);
5643 break;
5644 }
5645 #endif
5646
5647 m->m_data += ETHER_ALIGN;
5648 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
5649 if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
5650 BUS_DMA_NOWAIT) != 0) {
5651 m_freem(m);
5652 break;
5653 }
bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5655 ms->ms_m = m;
5656
5657 rqe[slot].rqe_byte_count = htobe32(m->m_len);
5658 rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
5659 rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);
5660
5661 p++;
5662 slot++;
5663 if (slot == (1 << MCX_LOG_RQ_SIZE))
5664 slot = 0;
5665 }
5666
5667 if (fills != 0) {
5668 *sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
5669 /* barrier? */
5670 }
5671
5672 *prod = p;
5673
5674 return (nslots - fills);
5675 }
5676
5677 static int
5678 mcx_rx_fill(struct mcx_softc *sc)
5679 {
5680 u_int slots;
5681
5682 slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5683 if (slots == 0)
5684 return (1);
5685
5686 slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5687 sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5688 mcx_rxr_put(&sc->sc_rxr, slots);
5689 return (0);
5690 }
5691
5692 void
5693 mcx_refill(void *xsc)
5694 {
5695 struct mcx_softc *sc = xsc;
5696
5697 mcx_rx_fill(sc);
5698
5699 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5700 callout_schedule(&sc->sc_rx_refill, 1);
5701 }
5702
5703 void
5704 mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
5705 {
5706 struct mcx_slot *ms;
5707 bus_dmamap_t map;
5708 int slot, slots;
5709
5710 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
5711
5712 ms = &sc->sc_tx_slots[slot];
5713 map = ms->ms_map;
5714 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
5715 BUS_DMASYNC_POSTWRITE);
5716
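/*
 * a packet takes one sq entry (slot) for its ctrl and eth segments
 * plus the first data segment; each further slot carries
 * MCX_SQ_SEGS_PER_SLOT data segments, so the remaining dm_nsegs - 1
 * segments need howmany(dm_nsegs - 1, 4), ie (dm_nsegs + 2) / 4,
 * extra slots.
 */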
5717 slots = 1;
5718 if (map->dm_nsegs > 1)
5719 slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
5720
5721 (*txfree) += slots;
5722 bus_dmamap_unload(sc->sc_dmat, map);
5723 m_freem(ms->ms_m);
5724 ms->ms_m = NULL;
5725 }
5726
5727 static uint64_t
5728 mcx_uptime(void)
5729 {
5730 struct timespec ts;
5731
5732 nanouptime(&ts);
5733
5734 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5735 }
5736
5737 static void
5738 mcx_calibrate_first(struct mcx_softc *sc)
5739 {
5740 struct mcx_calibration *c = &sc->sc_calibration[0];
5741
5742 sc->sc_calibration_gen = 0;
5743
5744 c->c_ubase = mcx_uptime();
5745 c->c_tbase = mcx_timer(sc);
5746 c->c_tdiff = 0;
5747
5748 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5749 }
5750
5751 #define MCX_TIMESTAMP_SHIFT 10
5752
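/*
 * timestamp calibration: two records are kept and written
 * alternately, selected by sc_calibration_gen.  the
 * membar_producer() below pairs with the membar_consumer() in
 * mcx_process_cq() so readers never see a half-updated record.
 */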
5753 static void
5754 mcx_calibrate(void *arg)
5755 {
5756 struct mcx_softc *sc = arg;
5757 struct mcx_calibration *nc, *pc;
5758 unsigned int gen;
5759
5760 if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
5761 return;
5762
5763 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
5764
5765 gen = sc->sc_calibration_gen;
5766 pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5767 gen++;
5768 nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5769
5770 nc->c_uptime = pc->c_ubase;
5771 nc->c_timestamp = pc->c_tbase;
5772
5773 nc->c_ubase = mcx_uptime();
5774 nc->c_tbase = mcx_timer(sc);
5775
5776 nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
5777 nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;
5778
5779 membar_producer();
5780 sc->sc_calibration_gen = gen;
5781 }
5782
5783 static int
5784 mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
5785 struct mcx_mbufq *mq, const struct mcx_calibration *c)
5786 {
5787 struct mcx_slot *ms;
5788 struct mbuf *m;
5789 int slot;
5790
5791 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
5792
5793 ms = &sc->sc_rx_slots[slot];
5794 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
5795 BUS_DMASYNC_POSTREAD);
5796 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5797
5798 m = ms->ms_m;
5799 ms->ms_m = NULL;
5800
5801 m_set_rcvif(m, &sc->sc_ec.ec_if);
5802 m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
5803
5804 #if 0
5805 if (cqe->cq_rx_hash_type) {
5806 m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
5807 be32toh(cqe->cq_rx_hash);
5808 }
5809 #endif
5810
5811 #if 0
5812 if (c->c_tdiff) {
5813 uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
5814 t *= c->c_udiff;
5815 t /= c->c_tdiff;
5816
5817 m->m_pkthdr.ph_timestamp = c->c_uptime + t;
5818 SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
5819 }
5820 #endif
5821
5822 MBUFQ_ENQUEUE(mq, m);
5823
5824 return (1);
5825 }
5826
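/*
 * The ownership bit records which lap around the ring an entry was
 * written on: hardware inverts the bit each time the queue wraps, so
 * software expects (cq_cons >> MCX_LOG_CQ_SIZE) & 1 and a stale entry
 * from the previous lap fails the comparison.  A worked example,
 * assuming MCX_LOG_CQ_SIZE is 12 as defined above:
 */
#if 0
	/* cq_cons = 4097: slot 1 on the second lap */
	int next = 4097 % (1 << MCX_LOG_CQ_SIZE);	/* == 1 */
	int owner = (4097 >> MCX_LOG_CQ_SIZE) & 1;	/* == 1 */
#endif
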
5827 static struct mcx_cq_entry *
5828 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5829 {
5830 struct mcx_cq_entry *cqe;
5831 int next;
5832
5833 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5834 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5835
5836 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5837 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5838 return (&cqe[next]);
5839 }
5840
5841 return (NULL);
5842 }
5843
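/*
 * Re-arm the completion queue: the consumer index and arm command are
 * recorded in the CQ doorbell record in host memory, and the same
 * value is then pushed through the UAR doorbell register so the
 * device will raise the next completion event.
 */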
5844 static void
5845 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5846 {
5847 bus_size_t offset;
5848 uint32_t val;
5849 uint64_t uval;
5850
5851 /* different uar per cq? */
5852 offset = (MCX_PAGE_SIZE * sc->sc_uar);
5853 val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
5854 val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5855
5856 cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5857 cq->cq_doorbell[1] = htobe32(val);
5858
5859 uval = val;
5860 uval <<= 32;
5861 uval |= cq->cq_n;
5862 bus_space_write_8(sc->sc_memt, sc->sc_memh,
5863 offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
5864 mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
5865 BUS_SPACE_BARRIER_WRITE);
5866 }
5867
5868 void
5869 mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5870 {
5871 struct ifnet *ifp = &sc->sc_ec.ec_if;
5872 const struct mcx_calibration *c;
5873 unsigned int gen;
5874 struct mcx_cq_entry *cqe;
5875 struct mcx_mbufq mq;
5876 struct mbuf *m;
5877 int rxfree, txfree;
5878
5879 MBUFQ_INIT(&mq);
5880
5881 gen = sc->sc_calibration_gen;
5882 membar_consumer();
5883 c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5884
5885 rxfree = 0;
5886 txfree = 0;
5887 while ((cqe = mcx_next_cq_entry(sc, cq))) {
5888 uint8_t opcode;
5889 opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
5890 switch (opcode) {
5891 case MCX_CQ_ENTRY_OPCODE_REQ:
5892 mcx_process_txeof(sc, cqe, &txfree);
5893 break;
5894 case MCX_CQ_ENTRY_OPCODE_SEND:
5895 rxfree += mcx_process_rx(sc, cqe, &mq, c);
5896 break;
5897 case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
5898 case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
5899 /* uint8_t *cqp = (uint8_t *)cqe; */
5900 /* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
5901 break;
5902
5903 default:
5904 /* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
5905 break;
5906 }
5907
5908 cq->cq_cons++;
5909 }
5910
5911 cq->cq_count++;
5912 mcx_arm_cq(sc, cq);
5913
5914 if (rxfree > 0) {
5915 mcx_rxr_put(&sc->sc_rxr, rxfree);
5916 while (MBUFQ_FIRST(&mq) != NULL) {
5917 MBUFQ_DEQUEUE(&mq, m);
5918 if_percpuq_enqueue(ifp->if_percpuq, m);
5919 }
5920
5921 mcx_rx_fill(sc);
5922
5923 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5924 callout_schedule(&sc->sc_rx_refill, 1);
5925 }
5926 if (txfree > 0) {
5927 sc->sc_tx_cons += txfree;
5928 if_schedule_deferred_start(ifp);
5929 }
5930 }
5931
5932 static void
5933 mcx_arm_eq(struct mcx_softc *sc)
5934 {
5935 bus_size_t offset;
5936 uint32_t val;
5937
5938 offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5939 val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5940
5941 mcx_wr(sc, offset, val);
5942 	/* XXX bus space write barrier here, as mcx_arm_cq() does? */
5943 }
5944
5945 static struct mcx_eq_entry *
5946 mcx_next_eq_entry(struct mcx_softc *sc)
5947 {
5948 struct mcx_eq_entry *eqe;
5949 int next;
5950
5951 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5952 next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5953 	if ((eqe[next].eq_owner & 1) ==
	    ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5954 sc->sc_eq_cons++;
5955 return (&eqe[next]);
5956 }
5957 return (NULL);
5958 }
5959
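/*
 * Interrupt handler: drain the event queue, dispatch completion
 * events to the matching completion queue by number, then re-arm the
 * EQ.  mcx_next_eq_entry() advances the consumer index as entries
 * are claimed.
 */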
5960 int
5961 mcx_intr(void *xsc)
5962 {
5963 struct mcx_softc *sc = (struct mcx_softc *)xsc;
5964 struct mcx_eq_entry *eqe;
5965 int i, cq;
5966
5967 while ((eqe = mcx_next_eq_entry(sc))) {
5968 switch (eqe->eq_event_type) {
5969 case MCX_EVENT_TYPE_COMPLETION:
5970 cq = be32toh(eqe->eq_event_data[6]);
5971 for (i = 0; i < sc->sc_num_cq; i++) {
5972 if (sc->sc_cq[i].cq_n == cq) {
5973 mcx_process_cq(sc, &sc->sc_cq[i]);
5974 break;
5975 }
5976 }
5977 break;
5978
5979 case MCX_EVENT_TYPE_LAST_WQE:
5980 /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5981 break;
5982
5983 case MCX_EVENT_TYPE_CQ_ERROR:
5984 /* printf("%s: cq error\n", DEVNAME(sc)); */
5985 break;
5986
5987 case MCX_EVENT_TYPE_CMD_COMPLETION:
5988 /* wakeup probably */
5989 break;
5990
5991 case MCX_EVENT_TYPE_PORT_CHANGE:
5992 workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
5993 break;
5994
5995 default:
5996 /* printf("%s: something happened\n", DEVNAME(sc)); */
5997 break;
5998 }
5999 }
6000 mcx_arm_eq(sc);
6001 return (1);
6002 }
6003
6004 static void
6005 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
6006 int total)
6007 {
6008 struct mcx_slot *ms;
6009
6010 int i = allocated;
6011 while (i-- > 0) {
6012 ms = &slots[i];
6013 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
6014 if (ms->ms_m != NULL)
6015 m_freem(ms->ms_m);
6016 }
6017 kmem_free(slots, total * sizeof(*ms));
6018 }
6019
6020 static int
6021 mcx_init(struct ifnet *ifp)
6022 {
6023 struct mcx_softc *sc = ifp->if_softc;
6024 struct mcx_slot *ms;
6025 int i, start;
6026 struct mcx_flow_match match_crit;
6027
6028 if (ISSET(ifp->if_flags, IFF_RUNNING))
6029 mcx_stop(ifp, 0);
6030
6031 sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6032 KM_SLEEP);
6033
6034 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6035 ms = &sc->sc_rx_slots[i];
6036 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6037 sc->sc_hardmtu, 0,
6038 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6039 &ms->ms_map) != 0) {
6040 printf("%s: failed to allocate rx dma maps\n",
6041 DEVNAME(sc));
6042 goto destroy_rx_slots;
6043 }
6044 }
6045
6046 sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6047 KM_SLEEP);
6048
6049 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6050 ms = &sc->sc_tx_slots[i];
6051 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6052 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6053 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6054 &ms->ms_map) != 0) {
6055 printf("%s: failed to allocate tx dma maps\n",
6056 DEVNAME(sc));
6057 goto destroy_tx_slots;
6058 }
6059 }
6060
6061 if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6062 goto down;
6063
6064 /* send queue */
6065 if (mcx_create_tis(sc) != 0)
6066 goto down;
6067
6068 if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6069 goto down;
6070
6071 /* receive queue */
6072 if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6073 goto down;
6074
6075 if (mcx_create_tir(sc) != 0)
6076 goto down;
6077
6078 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6079 goto down;
6080
6081 /* promisc flow group */
6082 start = 0;
6083 memset(&match_crit, 0, sizeof(match_crit));
6084 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6085 0, &match_crit) != 0)
6086 goto down;
6087 sc->sc_promisc_flow_enabled = 0;
6088 start++;
6089
6090 /* all multicast flow group */
6091 match_crit.mc_dest_mac[0] = 0x01;
6092 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6093 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6094 goto down;
6095 sc->sc_allmulti_flow_enabled = 0;
6096 start++;
6097
6098 /* mac address matching flow group */
6099 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6100 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6101 (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6102 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6103 goto down;
6104
6105 /* flow table entries for unicast and broadcast */
6106 start = 0;
6107 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6108 LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6109 goto down;
6110 start++;
6111
6112 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6113 etherbroadcastaddr) != 0)
6114 goto down;
6115 start++;
6116
6117 /* multicast entries go after that */
6118 sc->sc_mcast_flow_base = start;
6119
6120 /* re-add any existing multicast flows */
6121 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6122 if (sc->sc_mcast_flows[i][0] != 0) {
6123 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6124 sc->sc_mcast_flow_base + i,
6125 sc->sc_mcast_flows[i]);
6126 }
6127 }
6128
6129 if (mcx_set_flow_table_root(sc) != 0)
6130 goto down;
6131
6132 /* start the queues */
6133 if (mcx_ready_sq(sc) != 0)
6134 goto down;
6135
6136 if (mcx_ready_rq(sc) != 0)
6137 goto down;
6138
6139 mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6140 sc->sc_rx_prod = 0;
6141 mcx_rx_fill(sc);
6142
6143 mcx_calibrate_first(sc);
6144
6145 SET(ifp->if_flags, IFF_RUNNING);
6146
6147 sc->sc_tx_cons = 0;
6148 sc->sc_tx_prod = 0;
6149 CLR(ifp->if_flags, IFF_OACTIVE);
6150 if_schedule_deferred_start(ifp);
6151
6152 return 0;
6153 destroy_tx_slots:
6154 mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6155 sc->sc_tx_slots = NULL;
6156
6157 i = (1 << MCX_LOG_RQ_SIZE);
6158 destroy_rx_slots:
6159 mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6160 sc->sc_rx_slots = NULL;
6161 down:
6162 mcx_stop(ifp, 0);
6163 return EIO;
6164 }
6165
6166 static void
6167 mcx_stop(struct ifnet *ifp, int disable)
6168 {
6169 struct mcx_softc *sc = ifp->if_softc;
6170 int group, i;
6171
6172 CLR(ifp->if_flags, IFF_RUNNING);
6173
6174 	/*
6175 	 * delete flow table entries first, so no more packets can
6176 	 * arrive while the rest of the queues are torn down
6177 	 */
6178 if (sc->sc_promisc_flow_enabled)
6179 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
6180 if (sc->sc_allmulti_flow_enabled)
6181 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
6182 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
6183 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
6184 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6185 if (sc->sc_mcast_flows[i][0] != 0) {
6186 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6187 sc->sc_mcast_flow_base + i);
6188 }
6189 }
6190
6191 callout_halt(&sc->sc_calibrate, NULL);
6192
6193 for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
6194 if (sc->sc_flow_group_id[group] != -1)
6195 mcx_destroy_flow_group(sc,
6196 sc->sc_flow_group_id[group]);
6197 }
6198
6199 if (sc->sc_flow_table_id != -1)
6200 mcx_destroy_flow_table(sc);
6201
6202 if (sc->sc_tirn != 0)
6203 mcx_destroy_tir(sc);
6204 if (sc->sc_rqn != 0)
6205 mcx_destroy_rq(sc);
6206
6207 if (sc->sc_sqn != 0)
6208 mcx_destroy_sq(sc);
6209 if (sc->sc_tisn != 0)
6210 mcx_destroy_tis(sc);
6211
6212 for (i = 0; i < sc->sc_num_cq; i++)
6213 mcx_destroy_cq(sc, i);
6214 sc->sc_num_cq = 0;
6215
6216 if (sc->sc_tx_slots != NULL) {
6217 mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
6218 (1 << MCX_LOG_SQ_SIZE));
6219 sc->sc_tx_slots = NULL;
6220 }
6221 if (sc->sc_rx_slots != NULL) {
6222 mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
6223 (1 << MCX_LOG_RQ_SIZE));
6224 sc->sc_rx_slots = NULL;
6225 }
6226 }
6227
6228 static int
6229 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6230 {
6231 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6232 struct ifreq *ifr = (struct ifreq *)data;
6233 struct ethercom *ec = &sc->sc_ec;
6234 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6235 struct ether_multi *enm;
6236 struct ether_multistep step;
6237 int s, i, flags, error = 0;
6238
6239 s = splnet();
6240 switch (cmd) {
6241
6242 case SIOCADDMULTI:
6243 		if (ether_addmulti(ifreq_getaddr(cmd, ifr),
		    &sc->sc_ec) == ENETRESET) {
6244 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6245 if (error != 0) {
6246 splx(s);
6247 return (error);
6248 }
6249
6250 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6251 if (sc->sc_mcast_flows[i][0] == 0) {
6252 memcpy(sc->sc_mcast_flows[i], addrlo,
6253 ETHER_ADDR_LEN);
6254 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6255 mcx_set_flow_table_entry(sc,
6256 MCX_FLOW_GROUP_MAC,
6257 sc->sc_mcast_flow_base + i,
6258 sc->sc_mcast_flows[i]);
6259 }
6260 break;
6261 }
6262 }
6263
6264 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6265 if (i == MCX_NUM_MCAST_FLOWS) {
6266 SET(ifp->if_flags, IFF_ALLMULTI);
6267 sc->sc_extra_mcast++;
6268 error = ENETRESET;
6269 }
6270
6271 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
6272 SET(ifp->if_flags, IFF_ALLMULTI);
6273 error = ENETRESET;
6274 }
6275 }
6276 }
6277 break;
6278
6279 case SIOCDELMULTI:
6280 		if (ether_delmulti(ifreq_getaddr(cmd, ifr),
		    &sc->sc_ec) == ENETRESET) {
6281 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6282 if (error != 0) {
6283 splx(s);
6284 return (error);
6285 }
6286
6287 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6288 if (memcmp(sc->sc_mcast_flows[i], addrlo,
6289 ETHER_ADDR_LEN) == 0) {
6290 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6291 mcx_delete_flow_table_entry(sc,
6292 MCX_FLOW_GROUP_MAC,
6293 sc->sc_mcast_flow_base + i);
6294 }
6295 sc->sc_mcast_flows[i][0] = 0;
6296 break;
6297 }
6298 }
6299
6300 if (i == MCX_NUM_MCAST_FLOWS)
6301 sc->sc_extra_mcast--;
6302
6303 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6304 sc->sc_extra_mcast == 0) {
6305 flags = 0;
6306 ETHER_LOCK(ec);
6307 ETHER_FIRST_MULTI(step, ec, enm);
6308 while (enm != NULL) {
6309 if (memcmp(enm->enm_addrlo,
6310 enm->enm_addrhi, ETHER_ADDR_LEN)) {
6311 SET(flags, IFF_ALLMULTI);
6312 break;
6313 }
6314 ETHER_NEXT_MULTI(step, enm);
6315 }
6316 ETHER_UNLOCK(ec);
6317 if (!ISSET(flags, IFF_ALLMULTI)) {
6318 CLR(ifp->if_flags, IFF_ALLMULTI);
6319 error = ENETRESET;
6320 }
6321 }
6322 }
6323 break;
6324
6325 default:
6326 error = ether_ioctl(ifp, cmd, data);
6327 }
6328
6329 if (error == ENETRESET) {
6330 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6331 (IFF_UP | IFF_RUNNING))
6332 mcx_iff(sc);
6333 error = 0;
6334 }
6335 splx(s);
6336
6337 return (error);
6338 }
6339
6340 #if 0
6341 static int
6342 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
6343 {
6344 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6345 struct mcx_reg_mcia mcia;
6346 struct mcx_reg_pmlp pmlp;
6347 int offset, error;
6348
6349 /* get module number */
6350 memset(&pmlp, 0, sizeof(pmlp));
6351 pmlp.rp_local_port = 1;
6352 error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
6353 sizeof(pmlp));
6354 if (error != 0) {
6355 printf("%s: unable to get eeprom module number\n",
6356 DEVNAME(sc));
6357 return error;
6358 }
6359
6360 for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
6361 memset(&mcia, 0, sizeof(mcia));
6362 mcia.rm_l = 0;
6363 mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
6364 MCX_PMLP_MODULE_NUM_MASK;
6365 		mcia.rm_i2c_addr = sff->sff_addr / 2; /* 8-bit -> 7-bit address, apparently */
6366 mcia.rm_page_num = sff->sff_page;
6367 mcia.rm_dev_addr = htobe16(offset);
6368 mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
6369
6370 error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
6371 &mcia, sizeof(mcia));
6372 if (error != 0) {
6373 printf("%s: unable to read eeprom at %x\n",
6374 DEVNAME(sc), offset);
6375 return error;
6376 }
6377
6378 memcpy(sff->sff_data + offset, mcia.rm_data,
6379 MCX_MCIA_EEPROM_BYTES);
6380 }
6381
6382 return 0;
6383 }
6384 #endif
6385
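/*
 * Load an mbuf chain for transmit.  If it has more DMA segments than
 * the map allows, compact it with m_defrag() and retry once; a
 * non-zero return tells the caller to drop the packet, as mcx_start()
 * does.
 */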
6386 static int
6387 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6388 {
6389 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6390 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6391 case 0:
6392 break;
6393
6394 case EFBIG:
6395 if (m_defrag(m, M_DONTWAIT) != NULL &&
6396 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6397 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6398 break;
6399
6400 /* FALLTHROUGH */
6401 default:
6402 return (1);
6403 }
6404
6405 ms->ms_m = m;
6406 return (0);
6407 }
6408
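/*
 * Transmit.  Each send queue entry is built from 16-byte units: a
 * control segment, an eth segment carrying MCX_SQ_INLINE_SIZE bytes
 * of inline headers, then one data segment per DMA segment.  The ds
 * count written below (dm_nsegs + 3) suggests the control and eth
 * segments occupy three such units.  Entries needing more data
 * segments than fit in one slot spill into the following slots.
 */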
6409 static void
6410 mcx_start(struct ifnet *ifp)
6411 {
6412 struct mcx_softc *sc = ifp->if_softc;
6413 struct mcx_sq_entry *sq, *sqe;
6414 struct mcx_sq_entry_seg *sqs;
6415 struct mcx_slot *ms;
6416 bus_dmamap_t map;
6417 struct mbuf *m;
6418 u_int idx, free, used;
6419 uint64_t *bf;
6420 size_t bf_base;
6421 int i, seg, nseg;
6422
6423 bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
6424
6425 idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
6426 free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;
6427
6428 used = 0;
6429 bf = NULL;
6430 sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);
6431
6432 for (;;) {
6433 if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
6434 SET(ifp->if_flags, IFF_OACTIVE);
6435 break;
6436 }
6437
6438 IFQ_DEQUEUE(&ifp->if_snd, m);
6439 if (m == NULL) {
6440 break;
6441 }
6442
6443 sqe = sq + idx;
6444 ms = &sc->sc_tx_slots[idx];
6445 memset(sqe, 0, sizeof(*sqe));
6446
6447 /* ctrl segment */
6448 sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
6449 ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
6450 /* always generate a completion event */
6451 sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
6452
6453 /* eth segment */
6454 sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
6455 m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
6456 m_adj(m, MCX_SQ_INLINE_SIZE);
6457
6458 if (mcx_load_mbuf(sc, ms, m) != 0) {
6459 m_freem(m);
6460 if_statinc(ifp, if_oerrors);
6461 continue;
6462 }
6463 bf = (uint64_t *)sqe;
6464
6465 if (ifp->if_bpf != NULL)
6466 bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
6467 MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
6468
6469 map = ms->ms_map;
6470 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6471 BUS_DMASYNC_PREWRITE);
6472
6473 sqe->sqe_ds_sq_num =
6474 htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
6475 (map->dm_nsegs + 3));
6476
6477 /* data segment - first wqe has one segment */
6478 sqs = sqe->sqe_segs;
6479 seg = 0;
6480 nseg = 1;
6481 for (i = 0; i < map->dm_nsegs; i++) {
6482 if (seg == nseg) {
6483 /* next slot */
6484 idx++;
6485 if (idx == (1 << MCX_LOG_SQ_SIZE))
6486 idx = 0;
6487 sc->sc_tx_prod++;
6488 used++;
6489
6490 sqs = (struct mcx_sq_entry_seg *)(sq + idx);
6491 seg = 0;
6492 nseg = MCX_SQ_SEGS_PER_SLOT;
6493 }
6494 sqs[seg].sqs_byte_count =
6495 htobe32(map->dm_segs[i].ds_len);
6496 sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
6497 sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
6498 seg++;
6499 }
6500
6501 idx++;
6502 if (idx == (1 << MCX_LOG_SQ_SIZE))
6503 idx = 0;
6504 sc->sc_tx_prod++;
6505 used++;
6506 }
6507
6508 if (used) {
6509 		*sc->sc_tx_doorbell =
		    htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);
6510
6511 membar_sync();
6512
6513 /*
6514 * write the first 64 bits of the last sqe we produced
6515 * to the blue flame buffer
6516 */
6517 bus_space_write_8(sc->sc_memt, sc->sc_memh,
6518 bf_base + sc->sc_bf_offset, *bf);
6519 /* next write goes to the other buffer */
6520 sc->sc_bf_offset ^= sc->sc_bf_size;
6521
6522 membar_sync();
6523 }
6524 }
6525
6526 static void
6527 mcx_watchdog(struct ifnet *ifp)
6528 {
6529 }
6530
6531 static void
6532 mcx_media_add_types(struct mcx_softc *sc)
6533 {
6534 struct mcx_reg_ptys ptys;
6535 int i;
6536 uint32_t proto_cap;
6537
6538 memset(&ptys, 0, sizeof(ptys));
6539 ptys.rp_local_port = 1;
6540 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6541 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6542 sizeof(ptys)) != 0) {
6543 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6544 return;
6545 }
6546
6547 proto_cap = be32toh(ptys.rp_eth_proto_cap);
6548 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6549 const struct mcx_eth_proto_capability *cap;
6550 if (!ISSET(proto_cap, 1U << i))
6551 continue;
6552
6553 cap = &mcx_eth_cap_map[i];
6554 if (cap->cap_media == 0)
6555 continue;
6556
6557 ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
6558 }
6559 }
6560
6561 static void
6562 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6563 {
6564 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6565 struct mcx_reg_ptys ptys;
6566 int i;
6567 uint32_t proto_oper;
6568 uint64_t media_oper;
6569
6570 memset(&ptys, 0, sizeof(ptys));
6571 ptys.rp_local_port = 1;
6572 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6573
6574 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6575 sizeof(ptys)) != 0) {
6576 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6577 return;
6578 }
6579
6580 proto_oper = be32toh(ptys.rp_eth_proto_oper);
6581
6582 media_oper = 0;
6583
6584 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6585 const struct mcx_eth_proto_capability *cap;
6586 if (!ISSET(proto_oper, 1U << i))
6587 continue;
6588
6589 cap = &mcx_eth_cap_map[i];
6590
6591 if (cap->cap_media != 0)
6592 media_oper = cap->cap_media;
6593 }
6594
6595 ifmr->ifm_status = IFM_AVALID;
6596 /* not sure if this is the right thing to check, maybe paos? */
6597 if (proto_oper != 0) {
6598 ifmr->ifm_status |= IFM_ACTIVE;
6599 ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6600 /* txpause, rxpause, duplex? */
6601 }
6602 }
6603
6604 static int
6605 mcx_media_change(struct ifnet *ifp)
6606 {
6607 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6608 struct mcx_reg_ptys ptys;
6609 struct mcx_reg_paos paos;
6610 uint32_t media;
6611 int i, error;
6612
6613 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6614 return EINVAL;
6615
6616 error = 0;
6617
6618 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6619 /* read ptys to get supported media */
6620 memset(&ptys, 0, sizeof(ptys));
6621 ptys.rp_local_port = 1;
6622 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6623 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6624 &ptys, sizeof(ptys)) != 0) {
6625 printf("%s: unable to read port type/speed\n",
6626 DEVNAME(sc));
6627 return EIO;
6628 }
6629
6630 media = be32toh(ptys.rp_eth_proto_cap);
6631 } else {
6632 /* map media type */
6633 media = 0;
6634 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6635 const struct mcx_eth_proto_capability *cap;
6636
6637 cap = &mcx_eth_cap_map[i];
6638 if (cap->cap_media ==
6639 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6640 media = (1 << i);
6641 break;
6642 }
6643 }
6644 }
6645
6646 /* disable the port */
6647 memset(&paos, 0, sizeof(paos));
6648 paos.rp_local_port = 1;
6649 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6650 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6651 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6652 sizeof(paos)) != 0) {
6653 printf("%s: unable to set port state to down\n", DEVNAME(sc));
6654 return EIO;
6655 }
6656
6657 memset(&ptys, 0, sizeof(ptys));
6658 ptys.rp_local_port = 1;
6659 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6660 ptys.rp_eth_proto_admin = htobe32(media);
6661 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6662 sizeof(ptys)) != 0) {
6663 printf("%s: unable to set port media type/speed\n",
6664 DEVNAME(sc));
6665 error = EIO;
6666 }
6667
6668 /* re-enable the port to start negotiation */
6669 memset(&paos, 0, sizeof(paos));
6670 paos.rp_local_port = 1;
6671 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6672 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6673 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6674 sizeof(paos)) != 0) {
6675 printf("%s: unable to set port state to up\n", DEVNAME(sc));
6676 error = EIO;
6677 }
6678
6679 return error;
6680 }
6681
6682 static void
6683 mcx_port_change(struct work *wk, void *xsc)
6684 {
6685 struct mcx_softc *sc = xsc;
6686 struct ifnet *ifp = &sc->sc_ec.ec_if;
6687 struct mcx_reg_paos paos = {
6688 .rp_local_port = 1,
6689 };
6690 struct mcx_reg_ptys ptys = {
6691 .rp_local_port = 1,
6692 .rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
6693 };
6694 int link_state = LINK_STATE_DOWN;
6695
6696 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6697 sizeof(paos)) == 0) {
6698 if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6699 link_state = LINK_STATE_UP;
6700 }
6701
6702 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6703 sizeof(ptys)) == 0) {
6704 uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
6705 uint64_t baudrate = 0;
6706 unsigned int i;
6707
6708 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6709 const struct mcx_eth_proto_capability *cap;
6710 if (!ISSET(proto_oper, 1U << i))
6711 continue;
6712
6713 cap = &mcx_eth_cap_map[i];
6714 if (cap->cap_baudrate == 0)
6715 continue;
6716
6717 baudrate = cap->cap_baudrate;
6718 break;
6719 }
6720
6721 ifp->if_baudrate = baudrate;
6722 }
6723
6724 if (link_state != ifp->if_link_state) {
6725 if_link_state_change(ifp, link_state);
6726 }
6727 }
6728
6730 static inline uint32_t
6731 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6732 {
6733 uint32_t word;
6734
6735 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6736
6737 return (be32toh(word));
6738 }
6739
6740 static inline void
6741 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6742 {
6743 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6744 }
6745
6746 static inline void
6747 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
6748 {
6749 bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
6750 }
6751
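/*
 * Read the device's free-running 64-bit timer as two 32-bit halves.
 * If the high word changed between the reads, the low word wrapped
 * in between, so retry with the new high word until a consistent
 * sample is taken.
 */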
6752 static uint64_t
6753 mcx_timer(struct mcx_softc *sc)
6754 {
6755 uint32_t hi, lo, ni;
6756
6757 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6758 for (;;) {
6759 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6760 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6761 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6762
6763 if (ni == hi)
6764 break;
6765
6766 hi = ni;
6767 }
6768
6769 return (((uint64_t)hi << 32) | (uint64_t)lo);
6770 }
6771
6772 static int
6773 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6774 bus_size_t size, u_int align)
6775 {
6776 mxm->mxm_size = size;
6777
6778 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6779 mxm->mxm_size, 0,
6780 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6781 &mxm->mxm_map) != 0)
6782 return (1);
6783 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6784 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6785 BUS_DMA_WAITOK) != 0)
6786 goto destroy;
6787 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6788 mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6789 goto free;
6790 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6791 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6792 goto unmap;
6793
6794 mcx_dmamem_zero(mxm);
6795
6796 return (0);
6797 unmap:
6798 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6799 free:
6800 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6801 destroy:
6802 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6803 return (1);
6804 }
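
/*
 * Illustrative usage sketch (not part of the driver): callers pair
 * mcx_dmamem_alloc() with mcx_dmamem_free() and reach the mapping
 * through the MCX_DMA_KVA()/MCX_DMA_LEN() accessors, as the queue
 * setup code above does.
 */
#if 0
	struct mcx_dmamem mxm;

	if (mcx_dmamem_alloc(sc, &mxm, MCX_PAGE_SIZE, MCX_PAGE_SIZE) != 0)
		return (-1);
	memset(MCX_DMA_KVA(&mxm), 0xff, MCX_DMA_LEN(&mxm));
	mcx_dmamem_free(sc, &mxm);
#endif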
6805
6806 static void
6807 mcx_dmamem_zero(struct mcx_dmamem *mxm)
6808 {
6809 memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
6810 }
6811
6812 static void
6813 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
6814 {
6815 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
6816 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6817 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6818 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6819 }
6820
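/*
 * Allocate whole pages of physical memory to hand to the firmware.
 * The segment array is sized for the worst case of one segment per
 * page; if bus_dmamem_alloc() coalesces them, the array is copied to
 * a smaller allocation before the segments are loaded into a map.
 */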
6821 static int
6822 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6823 {
6824 bus_dma_segment_t *segs;
6825 bus_size_t len = pages * MCX_PAGE_SIZE;
6826 size_t seglen;
6827
6828 segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
6829 seglen = sizeof(*segs) * pages;
6830
6831 if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6832 segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6833 goto free_segs;
6834
6835 if (mhm->mhm_seg_count < pages) {
6836 size_t nseglen;
6837
6838 mhm->mhm_segs = kmem_alloc(
6839 sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
6840
6841 nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6842
6843 memcpy(mhm->mhm_segs, segs, nseglen);
6844
6845 kmem_free(segs, seglen);
6846
6847 segs = mhm->mhm_segs;
6848 seglen = nseglen;
6849 } else
6850 mhm->mhm_segs = segs;
6851
6852 if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6853 MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6854 &mhm->mhm_map) != 0)
6855 goto free_dmamem;
6856
6857 if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6858 mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6859 goto destroy;
6860
6861 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6862 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6863
6864 mhm->mhm_npages = pages;
6865
6866 return (0);
6867
6868 destroy:
6869 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6870 free_dmamem:
6871 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6872 free_segs:
6873 kmem_free(segs, seglen);
6874 mhm->mhm_segs = NULL;
6875
6876 return (-1);
6877 }
6878
6879 static void
6880 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
6881 {
6882 if (mhm->mhm_npages == 0)
6883 return;
6884
6885 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6886 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
6887
6888 bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
6889 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6890 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6891 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
6892
6893 mhm->mhm_npages = 0;
6894 }
6895