/*	$NetBSD: if_mcx.c,v 1.14 2020/05/25 10:35:17 jmcneill Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.44 2020/04/24 07:28:37 mestre Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg (at) openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mcx.c,v 1.14 2020/05/25 10:35:17 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE 1 */
/* #define CALLOUT_FLAGS CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS 0
/* #endif */

#define MCX_MAX_NINTR 1

#define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER 0x0000
#define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v) ((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER 0x0004
#define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff)
#define MCX_CMDIF(_v) ((_v) >> 16)
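
/*
 * A sketch (not verbatim driver code) of how the firmware version is
 * assembled from these registers, assuming the values are read with
 * bus_space_read_4():
 *
 *	uint32_t fw = bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_FW_VER);
 *	uint32_t sub = bus_space_read_4(sc->sc_memt, sc->sc_memh,
 *	    MCX_CMDIF_FW_SUBVER);
 *	printf("firmware %u.%u.%u", MCX_FW_VER_MAJOR(fw),
 *	    MCX_FW_VER_MINOR(fw), MCX_FW_VER_SUBMINOR(sub));
 */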

#define MCX_ISSI 1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED 5

#define MCX_HARDMTU 9500

#define MCX_MAX_CQS 2 /* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE 6 /* one page */
#define MCX_LOG_CQ_SIZE 12
#define MCX_LOG_RQ_SIZE 10
#define MCX_LOG_SQ_SIZE 11

/* completion event moderation - about 10kHz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD 50
#define MCX_CQ_MOD_COUNTER (((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
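
/*
 * With MCX_LOG_CQ_SIZE 12, the counter threshold works out to
 * ((1 << 11) * 9) / 10 = 1843 entries, i.e. 90% of half the
 * 4096-entry completion queue.
 */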

#define MCX_LOG_SQ_ENTRY_SIZE 6
#define MCX_SQ_ENTRY_MAX_SLOTS 4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS \
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
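
/*
 * With 64-byte SQ entries and 16-byte segments this is 64 / 16 = 4
 * segments per slot, so at most 1 + (4 - 1) * 4 = 13 segments per
 * packet: the control and ethernet segments occupy three of the four
 * 16-byte pieces of the first slot, leaving room for one data segment
 * there.
 */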

#define MCX_LOG_FLOW_TABLE_SIZE 5
#define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
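
/*
 * i.e. a 1 << 5 = 32 entry flow table, of which 32 - 4 = 28 entries
 * remain for multicast filters after the static flows.
 */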

#define MCX_SQ_INLINE_SIZE 18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET 0
#define MCX_CQ_DOORBELL_SIZE 16
#define MCX_RQ_DOORBELL_OFFSET 64
#define MCX_SQ_DOORBELL_OFFSET 64

#define MCX_WQ_DOORBELL_MASK 0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL 0x20
#define MCX_UAR_EQ_DOORBELL_ARM 0x40
#define MCX_UAR_EQ_DOORBELL 0x48
#define MCX_UAR_BF 0x800

#define MCX_CMDQ_ADDR_HI 0x0010
#define MCX_CMDQ_ADDR_LO 0x0014
#define MCX_CMDQ_ADDR_NMASK 0xfff
#define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK (0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8)

#define MCX_CMDQ_DOORBELL 0x0018

#define MCX_STATE 0x01fc
#define MCX_STATE_MASK (1U << 31)
#define MCX_STATE_INITIALIZING (1U << 31)
#define MCX_STATE_READY (0 << 31)
#define MCX_STATE_INTERFACE_MASK (0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED (0x1 << 24)
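
/*
 * A minimal sketch (not the driver's verbatim code) of waiting for the
 * firmware to leave the initializing state:
 *
 *	while ((bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_STATE) &
 *	    MCX_STATE_MASK) == MCX_STATE_INITIALIZING)
 *		delay(1000);
 */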

#define MCX_INTERNAL_TIMER 0x1000
#define MCX_INTERNAL_TIMER_H 0x1000
#define MCX_INTERNAL_TIMER_L 0x1004

#define MCX_CLEAR_INT 0x100c

#define MCX_REG_OP_WRITE 0
#define MCX_REG_OP_READ 1

#define MCX_REG_PMLP 0x5002
#define MCX_REG_PMTU 0x5003
#define MCX_REG_PTYS 0x5004
#define MCX_REG_PAOS 0x5006
#define MCX_REG_PFCC 0x5007
#define MCX_REG_PPCNT 0x5008
#define MCX_REG_MCIA 0x9014

#define MCX_ETHER_CAP_SGMII 0
#define MCX_ETHER_CAP_1000_KX 1
#define MCX_ETHER_CAP_10G_CX4 2
#define MCX_ETHER_CAP_10G_KX4 3
#define MCX_ETHER_CAP_10G_KR 4
#define MCX_ETHER_CAP_20G_KR2 5
#define MCX_ETHER_CAP_40G_CR4 6
#define MCX_ETHER_CAP_40G_KR4 7
#define MCX_ETHER_CAP_56G_R4 8
#define MCX_ETHER_CAP_10G_CR 12
#define MCX_ETHER_CAP_10G_SR 13
#define MCX_ETHER_CAP_10G_LR 14
#define MCX_ETHER_CAP_40G_SR4 15
#define MCX_ETHER_CAP_40G_LR4 16
#define MCX_ETHER_CAP_50G_SR2 18
#define MCX_ETHER_CAP_100G_CR4 20
#define MCX_ETHER_CAP_100G_SR4 21
#define MCX_ETHER_CAP_100G_KR4 22
#define MCX_ETHER_CAP_100G_LR4 23
#define MCX_ETHER_CAP_100_TX 24
#define MCX_ETHER_CAP_1000_T 25
#define MCX_ETHER_CAP_10G_T 26
#define MCX_ETHER_CAP_25G_CR 27
#define MCX_ETHER_CAP_25G_KR 28
#define MCX_ETHER_CAP_25G_SR 29
#define MCX_ETHER_CAP_50G_CR2 30
#define MCX_ETHER_CAP_50G_KR2 31

#define MCX_PAGE_SHIFT 12
#define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE 32

#define MCX_CMD_QUERY_HCA_CAP 0x100
#define MCX_CMD_QUERY_ADAPTER 0x101
#define MCX_CMD_INIT_HCA 0x102
#define MCX_CMD_TEARDOWN_HCA 0x103
#define MCX_CMD_ENABLE_HCA 0x104
#define MCX_CMD_DISABLE_HCA 0x105
#define MCX_CMD_QUERY_PAGES 0x107
#define MCX_CMD_MANAGE_PAGES 0x108
#define MCX_CMD_SET_HCA_CAP 0x109
#define MCX_CMD_QUERY_ISSI 0x10a
#define MCX_CMD_SET_ISSI 0x10b
#define MCX_CMD_SET_DRIVER_VERSION \
	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
	0x203
#define MCX_CMD_CREATE_EQ 0x301
#define MCX_CMD_DESTROY_EQ 0x302
#define MCX_CMD_CREATE_CQ 0x400
#define MCX_CMD_DESTROY_CQ 0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
	0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS \
	0x770
#define MCX_CMD_ALLOC_PD 0x800
#define MCX_CMD_ALLOC_UAR 0x802
#define MCX_CMD_ACCESS_REG 0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
	0x816
#define MCX_CMD_CREATE_TIR 0x900
#define MCX_CMD_DESTROY_TIR 0x902
#define MCX_CMD_CREATE_SQ 0x904
#define MCX_CMD_MODIFY_SQ 0x905
#define MCX_CMD_DESTROY_SQ 0x906
#define MCX_CMD_QUERY_SQ 0x907
#define MCX_CMD_CREATE_RQ 0x908
#define MCX_CMD_MODIFY_RQ 0x909
#define MCX_CMD_DESTROY_RQ 0x90a
#define MCX_CMD_QUERY_RQ 0x90b
#define MCX_CMD_CREATE_TIS 0x912
#define MCX_CMD_DESTROY_TIS 0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT \
	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE \
	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE \
	0x931
#define MCX_CMD_QUERY_FLOW_TABLE \
	0x932
#define MCX_CMD_CREATE_FLOW_GROUP \
	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP \
	0x934
#define MCX_CMD_QUERY_FLOW_GROUP \
	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY \
	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER \
	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER \
	0x93b

#define MCX_QUEUE_STATE_RST 0
#define MCX_QUEUE_STATE_RDY 1
#define MCX_QUEUE_STATE_ERR 3

#define MCX_FLOW_TABLE_TYPE_RX 0
#define MCX_FLOW_TABLE_TYPE_TX 1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t cq_type;
#define MCX_CMDQ_TYPE_PCIE 0x7
	uint8_t cq_reserved0[3];

	uint32_t cq_input_length;
	uint64_t cq_input_ptr;
	uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t cq_output_ptr;
	uint32_t cq_output_length;

	uint8_t cq_token;
	uint8_t cq_signature;
	uint8_t cq_reserved1[1];
	uint8_t cq_status;
#define MCX_CQ_STATUS_SHIFT 1
#define MCX_CQ_STATUS_MASK (0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK (0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR (0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE (0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM (0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE (0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE (0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY (0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM (0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE (0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX (0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES (0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN (0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN (0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
	(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE (0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK 0x1
#define MCX_CQ_STATUS_OWN_SW 0x0
#define MCX_CQ_STATUS_OWN_HW 0x1
} __packed __aligned(8);
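
/*
 * A sketch of how a polled command completion would be checked,
 * assuming "cqe" points at the submitted entry: the entry is back in
 * software ownership once the low bit of cq_status clears, and the
 * remaining bits carry the status code.
 *
 *	if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
 *	    MCX_CQ_STATUS_OWN_SW &&
 *	    (cqe->cq_status & MCX_CQ_STATUS_MASK) != MCX_CQ_STATUS_OK)
 *		printf("command error %x\n",
 *		    cqe->cq_status & MCX_CQ_STATUS_MASK);
 */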

#define MCX_CMDQ_MAILBOX_DATASIZE 512

struct mcx_cmdq_mailbox {
	uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t mb_reserved0[48];
	uint64_t mb_next_ptr;
	uint32_t mb_block_number;
	uint8_t mb_reserved1[1];
	uint8_t mb_token;
	uint8_t mb_ctrl_signature;
	uint8_t mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN (1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE roundup(sizeof(struct mcx_cmdq_mailbox), \
	MCX_CMDQ_MAILBOX_ALIGN)
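
/*
 * sizeof(struct mcx_cmdq_mailbox) is 512 + 48 + 8 + 4 + 1 + 1 + 1 + 1
 * = 576 bytes, so each mailbox occupies one 1024-byte-aligned slot.
 */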
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_function_id;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL 0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC 0x1
	uint16_t cmd_profile;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_register_id;
	uint32_t cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[2];
	uint16_t rp_max_mtu;
	uint8_t rp_reserved3[2];
	uint16_t rp_admin_mtu;
	uint8_t rp_reserved4[2];
	uint16_t rp_oper_mtu;
	uint8_t rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2;
	uint8_t rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH (1 << 2)
	uint8_t rp_reserved3[8];
	uint32_t rp_eth_proto_cap;
	uint8_t rp_reserved4[8];
	uint32_t rp_eth_proto_admin;
	uint8_t rp_reserved5[8];
	uint32_t rp_eth_proto_oper;
	uint8_t rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP 1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN 2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE 3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED 4
	uint8_t rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP 1
#define MCX_REG_PAOS_OPER_STATUS_DOWN 2
#define MCX_REG_PAOS_OPER_STATUS_FAILED 4
	uint8_t rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN (1 << 7)
	uint8_t rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t rp_reserved1;
	uint8_t rp_local_port;
	uint8_t rp_reserved2[3];
	uint8_t rp_prio_mask_tx;
	uint8_t rp_reserved3;
	uint8_t rp_prio_mask_rx;
	uint8_t rp_pptx_aptx;
	uint8_t rp_pfctx;
	uint8_t rp_fctx_dis;
	uint8_t rp_reserved4;
	uint8_t rp_pprx_aprx;
	uint8_t rp_pfcrx;
	uint8_t rp_reserved5[2];
	uint16_t rp_dev_stall_min;
	uint16_t rp_dev_stall_crit;
	uint8_t rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK 0xff
struct mcx_reg_pmlp {
	uint8_t rp_rxtx;
	uint8_t rp_local_port;
	uint8_t rp_reserved0;
	uint8_t rp_width;
	uint32_t rp_lane0_mapping;
	uint32_t rp_lane1_mapping;
	uint32_t rp_lane2_mapping;
	uint32_t rp_lane3_mapping;
	uint8_t rp_reserved1[44];
} __packed __aligned(4);

#define MCX_MCIA_EEPROM_BYTES 32
struct mcx_reg_mcia {
	uint8_t rm_l;
	uint8_t rm_module;
	uint8_t rm_reserved0;
	uint8_t rm_status;
	uint8_t rm_i2c_addr;
	uint8_t rm_page_num;
	uint16_t rm_dev_addr;
	uint16_t rm_reserved1;
	uint16_t rm_size;
	uint32_t rm_reserved2;
	uint8_t rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t cmd_reserved2[16];
	uint8_t cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_current_issi;
	uint8_t cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT 0x01
#define MCX_CMD_QUERY_PAGES_INIT 0x02
#define MCX_CMD_QUERY_PAGES_REGULAR 0x03
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	uint32_t cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
	0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
	0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
	0x02
	uint8_t cmd_reserved1[2];
	uint16_t cmd_func_id;
	uint32_t cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_output_num_entries;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1)
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN 0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__ __COUNTER__
#else
#define __counter__ __LINE__
#endif

#define __token(_tok, _num) _tok##_num
#define _token(_tok, _num) __token(_tok, _num)
#define __reserved__ _token(__reserved, __counter__)
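
/*
 * Each use of __reserved__ below expands to a uniquely named padding
 * field: _token pastes the current __COUNTER__ value (or __LINE__ on
 * older compilers) onto "__reserved", so successive uses become e.g.
 * __reserved0, __reserved1, ... without name collisions.
 */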

struct mcx_cap_device {
	uint8_t reserved0[16];

	uint8_t log_max_srq_sz;
	uint8_t log_max_qp_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f

	uint8_t __reserved__[1];
	uint8_t log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f
	uint8_t __reserved__[2];

	uint8_t __reserved__[1];
	uint8_t log_max_cq_sz;
	uint8_t __reserved__[1];
	uint8_t log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f

	uint8_t log_max_eq_sz;
	uint8_t log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f

	uint8_t max_indirection;
	uint8_t log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f
	uint8_t teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
	0x3f
	uint8_t log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
	0x3f

	uint8_t __reserved__[1];
	uint8_t log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
	0x3f
	uint8_t __reserved__[1];
	uint8_t log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
	0x3f

	uint8_t flags1;
#define MCX_CAP_DEVICE_END_PAD 0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
	0x20
#define MCX_CAP_DEVICE_START_PAD 0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
	0x08
	uint8_t __reserved__[1];
	uint16_t gid_table_size;

	uint16_t flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
	0x2000
#define MCX_CAP_DEVICE_DEBUG 0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
	0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000
#define MCX_CAP_DEVICE_MAX_QP_CNT_MASK 0x03ff
	uint16_t pkey_table_size;

	uint8_t flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
	0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
	0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL 0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10
#define MCX_CAP_DEVICE_ETS 0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
	0x01
	uint8_t local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
	0x1f
	uint8_t port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
	0x80
#define MCX_CAP_DEVICE_PORT_TYPE 0x03
	uint8_t num_ports;

	uint8_t snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT 0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f
	uint8_t max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC 0x0f
	uint8_t flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80
#define MCX_CAP_DEVICE_DCBX 0x40
#define MCX_CAP_DEVICE_ROL_S 0x02
#define MCX_CAP_DEVICE_ROL_G 0x01
	uint8_t wol;
#define MCX_CAP_DEVICE_WOL_S 0x40
#define MCX_CAP_DEVICE_WOL_G 0x20
#define MCX_CAP_DEVICE_WOL_A 0x10
#define MCX_CAP_DEVICE_WOL_B 0x08
#define MCX_CAP_DEVICE_WOL_M 0x04
#define MCX_CAP_DEVICE_WOL_U 0x02
#define MCX_CAP_DEVICE_WOL_P 0x01

	uint16_t stat_rate_support;
	uint8_t __reserved__[1];
	uint8_t cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION 0x0f

	uint32_t flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
	0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
	0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
	0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400
#define MCX_CAP_DEVICE_SHO 0x00000100
#define MCX_CAP_DEVICE_TPH 0x00000080
#define MCX_CAP_DEVICE_RF 0x00000040
#define MCX_CAP_DEVICE_DCT 0x00000020
#define MCX_CAP_DEVICE_QOS 0x00000010
#define MCX_CAP_DEVICE_ETH_NET_OFFLOADS 0x00000008
#define MCX_CAP_DEVICE_ROCE 0x00000004
#define MCX_CAP_DEVICE_ATOMIC 0x00000002

	uint32_t flags6;
#define MCX_CAP_DEVICE_CQ_OI 0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
	0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000
#define MCX_CAP_DEVICE_PG 0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
	0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
	0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
	0x00100000
#define MCX_CAP_DEVICE_CD 0x00080000
#define MCX_CAP_DEVICE_ATM 0x00040000
#define MCX_CAP_DEVICE_APM 0x00020000
#define MCX_CAP_DEVICE_IMAICL 0x00010000
#define MCX_CAP_DEVICE_QKV 0x00000200
#define MCX_CAP_DEVICE_PKV 0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080
#define MCX_CAP_DEVICE_XRC 0x00000008
#define MCX_CAP_DEVICE_UD 0x00000004
#define MCX_CAP_DEVICE_UC 0x00000002
#define MCX_CAP_DEVICE_RC 0x00000001

	uint8_t uar_flags;
#define MCX_CAP_DEVICE_UAR_4K 0x80
	uint8_t uar_sz; /* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ 0x3f
	uint8_t __reserved__[1];
	uint8_t log_pg_sz;

	uint8_t flags7;
#define MCX_CAP_DEVICE_BF 0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
	0x20
	uint8_t log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
	uint8_t __reserved__[2];

	uint16_t num_of_diagnostic_counters;
	uint16_t max_wqe_sz_sq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_rq;

	uint8_t __reserved__[2];
	uint16_t max_wqe_sz_sq_dc;

	uint32_t max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff

	uint8_t __reserved__[3];
	uint8_t log_max_mcq;

	uint8_t log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSPORT_DOMAIN \
	0x1f
	uint8_t log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
	uint8_t __reserved__[1];
	uint8_t log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f

	uint8_t __reserved__[2];
	uint16_t max_flow_counter;

	uint8_t log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
	uint8_t log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
	uint8_t log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
	uint8_t log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f

	uint8_t flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
	0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
	uint8_t log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
	uint8_t log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
	uint8_t log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
	0x1f
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR 0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC 0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU 0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[4];
	uint8_t cmd_allowed_list_type;
	uint8_t cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t vp_min_wqe_inline_mode;
	uint8_t vp_reserved0[32];
	uint32_t vp_mtu;
	uint8_t vp_reserved1[200];
	uint16_t vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC (0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC (1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN (2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL (1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST (1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST (1 << 15)
	uint16_t vp_allowed_list_size;
	uint64_t vp_perm_addr;
	uint8_t vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t packets;
	uint64_t octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter rx_err;
	struct mcx_counter tx_err;
	uint8_t reserved0[64]; /* 0x30 */
	struct mcx_counter rx_bcast;
	struct mcx_counter tx_bcast;
	struct mcx_counter rx_ucast;
	struct mcx_counter tx_ucast;
	struct mcx_counter rx_mcast;
	struct mcx_counter tx_mcast;
	uint8_t reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t cmd_reserved0[8];
	uint8_t cmd_clear;
	uint8_t cmd_reserved1[5];
	uint16_t cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_uar;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[4];
	uint32_t cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t eq_status;
#define MCX_EQ_CTX_ST_SHIFT 8
#define MCX_EQ_CTX_ST_MASK (0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED (0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED (0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT 17
#define MCX_EQ_CTX_OI (1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT 18
#define MCX_EQ_CTX_EC (1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT 28
#define MCX_EQ_CTX_STATUS_MASK (0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK (0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
	(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t eq_reserved1;
	uint32_t eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT 5
	uint32_t eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT 24
	uint32_t eq_reserved2;
	uint8_t eq_reserved3[3];
	uint8_t eq_intr;
	uint32_t eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t eq_reserved4[3];
	uint32_t eq_consumer_counter;
	uint32_t eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK 0xffffff
	uint32_t eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx cmd_eq_ctx;
	uint8_t cmd_reserved0[8];
	uint64_t cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION 0x00
#define MCX_EVENT_TYPE_CQ_ERROR 0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR 0x08
#define MCX_EVENT_TYPE_PORT_CHANGE 0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION 0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST 0x0b
#define MCX_EVENT_TYPE_LAST_WQE 0x13
	uint8_t cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_eqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t eq_reserved1;
	uint8_t eq_event_type;
	uint8_t eq_reserved2;
	uint8_t eq_event_sub_type;

	uint8_t eq_reserved3[28];
	uint32_t eq_event_data[7];
	uint8_t eq_reserved4[2];
	uint8_t eq_signature;
	uint8_t eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT 1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);

struct mcx_cmd_alloc_pd_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_pd;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t cmd_reserved0[20];
	uint32_t cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT 28
	uint8_t cmd_reserved1[8];
	uint32_t cmd_lro;
	uint8_t cmd_reserved2[8];
	uint32_t cmd_inline_rqn;
	uint32_t cmd_indir_table;
	uint32_t cmd_tdomain;
	uint8_t cmd_rx_hash_key[40];
	uint32_t cmd_rx_hash_sel_outer;
	uint32_t cmd_rx_hash_sel_inner;
	uint8_t cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tirn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t cmd_reserved[16];
	uint32_t cmd_prio;
	uint8_t cmd_reserved1[32];
	uint32_t cmd_tdomain;
	uint8_t cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_tisn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t cq_status;
	uint32_t cq_reserved1;
	uint32_t cq_page_offset;
	uint32_t cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK 0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT 24
	uint32_t cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT 16
	uint32_t cq_eqn;
	uint32_t cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT 24
	uint32_t cq_reserved2;
	uint32_t cq_last_notified;
	uint32_t cq_last_solicit;
	uint32_t cq_consumer_counter;
	uint32_t cq_producer_counter;
	uint8_t cq_reserved3[8];
	uint64_t cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx cmd_cq_ctx;
	uint8_t cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_cqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t __reserved__;
	uint32_t cq_lro;
	uint32_t cq_lro_ack_seq_num;
	uint32_t cq_rx_hash;
	uint8_t cq_rx_hash_type;
	uint8_t cq_ml_path;
	uint16_t __reserved__;
	uint32_t cq_checksum;
	uint32_t __reserved__;
	uint32_t cq_flags;
	uint32_t cq_lro_srqn;
	uint32_t __reserved__[2];
	uint32_t cq_byte_cnt;
	uint64_t cq_timestamp;
	uint8_t cq_rx_drops;
	uint8_t cq_flow_tag[3];
	uint16_t cq_wqe_count;
	uint8_t cq_signature;
	uint8_t cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT 2
#define MCX_CQ_ENTRY_OPCODE_SHIFT 4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3

#define MCX_CQ_ENTRY_OPCODE_REQ 0
#define MCX_CQ_ENTRY_OPCODE_SEND 2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
#define MCX_CQ_ENTRY_OPCODE_INVALID 15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t db_update_ci;
	uint32_t db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT 28
#define MCX_CQ_DOORBELL_ARM_CMD (1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK (0xffffff)
} __packed __aligned(8);
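
/*
 * A sketch (not verbatim driver code) of arming a CQ through this
 * doorbell, where "sn" is the arm sequence number and "ci" the current
 * consumer index:
 *
 *	db->db_arm_ci = htobe32(
 *	    (sn << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
 *	    MCX_CQ_DOORBELL_ARM_CMD |
 *	    (ci & MCX_CQ_DOORBELL_ARM_CI_MASK));
 */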

struct mcx_wq_ctx {
	uint8_t wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC (1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE (1 << 3)
	uint8_t wq_reserved0[5];
	uint16_t wq_lwm;
	uint32_t wq_pd;
	uint32_t wq_uar_page;
	uint64_t wq_doorbell;
	uint32_t wq_hw_counter;
	uint32_t wq_sw_counter;
	uint16_t wq_log_stride;
	uint8_t wq_log_page_sz;
	uint8_t wq_log_size;
	uint8_t wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t sq_flags;
#define MCX_SQ_CTX_RLKEY (1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
#define MCX_SQ_CTX_STATE_SHIFT 20
	uint32_t sq_user_index;
	uint32_t sq_cqn;
	uint32_t sq_reserved1[5];
	uint32_t sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT 16
	uint32_t sq_reserved2[2];
	uint32_t sq_tis_num;
	struct mcx_wq_ctx sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t sqs_byte_count;
	uint32_t sqs_lkey;
	uint64_t sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT 8
#define MCX_SQE_WQE_OPCODE_NOP 0x00
#define MCX_SQE_WQE_OPCODE_SEND 0x0a
	uint32_t sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT 8
	uint32_t sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT 24
#define MCX_SQE_SOLICITED_EVENT 0x02
#define MCX_SQE_CE_CQE_ON_ERR 0x00
#define MCX_SQE_CE_CQE_FIRST_ERR 0x04
#define MCX_SQE_CE_CQE_ALWAYS 0x08
#define MCX_SQE_CE_CQE_SOLICIT 0x0c
#define MCX_SQE_FM_NO_FENCE 0x00
#define MCX_SQE_FM_SMALL_FENCE 0x40
	uint32_t sqe_mkey;

	/* ethernet segment */
	uint32_t sqe_reserved1;
	uint32_t sqe_mss_csum;
#define MCX_SQE_L4_CSUM (1U << 31)
#define MCX_SQE_L3_CSUM (1 << 30)
	uint32_t sqe_reserved2;
	uint16_t sqe_inline_header_size;
	uint16_t sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);

struct mcx_cmd_create_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_sq_ctx cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_sqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);


struct mcx_rq_ctx {
	uint32_t rq_flags;
#define MCX_RQ_CTX_RLKEY (1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
#define MCX_RQ_CTX_STATE_SHIFT 20
#define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
	uint32_t rq_user_index;
	uint32_t rq_cqn;
	uint32_t rq_reserved1;
	uint32_t rq_rmpn;
	uint32_t rq_reserved2[7];
	struct mcx_wq_ctx rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t rqe_byte_count;
	uint32_t rqe_lkey;
	uint64_t rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rq_state;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t cmd_modify_hi;
	uint32_t cmd_modify_lo;
	uint8_t cmd_reserved0[8];
	struct mcx_rq_ctx cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint32_t cmd_rqn;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t ft_miss_action;
	uint8_t ft_level;
	uint8_t ft_reserved0;
	uint8_t ft_log_size;
	uint32_t ft_table_miss_id;
	uint8_t ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_in {
	uint16_t cmd_opcode;
	uint8_t cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t cmd_table_type;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_table_id;
	uint8_t cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t cmd_status;
	uint8_t cmd_reserved0[3];
	uint32_t cmd_syndrome;
	uint8_t cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_match {
	/* outer headers */
	uint8_t mc_src_mac[6];
	uint16_t mc_ethertype;
	uint8_t mc_dest_mac[6];
	uint16_t mc_first_vlan;
	uint8_t mc_ip_proto;
	uint8_t mc_ip_dscp_ecn;
	uint8_t mc_vlan_flags;
	uint8_t mc_tcp_flags;
	uint16_t mc_tcp_sport;
	uint16_t mc_tcp_dport;
	uint32_t mc_reserved0;
	uint16_t mc_udp_sport;
	uint16_t mc_udp_dport;
	uint8_t mc_src_ip[16];
	uint8_t mc_dest_ip[16];

	/* misc parameters */
	uint8_t mc_reserved1[8];
	uint16_t mc_second_vlan;
	uint8_t mc_reserved2[2];
	uint8_t mc_second_vlan_flags;
	uint8_t mc_reserved3[15];
	uint32_t mc_outer_ipv6_flow_label;
	uint8_t mc_reserved4[32];

	uint8_t mc_reserved[384];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);
1621
1622 struct mcx_cmd_create_flow_group_in {
1623 uint16_t cmd_opcode;
1624 uint8_t cmd_reserved0[4];
1625 uint16_t cmd_op_mod;
1626 uint8_t cmd_reserved1[8];
1627 } __packed __aligned(4);
1628
1629 struct mcx_cmd_create_flow_group_mb_in {
1630 uint8_t cmd_table_type;
1631 uint8_t cmd_reserved0[3];
1632 uint32_t cmd_table_id;
1633 uint8_t cmd_reserved1[4];
1634 uint32_t cmd_start_flow_index;
1635 uint8_t cmd_reserved2[4];
1636 uint32_t cmd_end_flow_index;
1637 uint8_t cmd_reserved3[23];
1638 uint8_t cmd_match_criteria_enable;
1639 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1640 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1641 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
1642 struct mcx_flow_match cmd_match_criteria;
1643 uint8_t cmd_reserved4[448];
1644 } __packed __aligned(4);
1645
1646 struct mcx_cmd_create_flow_group_out {
1647 uint8_t cmd_status;
1648 uint8_t cmd_reserved0[3];
1649 uint32_t cmd_syndrome;
1650 uint32_t cmd_group_id;
1651 uint8_t cmd_reserved1[4];
1652 } __packed __aligned(4);
1653
1654 struct mcx_flow_ctx {
1655 uint8_t fc_reserved0[4];
1656 uint32_t fc_group_id;
1657 uint32_t fc_flow_tag;
1658 uint32_t fc_action;
1659 #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
1660 #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
1661 #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
1662 #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
1663 uint32_t fc_dest_list_size;
1664 uint32_t fc_counter_list_size;
1665 uint8_t fc_reserved1[40];
1666 struct mcx_flow_match fc_match_value;
1667 uint8_t fc_reserved2[192];
1668 } __packed __aligned(4);
1669
1670 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
1671 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
1672
1673 struct mcx_cmd_destroy_flow_group_in {
1674 uint16_t cmd_opcode;
1675 uint8_t cmd_reserved0[4];
1676 uint16_t cmd_op_mod;
1677 uint8_t cmd_reserved1[8];
1678 } __packed __aligned(4);
1679
1680 struct mcx_cmd_destroy_flow_group_mb_in {
1681 uint8_t cmd_table_type;
1682 uint8_t cmd_reserved0[3];
1683 uint32_t cmd_table_id;
1684 uint32_t cmd_group_id;
1685 uint8_t cmd_reserved1[36];
1686 } __packed __aligned(4);
1687
1688 struct mcx_cmd_destroy_flow_group_out {
1689 uint8_t cmd_status;
1690 uint8_t cmd_reserved0[3];
1691 uint32_t cmd_syndrome;
1692 uint8_t cmd_reserved1[8];
1693 } __packed __aligned(4);
1694
1695 struct mcx_cmd_set_flow_table_entry_in {
1696 uint16_t cmd_opcode;
1697 uint8_t cmd_reserved0[4];
1698 uint16_t cmd_op_mod;
1699 uint8_t cmd_reserved1[8];
1700 } __packed __aligned(4);
1701
1702 struct mcx_cmd_set_flow_table_entry_mb_in {
1703 uint8_t cmd_table_type;
1704 uint8_t cmd_reserved0[3];
1705 uint32_t cmd_table_id;
1706 uint32_t cmd_modify_enable_mask;
1707 uint8_t cmd_reserved1[4];
1708 uint32_t cmd_flow_index;
1709 uint8_t cmd_reserved2[28];
1710 struct mcx_flow_ctx cmd_flow_ctx;
1711 } __packed __aligned(4);
1712
1713 struct mcx_cmd_set_flow_table_entry_out {
1714 uint8_t cmd_status;
1715 uint8_t cmd_reserved0[3];
1716 uint32_t cmd_syndrome;
1717 uint8_t cmd_reserved1[8];
1718 } __packed __aligned(4);
1719
1720 struct mcx_cmd_query_flow_table_entry_in {
1721 uint16_t cmd_opcode;
1722 uint8_t cmd_reserved0[4];
1723 uint16_t cmd_op_mod;
1724 uint8_t cmd_reserved1[8];
1725 } __packed __aligned(4);
1726
1727 struct mcx_cmd_query_flow_table_entry_mb_in {
1728 uint8_t cmd_table_type;
1729 uint8_t cmd_reserved0[3];
1730 uint32_t cmd_table_id;
1731 uint8_t cmd_reserved1[8];
1732 uint32_t cmd_flow_index;
1733 uint8_t cmd_reserved2[28];
1734 } __packed __aligned(4);
1735
1736 struct mcx_cmd_query_flow_table_entry_out {
1737 uint8_t cmd_status;
1738 uint8_t cmd_reserved0[3];
1739 uint32_t cmd_syndrome;
1740 uint8_t cmd_reserved1[8];
1741 } __packed __aligned(4);
1742
1743 struct mcx_cmd_query_flow_table_entry_mb_out {
1744 uint8_t cmd_reserved0[48];
1745 struct mcx_flow_ctx cmd_flow_ctx;
1746 } __packed __aligned(4);
1747
1748 struct mcx_cmd_delete_flow_table_entry_in {
1749 uint16_t cmd_opcode;
1750 uint8_t cmd_reserved0[4];
1751 uint16_t cmd_op_mod;
1752 uint8_t cmd_reserved1[8];
1753 } __packed __aligned(4);
1754
1755 struct mcx_cmd_delete_flow_table_entry_mb_in {
1756 uint8_t cmd_table_type;
1757 uint8_t cmd_reserved0[3];
1758 uint32_t cmd_table_id;
1759 uint8_t cmd_reserved1[8];
1760 uint32_t cmd_flow_index;
1761 uint8_t cmd_reserved2[28];
1762 } __packed __aligned(4);
1763
1764 struct mcx_cmd_delete_flow_table_entry_out {
1765 uint8_t cmd_status;
1766 uint8_t cmd_reserved0[3];
1767 uint32_t cmd_syndrome;
1768 uint8_t cmd_reserved1[8];
1769 } __packed __aligned(4);
1770
1771 struct mcx_cmd_query_flow_group_in {
1772 uint16_t cmd_opcode;
1773 uint8_t cmd_reserved0[4];
1774 uint16_t cmd_op_mod;
1775 uint8_t cmd_reserved1[8];
1776 } __packed __aligned(4);
1777
1778 struct mcx_cmd_query_flow_group_mb_in {
1779 uint8_t cmd_table_type;
1780 uint8_t cmd_reserved0[3];
1781 uint32_t cmd_table_id;
1782 uint32_t cmd_group_id;
1783 uint8_t cmd_reserved1[36];
1784 } __packed __aligned(4);
1785
1786 struct mcx_cmd_query_flow_group_out {
1787 uint8_t cmd_status;
1788 uint8_t cmd_reserved0[3];
1789 uint32_t cmd_syndrome;
1790 uint8_t cmd_reserved1[8];
1791 } __packed __aligned(4);
1792
1793 struct mcx_cmd_query_flow_group_mb_out {
1794 uint8_t cmd_reserved0[12];
1795 uint32_t cmd_start_flow_index;
1796 uint8_t cmd_reserved1[4];
1797 uint32_t cmd_end_flow_index;
1798 uint8_t cmd_reserved2[20];
1799 uint32_t cmd_match_criteria_enable;
1800 uint8_t cmd_match_criteria[512];
1801 uint8_t cmd_reserved4[448];
1802 } __packed __aligned(4);
1803
1804 struct mcx_cmd_query_flow_table_in {
1805 uint16_t cmd_opcode;
1806 uint8_t cmd_reserved0[4];
1807 uint16_t cmd_op_mod;
1808 uint8_t cmd_reserved1[8];
1809 } __packed __aligned(4);
1810
1811 struct mcx_cmd_query_flow_table_mb_in {
1812 uint8_t cmd_table_type;
1813 uint8_t cmd_reserved0[3];
1814 uint32_t cmd_table_id;
1815 uint8_t cmd_reserved1[40];
1816 } __packed __aligned(4);
1817
1818 struct mcx_cmd_query_flow_table_out {
1819 uint8_t cmd_status;
1820 uint8_t cmd_reserved0[3];
1821 uint32_t cmd_syndrome;
1822 uint8_t cmd_reserved1[8];
1823 } __packed __aligned(4);
1824
1825 struct mcx_cmd_query_flow_table_mb_out {
1826 uint8_t cmd_reserved0[4];
1827 struct mcx_flow_table_ctx cmd_ctx;
1828 } __packed __aligned(4);
1829
1830 struct mcx_cmd_alloc_flow_counter_in {
1831 uint16_t cmd_opcode;
1832 uint8_t cmd_reserved0[4];
1833 uint16_t cmd_op_mod;
1834 uint8_t cmd_reserved1[8];
1835 } __packed __aligned(4);
1836
1837 struct mcx_cmd_query_rq_in {
1838 uint16_t cmd_opcode;
1839 uint8_t cmd_reserved0[4];
1840 uint16_t cmd_op_mod;
1841 uint32_t cmd_rqn;
1842 uint8_t cmd_reserved1[4];
1843 } __packed __aligned(4);
1844
1845 struct mcx_cmd_query_rq_out {
1846 uint8_t cmd_status;
1847 uint8_t cmd_reserved0[3];
1848 uint32_t cmd_syndrome;
1849 uint8_t cmd_reserved1[8];
1850 } __packed __aligned(4);
1851
1852 struct mcx_cmd_query_rq_mb_out {
1853 uint8_t cmd_reserved0[16];
1854 struct mcx_rq_ctx cmd_ctx;
1855 };
1856
1857 struct mcx_cmd_query_sq_in {
1858 uint16_t cmd_opcode;
1859 uint8_t cmd_reserved0[4];
1860 uint16_t cmd_op_mod;
1861 uint32_t cmd_sqn;
1862 uint8_t cmd_reserved1[4];
1863 } __packed __aligned(4);
1864
1865 struct mcx_cmd_query_sq_out {
1866 uint8_t cmd_status;
1867 uint8_t cmd_reserved0[3];
1868 uint32_t cmd_syndrome;
1869 uint8_t cmd_reserved1[8];
1870 } __packed __aligned(4);
1871
1872 struct mcx_cmd_query_sq_mb_out {
1873 uint8_t cmd_reserved0[16];
1874 struct mcx_sq_ctx cmd_ctx;
1875 };
1876
1877 struct mcx_cmd_alloc_flow_counter_out {
1878 uint8_t cmd_status;
1879 uint8_t cmd_reserved0[3];
1880 uint32_t cmd_syndrome;
1881 uint8_t cmd_reserved1[2];
1882 uint16_t cmd_flow_counter_id;
1883 uint8_t cmd_reserved2[4];
1884 } __packed __aligned(4);
1885
1886 struct mcx_wq_doorbell {
1887 uint32_t db_recv_counter;
1888 uint32_t db_send_counter;
1889 } __packed __aligned(8);
1890
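/*
 * a single bus_dma allocation, mapped into both kernel and device
 * address space; the MCX_DMA_* macros below are its accessors.
 */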
1891 struct mcx_dmamem {
1892 bus_dmamap_t mxm_map;
1893 bus_dma_segment_t mxm_seg;
1894 int mxm_nsegs;
1895 size_t mxm_size;
1896 void *mxm_kva;
1897 };
1898 #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
1899 #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
1900 #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
1901 #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
1902
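/*
 * memory given to the firmware: a multi-segment allocation tracked
 * by page count, never accessed through a kernel mapping.
 */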
1903 struct mcx_hwmem {
1904 bus_dmamap_t mhm_map;
1905 bus_dma_segment_t *mhm_segs;
1906 unsigned int mhm_seg_count;
1907 unsigned int mhm_npages;
1908 };
1909
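/* per-descriptor state: the DMA map and the mbuf loaded into it */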
1910 struct mcx_slot {
1911 bus_dmamap_t ms_map;
1912 struct mbuf *ms_m;
1913 };
1914
1915 struct mcx_cq {
1916 int cq_n;
1917 struct mcx_dmamem cq_mem;
1918 uint32_t *cq_doorbell;
1919 uint32_t cq_cons;
1920 uint32_t cq_count;
1921 };
1922
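/*
 * pairs of chip timestamps and kernel nanouptime samples, taken
 * periodically so chip clock ticks in completion entries can be
 * interpolated into host time (c_tdiff ticks over c_udiff ns).
 */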
1923 struct mcx_calibration {
1924 uint64_t c_timestamp; /* previous mcx chip time */
1925 uint64_t c_uptime; /* previous kernel nanouptime */
1926 uint64_t c_tbase; /* mcx chip time */
1927 uint64_t c_ubase; /* kernel nanouptime */
1928 uint64_t c_tdiff;
1929 uint64_t c_udiff;
1930 };
1931
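/* seconds between calibration passes; the first pass runs sooner */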
1932 #define MCX_CALIBRATE_FIRST 2
1933 #define MCX_CALIBRATE_NORMAL 30
1934
1935 struct mcx_rxring {
1936 u_int rxr_total;
1937 u_int rxr_inuse;
1938 };
1939
1940 MBUFQ_HEAD(mcx_mbufq);
1941
1942 struct mcx_softc {
1943 device_t sc_dev;
1944 struct ethercom sc_ec;
1945 struct ifmedia sc_media;
1946 uint64_t sc_media_status;
1947 uint64_t sc_media_active;
1948 kmutex_t sc_media_mutex;
1949
1950 pci_chipset_tag_t sc_pc;
1951 pci_intr_handle_t *sc_intrs;
1952 void *sc_ihs[MCX_MAX_NINTR];
1953 pcitag_t sc_tag;
1954
1955 bus_dma_tag_t sc_dmat;
1956 bus_space_tag_t sc_memt;
1957 bus_space_handle_t sc_memh;
1958 bus_size_t sc_mems;
1959
1960 struct mcx_dmamem sc_cmdq_mem;
1961 unsigned int sc_cmdq_mask;
1962 unsigned int sc_cmdq_size;
1963
1964 unsigned int sc_cmdq_token;
1965
1966 struct mcx_hwmem sc_boot_pages;
1967 struct mcx_hwmem sc_init_pages;
1968 struct mcx_hwmem sc_regular_pages;
1969
1970 int sc_uar;
1971 int sc_pd;
1972 int sc_tdomain;
1973 uint32_t sc_lkey;
1974
1975 struct mcx_dmamem sc_doorbell_mem;
1976
1977 int sc_eqn;
1978 uint32_t sc_eq_cons;
1979 struct mcx_dmamem sc_eq_mem;
1980 int sc_hardmtu;
1981
1982 struct workqueue *sc_workq;
1983 struct work sc_port_change;
1984
1985 int sc_flow_table_id;
1986 #define MCX_FLOW_GROUP_PROMISC 0
1987 #define MCX_FLOW_GROUP_ALLMULTI 1
1988 #define MCX_FLOW_GROUP_MAC 2
1989 #define MCX_NUM_FLOW_GROUPS 3
1990 int sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
1991 int sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
1992 int sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
1993 int sc_promisc_flow_enabled;
1994 int sc_allmulti_flow_enabled;
1995 int sc_mcast_flow_base;
1996 int sc_extra_mcast;
1997 uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
1998
1999 struct mcx_calibration sc_calibration[2];
2000 unsigned int sc_calibration_gen;
2001 callout_t sc_calibrate;
2002
2003 struct mcx_cq sc_cq[MCX_MAX_CQS];
2004 int sc_num_cq;
2005
2006 /* rx */
2007 int sc_tirn;
2008 int sc_rqn;
2009 struct mcx_dmamem sc_rq_mem;
2010 struct mcx_slot *sc_rx_slots;
2011 uint32_t *sc_rx_doorbell;
2012
2013 uint32_t sc_rx_prod;
2014 callout_t sc_rx_refill;
2015 struct mcx_rxring sc_rxr;
2016
2017 /* tx */
2018 int sc_tisn;
2019 int sc_sqn;
2020 struct mcx_dmamem sc_sq_mem;
2021 struct mcx_slot *sc_tx_slots;
2022 uint32_t *sc_tx_doorbell;
2023 int sc_bf_size;
2024 int sc_bf_offset;
2025
2026 uint32_t sc_tx_cons;
2027 uint32_t sc_tx_prod;
2028
2029 uint64_t sc_last_cq_db;
2030 uint64_t sc_last_srq_db;
2031 };
2032 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2033
2034 static int mcx_match(device_t, cfdata_t, void *);
2035 static void mcx_attach(device_t, device_t, void *);
2036
2037 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2038 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2039 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2040 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2041
2042 static int mcx_version(struct mcx_softc *);
2043 static int mcx_init_wait(struct mcx_softc *);
2044 static int mcx_enable_hca(struct mcx_softc *);
2045 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2046 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2047 int);
2048 static int mcx_issi(struct mcx_softc *);
2049 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2050 static int mcx_hca_max_caps(struct mcx_softc *);
2051 static int mcx_hca_set_caps(struct mcx_softc *);
2052 static int mcx_init_hca(struct mcx_softc *);
2053 static int mcx_set_driver_version(struct mcx_softc *);
2054 static int mcx_iff(struct mcx_softc *);
2055 static int mcx_alloc_uar(struct mcx_softc *);
2056 static int mcx_alloc_pd(struct mcx_softc *);
2057 static int mcx_alloc_tdomain(struct mcx_softc *);
2058 static int mcx_create_eq(struct mcx_softc *);
2059 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2060 static int mcx_query_special_contexts(struct mcx_softc *);
2061 static int mcx_set_port_mtu(struct mcx_softc *, int);
2062 static int mcx_create_cq(struct mcx_softc *, int);
2063 static int mcx_destroy_cq(struct mcx_softc *, int);
2064 static int mcx_create_sq(struct mcx_softc *, int);
2065 static int mcx_destroy_sq(struct mcx_softc *);
2066 static int mcx_ready_sq(struct mcx_softc *);
2067 static int mcx_create_rq(struct mcx_softc *, int);
2068 static int mcx_destroy_rq(struct mcx_softc *);
2069 static int mcx_ready_rq(struct mcx_softc *);
2070 static int mcx_create_tir(struct mcx_softc *);
2071 static int mcx_destroy_tir(struct mcx_softc *);
2072 static int mcx_create_tis(struct mcx_softc *);
2073 static int mcx_destroy_tis(struct mcx_softc *);
2074 static int mcx_create_flow_table(struct mcx_softc *, int);
2075 static int mcx_set_flow_table_root(struct mcx_softc *);
2076 static int mcx_destroy_flow_table(struct mcx_softc *);
2077 static int mcx_create_flow_group(struct mcx_softc *, int, int,
2078 int, int, struct mcx_flow_match *);
2079 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2080 static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2081 const uint8_t *);
2082 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2083
2084 #if 0
2085 static int mcx_dump_flow_table(struct mcx_softc *);
2086 static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2087 static int mcx_dump_flow_group(struct mcx_softc *);
2088 static int mcx_dump_rq(struct mcx_softc *);
2089 static int mcx_dump_sq(struct mcx_softc *);
2090 #endif
2091
2092
2093 /*
2094 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2095 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2096 */
2097 static void mcx_refill(void *);
2098 static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2099 struct mcx_mbufq *, const struct mcx_calibration *);
2100 static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2101 int *);
2102 static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2103
2104 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2105 static void mcx_arm_eq(struct mcx_softc *);
2106 static int mcx_intr(void *);
2107
2108 static int mcx_init(struct ifnet *);
2109 static void mcx_stop(struct ifnet *, int);
2110 static int mcx_ioctl(struct ifnet *, u_long, void *);
2111 static void mcx_start(struct ifnet *);
2112 static void mcx_watchdog(struct ifnet *);
2113 static void mcx_media_add_types(struct mcx_softc *);
2114 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2115 static int mcx_media_change(struct ifnet *);
2116 #if 0
2117 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2118 #endif
2119 static void mcx_port_change(struct work *, void *);
2120
2121 static void mcx_calibrate_first(struct mcx_softc *);
2122 static void mcx_calibrate(void *);
2123
2124 static inline uint32_t
2125 mcx_rd(struct mcx_softc *, bus_size_t);
2126 static inline void
2127 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2128 static inline void
2129 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2130
2131 static uint64_t mcx_timer(struct mcx_softc *);
2132
2133 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2134 bus_size_t, u_int align);
2135 static void mcx_dmamem_zero(struct mcx_dmamem *);
2136 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2137
2138 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2139 unsigned int);
2140 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2141
2142 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2143
2144 static const struct {
2145 pci_vendor_id_t vendor;
2146 pci_product_id_t product;
2147 } mcx_devices[] = {
2148 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2149 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2150 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2151 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2152 };
2153
2154 struct mcx_eth_proto_capability {
2155 uint64_t cap_media;
2156 uint64_t cap_baudrate;
2157 };
2158
2159 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2160 [MCX_ETHER_CAP_SGMII] = { IFM_1000_SGMII, IF_Gbps(1) },
2161 [MCX_ETHER_CAP_1000_KX] = { IFM_1000_KX, IF_Gbps(1) },
2162 [MCX_ETHER_CAP_10G_CX4] = { IFM_10G_CX4, IF_Gbps(10) },
2163 [MCX_ETHER_CAP_10G_KX4] = { IFM_10G_KX4, IF_Gbps(10) },
2164 [MCX_ETHER_CAP_10G_KR] = { IFM_10G_KR, IF_Gbps(10) },
2165 [MCX_ETHER_CAP_20G_KR2] = { IFM_20G_KR2, IF_Gbps(20) },
2166 [MCX_ETHER_CAP_40G_CR4] = { IFM_40G_CR4, IF_Gbps(40) },
2167 [MCX_ETHER_CAP_40G_KR4] = { IFM_40G_KR4, IF_Gbps(40) },
2168 [MCX_ETHER_CAP_56G_R4] = { IFM_56G_R4, IF_Gbps(56) },
2169 [MCX_ETHER_CAP_10G_CR] = { IFM_10G_CR1, IF_Gbps(10) },
2170 [MCX_ETHER_CAP_10G_SR] = { IFM_10G_SR, IF_Gbps(10) },
2171 [MCX_ETHER_CAP_10G_LR] = { IFM_10G_LR, IF_Gbps(10) },
2172 [MCX_ETHER_CAP_40G_SR4] = { IFM_40G_SR4, IF_Gbps(40) },
2173 [MCX_ETHER_CAP_40G_LR4] = { IFM_40G_LR4, IF_Gbps(40) },
2174 [MCX_ETHER_CAP_50G_SR2] = { IFM_50G_SR2, IF_Gbps(50) },
2175 [MCX_ETHER_CAP_100G_CR4] = { IFM_100G_CR4, IF_Gbps(100) },
2176 [MCX_ETHER_CAP_100G_SR4] = { IFM_100G_SR4, IF_Gbps(100) },
2177 [MCX_ETHER_CAP_100G_KR4] = { IFM_100G_KR4, IF_Gbps(100) },
2178 [MCX_ETHER_CAP_100G_LR4] = { IFM_100G_LR4, IF_Gbps(100) },
2179 [MCX_ETHER_CAP_100_TX] = { IFM_100_TX, IF_Mbps(100) },
2180 [MCX_ETHER_CAP_1000_T] = { IFM_1000_T, IF_Gbps(1) },
2181 [MCX_ETHER_CAP_10G_T] = { IFM_10G_T, IF_Gbps(10) },
2182 [MCX_ETHER_CAP_25G_CR] = { IFM_25G_CR, IF_Gbps(25) },
2183 [MCX_ETHER_CAP_25G_KR] = { IFM_25G_KR, IF_Gbps(25) },
2184 [MCX_ETHER_CAP_25G_SR] = { IFM_25G_SR, IF_Gbps(25) },
2185 [MCX_ETHER_CAP_50G_CR2] = { IFM_50G_CR2, IF_Gbps(50) },
2186 [MCX_ETHER_CAP_50G_KR2] = { IFM_50G_KR2, IF_Gbps(50) },
2187 };
2188
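/* hardware object ids (uar, pd, eq, ...) are 24-bit fields in be32 words */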
2189 static int
2190 mcx_get_id(uint32_t val)
2191 {
2192 return be32toh(val) & 0x00ffffff;
2193 }
2194
2195 static int
2196 mcx_match(device_t parent, cfdata_t cf, void *aux)
2197 {
2198 struct pci_attach_args *pa = aux;
2199 int n;
2200
2201 for (n = 0; n < __arraycount(mcx_devices); n++) {
2202 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2203 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2204 return 1;
2205 }
2206
2207 return 0;
2208 }
2209
2210 static void
2211 mcx_attach(device_t parent, device_t self, void *aux)
2212 {
2213 struct mcx_softc *sc = device_private(self);
2214 struct ifnet *ifp = &sc->sc_ec.ec_if;
2215 struct pci_attach_args *pa = aux;
2216 uint8_t enaddr[ETHER_ADDR_LEN];
2217 int counts[PCI_INTR_TYPE_SIZE];
2218 char intrbuf[PCI_INTRSTR_LEN];
2219 pcireg_t memtype;
2220 uint32_t r;
2221 unsigned int cq_stride;
2222 unsigned int cq_size;
2223 const char *intrstr;
2224 int i;
2225
2226 sc->sc_dev = self;
2227 sc->sc_pc = pa->pa_pc;
2228 sc->sc_tag = pa->pa_tag;
2229 if (pci_dma64_available(pa))
2230 sc->sc_dmat = pa->pa_dmat64;
2231 else
2232 sc->sc_dmat = pa->pa_dmat;
2233
2234 /* Map the PCI memory space */
2235 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2236 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2237 0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2238 NULL, &sc->sc_mems)) {
2239 aprint_error(": unable to map register memory\n");
2240 return;
2241 }
2242
2243 pci_aprint_devinfo(pa, "Ethernet controller");
2244
2245 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2246
2247 if (mcx_version(sc) != 0) {
2248 /* error printed by mcx_version */
2249 goto unmap;
2250 }
2251
2252 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2253 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2254 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2255 if (cq_size > MCX_MAX_CQE) {
2256 aprint_error_dev(self,
2257 "command queue size overflow %u\n", cq_size);
2258 goto unmap;
2259 }
2260 if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2261 aprint_error_dev(self,
2262 "command queue entry size underflow %u\n", cq_stride);
2263 goto unmap;
2264 }
2265 if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2266 aprint_error_dev(self, "command queue page overflow\n");
2267 goto unmap;
2268 }
2269
2270 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2271 MCX_PAGE_SIZE) != 0) {
2272 aprint_error_dev(self, "unable to allocate doorbell memory\n");
2273 goto unmap;
2274 }
2275
2276 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2277 MCX_PAGE_SIZE) != 0) {
2278 aprint_error_dev(self, "unable to allocate command queue\n");
2279 goto dbfree;
2280 }
2281
2282 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2283 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2284 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2285 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2286
2287 if (mcx_init_wait(sc) != 0) {
2288 aprint_error_dev(self, "timeout waiting for init\n");
2289 goto cqfree;
2290 }
2291
2292 sc->sc_cmdq_mask = cq_size - 1;
2293 sc->sc_cmdq_size = cq_stride;
2294
2295 if (mcx_enable_hca(sc) != 0) {
2296 /* error printed by mcx_enable_hca */
2297 goto cqfree;
2298 }
2299
2300 if (mcx_issi(sc) != 0) {
2301 /* error printed by mcx_issi */
2302 goto teardown;
2303 }
2304
2305 if (mcx_pages(sc, &sc->sc_boot_pages,
2306 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2307 /* error printed by mcx_pages */
2308 goto teardown;
2309 }
2310
2311 if (mcx_hca_max_caps(sc) != 0) {
2312 /* error printed by mcx_hca_max_caps */
2313 goto teardown;
2314 }
2315
2316 if (mcx_hca_set_caps(sc) != 0) {
2317 /* error printed by mcx_hca_set_caps */
2318 goto teardown;
2319 }
2320
2321 if (mcx_pages(sc, &sc->sc_init_pages,
2322 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2323 /* error printed by mcx_pages */
2324 goto teardown;
2325 }
2326
2327 if (mcx_init_hca(sc) != 0) {
2328 /* error printed by mcx_init_hca */
2329 goto teardown;
2330 }
2331
2332 if (mcx_pages(sc, &sc->sc_regular_pages,
2333 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2334 /* error printed by mcx_pages */
2335 goto teardown;
2336 }
2337
2338 /* apparently not necessary? */
2339 if (mcx_set_driver_version(sc) != 0) {
2340 /* error printed by mcx_set_driver_version */
2341 goto teardown;
2342 }
2343
2344 if (mcx_iff(sc) != 0) { /* modify nic vport context */
2345 /* error printed by mcx_iff? */
2346 goto teardown;
2347 }
2348
2349 if (mcx_alloc_uar(sc) != 0) {
2350 /* error printed by mcx_alloc_uar */
2351 goto teardown;
2352 }
2353
2354 if (mcx_alloc_pd(sc) != 0) {
2355 /* error printed by mcx_alloc_pd */
2356 goto teardown;
2357 }
2358
2359 if (mcx_alloc_tdomain(sc) != 0) {
2360 /* error printed by mcx_alloc_tdomain */
2361 goto teardown;
2362 }
2363
2364 	/*
2365 	 * The PRM makes no mention of MSI, just legacy interrupts and
2366 	 * MSI-X, and Mellanox support says legacy interrupts are not
2367 	 * supported, so we're stuck with MSI-X alone.
2368 	 */
2369 counts[PCI_INTR_TYPE_MSIX] = 1;
2370 counts[PCI_INTR_TYPE_MSI] = 0;
2371 counts[PCI_INTR_TYPE_INTX] = 0;
2372 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2373 aprint_error_dev(self, "unable to allocate interrupt\n");
2374 goto teardown;
2375 }
2376 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2377
2378 #ifdef MCX_MPSAFE
2379 pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
2380 #endif
2381
2382 intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
2383 sizeof(intrbuf));
2384 sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
2385 IPL_NET, mcx_intr, sc, DEVNAME(sc));
2386 if (sc->sc_ihs[0] == NULL) {
2387 aprint_error_dev(self, "unable to establish interrupt%s%s\n",
2388 intrstr ? " at " : "",
2389 intrstr ? intrstr : "");
2390 goto teardown;
2391 }
2392
2393 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2394
2395 if (mcx_create_eq(sc) != 0) {
2396 /* error printed by mcx_create_eq */
2397 goto teardown;
2398 }
2399
2400 if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2401 /* error printed by mcx_query_nic_vport_context */
2402 goto teardown;
2403 }
2404
2405 if (mcx_query_special_contexts(sc) != 0) {
2406 /* error printed by mcx_query_special_contexts */
2407 goto teardown;
2408 }
2409
2410 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2411 /* error printed by mcx_set_port_mtu */
2412 goto teardown;
2413 }
2414
2415 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2416 ether_sprintf(enaddr));
2417
2418 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2419 ifp->if_softc = sc;
2420 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2421 #ifdef MCX_MPSAFE
2422 ifp->if_extflags = IFEF_MPSAFE;
2423 #endif
2424 ifp->if_init = mcx_init;
2425 ifp->if_stop = mcx_stop;
2426 ifp->if_ioctl = mcx_ioctl;
2427 ifp->if_start = mcx_start;
2428 ifp->if_watchdog = mcx_watchdog;
2429 ifp->if_mtu = sc->sc_hardmtu;
2430 IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2431 IFQ_SET_READY(&ifp->if_snd);
2432
2433 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
2434
2435 sc->sc_ec.ec_ifmedia = &sc->sc_media;
2436 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2437 mcx_media_status, &sc->sc_media_mutex);
2438 mcx_media_add_types(sc);
2439 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2440 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2441
2442 if_attach(ifp);
2443 if_deferred_start_init(ifp, NULL);
2444
2445 ether_ifattach(ifp, enaddr);
2446
2447 callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
2448 callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
2449 callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
2450 callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
2451
2452 if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
2453 PRI_NONE, IPL_NET, 0) != 0) {
2454 aprint_error_dev(self, "couldn't create port change workq\n");
2455 goto teardown;
2456 }
2457
2458 mcx_port_change(&sc->sc_port_change, sc);
2459
2460 sc->sc_flow_table_id = -1;
2461 for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2462 sc->sc_flow_group_id[i] = -1;
2463 sc->sc_flow_group_size[i] = 0;
2464 sc->sc_flow_group_start[i] = 0;
2465 }
2466 sc->sc_extra_mcast = 0;
2467 memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2468 return;
2469
2470 teardown:
2471 mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2472 /* error printed by mcx_teardown_hca, and we're already unwinding */
2473 cqfree:
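 	/*
 	 * park the command interface in the disabled state before
 	 * freeing the queue memory
 	 */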
2474 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2475 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2476 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2477 MCX_CMDQ_INTERFACE_DISABLED);
2478 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2479
2480 mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2481 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2482 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2483
2484 mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2485 dbfree:
2486 mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2487 unmap:
2488 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2489 sc->sc_mems = 0;
2490 }
2491
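/*
 * minimal rx ring accounting helpers, standing in for the if_rxr(9)
 * API the OpenBSD version of this driver uses.
 */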
2492 static void
2493 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2494 {
2495 rxr->rxr_total = hwm;
2496 rxr->rxr_inuse = 0;
2497 }
2498
2499 static u_int
2500 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2501 {
2502 	const u_int taken = uimin(max, rxr->rxr_total - rxr->rxr_inuse);
2503
2504 rxr->rxr_inuse += taken;
2505
2506 return taken;
2507 }
2508
2509 static void
2510 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2511 {
2512 rxr->rxr_inuse -= n;
2513 }
2514
2515 static u_int
2516 mcx_rxr_inuse(struct mcx_rxring *rxr)
2517 {
2518 return rxr->rxr_inuse;
2519 }
2520
2521 static int
2522 mcx_version(struct mcx_softc *sc)
2523 {
2524 uint32_t fw0, fw1;
2525 uint16_t cmdif;
2526
2527 fw0 = mcx_rd(sc, MCX_FW_VER);
2528 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2529
2530 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2531 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2532
2533 cmdif = MCX_CMDIF(fw1);
2534 if (cmdif != MCX_CMD_IF_SUPPORTED) {
2535 aprint_error_dev(sc->sc_dev,
2536 "unsupported command interface %u\n", cmdif);
2537 return (-1);
2538 }
2539
2540 return (0);
2541 }
2542
2543 static int
2544 mcx_init_wait(struct mcx_softc *sc)
2545 {
2546 unsigned int i;
2547 uint32_t r;
2548
2549 for (i = 0; i < 2000; i++) {
2550 r = mcx_rd(sc, MCX_STATE);
2551 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2552 return (0);
2553
2554 delay(1000);
2555 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2556 BUS_SPACE_BARRIER_READ);
2557 }
2558
2559 return (-1);
2560 }
2561
2562 static uint8_t
2563 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2564 unsigned int msec)
2565 {
2566 unsigned int i;
2567
2568 for (i = 0; i < msec; i++) {
2569 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2570 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2571
2572 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2573 MCX_CQ_STATUS_OWN_SW)
2574 return (0);
2575
2576 delay(1000);
2577 }
2578
2579 return (ETIMEDOUT);
2580 }
2581
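/*
 * command queue entries and mailboxes carry an 8-bit xor checksum;
 * the mix helpers below fold 64/32/8-bit words into it.
 */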
2582 static uint32_t
2583 mcx_mix_u64(uint32_t xor, uint64_t u64)
2584 {
2585 xor ^= u64 >> 32;
2586 xor ^= u64;
2587
2588 return (xor);
2589 }
2590
2591 static uint32_t
2592 mcx_mix_u32(uint32_t xor, uint32_t u32)
2593 {
2594 xor ^= u32;
2595
2596 return (xor);
2597 }
2598
2599 static uint32_t
2600 mcx_mix_u8(uint32_t xor, uint8_t u8)
2601 {
2602 xor ^= u8;
2603
2604 return (xor);
2605 }
2606
2607 static uint8_t
2608 mcx_mix_done(uint32_t xor)
2609 {
2610 xor ^= xor >> 16;
2611 xor ^= xor >> 8;
2612
2613 return (xor);
2614 }
2615
2616 static uint8_t
2617 mcx_xor(const void *buf, size_t len)
2618 {
2619 const uint32_t *dwords = buf;
2620 uint32_t xor = 0xff;
2621 size_t i;
2622
2623 len /= sizeof(*dwords);
2624
2625 for (i = 0; i < len; i++)
2626 xor ^= dwords[i];
2627
2628 return (mcx_mix_done(xor));
2629 }
2630
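/* tokens tie commands to their mailboxes; skip 0 so valid tokens are non-zero */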
2631 static uint8_t
2632 mcx_cmdq_token(struct mcx_softc *sc)
2633 {
2634 uint8_t token;
2635
2636 do {
2637 token = ++sc->sc_cmdq_token;
2638 } while (token == 0);
2639
2640 return (token);
2641 }
2642
2643 static void
2644 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2645 uint32_t ilen, uint32_t olen, uint8_t token)
2646 {
2647 memset(cqe, 0, sc->sc_cmdq_size);
2648
2649 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2650 be32enc(&cqe->cq_input_length, ilen);
2651 be32enc(&cqe->cq_output_length, olen);
2652 cqe->cq_token = token;
2653 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2654 }
2655
2656 static void
2657 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2658 {
2659 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2660 }
2661
2662 static int
2663 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
2664 {
2665 /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
2666 return (0);
2667 }
2668
2669 static void *
2670 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2671 {
2672 return (&cqe->cq_input_data);
2673 }
2674
2675 static void *
2676 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2677 {
2678 return (&cqe->cq_output_data);
2679 }
2680
2681 static void
2682 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2683 unsigned int slot)
2684 {
2685 mcx_cmdq_sign(cqe);
2686
2687 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2688 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2689
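 	/* ring the doorbell; the entry now belongs to the hardware */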
2690 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2691 }
2692
2693 static int
2694 mcx_enable_hca(struct mcx_softc *sc)
2695 {
2696 struct mcx_cmdq_entry *cqe;
2697 struct mcx_cmd_enable_hca_in *in;
2698 struct mcx_cmd_enable_hca_out *out;
2699 int error;
2700 uint8_t status;
2701
2702 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2703 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2704
2705 in = mcx_cmdq_in(cqe);
2706 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2707 in->cmd_op_mod = htobe16(0);
2708 in->cmd_function_id = htobe16(0);
2709
2710 mcx_cmdq_post(sc, cqe, 0);
2711
2712 error = mcx_cmdq_poll(sc, cqe, 1000);
2713 if (error != 0) {
2714 printf(", hca enable timeout\n");
2715 return (-1);
2716 }
2717 if (mcx_cmdq_verify(cqe) != 0) {
2718 printf(", hca enable command corrupt\n");
2719 return (-1);
2720 }
2721
2722 status = cqe->cq_output_data[0];
2723 if (status != MCX_CQ_STATUS_OK) {
2724 printf(", hca enable failed (%x)\n", status);
2725 return (-1);
2726 }
2727
2728 return (0);
2729 }
2730
2731 static int
2732 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2733 {
2734 struct mcx_cmdq_entry *cqe;
2735 struct mcx_cmd_teardown_hca_in *in;
2736 struct mcx_cmd_teardown_hca_out *out;
2737 int error;
2738 uint8_t status;
2739
2740 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2741 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2742
2743 in = mcx_cmdq_in(cqe);
2744 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2745 in->cmd_op_mod = htobe16(0);
2746 in->cmd_profile = profile;
2747
2748 mcx_cmdq_post(sc, cqe, 0);
2749
2750 error = mcx_cmdq_poll(sc, cqe, 1000);
2751 if (error != 0) {
2752 printf(", hca teardown timeout\n");
2753 return (-1);
2754 }
2755 if (mcx_cmdq_verify(cqe) != 0) {
2756 printf(", hca teardown command corrupt\n");
2757 return (-1);
2758 }
2759
2760 status = cqe->cq_output_data[0];
2761 if (status != MCX_CQ_STATUS_OK) {
2762 printf(", hca teardown failed (%x)\n", status);
2763 return (-1);
2764 }
2765
2766 return (0);
2767 }
2768
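/*
 * allocate a chain of command mailboxes: each one records its block
 * number and token and points (be64) at the next, with *ptr (a cqe
 * input/output pointer) patched to point at the first.
 */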
2769 static int
2770 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2771 unsigned int nmb, uint64_t *ptr, uint8_t token)
2772 {
2773 uint8_t *kva;
2774 uint64_t dva;
2775 int i;
2776 int error;
2777
2778 error = mcx_dmamem_alloc(sc, mxm,
2779 nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2780 if (error != 0)
2781 return (error);
2782
2783 mcx_dmamem_zero(mxm);
2784
2785 dva = MCX_DMA_DVA(mxm);
2786 kva = MCX_DMA_KVA(mxm);
2787 for (i = 0; i < nmb; i++) {
2788 struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2789
2790 /* patch the cqe or mbox pointing at this one */
2791 be64enc(ptr, dva);
2792
2793 /* fill in this mbox */
2794 be32enc(&mbox->mb_block_number, i);
2795 mbox->mb_token = token;
2796
2797 /* move to the next one */
2798 ptr = &mbox->mb_next_ptr;
2799
2800 dva += MCX_CMDQ_MAILBOX_SIZE;
2801 kva += MCX_CMDQ_MAILBOX_SIZE;
2802 }
2803
2804 return (0);
2805 }
2806
2807 static uint32_t
2808 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2809 {
2810 uint32_t xor = 0xff;
2811
2812 /* only 3 fields get set, so mix them directly */
2813 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2814 xor = mcx_mix_u32(xor, mb->mb_block_number);
2815 xor = mcx_mix_u8(xor, mb->mb_token);
2816
2817 return (mcx_mix_done(xor));
2818 }
2819
2820 static void
2821 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2822 {
2823 uint8_t *kva;
2824 int i;
2825
2826 kva = MCX_DMA_KVA(mxm);
2827
2828 for (i = 0; i < nmb; i++) {
2829 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2830 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2831 mb->mb_ctrl_signature = sig;
2832 mb->mb_signature = sig ^
2833 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2834
2835 kva += MCX_CMDQ_MAILBOX_SIZE;
2836 }
2837 }
2838
2839 static void
2840 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
2841 {
2842 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
2843 0, MCX_DMA_LEN(mxm), ops);
2844 }
2845
2846 static struct mcx_cmdq_mailbox *
2847 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2848 {
2849 uint8_t *kva;
2850
2851 kva = MCX_DMA_KVA(mxm);
2852 kva += i * MCX_CMDQ_MAILBOX_SIZE;
2853
2854 return ((struct mcx_cmdq_mailbox *)kva);
2855 }
2856
2857 static inline void *
2858 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2859 {
2860 return (&mb->mb_data);
2861 }
2862
2863 static void
2864 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2865 void *b, size_t len)
2866 {
2867 uint8_t *buf = b;
2868 struct mcx_cmdq_mailbox *mb;
2869 int i;
2870
2871 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2872 for (i = 0; i < nmb; i++) {
2873
2874 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2875
2876 if (sizeof(mb->mb_data) >= len)
2877 break;
2878
2879 buf += sizeof(mb->mb_data);
2880 len -= sizeof(mb->mb_data);
2881 mb++;
2882 }
2883 }
2884
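/*
 * write an array of page addresses into mailbox data, starting at
 * byte offset 'offset' and continuing across mailbox boundaries.
 */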
2885 static void
2886 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
2887 struct mcx_dmamem *buf)
2888 {
2889 uint64_t *pas;
2890 int mbox, mbox_pages, i;
2891
2892 mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
2893 offset %= MCX_CMDQ_MAILBOX_DATASIZE;
2894
2895 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
2896 pas += (offset / sizeof(*pas));
2897 mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
2898 for (i = 0; i < npages; i++) {
2899 if (i == mbox_pages) {
2900 mbox++;
2901 pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
2902 mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
2903 }
2904 *pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
2905 pas++;
2906 }
2907 }
2908
2909 static void
2910 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2911 {
2912 uint8_t *buf = b;
2913 struct mcx_cmdq_mailbox *mb;
2914 int i;
2915
2916 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2917 for (i = 0; i < nmb; i++) {
2918 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2919
2920 if (sizeof(mb->mb_data) >= len)
2921 break;
2922
2923 buf += sizeof(mb->mb_data);
2924 len -= sizeof(mb->mb_data);
2925 mb++;
2926 }
2927 }
2928
2929 static void
2930 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
2931 {
2932 mcx_dmamem_free(sc, mxm);
2933 }
2934
2935 #if 0
2936 static void
2937 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
2938 {
2939 unsigned int i;
2940
2941 	printf(" type %02x, ilen %u, iptr %016" PRIx64, cqe->cq_type,
2942 be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
2943
2944 printf(", idata ");
2945 for (i = 0; i < sizeof(cqe->cq_input_data); i++)
2946 printf("%02x", cqe->cq_input_data[i]);
2947
2948 printf(", odata ");
2949 for (i = 0; i < sizeof(cqe->cq_output_data); i++)
2950 printf("%02x", cqe->cq_output_data[i]);
2951
2952 	printf(", optr %016" PRIx64 ", olen %u, token %02x, sig %02x, status %02x",
2953 be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
2954 cqe->cq_token, cqe->cq_signature, cqe->cq_status);
2955 }
2956
2957 static void
2958 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
2959 {
2960 int i, j;
2961 uint8_t *d;
2962
2963 for (i = 0; i < num; i++) {
2964 struct mcx_cmdq_mailbox *mbox;
2965 mbox = mcx_cq_mbox(mboxes, i);
2966
2967 d = mcx_cq_mbox_data(mbox);
2968 for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
2969 if (j != 0 && (j % 16 == 0))
2970 printf("\n");
2971 printf("%.2x ", d[j]);
2972 }
2973 }
2974 }
2975 #endif
2976
2977 static int
2978 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2979 int len)
2980 {
2981 struct mcx_dmamem mxm;
2982 struct mcx_cmdq_entry *cqe;
2983 struct mcx_cmd_access_reg_in *in;
2984 struct mcx_cmd_access_reg_out *out;
2985 uint8_t token = mcx_cmdq_token(sc);
2986 int error, nmb;
2987
2988 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2989 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2990 token);
2991
2992 in = mcx_cmdq_in(cqe);
2993 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2994 in->cmd_op_mod = htobe16(op);
2995 in->cmd_register_id = htobe16(reg);
2996
2997 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2998 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2999 printf(", unable to allocate access reg mailboxen\n");
3000 return (-1);
3001 }
3002 cqe->cq_input_ptr = cqe->cq_output_ptr;
3003 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3004 mcx_cmdq_mboxes_sign(&mxm, nmb);
3005 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3006
3007 mcx_cmdq_post(sc, cqe, 0);
3008 error = mcx_cmdq_poll(sc, cqe, 1000);
3009 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3010
3011 if (error != 0) {
3012 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3013 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3014 goto free;
3015 }
3016 error = mcx_cmdq_verify(cqe);
3017 if (error != 0) {
3018 		printf("%s: access reg (%s %x) reply corrupt\n",
3019 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3020 		    reg);
3021 goto free;
3022 }
3023
3024 out = mcx_cmdq_out(cqe);
3025 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3026 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3027 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3028 reg, out->cmd_status, out->cmd_syndrome);
3029 error = -1;
3030 goto free;
3031 }
3032
3033 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3034 free:
3035 mcx_dmamem_free(sc, &mxm);
3036
3037 return (error);
3038 }
3039
3040 static int
3041 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
3042 {
3043 struct mcx_cmd_set_issi_in *in;
3044 struct mcx_cmd_set_issi_out *out;
3045 uint8_t status;
3046
3047 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3048
3049 in = mcx_cmdq_in(cqe);
3050 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3051 in->cmd_op_mod = htobe16(0);
3052 in->cmd_current_issi = htobe16(MCX_ISSI);
3053
3054 mcx_cmdq_post(sc, cqe, slot);
3055 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3056 return (-1);
3057 if (mcx_cmdq_verify(cqe) != 0)
3058 return (-1);
3059
3060 status = cqe->cq_output_data[0];
3061 if (status != MCX_CQ_STATUS_OK)
3062 return (-1);
3063
3064 return (0);
3065 }
3066
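/*
 * negotiate the interface step sequence id: query what the firmware
 * supports and try to move to MCX_ISSI, falling back to ISSI 0 on
 * any failure.
 */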
3067 static int
3068 mcx_issi(struct mcx_softc *sc)
3069 {
3070 struct mcx_dmamem mxm;
3071 struct mcx_cmdq_entry *cqe;
3072 struct mcx_cmd_query_issi_in *in;
3073 struct mcx_cmd_query_issi_il_out *out;
3074 struct mcx_cmd_query_issi_mb_out *mb;
3075 uint8_t token = mcx_cmdq_token(sc);
3076 uint8_t status;
3077 int error;
3078
3079 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3080 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3081
3082 in = mcx_cmdq_in(cqe);
3083 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3084 in->cmd_op_mod = htobe16(0);
3085
3086 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3087 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3088 &cqe->cq_output_ptr, token) != 0) {
3089 printf(", unable to allocate query issi mailbox\n");
3090 return (-1);
3091 }
3092 mcx_cmdq_mboxes_sign(&mxm, 1);
3093
3094 mcx_cmdq_post(sc, cqe, 0);
3095 error = mcx_cmdq_poll(sc, cqe, 1000);
3096 if (error != 0) {
3097 printf(", query issi timeout\n");
3098 goto free;
3099 }
3100 error = mcx_cmdq_verify(cqe);
3101 if (error != 0) {
3102 printf(", query issi reply corrupt\n");
3103 goto free;
3104 }
3105
3106 status = cqe->cq_output_data[0];
3107 switch (status) {
3108 case MCX_CQ_STATUS_OK:
3109 break;
3110 case MCX_CQ_STATUS_BAD_OPCODE:
3111 /* use ISSI 0 */
3112 goto free;
3113 default:
3114 printf(", query issi failed (%x)\n", status);
3115 error = -1;
3116 goto free;
3117 }
3118
3119 out = mcx_cmdq_out(cqe);
3120 if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3121 /* use ISSI 1 */
3122 goto free;
3123 }
3124
3125 /* don't need to read cqe anymore, can be used for SET ISSI */
3126
3127 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3128 CTASSERT(MCX_ISSI < NBBY);
3129 	/* the supported-ISSI mask is big-endian, so the low bits are in the last byte */
3130 if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3131 /* use ISSI 0 */
3132 goto free;
3133 }
3134
3135 if (mcx_set_issi(sc, cqe, 0) != 0) {
3136 /* ignore the error, just use ISSI 0 */
3137 } else {
3138 /* use ISSI 1 */
3139 }
3140
3141 free:
3142 mcx_cq_mboxes_free(sc, &mxm);
3143 return (error);
3144 }
3145
3146 static int
3147 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3148 uint32_t *npages, uint16_t *func_id)
3149 {
3150 struct mcx_cmdq_entry *cqe;
3151 struct mcx_cmd_query_pages_in *in;
3152 struct mcx_cmd_query_pages_out *out;
3153
3154 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3155 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3156
3157 in = mcx_cmdq_in(cqe);
3158 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3159 in->cmd_op_mod = type;
3160
3161 mcx_cmdq_post(sc, cqe, 0);
3162 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3163 printf(", query pages timeout\n");
3164 return (-1);
3165 }
3166 if (mcx_cmdq_verify(cqe) != 0) {
3167 printf(", query pages reply corrupt\n");
3168 return (-1);
3169 }
3170
3171 out = mcx_cmdq_out(cqe);
3172 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3173 printf(", query pages failed (%x)\n", out->cmd_status);
3174 return (-1);
3175 }
3176
3177 *func_id = out->cmd_func_id;
3178 *npages = be32dec(&out->cmd_num_pages);
3179
3180 return (0);
3181 }
3182
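/*
 * a small iterator over the segments of a loaded DMA map, used to
 * hand the firmware addresses one MCX_PAGE_SIZE step at a time
 * regardless of segment boundaries.
 */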
3183 struct bus_dma_iter {
3184 bus_dmamap_t i_map;
3185 bus_size_t i_offset;
3186 unsigned int i_index;
3187 };
3188
3189 static void
3190 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3191 {
3192 i->i_map = map;
3193 i->i_offset = 0;
3194 i->i_index = 0;
3195 }
3196
3197 static bus_addr_t
3198 bus_dma_iter_addr(struct bus_dma_iter *i)
3199 {
3200 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3201 }
3202
3203 static void
3204 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3205 {
3206 bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3207 bus_size_t diff;
3208
3209 do {
3210 diff = seg->ds_len - i->i_offset;
3211 if (size < diff)
3212 break;
3213
3214 size -= diff;
3215
3216 seg++;
3217
3218 i->i_offset = 0;
3219 i->i_index++;
3220 } while (size > 0);
3221
3222 i->i_offset += size;
3223 }
3224
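/*
 * hand a set of pages to the firmware with MANAGE_PAGES, spreading
 * the page addresses across as many mailboxes as needed.
 */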
3225 static int
3226 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3227 {
3228 struct mcx_dmamem mxm;
3229 struct mcx_cmdq_entry *cqe;
3230 struct mcx_cmd_manage_pages_in *in;
3231 struct mcx_cmd_manage_pages_out *out;
3232 unsigned int paslen, nmb, i, j, npages;
3233 struct bus_dma_iter iter;
3234 uint64_t *pas;
3235 uint8_t status;
3236 uint8_t token = mcx_cmdq_token(sc);
3237 int error;
3238
3239 npages = mhm->mhm_npages;
3240
3241 paslen = sizeof(*pas) * npages;
3242 nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3243
3244 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3245 mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3246
3247 in = mcx_cmdq_in(cqe);
3248 in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3249 in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3250 in->cmd_func_id = func_id;
3251 be32enc(&in->cmd_input_num_entries, npages);
3252
3253 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3254 &cqe->cq_input_ptr, token) != 0) {
3255 printf(", unable to allocate manage pages mailboxen\n");
3256 return (-1);
3257 }
3258
3259 bus_dma_iter_init(&iter, mhm->mhm_map);
3260 for (i = 0; i < nmb; i++) {
3261 unsigned int lim;
3262
3263 pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3264 lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3265
3266 for (j = 0; j < lim; j++) {
3267 be64enc(&pas[j], bus_dma_iter_addr(&iter));
3268 bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3269 }
3270
3271 npages -= lim;
3272 }
3273
3274 mcx_cmdq_mboxes_sign(&mxm, nmb);
3275
3276 mcx_cmdq_post(sc, cqe, 0);
3277 error = mcx_cmdq_poll(sc, cqe, 1000);
3278 if (error != 0) {
3279 printf(", manage pages timeout\n");
3280 goto free;
3281 }
3282 error = mcx_cmdq_verify(cqe);
3283 if (error != 0) {
3284 printf(", manage pages reply corrupt\n");
3285 goto free;
3286 }
3287
3288 status = cqe->cq_output_data[0];
3289 if (status != MCX_CQ_STATUS_OK) {
3290 printf(", manage pages failed (%x)\n", status);
3291 error = -1;
3292 goto free;
3293 }
3294
3295 free:
3296 mcx_dmamem_free(sc, &mxm);
3297
3298 return (error);
3299 }
3300
3301 static int
3302 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3303 {
3304 uint32_t npages;
3305 uint16_t func_id;
3306
3307 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3308 /* error printed by mcx_query_pages */
3309 return (-1);
3310 }
3311
3312 if (npages == 0)
3313 return (0);
3314
3315 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3316 printf(", unable to allocate hwmem\n");
3317 return (-1);
3318 }
3319
3320 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3321 printf(", unable to add hwmem\n");
3322 goto free;
3323 }
3324
3325 return (0);
3326
3327 free:
3328 mcx_hwmem_free(sc, mhm);
3329
3330 return (-1);
3331 }
3332
3333 static int
3334 mcx_hca_max_caps(struct mcx_softc *sc)
3335 {
3336 struct mcx_dmamem mxm;
3337 struct mcx_cmdq_entry *cqe;
3338 struct mcx_cmd_query_hca_cap_in *in;
3339 struct mcx_cmd_query_hca_cap_out *out;
3340 struct mcx_cmdq_mailbox *mb;
3341 struct mcx_cap_device *hca;
3342 uint8_t status;
3343 uint8_t token = mcx_cmdq_token(sc);
3344 int error;
3345
3346 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3347 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3348 token);
3349
3350 in = mcx_cmdq_in(cqe);
3351 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3352 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3353 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3354
3355 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3356 &cqe->cq_output_ptr, token) != 0) {
3357 printf(", unable to allocate query hca caps mailboxen\n");
3358 return (-1);
3359 }
3360 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3361 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3362
3363 mcx_cmdq_post(sc, cqe, 0);
3364 error = mcx_cmdq_poll(sc, cqe, 1000);
3365 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3366
3367 if (error != 0) {
3368 printf(", query hca caps timeout\n");
3369 goto free;
3370 }
3371 error = mcx_cmdq_verify(cqe);
3372 if (error != 0) {
3373 printf(", query hca caps reply corrupt\n");
3374 goto free;
3375 }
3376
3377 status = cqe->cq_output_data[0];
3378 if (status != MCX_CQ_STATUS_OK) {
3379 printf(", query hca caps failed (%x)\n", status);
3380 error = -1;
3381 goto free;
3382 }
3383
3384 mb = mcx_cq_mbox(&mxm, 0);
3385 hca = mcx_cq_mbox_data(mb);
3386
3387 if (hca->log_pg_sz > PAGE_SHIFT) {
3388 		printf(", minimum device page shift %u is too large\n",
3389 hca->log_pg_sz);
3390 error = -1;
3391 goto free;
3392 }
3393 /*
3394 * blueflame register is split into two buffers, and we must alternate
3395 * between the two of them.
3396 */
3397 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3398
3399 free:
3400 mcx_dmamem_free(sc, &mxm);
3401
3402 return (error);
3403 }
3404
3405 static int
3406 mcx_hca_set_caps(struct mcx_softc *sc)
3407 {
3408 struct mcx_dmamem mxm;
3409 struct mcx_cmdq_entry *cqe;
3410 struct mcx_cmd_query_hca_cap_in *in;
3411 struct mcx_cmd_query_hca_cap_out *out;
3412 struct mcx_cmdq_mailbox *mb;
3413 struct mcx_cap_device *hca;
3414 uint8_t status;
3415 uint8_t token = mcx_cmdq_token(sc);
3416 int error;
3417
3418 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3419 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3420 token);
3421
3422 in = mcx_cmdq_in(cqe);
3423 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3424 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3425 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3426
3427 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3428 &cqe->cq_output_ptr, token) != 0) {
3429 		printf(", unable to allocate query hca caps mailboxen\n");
3430 return (-1);
3431 }
3432 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3433 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3434
3435 mcx_cmdq_post(sc, cqe, 0);
3436 error = mcx_cmdq_poll(sc, cqe, 1000);
3437 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3438
3439 if (error != 0) {
3440 printf(", query hca caps timeout\n");
3441 goto free;
3442 }
3443 error = mcx_cmdq_verify(cqe);
3444 if (error != 0) {
3445 printf(", query hca caps reply corrupt\n");
3446 goto free;
3447 }
3448
3449 status = cqe->cq_output_data[0];
3450 if (status != MCX_CQ_STATUS_OK) {
3451 printf(", query hca caps failed (%x)\n", status);
3452 error = -1;
3453 goto free;
3454 }
3455
3456 mb = mcx_cq_mbox(&mxm, 0);
3457 hca = mcx_cq_mbox_data(mb);
3458
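 	/*
 	 * XXX the modified capability page is never pushed back to the
 	 * device, so this adjustment appears to be a no-op.
 	 */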
3459 hca->log_pg_sz = PAGE_SHIFT;
3460
3461 free:
3462 mcx_dmamem_free(sc, &mxm);
3463
3464 return (error);
3465 }
3466
3467
3468 static int
3469 mcx_init_hca(struct mcx_softc *sc)
3470 {
3471 struct mcx_cmdq_entry *cqe;
3472 struct mcx_cmd_init_hca_in *in;
3473 struct mcx_cmd_init_hca_out *out;
3474 int error;
3475 uint8_t status;
3476
3477 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3478 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3479
3480 in = mcx_cmdq_in(cqe);
3481 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3482 in->cmd_op_mod = htobe16(0);
3483
3484 mcx_cmdq_post(sc, cqe, 0);
3485
3486 error = mcx_cmdq_poll(sc, cqe, 1000);
3487 if (error != 0) {
3488 printf(", hca init timeout\n");
3489 return (-1);
3490 }
3491 if (mcx_cmdq_verify(cqe) != 0) {
3492 printf(", hca init command corrupt\n");
3493 return (-1);
3494 }
3495
3496 status = cqe->cq_output_data[0];
3497 if (status != MCX_CQ_STATUS_OK) {
3498 printf(", hca init failed (%x)\n", status);
3499 return (-1);
3500 }
3501
3502 return (0);
3503 }
3504
3505 static int
3506 mcx_set_driver_version(struct mcx_softc *sc)
3507 {
3508 struct mcx_dmamem mxm;
3509 struct mcx_cmdq_entry *cqe;
3510 struct mcx_cmd_set_driver_version_in *in;
3511 struct mcx_cmd_set_driver_version_out *out;
3512 int error;
3513 int token;
3514 uint8_t status;
3515
3516 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3517 token = mcx_cmdq_token(sc);
3518 mcx_cmdq_init(sc, cqe, sizeof(*in) +
3519 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3520
3521 in = mcx_cmdq_in(cqe);
3522 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3523 in->cmd_op_mod = htobe16(0);
3524
3525 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3526 &cqe->cq_input_ptr, token) != 0) {
3527 printf(", unable to allocate set driver version mailboxen\n");
3528 return (-1);
3529 }
3530 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3531 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3532
3533 mcx_cmdq_mboxes_sign(&mxm, 1);
3534 mcx_cmdq_post(sc, cqe, 0);
3535
3536 error = mcx_cmdq_poll(sc, cqe, 1000);
3537 if (error != 0) {
3538 printf(", set driver version timeout\n");
3539 goto free;
3540 }
3541 if (mcx_cmdq_verify(cqe) != 0) {
3542 printf(", set driver version command corrupt\n");
3543 goto free;
3544 }
3545
3546 status = cqe->cq_output_data[0];
3547 if (status != MCX_CQ_STATUS_OK) {
3548 printf(", set driver version failed (%x)\n", status);
3549 error = -1;
3550 goto free;
3551 }
3552
3553 free:
3554 mcx_dmamem_free(sc, &mxm);
3555
3556 return (error);
3557 }
3558
3559 static int
3560 mcx_iff(struct mcx_softc *sc)
3561 {
3562 struct ifnet *ifp = &sc->sc_ec.ec_if;
3563 struct mcx_dmamem mxm;
3564 struct mcx_cmdq_entry *cqe;
3565 struct mcx_cmd_modify_nic_vport_context_in *in;
3566 struct mcx_cmd_modify_nic_vport_context_out *out;
3567 struct mcx_nic_vport_ctx *ctx;
3568 int error;
3569 int token;
3570 int insize;
3571
3572 /* enable or disable the promisc flow */
3573 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3574 if (sc->sc_promisc_flow_enabled == 0) {
3575 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3576 0, NULL);
3577 sc->sc_promisc_flow_enabled = 1;
3578 }
3579 } else if (sc->sc_promisc_flow_enabled != 0) {
3580 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3581 sc->sc_promisc_flow_enabled = 0;
3582 }
3583
3584 /* enable or disable the all-multicast flow */
3585 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3586 if (sc->sc_allmulti_flow_enabled == 0) {
3587 uint8_t mcast[ETHER_ADDR_LEN];
3588
3589 memset(mcast, 0, sizeof(mcast));
3590 mcast[0] = 0x01;
3591 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3592 0, mcast);
3593 sc->sc_allmulti_flow_enabled = 1;
3594 }
3595 } else if (sc->sc_allmulti_flow_enabled != 0) {
3596 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3597 sc->sc_allmulti_flow_enabled = 0;
3598 }
3599
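 	/*
 	 * the vport context presumably sits at offset 0x100 of the full
 	 * command input, i.e. 240 bytes into the mailbox data after the
 	 * 16-byte header carried in the cqe.
 	 */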
3600 insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3601
3602 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3603 token = mcx_cmdq_token(sc);
3604 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3605
3606 in = mcx_cmdq_in(cqe);
3607 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3608 in->cmd_op_mod = htobe16(0);
3609 in->cmd_field_select = htobe32(
3610 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3611 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3612
3613 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3614 printf(", unable to allocate modify nic vport context mailboxen\n");
3615 return (-1);
3616 }
3617 ctx = (struct mcx_nic_vport_ctx *)
3618 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3619 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3620 /*
3621 * always leave promisc-all enabled on the vport since we can't give it
3622 * a vlan list, and we're already doing multicast filtering in the flow
3623 * table.
3624 */
3625 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3626
3627 mcx_cmdq_mboxes_sign(&mxm, 1);
3628 mcx_cmdq_post(sc, cqe, 0);
3629
3630 error = mcx_cmdq_poll(sc, cqe, 1000);
3631 if (error != 0) {
3632 printf(", modify nic vport context timeout\n");
3633 goto free;
3634 }
3635 if (mcx_cmdq_verify(cqe) != 0) {
3636 printf(", modify nic vport context command corrupt\n");
3637 goto free;
3638 }
3639
3640 out = mcx_cmdq_out(cqe);
3641 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3642 printf(", modify nic vport context failed (%x, %x)\n",
3643 out->cmd_status, out->cmd_syndrome);
3644 error = -1;
3645 goto free;
3646 }
3647
3648 free:
3649 mcx_dmamem_free(sc, &mxm);
3650
3651 return (error);
3652 }
3653
3654 static int
3655 mcx_alloc_uar(struct mcx_softc *sc)
3656 {
3657 struct mcx_cmdq_entry *cqe;
3658 struct mcx_cmd_alloc_uar_in *in;
3659 struct mcx_cmd_alloc_uar_out *out;
3660 int error;
3661
3662 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3663 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3664
3665 in = mcx_cmdq_in(cqe);
3666 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3667 in->cmd_op_mod = htobe16(0);
3668
3669 mcx_cmdq_post(sc, cqe, 0);
3670
3671 error = mcx_cmdq_poll(sc, cqe, 1000);
3672 if (error != 0) {
3673 printf(", alloc uar timeout\n");
3674 return (-1);
3675 }
3676 if (mcx_cmdq_verify(cqe) != 0) {
3677 printf(", alloc uar command corrupt\n");
3678 return (-1);
3679 }
3680
3681 out = mcx_cmdq_out(cqe);
3682 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3683 printf(", alloc uar failed (%x)\n", out->cmd_status);
3684 return (-1);
3685 }
3686
3687 sc->sc_uar = mcx_get_id(out->cmd_uar);
3688
3689 return (0);
3690 }
3691
3692 static int
3693 mcx_create_eq(struct mcx_softc *sc)
3694 {
3695 struct mcx_cmdq_entry *cqe;
3696 struct mcx_dmamem mxm;
3697 struct mcx_cmd_create_eq_in *in;
3698 struct mcx_cmd_create_eq_mb_in *mbin;
3699 struct mcx_cmd_create_eq_out *out;
3700 struct mcx_eq_entry *eqe;
3701 int error;
3702 uint64_t *pas;
3703 int insize, npages, paslen, i, token;
3704
3705 sc->sc_eq_cons = 0;
3706
3707 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3708 MCX_PAGE_SIZE);
3709 paslen = npages * sizeof(*pas);
3710 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3711
3712 if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3713 MCX_PAGE_SIZE) != 0) {
3714 printf(", unable to allocate event queue memory\n");
3715 return (-1);
3716 }
3717
3718 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3719 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3720 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3721 }
3722
3723 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3724 token = mcx_cmdq_token(sc);
3725 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3726
3727 in = mcx_cmdq_in(cqe);
3728 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3729 in->cmd_op_mod = htobe16(0);
3730
3731 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3732 &cqe->cq_input_ptr, token) != 0) {
3733 printf(", unable to allocate create eq mailboxen\n");
3734 return (-1);
3735 }
3736 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3737 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3738 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3739 mbin->cmd_event_bitmask = htobe64(
3740 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3741 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3742 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3743 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3744
3745 /* physical addresses follow the mailbox in data */
3746 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &sc->sc_eq_mem);
3747 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3748 mcx_cmdq_post(sc, cqe, 0);
3749
3750 error = mcx_cmdq_poll(sc, cqe, 1000);
3751 if (error != 0) {
3752 printf(", create eq timeout\n");
3753 goto free;
3754 }
3755 if (mcx_cmdq_verify(cqe) != 0) {
3756 printf(", create eq command corrupt\n");
3757 goto free;
3758 }
3759
3760 out = mcx_cmdq_out(cqe);
3761 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3762 printf(", create eq failed (%x, %x)\n", out->cmd_status,
3763 be32toh(out->cmd_syndrome));
3764 error = -1;
3765 goto free;
3766 }
3767
3768 sc->sc_eqn = mcx_get_id(out->cmd_eqn);
3769 mcx_arm_eq(sc);
3770 free:
3771 mcx_dmamem_free(sc, &mxm);
3772 return (error);
3773 }
3774
3775 static int
3776 mcx_alloc_pd(struct mcx_softc *sc)
3777 {
3778 struct mcx_cmdq_entry *cqe;
3779 struct mcx_cmd_alloc_pd_in *in;
3780 struct mcx_cmd_alloc_pd_out *out;
3781 int error;
3782
3783 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3784 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3785
3786 in = mcx_cmdq_in(cqe);
3787 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3788 in->cmd_op_mod = htobe16(0);
3789
3790 mcx_cmdq_post(sc, cqe, 0);
3791
3792 error = mcx_cmdq_poll(sc, cqe, 1000);
3793 if (error != 0) {
3794 printf(", alloc pd timeout\n");
3795 return (-1);
3796 }
3797 if (mcx_cmdq_verify(cqe) != 0) {
3798 printf(", alloc pd command corrupt\n");
3799 return (-1);
3800 }
3801
3802 out = mcx_cmdq_out(cqe);
3803 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3804 printf(", alloc pd failed (%x)\n", out->cmd_status);
3805 return (-1);
3806 }
3807
3808 sc->sc_pd = mcx_get_id(out->cmd_pd);
3809 return (0);
3810 }
3811
3812 static int
3813 mcx_alloc_tdomain(struct mcx_softc *sc)
3814 {
3815 struct mcx_cmdq_entry *cqe;
3816 struct mcx_cmd_alloc_td_in *in;
3817 struct mcx_cmd_alloc_td_out *out;
3818 int error;
3819
3820 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3821 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3822
3823 in = mcx_cmdq_in(cqe);
3824 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3825 in->cmd_op_mod = htobe16(0);
3826
3827 mcx_cmdq_post(sc, cqe, 0);
3828
3829 error = mcx_cmdq_poll(sc, cqe, 1000);
3830 if (error != 0) {
3831 printf(", alloc transport domain timeout\n");
3832 return (-1);
3833 }
3834 if (mcx_cmdq_verify(cqe) != 0) {
3835 printf(", alloc transport domain command corrupt\n");
3836 return (-1);
3837 }
3838
3839 out = mcx_cmdq_out(cqe);
3840 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3841 printf(", alloc transport domain failed (%x)\n",
3842 out->cmd_status);
3843 return (-1);
3844 }
3845
3846 sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
3847 return (0);
3848 }
3849
3850 static int
3851 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3852 {
3853 struct mcx_dmamem mxm;
3854 struct mcx_cmdq_entry *cqe;
3855 struct mcx_cmd_query_nic_vport_context_in *in;
3856 struct mcx_cmd_query_nic_vport_context_out *out;
3857 struct mcx_nic_vport_ctx *ctx;
3858 uint8_t *addr;
3859 int error, token, i;
3860
3861 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3862 token = mcx_cmdq_token(sc);
3863 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3864
3865 in = mcx_cmdq_in(cqe);
3866 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3867 in->cmd_op_mod = htobe16(0);
3868 in->cmd_allowed_list_type = 0;
3869
3870 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3871 printf(", unable to allocate query nic vport context mailboxen\n");
3872 return (-1);
3873 }
3874 mcx_cmdq_mboxes_sign(&mxm, 1);
3875 mcx_cmdq_post(sc, cqe, 0);
3876
3877 error = mcx_cmdq_poll(sc, cqe, 1000);
3878 if (error != 0) {
3879 printf(", query nic vport context timeout\n");
3880 goto free;
3881 }
3882 	if (mcx_cmdq_verify(cqe) != 0) {
3883 		printf(", query nic vport context command corrupt\n");
		error = -1;
3884 		goto free;
3885 	}
3886
3887 out = mcx_cmdq_out(cqe);
3888 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3889 printf(", query nic vport context failed (%x, %x)\n",
3890 out->cmd_status, out->cmd_syndrome);
3891 error = -1;
3892 goto free;
3893 }
3894
3895 	ctx = (struct mcx_nic_vport_ctx *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3896 addr = (uint8_t *)&ctx->vp_perm_addr;
3897 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3898 enaddr[i] = addr[i + 2];
3899 }
3900 free:
3901 mcx_dmamem_free(sc, &mxm);
3902
3903 return (error);
3904 }
3905
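/*
 * fetch the reserved lkey, which the rx buffer descriptors carry
 * (see rqe_lkey in mcx_rx_fill_slots); it appears to let the queues
 * refer to buffers by dma address without registering a memory
 * region first.
 */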
3906 static int
3907 mcx_query_special_contexts(struct mcx_softc *sc)
3908 {
3909 struct mcx_cmdq_entry *cqe;
3910 struct mcx_cmd_query_special_ctx_in *in;
3911 struct mcx_cmd_query_special_ctx_out *out;
3912 int error;
3913
3914 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3915 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3916
3917 in = mcx_cmdq_in(cqe);
3918 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3919 in->cmd_op_mod = htobe16(0);
3920
3921 mcx_cmdq_post(sc, cqe, 0);
3922
3923 error = mcx_cmdq_poll(sc, cqe, 1000);
3924 if (error != 0) {
3925 printf(", query special contexts timeout\n");
3926 return (-1);
3927 }
3928 if (mcx_cmdq_verify(cqe) != 0) {
3929 printf(", query special contexts command corrupt\n");
3930 return (-1);
3931 }
3932
3933 out = mcx_cmdq_out(cqe);
3934 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3935 printf(", query special contexts failed (%x)\n",
3936 out->cmd_status);
3937 return (-1);
3938 }
3939
3940 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3941 return (0);
3942 }
3943
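/*
 * set the port mtu via the pmtu register: read it first to learn the
 * hardware maximum, then write the admin mtu clamped to that.
 */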
3944 static int
3945 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3946 {
3947 struct mcx_reg_pmtu pmtu;
3948 int error;
3949
3950 /* read max mtu */
3951 memset(&pmtu, 0, sizeof(pmtu));
3952 pmtu.rp_local_port = 1;
3953 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3954 sizeof(pmtu));
3955 if (error != 0) {
3956 printf(", unable to get port MTU\n");
3957 return error;
3958 }
3959
3960 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3961 pmtu.rp_admin_mtu = htobe16(mtu);
3962 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3963 sizeof(pmtu));
3964 if (error != 0) {
3965 printf(", unable to set port MTU\n");
3966 return error;
3967 }
3968
3969 sc->sc_hardmtu = mtu;
3970 return 0;
3971 }
3972
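/*
 * create a completion queue. every entry starts out with the owner
 * flag set so the ring initially belongs to the hardware; see
 * mcx_next_cq_entry for how ownership is tracked from then on. the
 * uar, event queue and interrupt moderation settings go in the
 * mailbox context, followed by the physical addresses of the queue
 * pages.
 */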
3973 static int
3974 mcx_create_cq(struct mcx_softc *sc, int eqn)
3975 {
3976 struct mcx_cmdq_entry *cmde;
3977 struct mcx_cq_entry *cqe;
3978 struct mcx_cq *cq;
3979 struct mcx_dmamem mxm;
3980 struct mcx_cmd_create_cq_in *in;
3981 struct mcx_cmd_create_cq_mb_in *mbin;
3982 struct mcx_cmd_create_cq_out *out;
3983 int error;
3984 uint64_t *pas;
3985 int insize, npages, paslen, i, token;
3986
3987 if (sc->sc_num_cq >= MCX_MAX_CQS) {
3988 printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3989 return (-1);
3990 }
3991 cq = &sc->sc_cq[sc->sc_num_cq];
3992
3993 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3994 MCX_PAGE_SIZE);
3995 paslen = npages * sizeof(*pas);
3996 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3997
3998 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3999 MCX_PAGE_SIZE) != 0) {
4000 printf("%s: unable to allocate completion queue memory\n",
4001 DEVNAME(sc));
4002 return (-1);
4003 }
4004 cqe = MCX_DMA_KVA(&cq->cq_mem);
4005 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4006 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4007 }
4008
4009 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4010 token = mcx_cmdq_token(sc);
4011 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4012
4013 in = mcx_cmdq_in(cmde);
4014 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4015 in->cmd_op_mod = htobe16(0);
4016
4017 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize,
4018 	    MCX_CMDQ_MAILBOX_DATASIZE), &cmde->cq_input_ptr, token) != 0) {
4019 printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
4020 error = -1;
4021 goto free;
4022 }
4023 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4024 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4025 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
4026 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4027 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4028 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4029 MCX_CQ_MOD_COUNTER);
4030 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4031 MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4032 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4033
4034 /* physical addresses follow the mailbox in data */
4035 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4036 mcx_cmdq_post(sc, cmde, 0);
4037
4038 error = mcx_cmdq_poll(sc, cmde, 1000);
4039 if (error != 0) {
4040 printf("%s: create cq timeout\n", DEVNAME(sc));
4041 goto free;
4042 }
4043 	if (mcx_cmdq_verify(cmde) != 0) {
4044 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
		error = -1;
4045 		goto free;
4046 	}
4047
4048 out = mcx_cmdq_out(cmde);
4049 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4050 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4051 out->cmd_status, be32toh(out->cmd_syndrome));
4052 error = -1;
4053 goto free;
4054 }
4055
4056 cq->cq_n = mcx_get_id(out->cmd_cqn);
4057 cq->cq_cons = 0;
4058 cq->cq_count = 0;
4059 cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4060 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4061 mcx_arm_cq(sc, cq);
4062 sc->sc_num_cq++;
4063
4064 free:
4065 mcx_dmamem_free(sc, &mxm);
4066 return (error);
4067 }
4068
4069 static int
4070 mcx_destroy_cq(struct mcx_softc *sc, int index)
4071 {
4072 struct mcx_cmdq_entry *cqe;
4073 struct mcx_cmd_destroy_cq_in *in;
4074 struct mcx_cmd_destroy_cq_out *out;
4075 int error;
4076 int token;
4077
4078 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4079 token = mcx_cmdq_token(sc);
4080 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4081
4082 in = mcx_cmdq_in(cqe);
4083 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4084 in->cmd_op_mod = htobe16(0);
4085 in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4086
4087 mcx_cmdq_post(sc, cqe, 0);
4088 error = mcx_cmdq_poll(sc, cqe, 1000);
4089 if (error != 0) {
4090 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4091 return error;
4092 }
4093 if (mcx_cmdq_verify(cqe) != 0) {
4094 printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4095 		return -1;
4096 }
4097
4098 out = mcx_cmdq_out(cqe);
4099 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4100 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4101 out->cmd_status, be32toh(out->cmd_syndrome));
4102 return -1;
4103 }
4104
4105 sc->sc_cq[index].cq_n = 0;
4106 mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4107 sc->sc_cq[index].cq_cons = 0;
4108 sc->sc_cq[index].cq_count = 0;
4109 return 0;
4110 }
4111
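/*
 * create a receive queue. the rq context is written 0x10 bytes into
 * the mailbox data, which presumably matches the command layout in
 * the PRM. wq_log_stride is 4 because each mcx_rq_entry is 16 bytes.
 */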
4112 static int
4113 mcx_create_rq(struct mcx_softc *sc, int cqn)
4114 {
4115 struct mcx_cmdq_entry *cqe;
4116 struct mcx_dmamem mxm;
4117 struct mcx_cmd_create_rq_in *in;
4118 struct mcx_cmd_create_rq_out *out;
4119 struct mcx_rq_ctx *mbin;
4120 int error;
4121 uint64_t *pas;
4122 uint8_t *doorbell;
4123 int insize, npages, paslen, token;
4124
4125 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4126 MCX_PAGE_SIZE);
4127 paslen = npages * sizeof(*pas);
4128 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4129
4130 if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4131 MCX_PAGE_SIZE) != 0) {
4132 printf("%s: unable to allocate receive queue memory\n",
4133 DEVNAME(sc));
4134 return (-1);
4135 }
4136
4137 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4138 token = mcx_cmdq_token(sc);
4139 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4140
4141 in = mcx_cmdq_in(cqe);
4142 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4143 in->cmd_op_mod = htobe16(0);
4144
4145 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize,
4146 	    MCX_CMDQ_MAILBOX_DATASIZE), &cqe->cq_input_ptr, token) != 0) {
4147 printf("%s: unable to allocate create rq mailboxen\n",
4148 DEVNAME(sc));
4149 error = -1;
4150 goto free;
4151 }
4152 	mbin = (struct mcx_rq_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4153 mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4154 mbin->rq_cqn = htobe32(cqn);
4155 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4156 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4157 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4158 MCX_RQ_DOORBELL_OFFSET);
4159 mbin->rq_wq.wq_log_stride = htobe16(4);
4160 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4161
4162 /* physical addresses follow the mailbox in data */
4163 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &sc->sc_rq_mem);
4164 mcx_cmdq_post(sc, cqe, 0);
4165
4166 error = mcx_cmdq_poll(sc, cqe, 1000);
4167 if (error != 0) {
4168 printf("%s: create rq timeout\n", DEVNAME(sc));
4169 goto free;
4170 }
4171 	if (mcx_cmdq_verify(cqe) != 0) {
4172 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
		error = -1;
4173 		goto free;
4174 	}
4175
4176 out = mcx_cmdq_out(cqe);
4177 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4178 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4179 out->cmd_status, be32toh(out->cmd_syndrome));
4180 error = -1;
4181 goto free;
4182 }
4183
4184 sc->sc_rqn = mcx_get_id(out->cmd_rqn);
4185
4186 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4187 sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4188
4189 free:
4190 mcx_dmamem_free(sc, &mxm);
4191 return (error);
4192 }
4193
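/*
 * move the rq from reset to ready. modify rq takes the current state
 * in the top nibble of cmd_rq_state alongside the rqn, and the new
 * state in the context flags.
 */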
4194 static int
4195 mcx_ready_rq(struct mcx_softc *sc)
4196 {
4197 struct mcx_cmdq_entry *cqe;
4198 struct mcx_dmamem mxm;
4199 struct mcx_cmd_modify_rq_in *in;
4200 struct mcx_cmd_modify_rq_mb_in *mbin;
4201 struct mcx_cmd_modify_rq_out *out;
4202 int error;
4203 int token;
4204
4205 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4206 token = mcx_cmdq_token(sc);
4207 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4208
4209 in = mcx_cmdq_in(cqe);
4210 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4211 in->cmd_op_mod = htobe16(0);
4212 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4213
4214 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4215 printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4216 return (-1);
4217 }
4218 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4219 mbin->cmd_rq_ctx.rq_flags = htobe32(
4220 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4221
4222 mcx_cmdq_mboxes_sign(&mxm, 1);
4223 mcx_cmdq_post(sc, cqe, 0);
4224 error = mcx_cmdq_poll(sc, cqe, 1000);
4225 if (error != 0) {
4226 printf("%s: modify rq timeout\n", DEVNAME(sc));
4227 goto free;
4228 }
4229 	if (mcx_cmdq_verify(cqe) != 0) {
4230 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
		error = -1;
4231 		goto free;
4232 	}
4233
4234 out = mcx_cmdq_out(cqe);
4235 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4236 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4237 out->cmd_status, be32toh(out->cmd_syndrome));
4238 error = -1;
4239 goto free;
4240 }
4241
4242 free:
4243 mcx_dmamem_free(sc, &mxm);
4244 return (error);
4245 }
4246
4247 static int
4248 mcx_destroy_rq(struct mcx_softc *sc)
4249 {
4250 struct mcx_cmdq_entry *cqe;
4251 struct mcx_cmd_destroy_rq_in *in;
4252 struct mcx_cmd_destroy_rq_out *out;
4253 int error;
4254 int token;
4255
4256 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4257 token = mcx_cmdq_token(sc);
4258 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4259
4260 in = mcx_cmdq_in(cqe);
4261 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4262 in->cmd_op_mod = htobe16(0);
4263 in->cmd_rqn = htobe32(sc->sc_rqn);
4264
4265 mcx_cmdq_post(sc, cqe, 0);
4266 error = mcx_cmdq_poll(sc, cqe, 1000);
4267 if (error != 0) {
4268 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4269 return error;
4270 }
4271 if (mcx_cmdq_verify(cqe) != 0) {
4272 printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4273 		return -1;
4274 }
4275
4276 out = mcx_cmdq_out(cqe);
4277 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4278 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4279 out->cmd_status, be32toh(out->cmd_syndrome));
4280 return -1;
4281 }
4282
4283 sc->sc_rqn = 0;
4284 return 0;
4285 }
4286
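/*
 * create a tir (transport interface receive). with disp_type left at
 * zero, received packets are forwarded directly to the inline rq;
 * rss would presumably point the tir at an indirection table instead.
 */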
4287 static int
4288 mcx_create_tir(struct mcx_softc *sc)
4289 {
4290 struct mcx_cmdq_entry *cqe;
4291 struct mcx_dmamem mxm;
4292 struct mcx_cmd_create_tir_in *in;
4293 struct mcx_cmd_create_tir_mb_in *mbin;
4294 struct mcx_cmd_create_tir_out *out;
4295 int error;
4296 int token;
4297
4298 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4299 token = mcx_cmdq_token(sc);
4300 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4301
4302 in = mcx_cmdq_in(cqe);
4303 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4304 in->cmd_op_mod = htobe16(0);
4305
4306 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4307 printf("%s: unable to allocate create tir mailbox\n",
4308 DEVNAME(sc));
4309 return (-1);
4310 }
4311 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4312 /* leave disp_type = 0, so packets get sent to the inline rqn */
4313 mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4314 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4315
4316 mcx_cmdq_post(sc, cqe, 0);
4317 error = mcx_cmdq_poll(sc, cqe, 1000);
4318 if (error != 0) {
4319 printf("%s: create tir timeout\n", DEVNAME(sc));
4320 goto free;
4321 }
4322 	if (mcx_cmdq_verify(cqe) != 0) {
4323 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		error = -1;
4324 		goto free;
4325 	}
4326
4327 out = mcx_cmdq_out(cqe);
4328 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4329 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4330 out->cmd_status, be32toh(out->cmd_syndrome));
4331 error = -1;
4332 goto free;
4333 }
4334
4335 sc->sc_tirn = mcx_get_id(out->cmd_tirn);
4336 free:
4337 mcx_dmamem_free(sc, &mxm);
4338 return (error);
4339 }
4340
4341 static int
4342 mcx_destroy_tir(struct mcx_softc *sc)
4343 {
4344 struct mcx_cmdq_entry *cqe;
4345 struct mcx_cmd_destroy_tir_in *in;
4346 struct mcx_cmd_destroy_tir_out *out;
4347 int error;
4348 int token;
4349
4350 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4351 token = mcx_cmdq_token(sc);
4352 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4353
4354 in = mcx_cmdq_in(cqe);
4355 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4356 in->cmd_op_mod = htobe16(0);
4357 in->cmd_tirn = htobe32(sc->sc_tirn);
4358
4359 mcx_cmdq_post(sc, cqe, 0);
4360 error = mcx_cmdq_poll(sc, cqe, 1000);
4361 if (error != 0) {
4362 printf("%s: destroy tir timeout\n", DEVNAME(sc));
4363 return error;
4364 }
4365 if (mcx_cmdq_verify(cqe) != 0) {
4366 printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4367 		return -1;
4368 }
4369
4370 out = mcx_cmdq_out(cqe);
4371 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4372 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4373 out->cmd_status, be32toh(out->cmd_syndrome));
4374 return -1;
4375 }
4376
4377 sc->sc_tirn = 0;
4378 return 0;
4379 }
4380
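/*
 * create a send queue. like the rq, the sq context sits 0x10 bytes
 * into the mailbox data with the page addresses following it. the sq
 * is bound to the tis, and its doorbell is the second word of the
 * rq/sq doorbell pair, hence the + 4 below.
 */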
4381 static int
4382 mcx_create_sq(struct mcx_softc *sc, int cqn)
4383 {
4384 struct mcx_cmdq_entry *cqe;
4385 struct mcx_dmamem mxm;
4386 struct mcx_cmd_create_sq_in *in;
4387 struct mcx_sq_ctx *mbin;
4388 struct mcx_cmd_create_sq_out *out;
4389 int error;
4390 uint64_t *pas;
4391 uint8_t *doorbell;
4392 int insize, npages, paslen, token;
4393
4394 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4395 MCX_PAGE_SIZE);
4396 paslen = npages * sizeof(*pas);
4397 	insize = 0x10 + sizeof(struct mcx_sq_ctx) + paslen;
4398
4399 if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4400 MCX_PAGE_SIZE) != 0) {
4401 printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4402 return (-1);
4403 }
4404
4405 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4406 token = mcx_cmdq_token(sc);
4407 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out),
4408 	    token);
4409
4410 in = mcx_cmdq_in(cqe);
4411 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4412 in->cmd_op_mod = htobe16(0);
4413
4414 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize,
4415 	    MCX_CMDQ_MAILBOX_DATASIZE), &cqe->cq_input_ptr, token) != 0) {
4416 printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4417 error = -1;
4418 goto free;
4419 }
4420 	mbin = (struct mcx_sq_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4421 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4422 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4423 mbin->sq_cqn = htobe32(cqn);
4424 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4425 mbin->sq_tis_num = htobe32(sc->sc_tisn);
4426 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4427 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4428 mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4429 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4430 MCX_SQ_DOORBELL_OFFSET);
4431 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4432 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4433
4434 /* physical addresses follow the mailbox in data */
4435 mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &sc->sc_sq_mem);
4436 mcx_cmdq_post(sc, cqe, 0);
4437
4438 error = mcx_cmdq_poll(sc, cqe, 1000);
4439 if (error != 0) {
4440 printf("%s: create sq timeout\n", DEVNAME(sc));
4441 goto free;
4442 }
4443 	if (mcx_cmdq_verify(cqe) != 0) {
4444 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
		error = -1;
4445 		goto free;
4446 	}
4447
4448 out = mcx_cmdq_out(cqe);
4449 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4450 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4451 out->cmd_status, be32toh(out->cmd_syndrome));
4452 error = -1;
4453 goto free;
4454 }
4455
4456 sc->sc_sqn = mcx_get_id(out->cmd_sqn);
4457
4458 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4459 sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4460 free:
4461 mcx_dmamem_free(sc, &mxm);
4462 return (error);
4463 }
4464
4465 static int
4466 mcx_destroy_sq(struct mcx_softc *sc)
4467 {
4468 struct mcx_cmdq_entry *cqe;
4469 struct mcx_cmd_destroy_sq_in *in;
4470 struct mcx_cmd_destroy_sq_out *out;
4471 int error;
4472 int token;
4473
4474 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4475 token = mcx_cmdq_token(sc);
4476 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4477
4478 in = mcx_cmdq_in(cqe);
4479 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4480 in->cmd_op_mod = htobe16(0);
4481 in->cmd_sqn = htobe32(sc->sc_sqn);
4482
4483 mcx_cmdq_post(sc, cqe, 0);
4484 error = mcx_cmdq_poll(sc, cqe, 1000);
4485 if (error != 0) {
4486 printf("%s: destroy sq timeout\n", DEVNAME(sc));
4487 return error;
4488 }
4489 if (mcx_cmdq_verify(cqe) != 0) {
4490 printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4491 		return -1;
4492 }
4493
4494 out = mcx_cmdq_out(cqe);
4495 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4496 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4497 out->cmd_status, be32toh(out->cmd_syndrome));
4498 return -1;
4499 }
4500
4501 sc->sc_sqn = 0;
4502 return 0;
4503 }
4504
4505 static int
4506 mcx_ready_sq(struct mcx_softc *sc)
4507 {
4508 struct mcx_cmdq_entry *cqe;
4509 struct mcx_dmamem mxm;
4510 struct mcx_cmd_modify_sq_in *in;
4511 struct mcx_cmd_modify_sq_mb_in *mbin;
4512 struct mcx_cmd_modify_sq_out *out;
4513 int error;
4514 int token;
4515
4516 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4517 token = mcx_cmdq_token(sc);
4518 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4519
4520 in = mcx_cmdq_in(cqe);
4521 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4522 in->cmd_op_mod = htobe16(0);
4523 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4524
4525 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4526 printf("%s: unable to allocate modify sq mailbox\n",
4527 DEVNAME(sc));
4528 return (-1);
4529 }
4530 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4531 mbin->cmd_sq_ctx.sq_flags = htobe32(
4532 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4533
4534 mcx_cmdq_mboxes_sign(&mxm, 1);
4535 mcx_cmdq_post(sc, cqe, 0);
4536 error = mcx_cmdq_poll(sc, cqe, 1000);
4537 if (error != 0) {
4538 printf("%s: modify sq timeout\n", DEVNAME(sc));
4539 goto free;
4540 }
4541 	if (mcx_cmdq_verify(cqe) != 0) {
4542 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
		error = -1;
4543 		goto free;
4544 	}
4545
4546 out = mcx_cmdq_out(cqe);
4547 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4548 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4549 out->cmd_status, be32toh(out->cmd_syndrome));
4550 error = -1;
4551 goto free;
4552 }
4553
4554 free:
4555 mcx_dmamem_free(sc, &mxm);
4556 return (error);
4557 }
4558
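/*
 * create a tis (transport interface send). it just carries the
 * transport domain on the send side; the sq points at it.
 */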
4559 static int
4560 mcx_create_tis(struct mcx_softc *sc)
4561 {
4562 struct mcx_cmdq_entry *cqe;
4563 struct mcx_dmamem mxm;
4564 struct mcx_cmd_create_tis_in *in;
4565 struct mcx_cmd_create_tis_mb_in *mbin;
4566 struct mcx_cmd_create_tis_out *out;
4567 int error;
4568 int token;
4569
4570 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4571 token = mcx_cmdq_token(sc);
4572 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4573
4574 in = mcx_cmdq_in(cqe);
4575 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4576 in->cmd_op_mod = htobe16(0);
4577
4578 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4579 printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4580 return (-1);
4581 }
4582 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4583 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4584
4585 mcx_cmdq_mboxes_sign(&mxm, 1);
4586 mcx_cmdq_post(sc, cqe, 0);
4587 error = mcx_cmdq_poll(sc, cqe, 1000);
4588 if (error != 0) {
4589 printf("%s: create tis timeout\n", DEVNAME(sc));
4590 goto free;
4591 }
4592 	if (mcx_cmdq_verify(cqe) != 0) {
4593 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
		error = -1;
4594 		goto free;
4595 	}
4596
4597 out = mcx_cmdq_out(cqe);
4598 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4599 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4600 out->cmd_status, be32toh(out->cmd_syndrome));
4601 error = -1;
4602 goto free;
4603 }
4604
4605 sc->sc_tisn = mcx_get_id(out->cmd_tisn);
4606 free:
4607 mcx_dmamem_free(sc, &mxm);
4608 return (error);
4609 }
4610
4611 static int
4612 mcx_destroy_tis(struct mcx_softc *sc)
4613 {
4614 struct mcx_cmdq_entry *cqe;
4615 struct mcx_cmd_destroy_tis_in *in;
4616 struct mcx_cmd_destroy_tis_out *out;
4617 int error;
4618 int token;
4619
4620 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4621 token = mcx_cmdq_token(sc);
4622 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4623
4624 in = mcx_cmdq_in(cqe);
4625 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4626 in->cmd_op_mod = htobe16(0);
4627 in->cmd_tisn = htobe32(sc->sc_tisn);
4628
4629 mcx_cmdq_post(sc, cqe, 0);
4630 error = mcx_cmdq_poll(sc, cqe, 1000);
4631 if (error != 0) {
4632 printf("%s: destroy tis timeout\n", DEVNAME(sc));
4633 return error;
4634 }
4635 if (mcx_cmdq_verify(cqe) != 0) {
4636 printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4637 		return -1;
4638 }
4639
4640 out = mcx_cmdq_out(cqe);
4641 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4642 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4643 out->cmd_status, be32toh(out->cmd_syndrome));
4644 return -1;
4645 }
4646
4647 	sc->sc_tisn = 0;
4648 return 0;
4649 }
4650
4651 #if 0
4652 static int
4653 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
4654 {
4655 struct mcx_cmdq_entry *cqe;
4656 struct mcx_cmd_alloc_flow_counter_in *in;
4657 struct mcx_cmd_alloc_flow_counter_out *out;
4658 int error;
4659
4660 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4661 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4662
4663 in = mcx_cmdq_in(cqe);
4664 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
4665 in->cmd_op_mod = htobe16(0);
4666
4667 mcx_cmdq_post(sc, cqe, 0);
4668
4669 error = mcx_cmdq_poll(sc, cqe, 1000);
4670 if (error != 0) {
4671 printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
4672 return (-1);
4673 }
4674 if (mcx_cmdq_verify(cqe) != 0) {
4675 printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
4676 return (-1);
4677 }
4678
4679 out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
4680 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4681 printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
4682 out->cmd_status);
4683 return (-1);
4684 }
4685
4686 sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
4687 printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
4688
4689 return (0);
4690 }
4691 #endif
4692
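/*
 * create the receive flow table. flow groups carve index ranges out
 * of the table and fix which fields their entries may match on;
 * entries then forward matching packets to the tir. the table only
 * takes effect once installed as the root table for its type (see
 * mcx_set_flow_table_root).
 */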
4693 static int
4694 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4695 {
4696 struct mcx_cmdq_entry *cqe;
4697 struct mcx_dmamem mxm;
4698 struct mcx_cmd_create_flow_table_in *in;
4699 struct mcx_cmd_create_flow_table_mb_in *mbin;
4700 struct mcx_cmd_create_flow_table_out *out;
4701 int error;
4702 int token;
4703
4704 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4705 token = mcx_cmdq_token(sc);
4706 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4707
4708 in = mcx_cmdq_in(cqe);
4709 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4710 in->cmd_op_mod = htobe16(0);
4711
4712 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4713 printf("%s: unable to allocate create flow table mailbox\n",
4714 DEVNAME(sc));
4715 return (-1);
4716 }
4717 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4718 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4719 mbin->cmd_ctx.ft_log_size = log_size;
4720
4721 mcx_cmdq_mboxes_sign(&mxm, 1);
4722 mcx_cmdq_post(sc, cqe, 0);
4723 error = mcx_cmdq_poll(sc, cqe, 1000);
4724 if (error != 0) {
4725 printf("%s: create flow table timeout\n", DEVNAME(sc));
4726 goto free;
4727 }
4728 	if (mcx_cmdq_verify(cqe) != 0) {
4729 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
4730 		goto free;
4731 	}
4732
4733 out = mcx_cmdq_out(cqe);
4734 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4735 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4736 out->cmd_status, be32toh(out->cmd_syndrome));
4737 error = -1;
4738 goto free;
4739 }
4740
4741 sc->sc_flow_table_id = mcx_get_id(out->cmd_table_id);
4742 free:
4743 mcx_dmamem_free(sc, &mxm);
4744 return (error);
4745 }
4746
4747 static int
4748 mcx_set_flow_table_root(struct mcx_softc *sc)
4749 {
4750 struct mcx_cmdq_entry *cqe;
4751 struct mcx_dmamem mxm;
4752 struct mcx_cmd_set_flow_table_root_in *in;
4753 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4754 struct mcx_cmd_set_flow_table_root_out *out;
4755 int error;
4756 int token;
4757
4758 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4759 token = mcx_cmdq_token(sc);
4760 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4761
4762 in = mcx_cmdq_in(cqe);
4763 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4764 in->cmd_op_mod = htobe16(0);
4765
4766 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4767 printf("%s: unable to allocate set flow table root mailbox\n",
4768 DEVNAME(sc));
4769 return (-1);
4770 }
4771 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4772 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4773 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4774
4775 mcx_cmdq_mboxes_sign(&mxm, 1);
4776 mcx_cmdq_post(sc, cqe, 0);
4777 error = mcx_cmdq_poll(sc, cqe, 1000);
4778 if (error != 0) {
4779 printf("%s: set flow table root timeout\n", DEVNAME(sc));
4780 goto free;
4781 }
4782 	if (mcx_cmdq_verify(cqe) != 0) {
4783 		printf("%s: set flow table root command corrupt\n",
4784 		    DEVNAME(sc));
		error = -1;
4785 		goto free;
4786 	}
4787
4788 out = mcx_cmdq_out(cqe);
4789 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4790 printf("%s: set flow table root failed (%x, %x)\n",
4791 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4792 error = -1;
4793 goto free;
4794 }
4795
4796 free:
4797 mcx_dmamem_free(sc, &mxm);
4798 return (error);
4799 }
4800
4801 static int
4802 mcx_destroy_flow_table(struct mcx_softc *sc)
4803 {
4804 struct mcx_cmdq_entry *cqe;
4805 struct mcx_dmamem mxm;
4806 struct mcx_cmd_destroy_flow_table_in *in;
4807 struct mcx_cmd_destroy_flow_table_mb_in *mb;
4808 struct mcx_cmd_destroy_flow_table_out *out;
4809 int error;
4810 int token;
4811
4812 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4813 token = mcx_cmdq_token(sc);
4814 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4815
4816 in = mcx_cmdq_in(cqe);
4817 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4818 in->cmd_op_mod = htobe16(0);
4819
4820 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4821 printf("%s: unable to allocate destroy flow table mailbox\n",
4822 DEVNAME(sc));
4823 return (-1);
4824 }
4825 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4826 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4827 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4828
4829 mcx_cmdq_mboxes_sign(&mxm, 1);
4830 mcx_cmdq_post(sc, cqe, 0);
4831 error = mcx_cmdq_poll(sc, cqe, 1000);
4832 if (error != 0) {
4833 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4834 goto free;
4835 }
4836 	if (mcx_cmdq_verify(cqe) != 0) {
4837 		printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
		error = -1;
4838 		goto free;
4839 	}
4840
4841 out = mcx_cmdq_out(cqe);
4842 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4843 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4844 out->cmd_status, be32toh(out->cmd_syndrome));
4845 error = -1;
4846 goto free;
4847 }
4848
4849 sc->sc_flow_table_id = -1;
4850 free:
4851 mcx_dmamem_free(sc, &mxm);
4852 return (error);
4853 }
4854
4855
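/*
 * create a flow group covering table entries [start, start + size).
 * the group context including the match criteria doesn't fit in a
 * single mailbox, which is presumably why two are allocated and
 * signed here.
 */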
4856 static int
4857 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4858 int match_enable, struct mcx_flow_match *match)
4859 {
4860 struct mcx_cmdq_entry *cqe;
4861 struct mcx_dmamem mxm;
4862 struct mcx_cmd_create_flow_group_in *in;
4863 struct mcx_cmd_create_flow_group_mb_in *mbin;
4864 struct mcx_cmd_create_flow_group_out *out;
4865 int error;
4866 int token;
4867
4868 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4869 token = mcx_cmdq_token(sc);
4870 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4871 token);
4872
4873 in = mcx_cmdq_in(cqe);
4874 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4875 in->cmd_op_mod = htobe16(0);
4876
4877 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4878 != 0) {
4879 printf("%s: unable to allocate create flow group mailbox\n",
4880 DEVNAME(sc));
4881 return (-1);
4882 }
4883 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4884 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4885 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4886 mbin->cmd_start_flow_index = htobe32(start);
4887 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4888
4889 mbin->cmd_match_criteria_enable = match_enable;
4890 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4891
4892 mcx_cmdq_mboxes_sign(&mxm, 2);
4893 mcx_cmdq_post(sc, cqe, 0);
4894 error = mcx_cmdq_poll(sc, cqe, 1000);
4895 if (error != 0) {
4896 printf("%s: create flow group timeout\n", DEVNAME(sc));
4897 goto free;
4898 }
4899 	if (mcx_cmdq_verify(cqe) != 0) {
4900 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		error = -1;
4901 		goto free;
4902 	}
4903
4904 out = mcx_cmdq_out(cqe);
4905 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4906 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4907 out->cmd_status, be32toh(out->cmd_syndrome));
4908 error = -1;
4909 goto free;
4910 }
4911
4912 sc->sc_flow_group_id[group] = mcx_get_id(out->cmd_group_id);
4913 sc->sc_flow_group_size[group] = size;
4914 sc->sc_flow_group_start[group] = start;
4915
4916 free:
4917 mcx_dmamem_free(sc, &mxm);
4918 return (error);
4919 }
4920
4921 static int
4922 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4923 {
4924 struct mcx_cmdq_entry *cqe;
4925 struct mcx_dmamem mxm;
4926 struct mcx_cmd_destroy_flow_group_in *in;
4927 struct mcx_cmd_destroy_flow_group_mb_in *mb;
4928 struct mcx_cmd_destroy_flow_group_out *out;
4929 int error;
4930 int token;
4931
4932 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4933 token = mcx_cmdq_token(sc);
4934 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4935
4936 in = mcx_cmdq_in(cqe);
4937 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4938 in->cmd_op_mod = htobe16(0);
4939
4940 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4941 printf("%s: unable to allocate destroy flow group mailbox\n",
4942 DEVNAME(sc));
4943 return (-1);
4944 }
4945 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4946 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4947 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4948 mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4949
4950 mcx_cmdq_mboxes_sign(&mxm, 2);
4951 mcx_cmdq_post(sc, cqe, 0);
4952 error = mcx_cmdq_poll(sc, cqe, 1000);
4953 if (error != 0) {
4954 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4955 goto free;
4956 }
4957 	if (mcx_cmdq_verify(cqe) != 0) {
4958 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		error = -1;
4959 		goto free;
4960 	}
4961
4962 out = mcx_cmdq_out(cqe);
4963 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4964 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4965 out->cmd_status, be32toh(out->cmd_syndrome));
4966 error = -1;
4967 goto free;
4968 }
4969
4970 sc->sc_flow_group_id[group] = -1;
4971 sc->sc_flow_group_size[group] = 0;
4972 free:
4973 mcx_dmamem_free(sc, &mxm);
4974 return (error);
4975 }
4976
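/*
 * program a flow table entry: forward packets matching the given
 * destination mac (or everything, if macaddr is NULL) to the tir.
 * the flow context spills into the second mailbox, so the single
 * destination entry lands 0x130 bytes into it.
 */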
4977 static int
4978 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4979 const uint8_t *macaddr)
4980 {
4981 struct mcx_cmdq_entry *cqe;
4982 struct mcx_dmamem mxm;
4983 struct mcx_cmd_set_flow_table_entry_in *in;
4984 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4985 struct mcx_cmd_set_flow_table_entry_out *out;
4986 uint32_t *dest;
4987 int error;
4988 int token;
4989
4990 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4991 token = mcx_cmdq_token(sc);
4992 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4993 sizeof(*out), token);
4994
4995 in = mcx_cmdq_in(cqe);
4996 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4997 in->cmd_op_mod = htobe16(0);
4998
4999 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5000 != 0) {
5001 printf("%s: unable to allocate set flow table entry mailbox\n",
5002 DEVNAME(sc));
5003 return (-1);
5004 }
5005 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5006 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5007 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5008 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5009 mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
5010
5011 /* flow context ends at offset 0x330, 0x130 into the second mbox */
5012 dest = (uint32_t *)
5013 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5014 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5015 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5016 *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
5017
5018 /* the only thing we match on at the moment is the dest mac address */
5019 if (macaddr != NULL) {
5020 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5021 ETHER_ADDR_LEN);
5022 }
5023
5024 mcx_cmdq_mboxes_sign(&mxm, 2);
5025 mcx_cmdq_post(sc, cqe, 0);
5026 error = mcx_cmdq_poll(sc, cqe, 1000);
5027 if (error != 0) {
5028 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5029 goto free;
5030 }
5031 	if (mcx_cmdq_verify(cqe) != 0) {
5032 		printf("%s: set flow table entry command corrupt\n",
5033 		    DEVNAME(sc));
		error = -1;
5034 		goto free;
5035 	}
5036
5037 out = mcx_cmdq_out(cqe);
5038 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5039 printf("%s: set flow table entry failed (%x, %x)\n",
5040 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5041 error = -1;
5042 goto free;
5043 }
5044
5045 free:
5046 mcx_dmamem_free(sc, &mxm);
5047 return (error);
5048 }
5049
5050 static int
5051 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5052 {
5053 struct mcx_cmdq_entry *cqe;
5054 struct mcx_dmamem mxm;
5055 struct mcx_cmd_delete_flow_table_entry_in *in;
5056 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5057 struct mcx_cmd_delete_flow_table_entry_out *out;
5058 int error;
5059 int token;
5060
5061 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5062 token = mcx_cmdq_token(sc);
5063 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5064 token);
5065
5066 in = mcx_cmdq_in(cqe);
5067 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5068 in->cmd_op_mod = htobe16(0);
5069
5070 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5071 printf("%s: unable to allocate delete flow table entry mailbox\n",
5072 DEVNAME(sc));
5073 return (-1);
5074 }
5075 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5076 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5077 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5078 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5079
5080 mcx_cmdq_mboxes_sign(&mxm, 2);
5081 mcx_cmdq_post(sc, cqe, 0);
5082 error = mcx_cmdq_poll(sc, cqe, 1000);
5083 if (error != 0) {
5084 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5085 goto free;
5086 }
5087 	if (mcx_cmdq_verify(cqe) != 0) {
5088 		printf("%s: delete flow table entry command corrupt\n",
5089 		    DEVNAME(sc));
		error = -1;
5090 		goto free;
5091 	}
5092
5093 out = mcx_cmdq_out(cqe);
5094 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5095 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5096 DEVNAME(sc), group, index, out->cmd_status,
5097 be32toh(out->cmd_syndrome));
5098 error = -1;
5099 goto free;
5100 }
5101
5102 free:
5103 mcx_dmamem_free(sc, &mxm);
5104 return (error);
5105 }
5106
5107 #if 0
5108 int
5109 mcx_dump_flow_table(struct mcx_softc *sc)
5110 {
5111 struct mcx_dmamem mxm;
5112 struct mcx_cmdq_entry *cqe;
5113 struct mcx_cmd_query_flow_table_in *in;
5114 struct mcx_cmd_query_flow_table_mb_in *mbin;
5115 struct mcx_cmd_query_flow_table_out *out;
5116 struct mcx_cmd_query_flow_table_mb_out *mbout;
5117 uint8_t token = mcx_cmdq_token(sc);
5118 int error;
5119 int i;
5120 uint8_t *dump;
5121
5122 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5123 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5124 sizeof(*out) + sizeof(*mbout) + 16, token);
5125
5126 in = mcx_cmdq_in(cqe);
5127 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
5128 in->cmd_op_mod = htobe16(0);
5129
5130 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5131 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
5132 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5133 &cqe->cq_output_ptr, token) != 0) {
5134 printf(", unable to allocate query flow table mailboxes\n");
5135 return (-1);
5136 }
5137 cqe->cq_input_ptr = cqe->cq_output_ptr;
5138
5139 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5140 mbin->cmd_table_type = 0;
5141 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5142
5143 mcx_cmdq_mboxes_sign(&mxm, 1);
5144
5145 mcx_cmdq_post(sc, cqe, 0);
5146 error = mcx_cmdq_poll(sc, cqe, 1000);
5147 if (error != 0) {
5148 printf("%s: query flow table timeout\n", DEVNAME(sc));
5149 goto free;
5150 }
5151 error = mcx_cmdq_verify(cqe);
5152 if (error != 0) {
5153 printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
5154 goto free;
5155 }
5156
5157 out = mcx_cmdq_out(cqe);
5158 switch (out->cmd_status) {
5159 case MCX_CQ_STATUS_OK:
5160 break;
5161 default:
5162 printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
5163 out->cmd_status, be32toh(out->cmd_syndrome));
5164 error = -1;
5165 goto free;
5166 }
5167
5168 mbout = (struct mcx_cmd_query_flow_table_mb_out *)
5169 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5170 dump = (uint8_t *)mbout + 8;
5171 for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
5172 printf("%.2x ", dump[i]);
5173 if (i % 16 == 15)
5174 printf("\n");
5175 }
5176 free:
5177 mcx_cq_mboxes_free(sc, &mxm);
5178 return (error);
5179 }
5180 int
5181 mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
5182 {
5183 struct mcx_dmamem mxm;
5184 struct mcx_cmdq_entry *cqe;
5185 struct mcx_cmd_query_flow_table_entry_in *in;
5186 struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
5187 struct mcx_cmd_query_flow_table_entry_out *out;
5188 struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
5189 uint8_t token = mcx_cmdq_token(sc);
5190 int error;
5191 int i;
5192 uint8_t *dump;
5193
5194 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5195 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5196 sizeof(*out) + sizeof(*mbout) + 16, token);
5197
5198 in = mcx_cmdq_in(cqe);
5199 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
5200 in->cmd_op_mod = htobe16(0);
5201
5202 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5203 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5204 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5205 &cqe->cq_output_ptr, token) != 0) {
5206 printf(", unable to allocate query flow table entry mailboxes\n");
5207 return (-1);
5208 }
5209 cqe->cq_input_ptr = cqe->cq_output_ptr;
5210
5211 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5212 mbin->cmd_table_type = 0;
5213 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5214 mbin->cmd_flow_index = htobe32(index);
5215
5216 mcx_cmdq_mboxes_sign(&mxm, 1);
5217
5218 mcx_cmdq_post(sc, cqe, 0);
5219 error = mcx_cmdq_poll(sc, cqe, 1000);
5220 if (error != 0) {
5221 printf("%s: query flow table entry timeout\n", DEVNAME(sc));
5222 goto free;
5223 }
5224 error = mcx_cmdq_verify(cqe);
5225 if (error != 0) {
5226 printf("%s: query flow table entry reply corrupt\n",
5227 DEVNAME(sc));
5228 goto free;
5229 }
5230
5231 out = mcx_cmdq_out(cqe);
5232 switch (out->cmd_status) {
5233 case MCX_CQ_STATUS_OK:
5234 break;
5235 default:
5236 printf("%s: query flow table entry failed (%x/%x)\n",
5237 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5238 error = -1;
5239 goto free;
5240 }
5241
5242 mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
5243 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5244 dump = (uint8_t *)mbout;
5245 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5246 printf("%.2x ", dump[i]);
5247 if (i % 16 == 15)
5248 printf("\n");
5249 }
5250
5251 free:
5252 mcx_cq_mboxes_free(sc, &mxm);
5253 return (error);
5254 }
5255
5256 int
5257 mcx_dump_flow_group(struct mcx_softc *sc, int group)
5258 {
5259 struct mcx_dmamem mxm;
5260 struct mcx_cmdq_entry *cqe;
5261 struct mcx_cmd_query_flow_group_in *in;
5262 struct mcx_cmd_query_flow_group_mb_in *mbin;
5263 struct mcx_cmd_query_flow_group_out *out;
5264 struct mcx_cmd_query_flow_group_mb_out *mbout;
5265 uint8_t token = mcx_cmdq_token(sc);
5266 int error;
5267 int i;
5268 uint8_t *dump;
5269
5270 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5271 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5272 sizeof(*out) + sizeof(*mbout) + 16, token);
5273
5274 in = mcx_cmdq_in(cqe);
5275 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
5276 in->cmd_op_mod = htobe16(0);
5277
5278 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5279 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5280 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5281 &cqe->cq_output_ptr, token) != 0) {
5282 printf(", unable to allocate query flow group mailboxes\n");
5283 return (-1);
5284 }
5285 cqe->cq_input_ptr = cqe->cq_output_ptr;
5286
5287 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5288 mbin->cmd_table_type = 0;
5289 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5290 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
5291
5292 mcx_cmdq_mboxes_sign(&mxm, 1);
5293
5294 mcx_cmdq_post(sc, cqe, 0);
5295 error = mcx_cmdq_poll(sc, cqe, 1000);
5296 if (error != 0) {
5297 printf("%s: query flow group timeout\n", DEVNAME(sc));
5298 goto free;
5299 }
5300 error = mcx_cmdq_verify(cqe);
5301 if (error != 0) {
5302 printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
5303 goto free;
5304 }
5305
5306 out = mcx_cmdq_out(cqe);
5307 switch (out->cmd_status) {
5308 case MCX_CQ_STATUS_OK:
5309 break;
5310 default:
5311 printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
5312 out->cmd_status, be32toh(out->cmd_syndrome));
5313 error = -1;
5314 goto free;
5315 }
5316
5317 mbout = (struct mcx_cmd_query_flow_group_mb_out *)
5318 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5319 dump = (uint8_t *)mbout;
5320 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5321 printf("%.2x ", dump[i]);
5322 if (i % 16 == 15)
5323 printf("\n");
5324 }
5325 dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
5326 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5327 printf("%.2x ", dump[i]);
5328 if (i % 16 == 15)
5329 printf("\n");
5330 }
5331
5332 free:
5333 mcx_cq_mboxes_free(sc, &mxm);
5334 return (error);
5335 }
5336
5337 int
5338 mcx_dump_rq(struct mcx_softc *sc)
5339 {
5340 struct mcx_dmamem mxm;
5341 struct mcx_cmdq_entry *cqe;
5342 struct mcx_cmd_query_rq_in *in;
5343 struct mcx_cmd_query_rq_out *out;
5344 struct mcx_cmd_query_rq_mb_out *mbout;
5345 uint8_t token = mcx_cmdq_token(sc);
5346 int error;
5347
5348 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5349 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5350 token);
5351
5352 in = mcx_cmdq_in(cqe);
5353 in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5354 in->cmd_op_mod = htobe16(0);
5355 in->cmd_rqn = htobe32(sc->sc_rqn);
5356
5357 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5358 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5359 &cqe->cq_output_ptr, token) != 0) {
5360 		printf(", unable to allocate query rq mailboxes\n");
5361 return (-1);
5362 }
5363
5364 mcx_cmdq_mboxes_sign(&mxm, 1);
5365
5366 mcx_cmdq_post(sc, cqe, 0);
5367 error = mcx_cmdq_poll(sc, cqe, 1000);
5368 if (error != 0) {
5369 printf("%s: query rq timeout\n", DEVNAME(sc));
5370 goto free;
5371 }
5372 error = mcx_cmdq_verify(cqe);
5373 if (error != 0) {
5374 printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5375 goto free;
5376 }
5377
5378 out = mcx_cmdq_out(cqe);
5379 switch (out->cmd_status) {
5380 case MCX_CQ_STATUS_OK:
5381 break;
5382 default:
5383 printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5384 out->cmd_status, be32toh(out->cmd_syndrome));
5385 error = -1;
5386 goto free;
5387 }
5388
5389 mbout = (struct mcx_cmd_query_rq_mb_out *)
5390 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5391 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5392 DEVNAME(sc),
5393 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5394 be32toh(mbout->cmd_ctx.rq_user_index),
5395 be32toh(mbout->cmd_ctx.rq_cqn),
5396 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5397 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5398 mbout->cmd_ctx.rq_wq.wq_log_size,
5399 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5400 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5401
5402 free:
5403 mcx_cq_mboxes_free(sc, &mxm);
5404 return (error);
5405 }
5406
5407 int
5408 mcx_dump_sq(struct mcx_softc *sc)
5409 {
5410 struct mcx_dmamem mxm;
5411 struct mcx_cmdq_entry *cqe;
5412 struct mcx_cmd_query_sq_in *in;
5413 struct mcx_cmd_query_sq_out *out;
5414 struct mcx_cmd_query_sq_mb_out *mbout;
5415 uint8_t token = mcx_cmdq_token(sc);
5416 int error;
5417 int i;
5418 uint8_t *dump;
5419
5420 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5421 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5422 token);
5423
5424 in = mcx_cmdq_in(cqe);
5425 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
5426 in->cmd_op_mod = htobe16(0);
5427 in->cmd_sqn = htobe32(sc->sc_sqn);
5428
5429 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5430 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5431 &cqe->cq_output_ptr, token) != 0) {
5432 printf(", unable to allocate query sq mailboxes\n");
5433 return (-1);
5434 }
5435
5436 mcx_cmdq_mboxes_sign(&mxm, 1);
5437
5438 mcx_cmdq_post(sc, cqe, 0);
5439 error = mcx_cmdq_poll(sc, cqe, 1000);
5440 if (error != 0) {
5441 printf("%s: query sq timeout\n", DEVNAME(sc));
5442 goto free;
5443 }
5444 error = mcx_cmdq_verify(cqe);
5445 if (error != 0) {
5446 printf("%s: query sq reply corrupt\n", DEVNAME(sc));
5447 goto free;
5448 }
5449
5450 out = mcx_cmdq_out(cqe);
5451 switch (out->cmd_status) {
5452 case MCX_CQ_STATUS_OK:
5453 break;
5454 default:
5455 printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
5456 out->cmd_status, be32toh(out->cmd_syndrome));
5457 error = -1;
5458 goto free;
5459 }
5460
5461 mbout = (struct mcx_cmd_query_sq_mb_out *)
5462 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5463 /*
5464 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5465 DEVNAME(sc),
5466 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5467 be32toh(mbout->cmd_ctx.rq_user_index),
5468 be32toh(mbout->cmd_ctx.rq_cqn),
5469 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5470 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5471 mbout->cmd_ctx.rq_wq.wq_log_size,
5472 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5473 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5474 */
5475 dump = (uint8_t *)mbout;
5476 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5477 printf("%.2x ", dump[i]);
5478 if (i % 16 == 15)
5479 printf("\n");
5480 }
5481
5482 free:
5483 mcx_cq_mboxes_free(sc, &mxm);
5484 return (error);
5485 }
5486
5487 static int
5488 mcx_dump_counters(struct mcx_softc *sc)
5489 {
5490 struct mcx_dmamem mxm;
5491 struct mcx_cmdq_entry *cqe;
5492 struct mcx_cmd_query_vport_counters_in *in;
5493 struct mcx_cmd_query_vport_counters_mb_in *mbin;
5494 struct mcx_cmd_query_vport_counters_out *out;
5495 struct mcx_nic_vport_counters *counters;
5496 int error, token;
5497
5498 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5499 token = mcx_cmdq_token(sc);
5500 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5501 sizeof(*out) + sizeof(*counters), token);
5502
5503 in = mcx_cmdq_in(cqe);
5504 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5505 in->cmd_op_mod = htobe16(0);
5506
5507 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5508 printf(", unable to allocate query nic vport counters mailboxen\n");
5509 return (-1);
5510 }
5511 cqe->cq_input_ptr = cqe->cq_output_ptr;
5512
5513 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5514 mbin->cmd_clear = 0x80;
5515
5516 mcx_cmdq_mboxes_sign(&mxm, 1);
5517 mcx_cmdq_post(sc, cqe, 0);
5518
5519 error = mcx_cmdq_poll(sc, cqe, 1000);
5520 if (error != 0) {
5521 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5522 goto free;
5523 }
5524 	if (mcx_cmdq_verify(cqe) != 0) {
5525 		printf("%s: query nic vport counters command corrupt\n",
5526 		    DEVNAME(sc));
		error = -1;
5527 		goto free;
5528 	}
5529
5530 out = mcx_cmdq_out(cqe);
5531 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5532 printf("%s: query nic vport counters failed (%x, %x)\n",
5533 DEVNAME(sc), out->cmd_status, out->cmd_syndrome);
5534 error = -1;
5535 goto free;
5536 }
5537
5538 counters = (struct mcx_nic_vport_counters *)
5539 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5540 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5541 counters->rx_ucast.packets + counters->tx_ucast.packets +
5542 counters->rx_err.packets + counters->tx_err.packets)
5543 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5544 DEVNAME(sc),
5545 be64toh(counters->tx_err.packets),
5546 be64toh(counters->rx_err.packets),
5547 be64toh(counters->tx_ucast.packets),
5548 be64toh(counters->rx_ucast.packets),
5549 be64toh(counters->tx_bcast.packets),
5550 be64toh(counters->rx_bcast.packets));
5551 free:
5552 mcx_dmamem_free(sc, &mxm);
5553
5554 return (error);
5555 }
5556
5557 static int
5558 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5559 {
5560 struct mcx_dmamem mxm;
5561 struct mcx_cmdq_entry *cqe;
5562 struct mcx_cmd_query_flow_counter_in *in;
5563 struct mcx_cmd_query_flow_counter_mb_in *mbin;
5564 struct mcx_cmd_query_flow_counter_out *out;
5565 struct mcx_counter *counters;
5566 int error, token;
5567
5568 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5569 token = mcx_cmdq_token(sc);
5570 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5571 sizeof(*counters), token);
5572
5573 in = mcx_cmdq_in(cqe);
5574 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5575 in->cmd_op_mod = htobe16(0);
5576
5577 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5578 printf(", unable to allocate query flow counter mailboxen\n");
5579 return (-1);
5580 }
5581 cqe->cq_input_ptr = cqe->cq_output_ptr;
5582 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5583 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5584 mbin->cmd_clear = 0x80;
5585
5586 mcx_cmdq_mboxes_sign(&mxm, 1);
5587 mcx_cmdq_post(sc, cqe, 0);
5588
5589 error = mcx_cmdq_poll(sc, cqe, 1000);
5590 if (error != 0) {
5591 printf("%s: query flow counter timeout\n", DEVNAME(sc));
5592 goto free;
5593 }
5594 	if (mcx_cmdq_verify(cqe) != 0) {
5595 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		error = -1;
5596 		goto free;
5597 	}
5598
5599 out = mcx_cmdq_out(cqe);
5600 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5601 printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5602 out->cmd_status, out->cmd_syndrome);
5603 error = -1;
5604 goto free;
5605 }
5606
5607 	counters = (struct mcx_counter *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5608 if (counters->packets)
5609 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5610 be64toh(counters->packets));
5611 free:
5612 mcx_dmamem_free(sc, &mxm);
5613
5614 return (error);
5615 }
5616
5617 #endif
5618
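/*
 * fill rx descriptors with freshly allocated mbuf clusters, starting
 * at *prod; returns the number of slots left unfilled. the doorbell
 * record write tells the nic how far the producer counter advanced.
 */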
5619 static int
5620 mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
5621 uint *prod, int bufsize, uint nslots)
5622 {
5623 struct mcx_rq_entry *rqe;
5624 struct mcx_slot *ms;
5625 struct mbuf *m;
5626 uint slot, p, fills;
5627
5628 p = *prod;
5629 slot = (p % (1 << MCX_LOG_RQ_SIZE));
5630 rqe = ring;
5631 for (fills = 0; fills < nslots; fills++) {
5632 ms = &slots[slot];
5633 #if 0
5634 m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
5635 if (m == NULL)
5636 break;
5637 #else
5638 m = NULL;
5639 MGETHDR(m, M_DONTWAIT, MT_DATA);
5640 if (m == NULL)
5641 break;
5642
5643 MCLGET(m, M_DONTWAIT);
5644 if ((m->m_flags & M_EXT) == 0) {
5645 m_freem(m);
5646 break;
5647 }
5648 #endif
5649
5650 m->m_data += ETHER_ALIGN;
5651 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
5652 if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
5653 BUS_DMA_NOWAIT) != 0) {
5654 m_freem(m);
5655 break;
5656 }
5657 		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5658 ms->ms_m = m;
5659
5660 rqe[slot].rqe_byte_count = htobe32(m->m_len);
5661 rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
5662 rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);
5663
5664 p++;
5665 slot++;
5666 if (slot == (1 << MCX_LOG_RQ_SIZE))
5667 slot = 0;
5668 }
5669
5670 if (fills != 0) {
5671 *sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
5672 /* barrier? */
5673 }
5674
5675 *prod = p;
5676
5677 return (nslots - fills);
5678 }
5679
5680 static int
5681 mcx_rx_fill(struct mcx_softc *sc)
5682 {
5683 u_int slots;
5684
5685 slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5686 if (slots == 0)
5687 return (1);
5688
5689 slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5690 sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5691 mcx_rxr_put(&sc->sc_rxr, slots);
5692 return (0);
5693 }
5694
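/*
 * callout to retry rx allocation after mcx_rx_fill came up completely
 * empty; reschedules itself until at least one buffer is posted.
 */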
5695 void
5696 mcx_refill(void *xsc)
5697 {
5698 struct mcx_softc *sc = xsc;
5699
5700 mcx_rx_fill(sc);
5701
5702 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5703 callout_schedule(&sc->sc_rx_refill, 1);
5704 }
5705
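/*
 * reclaim a completed tx packet. a packet takes one slot for the ctrl
 * and eth segments plus its first data segment, and one more slot per
 * MCX_SQ_SEGS_PER_SLOT further segments, so the
 * (dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT below is
 * howmany(dm_nsegs - 1, MCX_SQ_SEGS_PER_SLOT).
 */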
5706 void
5707 mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
5708 {
5709 struct mcx_slot *ms;
5710 bus_dmamap_t map;
5711 int slot, slots;
5712
5713 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
5714
5715 ms = &sc->sc_tx_slots[slot];
5716 map = ms->ms_map;
5717 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
5718 BUS_DMASYNC_POSTWRITE);
5719
5720 slots = 1;
5721 if (map->dm_nsegs > 1)
5722 slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
5723
5724 (*txfree) += slots;
5725 bus_dmamap_unload(sc->sc_dmat, map);
5726 m_freem(ms->ms_m);
5727 ms->ms_m = NULL;
5728 }
5729
5730 static uint64_t
5731 mcx_uptime(void)
5732 {
5733 struct timespec ts;
5734
5735 nanouptime(&ts);
5736
5737 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5738 }
5739
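/*
 * prime the timestamp calibration when the interface comes up; real
 * samples accumulate once the callout starts firing.
 */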
5740 static void
5741 mcx_calibrate_first(struct mcx_softc *sc)
5742 {
5743 struct mcx_calibration *c = &sc->sc_calibration[0];
5744
5745 sc->sc_calibration_gen = 0;
5746
5747 c->c_ubase = mcx_uptime();
5748 c->c_tbase = mcx_timer(sc);
5749 c->c_tdiff = 0;
5750
5751 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5752 }
5753
5754 #define MCX_TIMESTAMP_SHIFT 10
5755
5756 static void
5757 mcx_calibrate(void *arg)
5758 {
5759 struct mcx_softc *sc = arg;
5760 struct mcx_calibration *nc, *pc;
5761 unsigned int gen;
5762
5763 if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
5764 return;
5765
5766 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
5767
5768 gen = sc->sc_calibration_gen;
5769 pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5770 gen++;
5771 nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5772
5773 nc->c_uptime = pc->c_ubase;
5774 nc->c_timestamp = pc->c_tbase;
5775
5776 nc->c_ubase = mcx_uptime();
5777 nc->c_tbase = mcx_timer(sc);
5778
5779 nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
5780 nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;
5781
5782 membar_producer();
5783 sc->sc_calibration_gen = gen;
5784 }
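#if 0
/*
 * A minimal sketch (not compiled in) of how a calibration record would
 * convert a raw device timestamp into host nanoseconds, i.e. into
 * mcx_uptime() units.  The same arithmetic appears, disabled, in
 * mcx_process_rx() below; the helper name is illustrative only.
 */
static uint64_t
mcx_timestamp_to_uptime(const struct mcx_calibration *c, uint64_t ts)
{
	uint64_t t;

	if (c->c_tdiff == 0)
		return (0);		/* not calibrated yet */

	t = ts - c->c_timestamp;	/* device ticks since snapshot */
	t *= c->c_udiff;		/* scale by the uptime delta... */
	t /= c->c_tdiff;		/* ...per device-timer delta */

	return (c->c_uptime + t);
}
#endif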
5785
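/*
 * Hand a received packet to the stack: sync and unload the slot's DMA
 * map, set the packet length from the completion entry and enqueue the
 * mbuf.  Flow-hash and timestamp handling are currently disabled.
 */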
5786 static int
5787 mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
5788 struct mcx_mbufq *mq, const struct mcx_calibration *c)
5789 {
5790 struct mcx_slot *ms;
5791 struct mbuf *m;
5792 int slot;
5793
5794 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
5795
5796 ms = &sc->sc_rx_slots[slot];
5797 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
5798 BUS_DMASYNC_POSTREAD);
5799 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5800
5801 m = ms->ms_m;
5802 ms->ms_m = NULL;
5803
5804 m_set_rcvif(m, &sc->sc_ec.ec_if);
5805 m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
5806
5807 #if 0
5808 if (cqe->cq_rx_hash_type) {
5809 m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
5810 be32toh(cqe->cq_rx_hash);
5811 }
5812 #endif
5813
5814 #if 0
5815 if (c->c_tdiff) {
5816 uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
5817 t *= c->c_udiff;
5818 t /= c->c_tdiff;
5819
5820 m->m_pkthdr.ph_timestamp = c->c_uptime + t;
5821 SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
5822 }
5823 #endif
5824
5825 MBUFQ_ENQUEUE(mq, m);
5826
5827 return (1);
5828 }
5829
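/*
 * A cq entry belongs to software when its owner bit matches the phase
 * of the consumer index; the phase flips each time the consumer wraps
 * around the ring.
 */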
5830 static struct mcx_cq_entry *
5831 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5832 {
5833 struct mcx_cq_entry *cqe;
5834 int next;
5835
5836 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5837 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5838
5839 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5840 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5841 return (&cqe[next]);
5842 }
5843
5844 return (NULL);
5845 }
5846
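/*
 * Re-arm the completion queue: update the doorbell record with the
 * consumer index and command sequence number, then write the cq number
 * and arm command to the UAR doorbell register.
 */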
5847 static void
5848 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5849 {
5850 bus_size_t offset;
5851 uint32_t val;
5852 uint64_t uval;
5853
5854 /* different uar per cq? */
5855 offset = (MCX_PAGE_SIZE * sc->sc_uar);
5856 val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
5857 val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5858
5859 cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5860 cq->cq_doorbell[1] = htobe32(val);
5861
5862 uval = val;
5863 uval <<= 32;
5864 uval |= cq->cq_n;
5865 bus_space_write_8(sc->sc_memt, sc->sc_memh,
5866 offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
5867 mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
5868 BUS_SPACE_BARRIER_WRITE);
5869 }
5870
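/*
 * Process a completion queue: reap tx completions and rx packets,
 * re-arm the cq, then refill the rx ring and restart the tx queue as
 * needed.
 */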
5871 void
5872 mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5873 {
5874 struct ifnet *ifp = &sc->sc_ec.ec_if;
5875 const struct mcx_calibration *c;
5876 unsigned int gen;
5877 struct mcx_cq_entry *cqe;
5878 struct mcx_mbufq mq;
5879 struct mbuf *m;
5880 int rxfree, txfree;
5881
5882 MBUFQ_INIT(&mq);
5883
5884 gen = sc->sc_calibration_gen;
5885 membar_consumer();
5886 c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5887
5888 rxfree = 0;
5889 txfree = 0;
5890 while ((cqe = mcx_next_cq_entry(sc, cq))) {
5891 uint8_t opcode;
5892 opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
5893 switch (opcode) {
5894 case MCX_CQ_ENTRY_OPCODE_REQ:
5895 mcx_process_txeof(sc, cqe, &txfree);
5896 break;
5897 case MCX_CQ_ENTRY_OPCODE_SEND:
5898 rxfree += mcx_process_rx(sc, cqe, &mq, c);
5899 break;
5900 case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
5901 case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
5902 /* uint8_t *cqp = (uint8_t *)cqe; */
5903 /* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
5904 break;
5905
5906 default:
5907 /* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
5908 break;
5909 }
5910
5911 cq->cq_cons++;
5912 }
5913
5914 cq->cq_count++;
5915 mcx_arm_cq(sc, cq);
5916
5917 if (rxfree > 0) {
5918 mcx_rxr_put(&sc->sc_rxr, rxfree);
5919 while (MBUFQ_FIRST(&mq) != NULL) {
5920 MBUFQ_DEQUEUE(&mq, m);
5921 if_percpuq_enqueue(ifp->if_percpuq, m);
5922 }
5923
5924 mcx_rx_fill(sc);
5925
5926 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5927 callout_schedule(&sc->sc_rx_refill, 1);
5928 }
5929 if (txfree > 0) {
5930 sc->sc_tx_cons += txfree;
5931 if_schedule_deferred_start(ifp);
5932 }
5933 }
5934
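/* re-arm the event queue by writing the eq number and consumer index */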
5935 static void
5936 mcx_arm_eq(struct mcx_softc *sc)
5937 {
5938 bus_size_t offset;
5939 uint32_t val;
5940
5941 offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5942 val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5943
5944 mcx_wr(sc, offset, val);
5945 	/* XXX mcx_bar() may be needed here to flush the doorbell write */
5946 }
5947
5948 static struct mcx_eq_entry *
5949 mcx_next_eq_entry(struct mcx_softc *sc)
5950 {
5951 struct mcx_eq_entry *eqe;
5952 int next;
5953
5954 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5955 next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5956 	if ((eqe[next].eq_owner & 1) ==
	    ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5957 sc->sc_eq_cons++;
5958 return (&eqe[next]);
5959 }
5960 return (NULL);
5961 }
5962
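/*
 * Interrupt handler: drain the event queue, dispatching completion
 * events to the matching cq and port change events to the workqueue,
 * then re-arm the eq.
 */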
5963 int
5964 mcx_intr(void *xsc)
5965 {
5966 struct mcx_softc *sc = (struct mcx_softc *)xsc;
5967 struct mcx_eq_entry *eqe;
5968 int i, cq;
5969
5970 while ((eqe = mcx_next_eq_entry(sc))) {
5971 switch (eqe->eq_event_type) {
5972 case MCX_EVENT_TYPE_COMPLETION:
5973 cq = be32toh(eqe->eq_event_data[6]);
5974 for (i = 0; i < sc->sc_num_cq; i++) {
5975 if (sc->sc_cq[i].cq_n == cq) {
5976 mcx_process_cq(sc, &sc->sc_cq[i]);
5977 break;
5978 }
5979 }
5980 break;
5981
5982 case MCX_EVENT_TYPE_LAST_WQE:
5983 /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5984 break;
5985
5986 case MCX_EVENT_TYPE_CQ_ERROR:
5987 /* printf("%s: cq error\n", DEVNAME(sc)); */
5988 break;
5989
5990 case MCX_EVENT_TYPE_CMD_COMPLETION:
5991 			/* XXX a wakeup of the command issuer probably belongs here */
5992 break;
5993
5994 case MCX_EVENT_TYPE_PORT_CHANGE:
5995 workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
5996 break;
5997
5998 default:
5999 /* printf("%s: something happened\n", DEVNAME(sc)); */
6000 break;
6001 }
6002 }
6003 mcx_arm_eq(sc);
6004 return (1);
6005 }
6006
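/*
 * Destroy the DMA maps and free the mbufs of the first `allocated'
 * slots, then release the slot array itself (sized for `total' slots).
 */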
6007 static void
6008 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
6009 int total)
6010 {
6011 struct mcx_slot *ms;
6012
6013 int i = allocated;
6014 while (i-- > 0) {
6015 ms = &slots[i];
6016 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
6017 if (ms->ms_m != NULL)
6018 m_freem(ms->ms_m);
6019 }
6020 kmem_free(slots, total * sizeof(*ms));
6021 }
6022
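/*
 * if_init: allocate the rx/tx slots and their DMA maps, create the cq,
 * send and receive queues, flow table and flow groups, install the
 * unicast, broadcast and multicast flow entries, then bring the queues
 * up and prime the rx ring.
 */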
6023 static int
6024 mcx_init(struct ifnet *ifp)
6025 {
6026 struct mcx_softc *sc = ifp->if_softc;
6027 struct mcx_slot *ms;
6028 int i, start;
6029 struct mcx_flow_match match_crit;
6030
6031 if (ISSET(ifp->if_flags, IFF_RUNNING))
6032 mcx_stop(ifp, 0);
6033
6034 sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6035 KM_SLEEP);
6036
6037 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6038 ms = &sc->sc_rx_slots[i];
6039 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6040 sc->sc_hardmtu, 0,
6041 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6042 &ms->ms_map) != 0) {
6043 printf("%s: failed to allocate rx dma maps\n",
6044 DEVNAME(sc));
6045 goto destroy_rx_slots;
6046 }
6047 }
6048
6049 sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6050 KM_SLEEP);
6051
6052 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6053 ms = &sc->sc_tx_slots[i];
6054 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6055 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6056 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6057 &ms->ms_map) != 0) {
6058 printf("%s: failed to allocate tx dma maps\n",
6059 DEVNAME(sc));
6060 goto destroy_tx_slots;
6061 }
6062 }
6063
6064 if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6065 goto down;
6066
6067 /* send queue */
6068 if (mcx_create_tis(sc) != 0)
6069 goto down;
6070
6071 if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6072 goto down;
6073
6074 /* receive queue */
6075 if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6076 goto down;
6077
6078 if (mcx_create_tir(sc) != 0)
6079 goto down;
6080
6081 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6082 goto down;
6083
6084 /* promisc flow group */
6085 start = 0;
6086 memset(&match_crit, 0, sizeof(match_crit));
6087 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6088 0, &match_crit) != 0)
6089 goto down;
6090 sc->sc_promisc_flow_enabled = 0;
6091 start++;
6092
6093 /* all multicast flow group */
6094 match_crit.mc_dest_mac[0] = 0x01;
6095 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6096 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6097 goto down;
6098 sc->sc_allmulti_flow_enabled = 0;
6099 start++;
6100
6101 /* mac address matching flow group */
6102 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6103 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6104 (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6105 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6106 goto down;
6107
6108 /* flow table entries for unicast and broadcast */
6109 start = 0;
6110 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6111 LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6112 goto down;
6113 start++;
6114
6115 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6116 etherbroadcastaddr) != 0)
6117 goto down;
6118 start++;
6119
6120 /* multicast entries go after that */
6121 sc->sc_mcast_flow_base = start;
6122
6123 /* re-add any existing multicast flows */
6124 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6125 if (sc->sc_mcast_flows[i][0] != 0) {
6126 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6127 sc->sc_mcast_flow_base + i,
6128 sc->sc_mcast_flows[i]);
6129 }
6130 }
6131
6132 if (mcx_set_flow_table_root(sc) != 0)
6133 goto down;
6134
6135 /* start the queues */
6136 if (mcx_ready_sq(sc) != 0)
6137 goto down;
6138
6139 if (mcx_ready_rq(sc) != 0)
6140 goto down;
6141
6142 mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6143 sc->sc_rx_prod = 0;
6144 mcx_rx_fill(sc);
6145
6146 mcx_calibrate_first(sc);
6147
6148 SET(ifp->if_flags, IFF_RUNNING);
6149
6150 sc->sc_tx_cons = 0;
6151 sc->sc_tx_prod = 0;
6152 CLR(ifp->if_flags, IFF_OACTIVE);
6153 if_schedule_deferred_start(ifp);
6154
6155 return 0;
6156 destroy_tx_slots:
6157 mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6158 sc->sc_tx_slots = NULL;
6159
6160 i = (1 << MCX_LOG_RQ_SIZE);
6161 destroy_rx_slots:
6162 mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6163 sc->sc_rx_slots = NULL;
6164 down:
6165 mcx_stop(ifp, 0);
6166 return EIO;
6167 }
6168
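/*
 * if_stop: tear everything down in the reverse order of mcx_init(),
 * removing the flow table entries first so the hardware stops
 * delivering packets before the queues are destroyed.
 */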
6169 static void
6170 mcx_stop(struct ifnet *ifp, int disable)
6171 {
6172 struct mcx_softc *sc = ifp->if_softc;
6173 int group, i;
6174
6175 CLR(ifp->if_flags, IFF_RUNNING);
6176
6177 /*
6178 * delete flow table entries first, so no packets can arrive
6179 * after the barriers
6180 */
6181 if (sc->sc_promisc_flow_enabled)
6182 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
6183 if (sc->sc_allmulti_flow_enabled)
6184 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
6185 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
6186 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
6187 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6188 if (sc->sc_mcast_flows[i][0] != 0) {
6189 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6190 sc->sc_mcast_flow_base + i);
6191 }
6192 }
6193
6194 callout_halt(&sc->sc_calibrate, NULL);
6195
6196 for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
6197 if (sc->sc_flow_group_id[group] != -1)
6198 mcx_destroy_flow_group(sc,
6199 sc->sc_flow_group_id[group]);
6200 }
6201
6202 if (sc->sc_flow_table_id != -1)
6203 mcx_destroy_flow_table(sc);
6204
6205 if (sc->sc_tirn != 0)
6206 mcx_destroy_tir(sc);
6207 if (sc->sc_rqn != 0)
6208 mcx_destroy_rq(sc);
6209
6210 if (sc->sc_sqn != 0)
6211 mcx_destroy_sq(sc);
6212 if (sc->sc_tisn != 0)
6213 mcx_destroy_tis(sc);
6214
6215 for (i = 0; i < sc->sc_num_cq; i++)
6216 mcx_destroy_cq(sc, i);
6217 sc->sc_num_cq = 0;
6218
6219 if (sc->sc_tx_slots != NULL) {
6220 mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
6221 (1 << MCX_LOG_SQ_SIZE));
6222 sc->sc_tx_slots = NULL;
6223 }
6224 if (sc->sc_rx_slots != NULL) {
6225 mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
6226 (1 << MCX_LOG_RQ_SIZE));
6227 sc->sc_rx_slots = NULL;
6228 }
6229 }
6230
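/*
 * if_ioctl: multicast filter updates are handled here so flow table
 * entries can track the multicast list; everything else goes through
 * ether_ioctl().  When the flow table is full, or an address range is
 * requested, the interface falls back to IFF_ALLMULTI.
 */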
6231 static int
6232 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6233 {
6234 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6235 struct ifreq *ifr = (struct ifreq *)data;
6236 struct ethercom *ec = &sc->sc_ec;
6237 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6238 struct ether_multi *enm;
6239 struct ether_multistep step;
6240 int s, i, flags, error = 0;
6241
6242 s = splnet();
6243 switch (cmd) {
6244
6245 case SIOCADDMULTI:
6246 		if (ether_addmulti(ifreq_getaddr(cmd, ifr),
		    &sc->sc_ec) == ENETRESET) {
6247 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6248 if (error != 0) {
6249 splx(s);
6250 return (error);
6251 }
6252
6253 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6254 if (sc->sc_mcast_flows[i][0] == 0) {
6255 memcpy(sc->sc_mcast_flows[i], addrlo,
6256 ETHER_ADDR_LEN);
6257 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6258 mcx_set_flow_table_entry(sc,
6259 MCX_FLOW_GROUP_MAC,
6260 sc->sc_mcast_flow_base + i,
6261 sc->sc_mcast_flows[i]);
6262 }
6263 break;
6264 }
6265 }
6266
6267 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6268 if (i == MCX_NUM_MCAST_FLOWS) {
6269 SET(ifp->if_flags, IFF_ALLMULTI);
6270 sc->sc_extra_mcast++;
6271 error = ENETRESET;
6272 }
6273
6274 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
6275 SET(ifp->if_flags, IFF_ALLMULTI);
6276 error = ENETRESET;
6277 }
6278 }
6279 }
6280 break;
6281
6282 case SIOCDELMULTI:
6283 		if (ether_delmulti(ifreq_getaddr(cmd, ifr),
		    &sc->sc_ec) == ENETRESET) {
6284 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6285 if (error != 0) {
6286 splx(s);
6287 return (error);
6288 }
6289
6290 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6291 if (memcmp(sc->sc_mcast_flows[i], addrlo,
6292 ETHER_ADDR_LEN) == 0) {
6293 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6294 mcx_delete_flow_table_entry(sc,
6295 MCX_FLOW_GROUP_MAC,
6296 sc->sc_mcast_flow_base + i);
6297 }
6298 sc->sc_mcast_flows[i][0] = 0;
6299 break;
6300 }
6301 }
6302
6303 if (i == MCX_NUM_MCAST_FLOWS)
6304 sc->sc_extra_mcast--;
6305
6306 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6307 sc->sc_extra_mcast == 0) {
6308 flags = 0;
6309 ETHER_LOCK(ec);
6310 ETHER_FIRST_MULTI(step, ec, enm);
6311 while (enm != NULL) {
6312 if (memcmp(enm->enm_addrlo,
6313 enm->enm_addrhi, ETHER_ADDR_LEN)) {
6314 SET(flags, IFF_ALLMULTI);
6315 break;
6316 }
6317 ETHER_NEXT_MULTI(step, enm);
6318 }
6319 ETHER_UNLOCK(ec);
6320 if (!ISSET(flags, IFF_ALLMULTI)) {
6321 CLR(ifp->if_flags, IFF_ALLMULTI);
6322 error = ENETRESET;
6323 }
6324 }
6325 }
6326 break;
6327
6328 default:
6329 error = ether_ioctl(ifp, cmd, data);
6330 }
6331
6332 if (error == ENETRESET) {
6333 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6334 (IFF_UP | IFF_RUNNING))
6335 mcx_iff(sc);
6336 error = 0;
6337 }
6338 splx(s);
6339
6340 return (error);
6341 }
6342
6343 #if 0
6344 static int
6345 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
6346 {
6347 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6348 struct mcx_reg_mcia mcia;
6349 struct mcx_reg_pmlp pmlp;
6350 int offset, error;
6351
6352 /* get module number */
6353 memset(&pmlp, 0, sizeof(pmlp));
6354 pmlp.rp_local_port = 1;
6355 error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
6356 sizeof(pmlp));
6357 if (error != 0) {
6358 printf("%s: unable to get eeprom module number\n",
6359 DEVNAME(sc));
6360 return error;
6361 }
6362
6363 for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
6364 memset(&mcia, 0, sizeof(mcia));
6365 mcia.rm_l = 0;
6366 mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
6367 MCX_PMLP_MODULE_NUM_MASK;
6368 mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */
6369 mcia.rm_page_num = sff->sff_page;
6370 mcia.rm_dev_addr = htobe16(offset);
6371 mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
6372
6373 error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
6374 &mcia, sizeof(mcia));
6375 if (error != 0) {
6376 printf("%s: unable to read eeprom at %x\n",
6377 DEVNAME(sc), offset);
6378 return error;
6379 }
6380
6381 memcpy(sff->sff_data + offset, mcia.rm_data,
6382 MCX_MCIA_EEPROM_BYTES);
6383 }
6384
6385 return 0;
6386 }
6387 #endif
6388
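/*
 * Load an mbuf chain for transmit DMA.  If it has too many segments
 * (EFBIG), compact it with m_defrag() and retry once.
 */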
6389 static int
6390 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6391 {
6392 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6393 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6394 case 0:
6395 break;
6396
6397 case EFBIG:
6398 if (m_defrag(m, M_DONTWAIT) != NULL &&
6399 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6400 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6401 break;
6402
6403 /* FALLTHROUGH */
6404 default:
6405 return (1);
6406 }
6407
6408 ms->ms_m = m;
6409 return (0);
6410 }
6411
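/*
 * if_start: for each packet, build an SQ entry with the first
 * MCX_SQ_INLINE_SIZE bytes of headers copied inline and the rest
 * described by DMA segments (spilling into extra slots as needed),
 * then ring the doorbell and write the last entry's first 64 bits to
 * the blue flame buffer to kick the hardware.
 */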
6412 static void
6413 mcx_start(struct ifnet *ifp)
6414 {
6415 struct mcx_softc *sc = ifp->if_softc;
6416 struct mcx_sq_entry *sq, *sqe;
6417 struct mcx_sq_entry_seg *sqs;
6418 struct mcx_slot *ms;
6419 bus_dmamap_t map;
6420 struct mbuf *m;
6421 u_int idx, free, used;
6422 uint64_t *bf;
6423 size_t bf_base;
6424 int i, seg, nseg;
6425
6426 bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
6427
6428 idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
6429 free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;
6430
6431 used = 0;
6432 bf = NULL;
6433 sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);
6434
6435 for (;;) {
6436 if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
6437 SET(ifp->if_flags, IFF_OACTIVE);
6438 break;
6439 }
6440
6441 IFQ_DEQUEUE(&ifp->if_snd, m);
6442 if (m == NULL) {
6443 break;
6444 }
6445
6446 sqe = sq + idx;
6447 ms = &sc->sc_tx_slots[idx];
6448 memset(sqe, 0, sizeof(*sqe));
6449
6450 /* ctrl segment */
6451 sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
6452 ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
6453 /* always generate a completion event */
6454 sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
6455
6456 /* eth segment */
6457 sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
6458 m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
6459 m_adj(m, MCX_SQ_INLINE_SIZE);
6460
6461 if (mcx_load_mbuf(sc, ms, m) != 0) {
6462 m_freem(m);
6463 if_statinc(ifp, if_oerrors);
6464 continue;
6465 }
6466 bf = (uint64_t *)sqe;
6467
6468 if (ifp->if_bpf != NULL)
6469 bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
6470 MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
6471
6472 map = ms->ms_map;
6473 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6474 BUS_DMASYNC_PREWRITE);
6475
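		/*
		 * descriptor count in 16-byte units: apparently the
		 * ctrl and eth (inline header) segments account for 3,
		 * plus one per data segment.
		 */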
6476 sqe->sqe_ds_sq_num =
6477 htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
6478 (map->dm_nsegs + 3));
6479
6480 /* data segment - first wqe has one segment */
6481 sqs = sqe->sqe_segs;
6482 seg = 0;
6483 nseg = 1;
6484 for (i = 0; i < map->dm_nsegs; i++) {
6485 if (seg == nseg) {
6486 /* next slot */
6487 idx++;
6488 if (idx == (1 << MCX_LOG_SQ_SIZE))
6489 idx = 0;
6490 sc->sc_tx_prod++;
6491 used++;
6492
6493 sqs = (struct mcx_sq_entry_seg *)(sq + idx);
6494 seg = 0;
6495 nseg = MCX_SQ_SEGS_PER_SLOT;
6496 }
6497 sqs[seg].sqs_byte_count =
6498 htobe32(map->dm_segs[i].ds_len);
6499 sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
6500 sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
6501 seg++;
6502 }
6503
6504 idx++;
6505 if (idx == (1 << MCX_LOG_SQ_SIZE))
6506 idx = 0;
6507 sc->sc_tx_prod++;
6508 used++;
6509 }
6510
6511 if (used) {
6512 *sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);
6513
6514 membar_sync();
6515
6516 /*
6517 * write the first 64 bits of the last sqe we produced
6518 * to the blue flame buffer
6519 */
6520 bus_space_write_8(sc->sc_memt, sc->sc_memh,
6521 bf_base + sc->sc_bf_offset, *bf);
6522 /* next write goes to the other buffer */
6523 sc->sc_bf_offset ^= sc->sc_bf_size;
6524
6525 membar_sync();
6526 }
6527 }
6528
6529 static void
6530 mcx_watchdog(struct ifnet *ifp)
6531 {
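	/* XXX nothing done here yet; tx timeouts go unnoticed */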
6532 }
6533
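/* translate the PTYS ethernet capability bits into ifmedia types */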
6534 static void
6535 mcx_media_add_types(struct mcx_softc *sc)
6536 {
6537 struct mcx_reg_ptys ptys;
6538 int i;
6539 uint32_t proto_cap;
6540
6541 memset(&ptys, 0, sizeof(ptys));
6542 ptys.rp_local_port = 1;
6543 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6544 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6545 sizeof(ptys)) != 0) {
6546 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6547 return;
6548 }
6549
6550 proto_cap = be32toh(ptys.rp_eth_proto_cap);
6551 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6552 const struct mcx_eth_proto_capability *cap;
6553 if (!ISSET(proto_cap, 1U << i))
6554 continue;
6555
6556 cap = &mcx_eth_cap_map[i];
6557 if (cap->cap_media == 0)
6558 continue;
6559
6560 ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
6561 }
6562 }
6563
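/*
 * Report the current media: map the operational protocol bits from
 * PTYS back to an ifmedia subtype and mark the link active if any are
 * set.
 */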
6564 static void
6565 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6566 {
6567 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6568 struct mcx_reg_ptys ptys;
6569 int i;
6570 uint32_t proto_oper;
6571 uint64_t media_oper;
6572
6573 memset(&ptys, 0, sizeof(ptys));
6574 ptys.rp_local_port = 1;
6575 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6576
6577 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6578 sizeof(ptys)) != 0) {
6579 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6580 return;
6581 }
6582
6583 proto_oper = be32toh(ptys.rp_eth_proto_oper);
6584
6585 media_oper = 0;
6586
6587 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6588 const struct mcx_eth_proto_capability *cap;
6589 if (!ISSET(proto_oper, 1U << i))
6590 continue;
6591
6592 cap = &mcx_eth_cap_map[i];
6593
6594 if (cap->cap_media != 0)
6595 media_oper = cap->cap_media;
6596 }
6597
6598 ifmr->ifm_status = IFM_AVALID;
6599 /* not sure if this is the right thing to check, maybe paos? */
6600 if (proto_oper != 0) {
6601 ifmr->ifm_status |= IFM_ACTIVE;
6602 ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6603 /* txpause, rxpause, duplex? */
6604 }
6605 }
6606
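/*
 * Set the advertised media: take the port down, write the new protocol
 * mask to PTYS (everything supported for autoselect, a single bit
 * otherwise), then bring the port back up to restart negotiation.
 */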
6607 static int
6608 mcx_media_change(struct ifnet *ifp)
6609 {
6610 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6611 struct mcx_reg_ptys ptys;
6612 struct mcx_reg_paos paos;
6613 uint32_t media;
6614 int i, error;
6615
6616 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6617 return EINVAL;
6618
6619 error = 0;
6620
6621 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6622 /* read ptys to get supported media */
6623 memset(&ptys, 0, sizeof(ptys));
6624 ptys.rp_local_port = 1;
6625 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6626 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6627 &ptys, sizeof(ptys)) != 0) {
6628 printf("%s: unable to read port type/speed\n",
6629 DEVNAME(sc));
6630 return EIO;
6631 }
6632
6633 media = be32toh(ptys.rp_eth_proto_cap);
6634 } else {
6635 /* map media type */
6636 media = 0;
6637 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6638 const struct mcx_eth_proto_capability *cap;
6639
6640 cap = &mcx_eth_cap_map[i];
6641 if (cap->cap_media ==
6642 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6643 				media = (1U << i);
6644 break;
6645 }
6646 }
6647 }
6648
6649 /* disable the port */
6650 memset(&paos, 0, sizeof(paos));
6651 paos.rp_local_port = 1;
6652 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6653 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6654 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6655 sizeof(paos)) != 0) {
6656 printf("%s: unable to set port state to down\n", DEVNAME(sc));
6657 return EIO;
6658 }
6659
6660 memset(&ptys, 0, sizeof(ptys));
6661 ptys.rp_local_port = 1;
6662 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6663 ptys.rp_eth_proto_admin = htobe32(media);
6664 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6665 sizeof(ptys)) != 0) {
6666 printf("%s: unable to set port media type/speed\n",
6667 DEVNAME(sc));
6668 error = EIO;
6669 }
6670
6671 /* re-enable the port to start negotiation */
6672 memset(&paos, 0, sizeof(paos));
6673 paos.rp_local_port = 1;
6674 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6675 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6676 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6677 sizeof(paos)) != 0) {
6678 printf("%s: unable to set port state to up\n", DEVNAME(sc));
6679 error = EIO;
6680 }
6681
6682 return error;
6683 }
6684
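/*
 * Workqueue task for port change events: read the operational status
 * and speed from the PAOS and PTYS registers and update the link state
 * and baudrate accordingly.
 */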
6685 static void
6686 mcx_port_change(struct work *wk, void *xsc)
6687 {
6688 struct mcx_softc *sc = xsc;
6689 struct ifnet *ifp = &sc->sc_ec.ec_if;
6690 struct mcx_reg_paos paos = {
6691 .rp_local_port = 1,
6692 };
6693 struct mcx_reg_ptys ptys = {
6694 .rp_local_port = 1,
6695 .rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
6696 };
6697 int link_state = LINK_STATE_DOWN;
6698
6699 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6700 sizeof(paos)) == 0) {
6701 if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6702 link_state = LINK_STATE_UP;
6703 }
6704
6705 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6706 sizeof(ptys)) == 0) {
6707 uint32_t proto_oper = be32toh(ptys.rp_eth_proto_oper);
6708 uint64_t baudrate = 0;
6709 unsigned int i;
6710
6711 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6712 const struct mcx_eth_proto_capability *cap;
6713 if (!ISSET(proto_oper, 1U << i))
6714 continue;
6715
6716 cap = &mcx_eth_cap_map[i];
6717 if (cap->cap_baudrate == 0)
6718 continue;
6719
6720 baudrate = cap->cap_baudrate;
6721 break;
6722 }
6723
6724 ifp->if_baudrate = baudrate;
6725 }
6726
6727 if (link_state != ifp->if_link_state) {
6728 if_link_state_change(ifp, link_state);
6729 }
6730 }
6731
6732
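/* device register access helpers; the HCA's registers are big-endian */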
6733 static inline uint32_t
6734 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6735 {
6736 uint32_t word;
6737
6738 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6739
6740 return (be32toh(word));
6741 }
6742
6743 static inline void
6744 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6745 {
6746 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6747 }
6748
6749 static inline void
6750 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
6751 {
6752 bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
6753 }
6754
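/*
 * Read the 64-bit free-running internal timer.  The high half is read
 * before and after the low half, and the read is retried until the two
 * high halves match, so a carry between the two 32-bit reads cannot
 * produce a torn value.
 */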
6755 static uint64_t
6756 mcx_timer(struct mcx_softc *sc)
6757 {
6758 uint32_t hi, lo, ni;
6759
6760 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6761 for (;;) {
6762 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6763 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6764 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6765
6766 if (ni == hi)
6767 break;
6768
6769 hi = ni;
6770 }
6771
6772 return (((uint64_t)hi << 32) | (uint64_t)lo);
6773 }
6774
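/*
 * Allocate a single physically contiguous DMA memory region, map it
 * into kernel virtual address space, and zero it.
 */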
6775 static int
6776 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6777 bus_size_t size, u_int align)
6778 {
6779 mxm->mxm_size = size;
6780
6781 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6782 mxm->mxm_size, 0,
6783 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6784 &mxm->mxm_map) != 0)
6785 return (1);
6786 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6787 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6788 BUS_DMA_WAITOK) != 0)
6789 goto destroy;
6790 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6791 mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6792 goto free;
6793 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6794 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6795 goto unmap;
6796
6797 mcx_dmamem_zero(mxm);
6798
6799 return (0);
6800 unmap:
6801 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6802 free:
6803 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6804 destroy:
6805 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6806 return (1);
6807 }
6808
6809 static void
6810 mcx_dmamem_zero(struct mcx_dmamem *mxm)
6811 {
6812 memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
6813 }
6814
6815 static void
6816 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
6817 {
6818 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
6819 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6820 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6821 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6822 }
6823
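/*
 * Allocate page-sized, page-aligned memory to hand to the firmware.
 * Unlike mcx_dmamem_alloc() this may be satisfied by multiple
 * segments; the segment array is trimmed if fewer than `pages'
 * segments were needed.
 */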
6824 static int
6825 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6826 {
6827 bus_dma_segment_t *segs;
6828 bus_size_t len = pages * MCX_PAGE_SIZE;
6829 size_t seglen;
6830
6831 segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
6832 seglen = sizeof(*segs) * pages;
6833
6834 if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6835 segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6836 goto free_segs;
6837
6838 if (mhm->mhm_seg_count < pages) {
6839 size_t nseglen;
6840
6841 mhm->mhm_segs = kmem_alloc(
6842 sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
6843
6844 nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6845
6846 memcpy(mhm->mhm_segs, segs, nseglen);
6847
6848 kmem_free(segs, seglen);
6849
6850 segs = mhm->mhm_segs;
6851 seglen = nseglen;
6852 	} else {
6853 		mhm->mhm_segs = segs;
	}
6854
6855 if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6856 MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6857 &mhm->mhm_map) != 0)
6858 goto free_dmamem;
6859
6860 if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6861 mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6862 goto destroy;
6863
6864 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6865 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6866
6867 mhm->mhm_npages = pages;
6868
6869 return (0);
6870
6871 destroy:
6872 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6873 free_dmamem:
6874 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6875 free_segs:
6876 kmem_free(segs, seglen);
6877 mhm->mhm_segs = NULL;
6878
6879 return (-1);
6880 }
6881
6882 static void
6883 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
6884 {
6885 if (mhm->mhm_npages == 0)
6886 return;
6887
6888 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6889 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
6890
6891 bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
6892 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6893 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6894 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
6895
6896 mhm->mhm_npages = 0;
6897 }
6898