/*	$NetBSD: if_mcx.c,v 1.12 2020/03/15 23:04:50 thorpej Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.33 2019/09/12 04:23:59 jmatthew Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE	1 */
/* #define CALLOUT_FLAGS	CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS	0
/* #endif */

#define MCX_MAX_NINTR	1

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v)		((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define MCX_CMDIF(_v)			((_v) >> 16)
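
/*
 * Illustrative sketch, not driver code: the firmware version is split
 * across two registers, so a "major.minor.subminor" string would be
 * assembled along these lines (sc_memt/sc_memh as mapped at attach):
 *
 *	uint32_t fw = bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_FW_VER);
 *	uint32_t sub = bus_space_read_4(sc->sc_memt, sc->sc_memh,
 *	    MCX_CMDIF_FW_SUBVER);
 *	printf("%u.%u.%u", MCX_FW_VER_MAJOR(fw), MCX_FW_VER_MINOR(fw),
 *	    MCX_FW_VER_SUBMINOR(sub));
 */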

#define MCX_ISSI		1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED	5

#define MCX_HARDMTU		9500

#define MCX_MAX_CQS		2 /* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE		6 /* one page */
#define MCX_LOG_CQ_SIZE		11
#define MCX_LOG_RQ_SIZE		10
#define MCX_LOG_SQ_SIZE		11

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD	50
#define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
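
/*
 * Worked through: with MCX_LOG_CQ_SIZE of 11, MCX_CQ_MOD_COUNTER is
 * ((1 << 10) * 9) / 10 = 921, so a completion event is raised after at
 * most 921 completions or one MCX_CQ_MOD_PERIOD, whichever hits first.
 */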

#define MCX_LOG_SQ_ENTRY_SIZE	6
#define MCX_SQ_ENTRY_MAX_SLOTS	4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS \
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS - 1) * MCX_SQ_SEGS_PER_SLOT))
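
/*
 * Arithmetic note: struct mcx_sq_entry is 64 bytes and struct
 * mcx_sq_entry_seg is 16, so MCX_SQ_SEGS_PER_SLOT is 4 and
 * MCX_SQ_MAX_SEGMENTS is 1 + 3 * 4 = 13: the first 64-byte slot
 * carries one data segment after the control and ethernet segments,
 * and each of the three additional slots carries four more.
 */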

#define MCX_LOG_FLOW_TABLE_SIZE	5
#define MCX_NUM_STATIC_FLOWS	4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE	18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET	0
#define MCX_CQ_DOORBELL_SIZE	16
#define MCX_RQ_DOORBELL_OFFSET	64
#define MCX_SQ_DOORBELL_OFFSET	64

#define MCX_WQ_DOORBELL_MASK	0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL	0x20
#define MCX_UAR_EQ_DOORBELL_ARM	0x40
#define MCX_UAR_EQ_DOORBELL	0x48
#define MCX_UAR_BF		0x800

#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018
#define MCX_STATE			0x01fc
#define MCX_STATE_MASK			(1U << 31)
#define MCX_STATE_INITIALIZING		(1U << 31)
#define MCX_STATE_READY			(0U << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)

#define MCX_INTERNAL_TIMER	0x1000
#define MCX_INTERNAL_TIMER_H	0x1000
#define MCX_INTERNAL_TIMER_L	0x1004

#define MCX_CLEAR_INT		0x100c

#define MCX_REG_OP_WRITE	0
#define MCX_REG_OP_READ		1

#define MCX_REG_PMLP		0x5002
#define MCX_REG_PMTU		0x5003
#define MCX_REG_PTYS		0x5004
#define MCX_REG_PAOS		0x5006
#define MCX_REG_PFCC		0x5007
#define MCX_REG_PPCNT		0x5008
#define MCX_REG_MCIA		0x9014

#define MCX_ETHER_CAP_SGMII	(1 << 0)
#define MCX_ETHER_CAP_1000_KX	(1 << 1)
#define MCX_ETHER_CAP_10G_CX4	(1 << 2)
#define MCX_ETHER_CAP_10G_KX4	(1 << 3)
#define MCX_ETHER_CAP_10G_KR	(1 << 4)
#define MCX_ETHER_CAP_20G_KR2	(1 << 5)
#define MCX_ETHER_CAP_40G_CR4	(1 << 6)
#define MCX_ETHER_CAP_40G_KR4	(1 << 7)
#define MCX_ETHER_CAP_56G_R4	(1 << 8)
#define MCX_ETHER_CAP_10G_CR	(1 << 12)
#define MCX_ETHER_CAP_10G_SR	(1 << 13)
#define MCX_ETHER_CAP_10G_LR	(1 << 14)
#define MCX_ETHER_CAP_40G_SR4	(1 << 15)
#define MCX_ETHER_CAP_40G_LR4	(1 << 16)
#define MCX_ETHER_CAP_50G_SR2	(1 << 18)
#define MCX_ETHER_CAP_100G_CR4	(1 << 20)
#define MCX_ETHER_CAP_100G_SR4	(1 << 21)
#define MCX_ETHER_CAP_100G_KR4	(1 << 22)
#define MCX_ETHER_CAP_100G_LR4	(1 << 23)
#define MCX_ETHER_CAP_100_TX	(1 << 24)
#define MCX_ETHER_CAP_1000_T	(1 << 25)
#define MCX_ETHER_CAP_10G_T	(1 << 26)
#define MCX_ETHER_CAP_25G_CR	(1 << 27)
#define MCX_ETHER_CAP_25G_KR	(1 << 28)
#define MCX_ETHER_CAP_25G_SR	(1 << 29)
#define MCX_ETHER_CAP_50G_CR2	(1 << 30)
#define MCX_ETHER_CAP_50G_KR2	(1U << 31)

#define MCX_PAGE_SHIFT	12
#define MCX_PAGE_SIZE	(1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE	32

#define MCX_CMD_QUERY_HCA_CAP	0x100
#define MCX_CMD_QUERY_ADAPTER	0x101
#define MCX_CMD_INIT_HCA	0x102
#define MCX_CMD_TEARDOWN_HCA	0x103
#define MCX_CMD_ENABLE_HCA	0x104
#define MCX_CMD_DISABLE_HCA	0x105
#define MCX_CMD_QUERY_PAGES	0x107
#define MCX_CMD_MANAGE_PAGES	0x108
#define MCX_CMD_SET_HCA_CAP	0x109
#define MCX_CMD_QUERY_ISSI	0x10a
#define MCX_CMD_SET_ISSI	0x10b
#define MCX_CMD_SET_DRIVER_VERSION \
				0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
				0x203
#define MCX_CMD_CREATE_EQ	0x301
#define MCX_CMD_DESTROY_EQ	0x302
#define MCX_CMD_CREATE_CQ	0x400
#define MCX_CMD_DESTROY_CQ	0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
				0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
				0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS \
				0x770
#define MCX_CMD_ALLOC_PD	0x800
#define MCX_CMD_ALLOC_UAR	0x802
#define MCX_CMD_ACCESS_REG	0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
				0x816
#define MCX_CMD_CREATE_TIR	0x900
#define MCX_CMD_DESTROY_TIR	0x902
#define MCX_CMD_CREATE_SQ	0x904
#define MCX_CMD_MODIFY_SQ	0x905
#define MCX_CMD_DESTROY_SQ	0x906
#define MCX_CMD_QUERY_SQ	0x907
#define MCX_CMD_CREATE_RQ	0x908
#define MCX_CMD_MODIFY_RQ	0x909
#define MCX_CMD_DESTROY_RQ	0x90a
#define MCX_CMD_QUERY_RQ	0x90b
#define MCX_CMD_CREATE_TIS	0x912
#define MCX_CMD_DESTROY_TIS	0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT \
				0x92f
#define MCX_CMD_CREATE_FLOW_TABLE \
				0x930
#define MCX_CMD_DESTROY_FLOW_TABLE \
				0x931
#define MCX_CMD_QUERY_FLOW_TABLE \
				0x932
#define MCX_CMD_CREATE_FLOW_GROUP \
				0x933
#define MCX_CMD_DESTROY_FLOW_GROUP \
				0x934
#define MCX_CMD_QUERY_FLOW_GROUP \
				0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY \
				0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
				0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
				0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER \
				0x939
#define MCX_CMD_QUERY_FLOW_COUNTER \
				0x93b

#define MCX_QUEUE_STATE_RST	0
#define MCX_QUEUE_STATE_RDY	1
#define MCX_QUEUE_STATE_ERR	3

#define MCX_FLOW_TABLE_TYPE_RX	0
#define MCX_FLOW_TABLE_TYPE_TX	1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
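
/*
 * The low bit of cq_status doubles as an ownership flag: the driver
 * posts a command with MCX_CQ_STATUS_OWN_HW set and the firmware hands
 * the entry back by flipping it to MCX_CQ_STATUS_OWN_SW.  A minimal
 * polling sketch (assuming cqe points at a freshly synced entry):
 *
 *	while ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
 *	    MCX_CQ_STATUS_OWN_HW)
 *		delay(1000);
 *	error = cqe->cq_status & MCX_CQ_STATUS_MASK;
 */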

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
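
/*
 * Commands whose data doesn't fit in the 16 inline bytes of the queue
 * entry spill into a chain of these mailboxes: each carries 512 bytes
 * of payload plus its position in the chain (mb_block_number), the bus
 * address of the next mailbox (mb_next_ptr), and the token of the
 * command that owns it.
 */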
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH	(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN		0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__	__COUNTER__
#else
#define __counter__	__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
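
/*
 * The three helpers above stamp out uniquely named padding fields, so
 * the capability layout below can say "__reserved__" repeatedly
 * without identifier collisions.  With __COUNTER__ available,
 *
 *	uint8_t __reserved__[2];
 *
 * expands to something like "uint8_t __reserved4[2];", the suffix
 * incrementing at each use; __LINE__ serves the same purpose on older
 * compilers.
 */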

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICE_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICE_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz; /* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSPORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
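
/*
 * Most capability fields occupy only the low bits of their byte, with
 * the mask defined next to them applied by the consumer, e.g. (an
 * illustrative read, not a function in this driver):
 *
 *	log_max_cq = caps->log_max_cq & MCX_CAP_DEVICE_LOG_MAX_CQ;
 *
 * The CTASSERTs above pin down the offsets of a few landmark fields so
 * a miscounted reserved block is caught at compile time rather than on
 * the wire.
 */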

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC		(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC		(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN		(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL		(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST		(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST		(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT		1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
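
/*
 * Note on consumption (a common scheme for this family of devices,
 * sketched from the structure rather than the PRM): entries start out
 * with eq_owner set to MCX_EQ_ENTRY_OWNER_INIT and hardware toggles
 * the bit as it writes each one, so software can tell a fresh event
 * from a stale one by comparing the owner bit with the parity of its
 * passes around the ring.
 */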

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER		(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE		(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT	2
#define MCX_CQ_ENTRY_OPCODE_SHIFT	4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE	0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32	1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64	2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED	3

#define MCX_CQ_ENTRY_OPCODE_REQ		0
#define MCX_CQ_ENTRY_OPCODE_SEND	2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR	13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR	14
#define MCX_CQ_ENTRY_OPCODE_INVALID	15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);
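
/*
 * The final byte of a CQE packs the ownership flag, the entry format
 * and the opcode together, so decoding looks roughly like (sketch):
 *
 *	owner = cqe->cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER;
 *	format = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_FORMAT_SHIFT) & 0x3;
 *	opcode = cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT;
 */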

struct mcx_cq_doorbell {
	uint32_t		db_update_ci;
	uint32_t		db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
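
/*
 * Writing db_update_ci acknowledges consumed CQEs; db_arm_ci re-arms
 * the CQ for the next completion event, carrying a small command
 * sequence number above the consumer index.  A hedged sketch (exact
 * byte order elided):
 *
 *	db->db_update_ci = htobe32(cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
 *	db->db_arm_ci = htobe32(
 *	    (sn << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
 *	    MCX_CQ_DOORBELL_ARM_CMD |
 *	    (cons & MCX_CQ_DOORBELL_ARM_CI_MASK));
 */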

struct mcx_wq_ctx {
	uint8_t			wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC		(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE	(1 << 3)
	uint8_t			wq_reserved0[5];
	uint16_t		wq_lwm;
	uint32_t		wq_pd;
	uint32_t		wq_uar_page;
	uint64_t		wq_doorbell;
	uint32_t		wq_hw_counter;
	uint32_t		wq_sw_counter;
	uint16_t		wq_log_stride;
	uint8_t			wq_log_page_sz;
	uint8_t			wq_log_size;
	uint8_t			wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM				(1U << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg	sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
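
/*
 * A send WQE therefore occupies at least one 64-byte slot: control
 * segment, ethernet segment with up to MCX_SQ_INLINE_SIZE bytes of
 * inlined headers, and a single data segment.  Packets needing more
 * DMA segments run into the following slots, which is where the
 * MCX_SQ_ENTRY_MAX_SLOTS and MCX_SQ_MAX_SEGMENTS limits defined
 * earlier come from.
 */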
1388
1389 struct mcx_cmd_create_sq_in {
1390 uint16_t cmd_opcode;
1391 uint8_t cmd_reserved0[4];
1392 uint16_t cmd_op_mod;
1393 uint8_t cmd_reserved1[8];
1394 } __packed __aligned(4);
1395
1396 struct mcx_cmd_create_sq_out {
1397 uint8_t cmd_status;
1398 uint8_t cmd_reserved0[3];
1399 uint32_t cmd_syndrome;
1400 uint32_t cmd_sqn;
1401 uint8_t cmd_reserved1[4];
1402 } __packed __aligned(4);
1403
1404 struct mcx_cmd_modify_sq_in {
1405 uint16_t cmd_opcode;
1406 uint8_t cmd_reserved0[4];
1407 uint16_t cmd_op_mod;
1408 uint32_t cmd_sq_state;
1409 uint8_t cmd_reserved1[4];
1410 } __packed __aligned(4);
1411
1412 struct mcx_cmd_modify_sq_mb_in {
1413 uint32_t cmd_modify_hi;
1414 uint32_t cmd_modify_lo;
1415 uint8_t cmd_reserved0[8];
1416 struct mcx_sq_ctx cmd_sq_ctx;
1417 } __packed __aligned(4);
1418
1419 struct mcx_cmd_modify_sq_out {
1420 uint8_t cmd_status;
1421 uint8_t cmd_reserved0[3];
1422 uint32_t cmd_syndrome;
1423 uint8_t cmd_reserved1[8];
1424 } __packed __aligned(4);
1425
1426 struct mcx_cmd_destroy_sq_in {
1427 uint16_t cmd_opcode;
1428 uint8_t cmd_reserved0[4];
1429 uint16_t cmd_op_mod;
1430 uint32_t cmd_sqn;
1431 uint8_t cmd_reserved1[4];
1432 } __packed __aligned(4);
1433
1434 struct mcx_cmd_destroy_sq_out {
1435 uint8_t cmd_status;
1436 uint8_t cmd_reserved0[3];
1437 uint32_t cmd_syndrome;
1438 uint8_t cmd_reserved1[8];
1439 } __packed __aligned(4);
1440
1441
1442 struct mcx_rq_ctx {
1443 uint32_t rq_flags;
1444 #define MCX_RQ_CTX_RLKEY (1U << 31)
1445 #define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
1446 #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
1447 #define MCX_RQ_CTX_STATE_SHIFT 20
1448 #define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
1449 uint32_t rq_user_index;
1450 uint32_t rq_cqn;
1451 uint32_t rq_reserved1;
1452 uint32_t rq_rmpn;
1453 uint32_t rq_reserved2[7];
1454 struct mcx_wq_ctx rq_wq;
1455 } __packed __aligned(4);
1456
1457 struct mcx_rq_entry {
1458 uint32_t rqe_byte_count;
1459 uint32_t rqe_lkey;
1460 uint64_t rqe_addr;
1461 } __packed __aligned(16);
1462
1463 struct mcx_cmd_create_rq_in {
1464 uint16_t cmd_opcode;
1465 uint8_t cmd_reserved0[4];
1466 uint16_t cmd_op_mod;
1467 uint8_t cmd_reserved1[8];
1468 } __packed __aligned(4);
1469
1470 struct mcx_cmd_create_rq_out {
1471 uint8_t cmd_status;
1472 uint8_t cmd_reserved0[3];
1473 uint32_t cmd_syndrome;
1474 uint32_t cmd_rqn;
1475 uint8_t cmd_reserved1[4];
1476 } __packed __aligned(4);
1477
1478 struct mcx_cmd_modify_rq_in {
1479 uint16_t cmd_opcode;
1480 uint8_t cmd_reserved0[4];
1481 uint16_t cmd_op_mod;
1482 uint32_t cmd_rq_state;
1483 uint8_t cmd_reserved1[4];
1484 } __packed __aligned(4);
1485
1486 struct mcx_cmd_modify_rq_mb_in {
1487 uint32_t cmd_modify_hi;
1488 uint32_t cmd_modify_lo;
1489 uint8_t cmd_reserved0[8];
1490 struct mcx_rq_ctx cmd_rq_ctx;
1491 } __packed __aligned(4);
1492
1493 struct mcx_cmd_modify_rq_out {
1494 uint8_t cmd_status;
1495 uint8_t cmd_reserved0[3];
1496 uint32_t cmd_syndrome;
1497 uint8_t cmd_reserved1[8];
1498 } __packed __aligned(4);
1499
1500 struct mcx_cmd_destroy_rq_in {
1501 uint16_t cmd_opcode;
1502 uint8_t cmd_reserved0[4];
1503 uint16_t cmd_op_mod;
1504 uint32_t cmd_rqn;
1505 uint8_t cmd_reserved1[4];
1506 } __packed __aligned(4);
1507
1508 struct mcx_cmd_destroy_rq_out {
1509 uint8_t cmd_status;
1510 uint8_t cmd_reserved0[3];
1511 uint32_t cmd_syndrome;
1512 uint8_t cmd_reserved1[8];
1513 } __packed __aligned(4);
1514
1515 struct mcx_cmd_create_flow_table_in {
1516 uint16_t cmd_opcode;
1517 uint8_t cmd_reserved0[4];
1518 uint16_t cmd_op_mod;
1519 uint8_t cmd_reserved1[8];
1520 } __packed __aligned(4);
1521
1522 struct mcx_flow_table_ctx {
1523 uint8_t ft_miss_action;
1524 uint8_t ft_level;
1525 uint8_t ft_reserved0;
1526 uint8_t ft_log_size;
1527 uint32_t ft_table_miss_id;
1528 uint8_t ft_reserved1[28];
1529 } __packed __aligned(4);
1530
1531 struct mcx_cmd_create_flow_table_mb_in {
1532 uint8_t cmd_table_type;
1533 uint8_t cmd_reserved0[7];
1534 struct mcx_flow_table_ctx cmd_ctx;
1535 } __packed __aligned(4);
1536
1537 struct mcx_cmd_create_flow_table_out {
1538 uint8_t cmd_status;
1539 uint8_t cmd_reserved0[3];
1540 uint32_t cmd_syndrome;
1541 uint32_t cmd_table_id;
1542 uint8_t cmd_reserved1[4];
1543 } __packed __aligned(4);
1544
1545 struct mcx_cmd_destroy_flow_table_in {
1546 uint16_t cmd_opcode;
1547 uint8_t cmd_reserved0[4];
1548 uint16_t cmd_op_mod;
1549 uint8_t cmd_reserved1[8];
1550 } __packed __aligned(4);
1551
1552 struct mcx_cmd_destroy_flow_table_mb_in {
1553 uint8_t cmd_table_type;
1554 uint8_t cmd_reserved0[3];
1555 uint32_t cmd_table_id;
1556 uint8_t cmd_reserved1[40];
1557 } __packed __aligned(4);
1558
1559 struct mcx_cmd_destroy_flow_table_out {
1560 uint8_t cmd_status;
1561 uint8_t cmd_reserved0[3];
1562 uint32_t cmd_syndrome;
1563 uint8_t cmd_reserved1[8];
1564 } __packed __aligned(4);
1565
1566 struct mcx_cmd_set_flow_table_root_in {
1567 uint16_t cmd_opcode;
1568 uint8_t cmd_reserved0[4];
1569 uint16_t cmd_op_mod;
1570 uint8_t cmd_reserved1[8];
1571 } __packed __aligned(4);
1572
1573 struct mcx_cmd_set_flow_table_root_mb_in {
1574 uint8_t cmd_table_type;
1575 uint8_t cmd_reserved0[3];
1576 uint32_t cmd_table_id;
1577 uint8_t cmd_reserved1[56];
1578 } __packed __aligned(4);
1579
1580 struct mcx_cmd_set_flow_table_root_out {
1581 uint8_t cmd_status;
1582 uint8_t cmd_reserved0[3];
1583 uint32_t cmd_syndrome;
1584 uint8_t cmd_reserved1[8];
1585 } __packed __aligned(4);
1586
1587 struct mcx_flow_match {
1588 /* outer headers */
1589 uint8_t mc_src_mac[6];
1590 uint16_t mc_ethertype;
1591 uint8_t mc_dest_mac[6];
1592 uint16_t mc_first_vlan;
1593 uint8_t mc_ip_proto;
1594 uint8_t mc_ip_dscp_ecn;
1595 uint8_t mc_vlan_flags;
1596 uint8_t mc_tcp_flags;
1597 uint16_t mc_tcp_sport;
1598 uint16_t mc_tcp_dport;
1599 uint32_t mc_reserved0;
1600 uint16_t mc_udp_sport;
1601 uint16_t mc_udp_dport;
1602 uint8_t mc_src_ip[16];
1603 uint8_t mc_dest_ip[16];
1604
1605 /* misc parameters */
1606 uint8_t mc_reserved1[8];
1607 uint16_t mc_second_vlan;
1608 uint8_t mc_reserved2[2];
1609 uint8_t mc_second_vlan_flags;
1610 uint8_t mc_reserved3[15];
1611 uint32_t mc_outer_ipv6_flow_label;
1612 uint8_t mc_reserved4[32];
1613
1614 uint8_t mc_reserved[384];
1615 } __packed __aligned(4);
1616
1617 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1618
1619 struct mcx_cmd_create_flow_group_in {
1620 uint16_t cmd_opcode;
1621 uint8_t cmd_reserved0[4];
1622 uint16_t cmd_op_mod;
1623 uint8_t cmd_reserved1[8];
1624 } __packed __aligned(4);
1625
1626 struct mcx_cmd_create_flow_group_mb_in {
1627 uint8_t cmd_table_type;
1628 uint8_t cmd_reserved0[3];
1629 uint32_t cmd_table_id;
1630 uint8_t cmd_reserved1[4];
1631 uint32_t cmd_start_flow_index;
1632 uint8_t cmd_reserved2[4];
1633 uint32_t cmd_end_flow_index;
1634 uint8_t cmd_reserved3[23];
1635 uint8_t cmd_match_criteria_enable;
1636 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1637 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1638 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
1639 struct mcx_flow_match cmd_match_criteria;
1640 uint8_t cmd_reserved4[448];
1641 } __packed __aligned(4);
1642
1643 struct mcx_cmd_create_flow_group_out {
1644 uint8_t cmd_status;
1645 uint8_t cmd_reserved0[3];
1646 uint32_t cmd_syndrome;
1647 uint32_t cmd_group_id;
1648 uint8_t cmd_reserved1[4];
1649 } __packed __aligned(4);
1650
1651 struct mcx_flow_ctx {
1652 uint8_t fc_reserved0[4];
1653 uint32_t fc_group_id;
1654 uint32_t fc_flow_tag;
1655 uint32_t fc_action;
1656 #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
1657 #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
1658 #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
1659 #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
1660 uint32_t fc_dest_list_size;
1661 uint32_t fc_counter_list_size;
1662 uint8_t fc_reserved1[40];
1663 struct mcx_flow_match fc_match_value;
1664 uint8_t fc_reserved2[192];
1665 } __packed __aligned(4);
1666
1667 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
1668 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
1669
1670 struct mcx_cmd_destroy_flow_group_in {
1671 uint16_t cmd_opcode;
1672 uint8_t cmd_reserved0[4];
1673 uint16_t cmd_op_mod;
1674 uint8_t cmd_reserved1[8];
1675 } __packed __aligned(4);
1676
1677 struct mcx_cmd_destroy_flow_group_mb_in {
1678 uint8_t cmd_table_type;
1679 uint8_t cmd_reserved0[3];
1680 uint32_t cmd_table_id;
1681 uint32_t cmd_group_id;
1682 uint8_t cmd_reserved1[36];
1683 } __packed __aligned(4);
1684
1685 struct mcx_cmd_destroy_flow_group_out {
1686 uint8_t cmd_status;
1687 uint8_t cmd_reserved0[3];
1688 uint32_t cmd_syndrome;
1689 uint8_t cmd_reserved1[8];
1690 } __packed __aligned(4);
1691
1692 struct mcx_cmd_set_flow_table_entry_in {
1693 uint16_t cmd_opcode;
1694 uint8_t cmd_reserved0[4];
1695 uint16_t cmd_op_mod;
1696 uint8_t cmd_reserved1[8];
1697 } __packed __aligned(4);
1698
1699 struct mcx_cmd_set_flow_table_entry_mb_in {
1700 uint8_t cmd_table_type;
1701 uint8_t cmd_reserved0[3];
1702 uint32_t cmd_table_id;
1703 uint32_t cmd_modify_enable_mask;
1704 uint8_t cmd_reserved1[4];
1705 uint32_t cmd_flow_index;
1706 uint8_t cmd_reserved2[28];
1707 struct mcx_flow_ctx cmd_flow_ctx;
1708 } __packed __aligned(4);
1709
1710 struct mcx_cmd_set_flow_table_entry_out {
1711 uint8_t cmd_status;
1712 uint8_t cmd_reserved0[3];
1713 uint32_t cmd_syndrome;
1714 uint8_t cmd_reserved1[8];
1715 } __packed __aligned(4);
1716
1717 struct mcx_cmd_query_flow_table_entry_in {
1718 uint16_t cmd_opcode;
1719 uint8_t cmd_reserved0[4];
1720 uint16_t cmd_op_mod;
1721 uint8_t cmd_reserved1[8];
1722 } __packed __aligned(4);
1723
1724 struct mcx_cmd_query_flow_table_entry_mb_in {
1725 uint8_t cmd_table_type;
1726 uint8_t cmd_reserved0[3];
1727 uint32_t cmd_table_id;
1728 uint8_t cmd_reserved1[8];
1729 uint32_t cmd_flow_index;
1730 uint8_t cmd_reserved2[28];
1731 } __packed __aligned(4);
1732
1733 struct mcx_cmd_query_flow_table_entry_out {
1734 uint8_t cmd_status;
1735 uint8_t cmd_reserved0[3];
1736 uint32_t cmd_syndrome;
1737 uint8_t cmd_reserved1[8];
1738 } __packed __aligned(4);
1739
1740 struct mcx_cmd_query_flow_table_entry_mb_out {
1741 uint8_t cmd_reserved0[48];
1742 struct mcx_flow_ctx cmd_flow_ctx;
1743 } __packed __aligned(4);
1744
1745 struct mcx_cmd_delete_flow_table_entry_in {
1746 uint16_t cmd_opcode;
1747 uint8_t cmd_reserved0[4];
1748 uint16_t cmd_op_mod;
1749 uint8_t cmd_reserved1[8];
1750 } __packed __aligned(4);
1751
1752 struct mcx_cmd_delete_flow_table_entry_mb_in {
1753 uint8_t cmd_table_type;
1754 uint8_t cmd_reserved0[3];
1755 uint32_t cmd_table_id;
1756 uint8_t cmd_reserved1[8];
1757 uint32_t cmd_flow_index;
1758 uint8_t cmd_reserved2[28];
1759 } __packed __aligned(4);
1760
1761 struct mcx_cmd_delete_flow_table_entry_out {
1762 uint8_t cmd_status;
1763 uint8_t cmd_reserved0[3];
1764 uint32_t cmd_syndrome;
1765 uint8_t cmd_reserved1[8];
1766 } __packed __aligned(4);
1767
1768 struct mcx_cmd_query_flow_group_in {
1769 uint16_t cmd_opcode;
1770 uint8_t cmd_reserved0[4];
1771 uint16_t cmd_op_mod;
1772 uint8_t cmd_reserved1[8];
1773 } __packed __aligned(4);
1774
1775 struct mcx_cmd_query_flow_group_mb_in {
1776 uint8_t cmd_table_type;
1777 uint8_t cmd_reserved0[3];
1778 uint32_t cmd_table_id;
1779 uint32_t cmd_group_id;
1780 uint8_t cmd_reserved1[36];
1781 } __packed __aligned(4);
1782
1783 struct mcx_cmd_query_flow_group_out {
1784 uint8_t cmd_status;
1785 uint8_t cmd_reserved0[3];
1786 uint32_t cmd_syndrome;
1787 uint8_t cmd_reserved1[8];
1788 } __packed __aligned(4);
1789
1790 struct mcx_cmd_query_flow_group_mb_out {
1791 uint8_t cmd_reserved0[12];
1792 uint32_t cmd_start_flow_index;
1793 uint8_t cmd_reserved1[4];
1794 uint32_t cmd_end_flow_index;
1795 uint8_t cmd_reserved2[20];
1796 uint32_t cmd_match_criteria_enable;
1797 uint8_t cmd_match_criteria[512];
1798 uint8_t cmd_reserved4[448];
1799 } __packed __aligned(4);
1800
1801 struct mcx_cmd_query_flow_table_in {
1802 uint16_t cmd_opcode;
1803 uint8_t cmd_reserved0[4];
1804 uint16_t cmd_op_mod;
1805 uint8_t cmd_reserved1[8];
1806 } __packed __aligned(4);
1807
1808 struct mcx_cmd_query_flow_table_mb_in {
1809 uint8_t cmd_table_type;
1810 uint8_t cmd_reserved0[3];
1811 uint32_t cmd_table_id;
1812 uint8_t cmd_reserved1[40];
1813 } __packed __aligned(4);
1814
1815 struct mcx_cmd_query_flow_table_out {
1816 uint8_t cmd_status;
1817 uint8_t cmd_reserved0[3];
1818 uint32_t cmd_syndrome;
1819 uint8_t cmd_reserved1[8];
1820 } __packed __aligned(4);
1821
1822 struct mcx_cmd_query_flow_table_mb_out {
1823 uint8_t cmd_reserved0[4];
1824 struct mcx_flow_table_ctx cmd_ctx;
1825 } __packed __aligned(4);
1826
1827 struct mcx_cmd_alloc_flow_counter_in {
1828 uint16_t cmd_opcode;
1829 uint8_t cmd_reserved0[4];
1830 uint16_t cmd_op_mod;
1831 uint8_t cmd_reserved1[8];
1832 } __packed __aligned(4);
1833
1834 struct mcx_cmd_query_rq_in {
1835 uint16_t cmd_opcode;
1836 uint8_t cmd_reserved0[4];
1837 uint16_t cmd_op_mod;
1838 uint32_t cmd_rqn;
1839 uint8_t cmd_reserved1[4];
1840 } __packed __aligned(4);
1841
1842 struct mcx_cmd_query_rq_out {
1843 uint8_t cmd_status;
1844 uint8_t cmd_reserved0[3];
1845 uint32_t cmd_syndrome;
1846 uint8_t cmd_reserved1[8];
1847 } __packed __aligned(4);
1848
1849 struct mcx_cmd_query_rq_mb_out {
1850 uint8_t cmd_reserved0[16];
1851 struct mcx_rq_ctx cmd_ctx;
1852 };
1853
1854 struct mcx_cmd_query_sq_in {
1855 uint16_t cmd_opcode;
1856 uint8_t cmd_reserved0[4];
1857 uint16_t cmd_op_mod;
1858 uint32_t cmd_sqn;
1859 uint8_t cmd_reserved1[4];
1860 } __packed __aligned(4);
1861
1862 struct mcx_cmd_query_sq_out {
1863 uint8_t cmd_status;
1864 uint8_t cmd_reserved0[3];
1865 uint32_t cmd_syndrome;
1866 uint8_t cmd_reserved1[8];
1867 } __packed __aligned(4);
1868
1869 struct mcx_cmd_query_sq_mb_out {
1870 uint8_t cmd_reserved0[16];
1871 struct mcx_sq_ctx cmd_ctx;
1872 };
1873
1874 struct mcx_cmd_alloc_flow_counter_out {
1875 uint8_t cmd_status;
1876 uint8_t cmd_reserved0[3];
1877 uint32_t cmd_syndrome;
1878 uint8_t cmd_reserved1[2];
1879 uint16_t cmd_flow_counter_id;
1880 uint8_t cmd_reserved2[4];
1881 } __packed __aligned(4);
1882
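/*
 * Work queue doorbell record: the driver publishes its receive and send
 * ring counters here in host memory for the device to read, rather than
 * writing them to a device register each time.
 */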
1883 struct mcx_wq_doorbell {
1884 uint32_t db_recv_counter;
1885 uint32_t db_send_counter;
1886 } __packed __aligned(8);
1887
1888 struct mcx_dmamem {
1889 bus_dmamap_t mxm_map;
1890 bus_dma_segment_t mxm_seg;
1891 int mxm_nsegs;
1892 size_t mxm_size;
1893 void *mxm_kva;
1894 };
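/*
 * Convenience accessors for a single-segment DMA allocation: the dmamap
 * itself, its device (bus) address, the kernel virtual mapping, and the
 * allocation length.
 */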
1895 #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
1896 #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
1897 #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
1898 #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
1899
1900 struct mcx_hwmem {
1901 bus_dmamap_t mhm_map;
1902 bus_dma_segment_t *mhm_segs;
1903 unsigned int mhm_seg_count;
1904 unsigned int mhm_npages;
1905 };
1906
1907 struct mcx_slot {
1908 bus_dmamap_t ms_map;
1909 struct mbuf *ms_m;
1910 };
1911
1912 struct mcx_cq {
1913 int cq_n;
1914 struct mcx_dmamem cq_mem;
1915 uint32_t *cq_doorbell;
1916 uint32_t cq_cons;
1917 uint32_t cq_count;
1918 };
1919
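/*
 * Timestamp calibration state: two copies live in sc_calibration[] and
 * sc_calibration_gen selects the current one, so the rx path can take a
 * consistent snapshot while the callout updates the other copy.
 */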
1920 struct mcx_calibration {
1921 uint64_t c_timestamp; /* previous mcx chip time */
1922 uint64_t c_uptime; /* previous kernel nanouptime */
1923 uint64_t c_tbase; /* mcx chip time */
1924 uint64_t c_ubase; /* kernel nanouptime */
1925 uint64_t c_tdiff;
1926 uint64_t c_udiff;
1927 };
1928
1929 #define MCX_CALIBRATE_FIRST 2
1930 #define MCX_CALIBRATE_NORMAL 30
1931
1932 struct mcx_rxring {
1933 u_int rxr_total;
1934 u_int rxr_inuse;
1935 };
1936
1937 MBUFQ_HEAD(mcx_mbufq);
1938
1939 struct mcx_softc {
1940 device_t sc_dev;
1941 struct ethercom sc_ec;
1942 struct ifmedia sc_media;
1943 uint64_t sc_media_status;
1944 uint64_t sc_media_active;
1945 kmutex_t sc_media_mutex;
1946
1947 pci_chipset_tag_t sc_pc;
1948 pci_intr_handle_t *sc_intrs;
1949 void *sc_ihs[MCX_MAX_NINTR];
1950 pcitag_t sc_tag;
1951
1952 bus_dma_tag_t sc_dmat;
1953 bus_space_tag_t sc_memt;
1954 bus_space_handle_t sc_memh;
1955 bus_size_t sc_mems;
1956
1957 struct mcx_dmamem sc_cmdq_mem;
1958 unsigned int sc_cmdq_mask;
1959 unsigned int sc_cmdq_size;
1960
1961 unsigned int sc_cmdq_token;
1962
1963 struct mcx_hwmem sc_boot_pages;
1964 struct mcx_hwmem sc_init_pages;
1965 struct mcx_hwmem sc_regular_pages;
1966
1967 int sc_uar;
1968 int sc_pd;
1969 int sc_tdomain;
1970 uint32_t sc_lkey;
1971
1972 struct mcx_dmamem sc_doorbell_mem;
1973
1974 int sc_eqn;
1975 int sc_eq_cons;
1976 struct mcx_dmamem sc_eq_mem;
1977 int sc_hardmtu;
1978
1979 struct workqueue *sc_workq;
1980 struct work sc_port_change;
1981
1982 int sc_flow_table_id;
1983 #define MCX_FLOW_GROUP_PROMISC 0
1984 #define MCX_FLOW_GROUP_ALLMULTI 1
1985 #define MCX_FLOW_GROUP_MAC 2
1986 #define MCX_NUM_FLOW_GROUPS 3
1987 int sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
1988 int sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
1989 int sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
1990 int sc_promisc_flow_enabled;
1991 int sc_allmulti_flow_enabled;
1992 int sc_mcast_flow_base;
1993 int sc_extra_mcast;
1994 uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
1995
1996 struct mcx_calibration sc_calibration[2];
1997 unsigned int sc_calibration_gen;
1998 callout_t sc_calibrate;
1999
2000 struct mcx_cq sc_cq[MCX_MAX_CQS];
2001 int sc_num_cq;
2002
2003 /* rx */
2004 int sc_tirn;
2005 int sc_rqn;
2006 struct mcx_dmamem sc_rq_mem;
2007 struct mcx_slot *sc_rx_slots;
2008 uint32_t *sc_rx_doorbell;
2009
2010 uint32_t sc_rx_prod;
2011 callout_t sc_rx_refill;
2012 struct mcx_rxring sc_rxr;
2013
2014 /* tx */
2015 int sc_tisn;
2016 int sc_sqn;
2017 struct mcx_dmamem sc_sq_mem;
2018 struct mcx_slot *sc_tx_slots;
2019 uint32_t *sc_tx_doorbell;
2020 int sc_bf_size;
2021 int sc_bf_offset;
2022
2023 uint32_t sc_tx_cons;
2024 uint32_t sc_tx_prod;
2025
2026 uint64_t sc_last_cq_db;
2027 uint64_t sc_last_srq_db;
2028 };
2029 #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2030
2031 static int mcx_match(device_t, cfdata_t, void *);
2032 static void mcx_attach(device_t, device_t, void *);
2033
2034 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2035 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2036 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2037 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2038
2039 static int mcx_version(struct mcx_softc *);
2040 static int mcx_init_wait(struct mcx_softc *);
2041 static int mcx_enable_hca(struct mcx_softc *);
2042 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2043 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2044 int);
2045 static int mcx_issi(struct mcx_softc *);
2046 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2047 static int mcx_hca_max_caps(struct mcx_softc *);
2048 static int mcx_hca_set_caps(struct mcx_softc *);
2049 static int mcx_init_hca(struct mcx_softc *);
2050 static int mcx_set_driver_version(struct mcx_softc *);
2051 static int mcx_iff(struct mcx_softc *);
2052 static int mcx_alloc_uar(struct mcx_softc *);
2053 static int mcx_alloc_pd(struct mcx_softc *);
2054 static int mcx_alloc_tdomain(struct mcx_softc *);
2055 static int mcx_create_eq(struct mcx_softc *);
2056 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2057 static int mcx_query_special_contexts(struct mcx_softc *);
2058 static int mcx_set_port_mtu(struct mcx_softc *, int);
2059 static int mcx_create_cq(struct mcx_softc *, int);
2060 static int mcx_destroy_cq(struct mcx_softc *, int);
2061 static int mcx_create_sq(struct mcx_softc *, int);
2062 static int mcx_destroy_sq(struct mcx_softc *);
2063 static int mcx_ready_sq(struct mcx_softc *);
2064 static int mcx_create_rq(struct mcx_softc *, int);
2065 static int mcx_destroy_rq(struct mcx_softc *);
2066 static int mcx_ready_rq(struct mcx_softc *);
2067 static int mcx_create_tir(struct mcx_softc *);
2068 static int mcx_destroy_tir(struct mcx_softc *);
2069 static int mcx_create_tis(struct mcx_softc *);
2070 static int mcx_destroy_tis(struct mcx_softc *);
2071 static int mcx_create_flow_table(struct mcx_softc *, int);
2072 static int mcx_set_flow_table_root(struct mcx_softc *);
2073 static int mcx_destroy_flow_table(struct mcx_softc *);
2074 static int mcx_create_flow_group(struct mcx_softc *, int, int,
2075 int, int, struct mcx_flow_match *);
2076 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2077 static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2078 const uint8_t *);
2079 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2080
2081 #if 0
2082 static int mcx_dump_flow_table(struct mcx_softc *);
2083 static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2084 static int mcx_dump_flow_group(struct mcx_softc *);
2085 static int mcx_dump_rq(struct mcx_softc *);
2086 static int mcx_dump_sq(struct mcx_softc *);
2087 #endif
2088
2090 #if 0
2091 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2092 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2093 #endif
2094 static void mcx_refill(void *);
2095 static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2096 struct mcx_mbufq *, const struct mcx_calibration *);
2097 static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2098 int *);
2099 static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2100
2101 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2102 static void mcx_arm_eq(struct mcx_softc *);
2103 static int mcx_intr(void *);
2104
2105 static int mcx_init(struct ifnet *);
2106 static void mcx_stop(struct ifnet *, int);
2107 static int mcx_ioctl(struct ifnet *, u_long, void *);
2108 static void mcx_start(struct ifnet *);
2109 static void mcx_watchdog(struct ifnet *);
2110 static void mcx_media_add_types(struct mcx_softc *);
2111 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2112 static int mcx_media_change(struct ifnet *);
2113 #if 0
2114 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2115 #endif
2116 static void mcx_port_change(struct work *, void *);
2117
2118 static void mcx_calibrate_first(struct mcx_softc *);
2119 static void mcx_calibrate(void *);
2120
2121 static inline uint32_t mcx_rd(struct mcx_softc *, bus_size_t);
2122 static inline void mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2123 static inline void mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2127
2128 static uint64_t mcx_timer(struct mcx_softc *);
2129
2130 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2131 bus_size_t, u_int align);
2132 static void mcx_dmamem_zero(struct mcx_dmamem *);
2133 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2134
2135 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2136 unsigned int);
2137 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2138
2139 CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2140
2141 static const struct {
2142 pci_vendor_id_t vendor;
2143 pci_product_id_t product;
2144 } mcx_devices[] = {
2145 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2146 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2147 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2148 { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2149 };
2150
2151 static const uint64_t mcx_eth_cap_map[] = {
2152 IFM_1000_SGMII,
2153 IFM_1000_KX,
2154 IFM_10G_CX4,
2155 IFM_10G_KX4,
2156 IFM_10G_KR,
2157 IFM_20G_KR2,
2158 IFM_40G_CR4,
2159 IFM_40G_KR4,
2160 IFM_56G_R4,
2161 0,
2162 0,
2163 0,
2164 IFM_10G_CR1,
2165 IFM_10G_SR,
2166 IFM_10G_LR,
2167 IFM_40G_SR4,
2168 IFM_40G_LR4,
2169 0,
2170 IFM_50G_SR2,
2171 0,
2172 IFM_100G_CR4,
2173 IFM_100G_SR4,
2174 IFM_100G_KR4,
2175 IFM_100G_LR4,
2176 IFM_100_TX,
2177 IFM_1000_T,
2178 IFM_10G_T,
2179 IFM_25G_CR,
2180 IFM_25G_KR,
2181 IFM_25G_SR,
2182 IFM_50G_CR2,
2183 IFM_50G_KR2
2184 };
2185
2186 static int
2187 mcx_match(device_t parent, cfdata_t cf, void *aux)
2188 {
2189 struct pci_attach_args *pa = aux;
2190 int n;
2191
2192 for (n = 0; n < __arraycount(mcx_devices); n++) {
2193 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2194 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2195 return 1;
2196 }
2197
2198 return 0;
2199 }
2200
2201 static void
2202 mcx_attach(device_t parent, device_t self, void *aux)
2203 {
2204 struct mcx_softc *sc = device_private(self);
2205 struct ifnet *ifp = &sc->sc_ec.ec_if;
2206 struct pci_attach_args *pa = aux;
2207 uint8_t enaddr[ETHER_ADDR_LEN];
2208 int counts[PCI_INTR_TYPE_SIZE];
2209 char intrbuf[PCI_INTRSTR_LEN];
2210 pcireg_t memtype;
2211 uint32_t r;
2212 unsigned int cq_stride;
2213 unsigned int cq_size;
2214 const char *intrstr;
2215 int i;
2216
2217 sc->sc_dev = self;
2218 sc->sc_pc = pa->pa_pc;
2219 sc->sc_tag = pa->pa_tag;
2220 if (pci_dma64_available(pa))
2221 sc->sc_dmat = pa->pa_dmat64;
2222 else
2223 sc->sc_dmat = pa->pa_dmat;
2224
2225 /* Map the PCI memory space */
2226 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2227 if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2228 0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2229 NULL, &sc->sc_mems)) {
2230 aprint_error(": unable to map register memory\n");
2231 return;
2232 }
2233
2234 pci_aprint_devinfo(pa, "Ethernet controller");
2235
2236 mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2237
2238 if (mcx_version(sc) != 0) {
2239 /* error printed by mcx_version */
2240 goto unmap;
2241 }
2242
2243 r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2244 cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2245 cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2246 if (cq_size > MCX_MAX_CQE) {
2247 aprint_error_dev(self,
2248 "command queue size overflow %u\n", cq_size);
2249 goto unmap;
2250 }
2251 if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2252 aprint_error_dev(self,
2253 "command queue entry size underflow %u\n", cq_stride);
2254 goto unmap;
2255 }
2256 if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2257 aprint_error_dev(self, "command queue page overflow\n");
2258 goto unmap;
2259 }
2260
2261 if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2262 MCX_PAGE_SIZE) != 0) {
2263 aprint_error_dev(self, "unable to allocate doorbell memory\n");
2264 goto unmap;
2265 }
2266
2267 if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2268 MCX_PAGE_SIZE) != 0) {
2269 aprint_error_dev(self, "unable to allocate command queue\n");
2270 goto dbfree;
2271 }
2272
2273 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2274 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2275 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2276 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2277
2278 if (mcx_init_wait(sc) != 0) {
2279 aprint_error_dev(self, "timeout waiting for init\n");
2280 goto cqfree;
2281 }
2282
2283 sc->sc_cmdq_mask = cq_size - 1;
2284 sc->sc_cmdq_size = cq_stride;
2285
2286 if (mcx_enable_hca(sc) != 0) {
2287 /* error printed by mcx_enable_hca */
2288 goto cqfree;
2289 }
2290
2291 if (mcx_issi(sc) != 0) {
2292 /* error printed by mcx_issi */
2293 goto teardown;
2294 }
2295
2296 if (mcx_pages(sc, &sc->sc_boot_pages,
2297 htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2298 /* error printed by mcx_pages */
2299 goto teardown;
2300 }
2301
2302 if (mcx_hca_max_caps(sc) != 0) {
2303 /* error printed by mcx_hca_max_caps */
2304 goto teardown;
2305 }
2306
2307 if (mcx_hca_set_caps(sc) != 0) {
2308 /* error printed by mcx_hca_set_caps */
2309 goto teardown;
2310 }
2311
2312 if (mcx_pages(sc, &sc->sc_init_pages,
2313 htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2314 /* error printed by mcx_pages */
2315 goto teardown;
2316 }
2317
2318 if (mcx_init_hca(sc) != 0) {
2319 /* error printed by mcx_init_hca */
2320 goto teardown;
2321 }
2322
2323 if (mcx_pages(sc, &sc->sc_regular_pages,
2324 htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2325 /* error printed by mcx_pages */
2326 goto teardown;
2327 }
2328
2329 /* apparently not necessary? */
2330 if (mcx_set_driver_version(sc) != 0) {
2331 /* error printed by mcx_set_driver_version */
2332 goto teardown;
2333 }
2334
2335 if (mcx_iff(sc) != 0) { /* modify nic vport context */
2336 /* error printed by mcx_iff? */
2337 goto teardown;
2338 }
2339
2340 if (mcx_alloc_uar(sc) != 0) {
2341 /* error printed by mcx_alloc_uar */
2342 goto teardown;
2343 }
2344
2345 if (mcx_alloc_pd(sc) != 0) {
2346 /* error printed by mcx_alloc_pd */
2347 goto teardown;
2348 }
2349
2350 if (mcx_alloc_tdomain(sc) != 0) {
2351 /* error printed by mcx_alloc_tdomain */
2352 goto teardown;
2353 }
2354
2355 /*
2356 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2357 * mellanox support tells me legacy interrupts are not supported,
2358 * so we're stuck with just msi-x.
2359 */
2360 counts[PCI_INTR_TYPE_MSIX] = 1;
2361 counts[PCI_INTR_TYPE_MSI] = 0;
2362 counts[PCI_INTR_TYPE_INTX] = 0;
2363 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2364 aprint_error_dev(self, "unable to allocate interrupt\n");
2365 goto teardown;
2366 }
2367 KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2368
2369 #ifdef MCX_MPSAFE
2370 pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
2371 #endif
2372
2373 intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
2374 sizeof(intrbuf));
2375 sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
2376 IPL_NET, mcx_intr, sc, DEVNAME(sc));
2377 if (sc->sc_ihs[0] == NULL) {
2378 aprint_error_dev(self, "unable to establish interrupt%s%s\n",
2379 intrstr ? " at " : "",
2380 intrstr ? intrstr : "");
2381 goto teardown;
2382 }
2383
2384 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2385
2386 if (mcx_create_eq(sc) != 0) {
2387 /* error printed by mcx_create_eq */
2388 goto teardown;
2389 }
2390
2391 if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2392 /* error printed by mcx_query_nic_vport_context */
2393 goto teardown;
2394 }
2395
2396 if (mcx_query_special_contexts(sc) != 0) {
2397 /* error printed by mcx_query_special_contexts */
2398 goto teardown;
2399 }
2400
2401 if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2402 /* error printed by mcx_set_port_mtu */
2403 goto teardown;
2404 }
2405
2406 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2407 ether_sprintf(enaddr));
2408
2409 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2410 ifp->if_softc = sc;
2411 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2412 #ifdef MCX_MPSAFE
2413 ifp->if_extflags = IFEF_MPSAFE;
2414 #endif
2415 ifp->if_init = mcx_init;
2416 ifp->if_stop = mcx_stop;
2417 ifp->if_ioctl = mcx_ioctl;
2418 ifp->if_start = mcx_start;
2419 ifp->if_watchdog = mcx_watchdog;
2420 ifp->if_mtu = sc->sc_hardmtu;
2421 IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2422 IFQ_SET_READY(&ifp->if_snd);
2423
2424 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
2425
2426 sc->sc_ec.ec_ifmedia = &sc->sc_media;
2427 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2428 mcx_media_status, &sc->sc_media_mutex);
2429 mcx_media_add_types(sc);
2430 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2431 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2432
2433 if_attach(ifp);
2434 if_deferred_start_init(ifp, NULL);
2435
2436 ether_ifattach(ifp, enaddr);
2437
2438 callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
2439 callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
2440 callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
2441 callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
2442
2443 if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
2444 PRI_NONE, IPL_NET, 0) != 0) {
2445 aprint_error_dev(self, "couldn't create port change workq\n");
2446 goto teardown;
2447 }
2448
2449 mcx_port_change(&sc->sc_port_change, sc);
2450
2451 sc->sc_flow_table_id = -1;
2452 for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2453 sc->sc_flow_group_id[i] = -1;
2454 sc->sc_flow_group_size[i] = 0;
2455 sc->sc_flow_group_start[i] = 0;
2456 }
2457 sc->sc_extra_mcast = 0;
2458 memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2459 return;
2460
2461 teardown:
2462 mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2463 /* error printed by mcx_teardown_hca, and we're already unwinding */
2464 cqfree:
2465 mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2466 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2467 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2468 MCX_CMDQ_INTERFACE_DISABLED);
2469 mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2470
2471 mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2472 mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2473 mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2474
2475 mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2476 dbfree:
2477 mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2478 unmap:
2479 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2480 sc->sc_mems = 0;
2481 }
2482
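/*
 * Minimal rx ring accounting, standing in for OpenBSD's if_rxr(9):
 * mcx_rxr_get() hands out up to "max" free slots and mcx_rxr_put()
 * returns them, bounded by the high watermark given at init time.
 */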
2483 static void
2484 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2485 {
2486 rxr->rxr_total = hwm;
2487 rxr->rxr_inuse = 0;
2488 }
2489
2490 static u_int
2491 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2492 {
2493 const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2494
2495 rxr->rxr_inuse += taken;
2496
2497 return taken;
2498 }
2499
2500 static void
2501 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2502 {
2503 rxr->rxr_inuse -= n;
2504 }
2505
2506 static u_int
2507 mcx_rxr_inuse(struct mcx_rxring *rxr)
2508 {
2509 return rxr->rxr_inuse;
2510 }
2511
2512 static int
2513 mcx_version(struct mcx_softc *sc)
2514 {
2515 uint32_t fw0, fw1;
2516 uint16_t cmdif;
2517
2518 fw0 = mcx_rd(sc, MCX_FW_VER);
2519 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2520
2521 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2522 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2523
2524 cmdif = MCX_CMDIF(fw1);
2525 if (cmdif != MCX_CMD_IF_SUPPORTED) {
2526 aprint_error_dev(sc->sc_dev,
2527 "unsupported command interface %u\n", cmdif);
2528 return (-1);
2529 }
2530
2531 return (0);
2532 }
2533
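/*
 * Poll the state register until the firmware reports ready, retrying
 * every millisecond for up to two seconds.
 */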
2534 static int
2535 mcx_init_wait(struct mcx_softc *sc)
2536 {
2537 unsigned int i;
2538 uint32_t r;
2539
2540 for (i = 0; i < 2000; i++) {
2541 r = mcx_rd(sc, MCX_STATE);
2542 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2543 return (0);
2544
2545 delay(1000);
2546 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2547 BUS_SPACE_BARRIER_READ);
2548 }
2549
2550 return (-1);
2551 }
2552
2553 static uint8_t
2554 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2555 unsigned int msec)
2556 {
2557 unsigned int i;
2558
2559 for (i = 0; i < msec; i++) {
2560 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2561 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2562
2563 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2564 MCX_CQ_STATUS_OWN_SW) {
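			/*
			 * presumably to drain events (such as command
			 * completions) that arrived while we polled
			 */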
2565 if (sc->sc_eqn != 0)
2566 mcx_intr(sc);
2567 return (0);
2568 }
2569
2570 delay(1000);
2571 }
2572
2573 return (ETIMEDOUT);
2574 }
2575
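/*
 * Command queue entries and mailboxes carry an 8-bit xor "signature".
 * The helpers below fold 64/32/8-bit fields into a 32-bit accumulator;
 * mcx_mix_done() then folds the accumulator down to the final byte.
 */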
2576 static uint32_t
2577 mcx_mix_u64(uint32_t xor, uint64_t u64)
2578 {
2579 xor ^= u64 >> 32;
2580 xor ^= u64;
2581
2582 return (xor);
2583 }
2584
2585 static uint32_t
2586 mcx_mix_u32(uint32_t xor, uint32_t u32)
2587 {
2588 xor ^= u32;
2589
2590 return (xor);
2591 }
2592
2593 static uint32_t
2594 mcx_mix_u8(uint32_t xor, uint8_t u8)
2595 {
2596 xor ^= u8;
2597
2598 return (xor);
2599 }
2600
2601 static uint8_t
2602 mcx_mix_done(uint32_t xor)
2603 {
2604 xor ^= xor >> 16;
2605 xor ^= xor >> 8;
2606
2607 return (xor);
2608 }
2609
2610 static uint8_t
2611 mcx_xor(const void *buf, size_t len)
2612 {
2613 const uint32_t *dwords = buf;
2614 uint32_t xor = 0xff;
2615 size_t i;
2616
2617 len /= sizeof(*dwords);
2618
2619 for (i = 0; i < len; i++)
2620 xor ^= dwords[i];
2621
2622 return (mcx_mix_done(xor));
2623 }
2624
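/*
 * Tokens tie a command queue entry to its mailbox chain; zero is never
 * used, so the counter skips it when it wraps.
 */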
2625 static uint8_t
2626 mcx_cmdq_token(struct mcx_softc *sc)
2627 {
2628 uint8_t token;
2629
2630 do {
2631 token = ++sc->sc_cmdq_token;
2632 } while (token == 0);
2633
2634 return (token);
2635 }
2636
2637 static void
2638 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2639 uint32_t ilen, uint32_t olen, uint8_t token)
2640 {
2641 memset(cqe, 0, sc->sc_cmdq_size);
2642
2643 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2644 be32enc(&cqe->cq_input_length, ilen);
2645 be32enc(&cqe->cq_output_length, olen);
2646 cqe->cq_token = token;
2647 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2648 }
2649
2650 static void
2651 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2652 {
2653 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2654 }
2655
2656 static int
2657 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
2658 {
2659 /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
2660 return (0);
2661 }
2662
2663 static void *
2664 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2665 {
2666 return (&cqe->cq_input_data);
2667 }
2668
2669 static void *
2670 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2671 {
2672 return (&cqe->cq_output_data);
2673 }
2674
2675 static void
2676 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2677 unsigned int slot)
2678 {
2679 mcx_cmdq_sign(cqe);
2680
2681 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2682 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2683
2684 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2685 }
2686
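/*
 * Most commands below follow the same shape: build the entry in slot 0
 * of the command queue, post it, poll until ownership returns to
 * software, verify the signature, then check the status byte, roughly:
 *
 *	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
 *	... fill in the input data ...
 *	mcx_cmdq_post(sc, cqe, 0);
 *	error = mcx_cmdq_poll(sc, cqe, 1000);
 *	... check mcx_cmdq_verify() and the output status ...
 */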
2687 static int
2688 mcx_enable_hca(struct mcx_softc *sc)
2689 {
2690 struct mcx_cmdq_entry *cqe;
2691 struct mcx_cmd_enable_hca_in *in;
2692 struct mcx_cmd_enable_hca_out *out;
2693 int error;
2694 uint8_t status;
2695
2696 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2697 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2698
2699 in = mcx_cmdq_in(cqe);
2700 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2701 in->cmd_op_mod = htobe16(0);
2702 in->cmd_function_id = htobe16(0);
2703
2704 mcx_cmdq_post(sc, cqe, 0);
2705
2706 error = mcx_cmdq_poll(sc, cqe, 1000);
2707 if (error != 0) {
2708 printf(", hca enable timeout\n");
2709 return (-1);
2710 }
2711 if (mcx_cmdq_verify(cqe) != 0) {
2712 printf(", hca enable command corrupt\n");
2713 return (-1);
2714 }
2715
2716 status = cqe->cq_output_data[0];
2717 if (status != MCX_CQ_STATUS_OK) {
2718 printf(", hca enable failed (%x)\n", status);
2719 return (-1);
2720 }
2721
2722 return (0);
2723 }
2724
2725 static int
2726 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2727 {
2728 struct mcx_cmdq_entry *cqe;
2729 struct mcx_cmd_teardown_hca_in *in;
2730 struct mcx_cmd_teardown_hca_out *out;
2731 int error;
2732 uint8_t status;
2733
2734 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2735 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2736
2737 in = mcx_cmdq_in(cqe);
2738 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2739 in->cmd_op_mod = htobe16(0);
2740 in->cmd_profile = profile;
2741
2742 mcx_cmdq_post(sc, cqe, 0);
2743
2744 error = mcx_cmdq_poll(sc, cqe, 1000);
2745 if (error != 0) {
2746 printf(", hca teardown timeout\n");
2747 return (-1);
2748 }
2749 if (mcx_cmdq_verify(cqe) != 0) {
2750 printf(", hca teardown command corrupt\n");
2751 return (-1);
2752 }
2753
2754 status = cqe->cq_output_data[0];
2755 if (status != MCX_CQ_STATUS_OK) {
2756 printf(", hca teardown failed (%x)\n", status);
2757 return (-1);
2758 }
2759
2760 return (0);
2761 }
2762
2763 static int
2764 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2765 unsigned int nmb, uint64_t *ptr, uint8_t token)
2766 {
2767 uint8_t *kva;
2768 uint64_t dva;
2769 int i;
2770 int error;
2771
2772 error = mcx_dmamem_alloc(sc, mxm,
2773 nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2774 if (error != 0)
2775 return (error);
2776
2777 mcx_dmamem_zero(mxm);
2778
2779 dva = MCX_DMA_DVA(mxm);
2780 kva = MCX_DMA_KVA(mxm);
2781 for (i = 0; i < nmb; i++) {
2782 struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2783
2784 /* patch the cqe or mbox pointing at this one */
2785 be64enc(ptr, dva);
2786
2787 /* fill in this mbox */
2788 be32enc(&mbox->mb_block_number, i);
2789 mbox->mb_token = token;
2790
2791 /* move to the next one */
2792 ptr = &mbox->mb_next_ptr;
2793
2794 dva += MCX_CMDQ_MAILBOX_SIZE;
2795 kva += MCX_CMDQ_MAILBOX_SIZE;
2796 }
2797
2798 return (0);
2799 }
2800
2801 static uint32_t
2802 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2803 {
2804 uint32_t xor = 0xff;
2805
2806 /* only 3 fields get set, so mix them directly */
2807 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2808 xor = mcx_mix_u32(xor, mb->mb_block_number);
2809 xor = mcx_mix_u8(xor, mb->mb_token);
2810
2811 return (mcx_mix_done(xor));
2812 }
2813
2814 static void
2815 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2816 {
2817 uint8_t *kva;
2818 int i;
2819
2820 kva = MCX_DMA_KVA(mxm);
2821
2822 for (i = 0; i < nmb; i++) {
2823 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2824 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2825 mb->mb_ctrl_signature = sig;
2826 mb->mb_signature = sig ^
2827 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2828
2829 kva += MCX_CMDQ_MAILBOX_SIZE;
2830 }
2831 }
2832
2833 static void
2834 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
2835 {
2836 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
2837 0, MCX_DMA_LEN(mxm), ops);
2838 }
2839
2840 static struct mcx_cmdq_mailbox *
2841 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2842 {
2843 uint8_t *kva;
2844
2845 kva = MCX_DMA_KVA(mxm);
2846 kva += i * MCX_CMDQ_MAILBOX_SIZE;
2847
2848 return ((struct mcx_cmdq_mailbox *)kva);
2849 }
2850
2851 static inline void *
2852 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2853 {
2854 return (&mb->mb_data);
2855 }
2856
2857 static void
2858 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2859 void *b, size_t len)
2860 {
2861 uint8_t *buf = b;
2862 struct mcx_cmdq_mailbox *mb;
2863 int i;
2864
2865 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2866 for (i = 0; i < nmb; i++) {
2867
2868 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2869
2870 if (sizeof(mb->mb_data) >= len)
2871 break;
2872
2873 buf += sizeof(mb->mb_data);
2874 len -= sizeof(mb->mb_data);
2875 mb++;
2876 }
2877 }
2878
2879 static void
2880 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2881 {
2882 uint8_t *buf = b;
2883 struct mcx_cmdq_mailbox *mb;
2884 int i;
2885
2886 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2887 for (i = 0; i < nmb; i++) {
2888 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2889
2890 if (sizeof(mb->mb_data) >= len)
2891 break;
2892
2893 buf += sizeof(mb->mb_data);
2894 len -= sizeof(mb->mb_data);
2895 mb++;
2896 }
2897 }
2898
2899 static void
2900 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
2901 {
2902 mcx_dmamem_free(sc, mxm);
2903 }
2904
2905 #if 0
2906 static void
2907 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
2908 {
2909 unsigned int i;
2910
2911 printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
2912 be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
2913
2914 printf(", idata ");
2915 for (i = 0; i < sizeof(cqe->cq_input_data); i++)
2916 printf("%02x", cqe->cq_input_data[i]);
2917
2918 printf(", odata ");
2919 for (i = 0; i < sizeof(cqe->cq_output_data); i++)
2920 printf("%02x", cqe->cq_output_data[i]);
2921
2922 printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
2923 be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
2924 cqe->cq_token, cqe->cq_signature, cqe->cq_status);
2925 }
2926
2927 static void
2928 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
2929 {
2930 int i, j;
2931 uint8_t *d;
2932
2933 for (i = 0; i < num; i++) {
2934 struct mcx_cmdq_mailbox *mbox;
2935 mbox = mcx_cq_mbox(mboxes, i);
2936
2937 d = mcx_cq_mbox_data(mbox);
2938 for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
2939 if (j != 0 && (j % 16 == 0))
2940 printf("\n");
2941 printf("%.2x ", d[j]);
2942 }
2943 }
2944 }
2945 #endif
2946
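/*
 * ACCESS_REG moves a whole register image through mailboxes. The same
 * mailbox chain is used for both directions (cq_input_ptr is pointed at
 * cq_output_ptr below): the image is copied in, the command runs, and
 * the updated image is copied back out.
 */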
2947 static int
2948 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2949 int len)
2950 {
2951 struct mcx_dmamem mxm;
2952 struct mcx_cmdq_entry *cqe;
2953 struct mcx_cmd_access_reg_in *in;
2954 struct mcx_cmd_access_reg_out *out;
2955 uint8_t token = mcx_cmdq_token(sc);
2956 int error, nmb;
2957
2958 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2959 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2960 token);
2961
2962 in = mcx_cmdq_in(cqe);
2963 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2964 in->cmd_op_mod = htobe16(op);
2965 in->cmd_register_id = htobe16(reg);
2966
2967 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2968 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2969 printf(", unable to allocate access reg mailboxen\n");
2970 return (-1);
2971 }
2972 cqe->cq_input_ptr = cqe->cq_output_ptr;
2973 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2974 mcx_cmdq_mboxes_sign(&mxm, nmb);
2975 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2976
2977 mcx_cmdq_post(sc, cqe, 0);
2978 error = mcx_cmdq_poll(sc, cqe, 1000);
2979 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2980
2981 if (error != 0) {
2982 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2983 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2984 goto free;
2985 }
2986 error = mcx_cmdq_verify(cqe);
2987 if (error != 0) {
2988 printf("%s: access reg (%s %x) reply corrupt\n",
2989 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
2990 reg);
2991 goto free;
2992 }
2993
2994 out = mcx_cmdq_out(cqe);
2995 if (out->cmd_status != MCX_CQ_STATUS_OK) {
2996 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2997 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2998 reg, out->cmd_status, out->cmd_syndrome);
2999 error = -1;
3000 goto free;
3001 }
3002
3003 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3004 free:
3005 mcx_dmamem_free(sc, &mxm);
3006
3007 return (error);
3008 }
3009
3010 static int
3011 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
3012 {
3013 struct mcx_cmd_set_issi_in *in;
3014 struct mcx_cmd_set_issi_out *out;
3015 uint8_t status;
3016
3017 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3018
3019 in = mcx_cmdq_in(cqe);
3020 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3021 in->cmd_op_mod = htobe16(0);
3022 in->cmd_current_issi = htobe16(MCX_ISSI);
3023
3024 mcx_cmdq_post(sc, cqe, slot);
3025 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3026 return (-1);
3027 if (mcx_cmdq_verify(cqe) != 0)
3028 return (-1);
3029
3030 status = cqe->cq_output_data[0];
3031 if (status != MCX_CQ_STATUS_OK)
3032 return (-1);
3033
3034 return (0);
3035 }
3036
3037 static int
3038 mcx_issi(struct mcx_softc *sc)
3039 {
3040 struct mcx_dmamem mxm;
3041 struct mcx_cmdq_entry *cqe;
3042 struct mcx_cmd_query_issi_in *in;
3043 struct mcx_cmd_query_issi_il_out *out;
3044 struct mcx_cmd_query_issi_mb_out *mb;
3045 uint8_t token = mcx_cmdq_token(sc);
3046 uint8_t status;
3047 int error;
3048
3049 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3050 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3051
3052 in = mcx_cmdq_in(cqe);
3053 in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3054 in->cmd_op_mod = htobe16(0);
3055
3056 CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3057 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3058 &cqe->cq_output_ptr, token) != 0) {
3059 printf(", unable to allocate query issi mailbox\n");
3060 return (-1);
3061 }
3062 mcx_cmdq_mboxes_sign(&mxm, 1);
3063
3064 mcx_cmdq_post(sc, cqe, 0);
3065 error = mcx_cmdq_poll(sc, cqe, 1000);
3066 if (error != 0) {
3067 printf(", query issi timeout\n");
3068 goto free;
3069 }
3070 error = mcx_cmdq_verify(cqe);
3071 if (error != 0) {
3072 printf(", query issi reply corrupt\n");
3073 goto free;
3074 }
3075
3076 status = cqe->cq_output_data[0];
3077 switch (status) {
3078 case MCX_CQ_STATUS_OK:
3079 break;
3080 case MCX_CQ_STATUS_BAD_OPCODE:
3081 /* use ISSI 0 */
3082 goto free;
3083 default:
3084 printf(", query issi failed (%x)\n", status);
3085 error = -1;
3086 goto free;
3087 }
3088
3089 out = mcx_cmdq_out(cqe);
3090 if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3091 /* use ISSI 1 */
3092 goto free;
3093 }
3094
3095 /* don't need to read cqe anymore, can be used for SET ISSI */
3096
3097 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3098 CTASSERT(MCX_ISSI < NBBY);
3099 /* the 640-bit mask is big-endian, so ISSI bits 0-7 are in the last byte */
3100 if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3101 /* use ISSI 0 */
3102 goto free;
3103 }
3104
3105 if (mcx_set_issi(sc, cqe, 0) != 0) {
3106 /* ignore the error, just use ISSI 0 */
3107 } else {
3108 /* use ISSI 1 */
3109 }
3110
3111 free:
3112 mcx_cq_mboxes_free(sc, &mxm);
3113 return (error);
3114 }
3115
3116 static int
3117 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3118 uint32_t *npages, uint16_t *func_id)
3119 {
3120 struct mcx_cmdq_entry *cqe;
3121 struct mcx_cmd_query_pages_in *in;
3122 struct mcx_cmd_query_pages_out *out;
3123
3124 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3125 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3126
3127 in = mcx_cmdq_in(cqe);
3128 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
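	/*
	 * "type" arrives already big-endian; the mcx_pages() callers
	 * pass htobe16() constants.
	 */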
3129 in->cmd_op_mod = type;
3130
3131 mcx_cmdq_post(sc, cqe, 0);
3132 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3133 printf(", query pages timeout\n");
3134 return (-1);
3135 }
3136 if (mcx_cmdq_verify(cqe) != 0) {
3137 printf(", query pages reply corrupt\n");
3138 return (-1);
3139 }
3140
3141 out = mcx_cmdq_out(cqe);
3142 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3143 printf(", query pages failed (%x)\n", out->cmd_status);
3144 return (-1);
3145 }
3146
3147 *func_id = out->cmd_func_id;
3148 *npages = be32dec(&out->cmd_num_pages);
3149
3150 return (0);
3151 }
3152
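/*
 * Small iterator over a dmamap's segments, used to walk hardware page
 * memory in MCX_PAGE_SIZE steps while building page address lists.
 */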
3153 struct bus_dma_iter {
3154 bus_dmamap_t i_map;
3155 bus_size_t i_offset;
3156 unsigned int i_index;
3157 };
3158
3159 static void
3160 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3161 {
3162 i->i_map = map;
3163 i->i_offset = 0;
3164 i->i_index = 0;
3165 }
3166
3167 static bus_addr_t
3168 bus_dma_iter_addr(struct bus_dma_iter *i)
3169 {
3170 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3171 }
3172
3173 static void
3174 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3175 {
3176 bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3177 bus_size_t diff;
3178
3179 do {
3180 diff = seg->ds_len - i->i_offset;
3181 if (size < diff)
3182 break;
3183
3184 size -= diff;
3185
3186 seg++;
3187
3188 i->i_offset = 0;
3189 i->i_index++;
3190 } while (size > 0);
3191
3192 i->i_offset += size;
3193 }
3194
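/*
 * Hand pages to the firmware: the MANAGE_PAGES input mailboxes carry a
 * big-endian array of page addresses, at most MCX_CMDQ_MAILBOX_DATASIZE
 * / sizeof(uint64_t) entries per mailbox.
 */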
3195 static int
3196 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3197 {
3198 struct mcx_dmamem mxm;
3199 struct mcx_cmdq_entry *cqe;
3200 struct mcx_cmd_manage_pages_in *in;
3201 struct mcx_cmd_manage_pages_out *out;
3202 unsigned int paslen, nmb, i, j, npages;
3203 struct bus_dma_iter iter;
3204 uint64_t *pas;
3205 uint8_t status;
3206 uint8_t token = mcx_cmdq_token(sc);
3207 int error;
3208
3209 npages = mhm->mhm_npages;
3210
3211 paslen = sizeof(*pas) * npages;
3212 nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3213
3214 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3215 mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3216
3217 in = mcx_cmdq_in(cqe);
3218 in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3219 in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3220 in->cmd_func_id = func_id;
3221 be32enc(&in->cmd_input_num_entries, npages);
3222
3223 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3224 &cqe->cq_input_ptr, token) != 0) {
3225 printf(", unable to allocate manage pages mailboxen\n");
3226 return (-1);
3227 }
3228
3229 bus_dma_iter_init(&iter, mhm->mhm_map);
3230 for (i = 0; i < nmb; i++) {
3231 unsigned int lim;
3232
3233 pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3234 lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3235
3236 for (j = 0; j < lim; j++) {
3237 be64enc(&pas[j], bus_dma_iter_addr(&iter));
3238 bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3239 }
3240
3241 npages -= lim;
3242 }
3243
3244 mcx_cmdq_mboxes_sign(&mxm, nmb);
3245
3246 mcx_cmdq_post(sc, cqe, 0);
3247 error = mcx_cmdq_poll(sc, cqe, 1000);
3248 if (error != 0) {
3249 printf(", manage pages timeout\n");
3250 goto free;
3251 }
3252 error = mcx_cmdq_verify(cqe);
3253 if (error != 0) {
3254 printf(", manage pages reply corrupt\n");
3255 goto free;
3256 }
3257
3258 status = cqe->cq_output_data[0];
3259 if (status != MCX_CQ_STATUS_OK) {
3260 printf(", manage pages failed (%x)\n", status);
3261 error = -1;
3262 goto free;
3263 }
3264
3265 free:
3266 mcx_dmamem_free(sc, &mxm);
3267
3268 return (error);
3269 }
3270
3271 static int
3272 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3273 {
3274 uint32_t npages;
3275 uint16_t func_id;
3276
3277 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3278 /* error printed by mcx_query_pages */
3279 return (-1);
3280 }
3281
3282 if (npages == 0)
3283 return (0);
3284
3285 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3286 printf(", unable to allocate hwmem\n");
3287 return (-1);
3288 }
3289
3290 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3291 printf(", unable to add hwmem\n");
3292 goto free;
3293 }
3294
3295 return (0);
3296
3297 free:
3298 mcx_hwmem_free(sc, mhm);
3299
3300 return (-1);
3301 }
3302
3303 static int
3304 mcx_hca_max_caps(struct mcx_softc *sc)
3305 {
3306 struct mcx_dmamem mxm;
3307 struct mcx_cmdq_entry *cqe;
3308 struct mcx_cmd_query_hca_cap_in *in;
3309 struct mcx_cmd_query_hca_cap_out *out;
3310 struct mcx_cmdq_mailbox *mb;
3311 struct mcx_cap_device *hca;
3312 uint8_t status;
3313 uint8_t token = mcx_cmdq_token(sc);
3314 int error;
3315
3316 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3317 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3318 token);
3319
3320 in = mcx_cmdq_in(cqe);
3321 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3322 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3323 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3324
3325 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3326 &cqe->cq_output_ptr, token) != 0) {
3327 printf(", unable to allocate query hca caps mailboxen\n");
3328 return (-1);
3329 }
3330 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3331 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3332
3333 mcx_cmdq_post(sc, cqe, 0);
3334 error = mcx_cmdq_poll(sc, cqe, 1000);
3335 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3336
3337 if (error != 0) {
3338 printf(", query hca caps timeout\n");
3339 goto free;
3340 }
3341 error = mcx_cmdq_verify(cqe);
3342 if (error != 0) {
3343 printf(", query hca caps reply corrupt\n");
3344 goto free;
3345 }
3346
3347 status = cqe->cq_output_data[0];
3348 if (status != MCX_CQ_STATUS_OK) {
3349 printf(", query hca caps failed (%x)\n", status);
3350 error = -1;
3351 goto free;
3352 }
3353
3354 mb = mcx_cq_mbox(&mxm, 0);
3355 hca = mcx_cq_mbox_data(mb);
3356
3357 if (hca->log_pg_sz > PAGE_SHIFT) {
3358 printf(", minimum system page shift %u is too large\n",
3359 hca->log_pg_sz);
3360 error = -1;
3361 goto free;
3362 }
3363 /*
3364 * blueflame register is split into two buffers, and we must alternate
3365 * between the two of them.
3366 */
3367 sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
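	/*
	 * a sketch of the expected use: the transmit path alternates
	 * halves after each doorbell write, along the lines of
	 *	sc->sc_bf_offset ^= sc->sc_bf_size;
	 */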
3368
3369 free:
3370 mcx_dmamem_free(sc, &mxm);
3371
3372 return (error);
3373 }
3374
3375 static int
3376 mcx_hca_set_caps(struct mcx_softc *sc)
3377 {
3378 struct mcx_dmamem mxm;
3379 struct mcx_cmdq_entry *cqe;
3380 struct mcx_cmd_query_hca_cap_in *in;
3381 struct mcx_cmd_query_hca_cap_out *out;
3382 struct mcx_cmdq_mailbox *mb;
3383 struct mcx_cap_device *hca;
3384 uint8_t status;
3385 uint8_t token = mcx_cmdq_token(sc);
3386 int error;
3387
3388 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3389 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3390 token);
3391
3392 in = mcx_cmdq_in(cqe);
3393 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3394 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3395 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3396
3397 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3398 &cqe->cq_output_ptr, token) != 0) {
3399 printf(", unable to allocate manage pages mailboxen\n");
3400 return (-1);
3401 }
3402 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3403 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3404
3405 mcx_cmdq_post(sc, cqe, 0);
3406 error = mcx_cmdq_poll(sc, cqe, 1000);
3407 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3408
3409 if (error != 0) {
3410 printf(", query hca caps timeout\n");
3411 goto free;
3412 }
3413 error = mcx_cmdq_verify(cqe);
3414 if (error != 0) {
3415 printf(", query hca caps reply corrupt\n");
3416 goto free;
3417 }
3418
3419 status = cqe->cq_output_data[0];
3420 if (status != MCX_CQ_STATUS_OK) {
3421 printf(", query hca caps failed (%x)\n", status);
3422 error = -1;
3423 goto free;
3424 }
3425
3426 mb = mcx_cq_mbox(&mxm, 0);
3427 hca = mcx_cq_mbox_data(mb);
3428
3429 hca->log_pg_sz = PAGE_SHIFT;
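	/*
	 * note that the modified copy is not pushed back to the device
	 * with a set command here; the defaults appear to suffice.
	 */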
3430
3431 free:
3432 mcx_dmamem_free(sc, &mxm);
3433
3434 return (error);
3435 }
3436
3438 static int
3439 mcx_init_hca(struct mcx_softc *sc)
3440 {
3441 struct mcx_cmdq_entry *cqe;
3442 struct mcx_cmd_init_hca_in *in;
3443 struct mcx_cmd_init_hca_out *out;
3444 int error;
3445 uint8_t status;
3446
3447 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3448 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3449
3450 in = mcx_cmdq_in(cqe);
3451 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3452 in->cmd_op_mod = htobe16(0);
3453
3454 mcx_cmdq_post(sc, cqe, 0);
3455
3456 error = mcx_cmdq_poll(sc, cqe, 1000);
3457 if (error != 0) {
3458 printf(", hca init timeout\n");
3459 return (-1);
3460 }
3461 if (mcx_cmdq_verify(cqe) != 0) {
3462 printf(", hca init command corrupt\n");
3463 return (-1);
3464 }
3465
3466 status = cqe->cq_output_data[0];
3467 if (status != MCX_CQ_STATUS_OK) {
3468 printf(", hca init failed (%x)\n", status);
3469 return (-1);
3470 }
3471
3472 return (0);
3473 }
3474
3475 static int
3476 mcx_set_driver_version(struct mcx_softc *sc)
3477 {
3478 struct mcx_dmamem mxm;
3479 struct mcx_cmdq_entry *cqe;
3480 struct mcx_cmd_set_driver_version_in *in;
3481 struct mcx_cmd_set_driver_version_out *out;
3482 int error;
3483 int token;
3484 uint8_t status;
3485
3486 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3487 token = mcx_cmdq_token(sc);
3488 mcx_cmdq_init(sc, cqe, sizeof(*in) +
3489 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3490
3491 in = mcx_cmdq_in(cqe);
3492 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3493 in->cmd_op_mod = htobe16(0);
3494
3495 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3496 &cqe->cq_input_ptr, token) != 0) {
3497 printf(", unable to allocate set driver version mailboxen\n");
3498 return (-1);
3499 }
3500 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3501 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3502
3503 mcx_cmdq_mboxes_sign(&mxm, 1);
3504 mcx_cmdq_post(sc, cqe, 0);
3505
3506 error = mcx_cmdq_poll(sc, cqe, 1000);
3507 if (error != 0) {
3508 printf(", set driver version timeout\n");
3509 goto free;
3510 }
3511 if (mcx_cmdq_verify(cqe) != 0) {
3512 printf(", set driver version command corrupt\n");
3513 goto free;
3514 }
3515
3516 status = cqe->cq_output_data[0];
3517 if (status != MCX_CQ_STATUS_OK) {
3518 printf(", set driver version failed (%x)\n", status);
3519 error = -1;
3520 goto free;
3521 }
3522
3523 free:
3524 mcx_dmamem_free(sc, &mxm);
3525
3526 return (error);
3527 }
3528
3529 static int
3530 mcx_iff(struct mcx_softc *sc)
3531 {
3532 struct ifnet *ifp = &sc->sc_ec.ec_if;
3533 struct mcx_dmamem mxm;
3534 struct mcx_cmdq_entry *cqe;
3535 struct mcx_cmd_modify_nic_vport_context_in *in;
3536 struct mcx_cmd_modify_nic_vport_context_out *out;
3537 struct mcx_nic_vport_ctx *ctx;
3538 int error;
3539 int token;
3540 int insize;
3541
3542 /* enable or disable the promisc flow */
3543 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3544 if (sc->sc_promisc_flow_enabled == 0) {
3545 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3546 0, NULL);
3547 sc->sc_promisc_flow_enabled = 1;
3548 }
3549 } else if (sc->sc_promisc_flow_enabled != 0) {
3550 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3551 sc->sc_promisc_flow_enabled = 0;
3552 }
3553
3554 /* enable or disable the all-multicast flow */
3555 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3556 if (sc->sc_allmulti_flow_enabled == 0) {
3557 uint8_t mcast[ETHER_ADDR_LEN];
3558
3559 memset(mcast, 0, sizeof(mcast));
3560 mcast[0] = 0x01;
3561 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3562 0, mcast);
3563 sc->sc_allmulti_flow_enabled = 1;
3564 }
3565 } else if (sc->sc_allmulti_flow_enabled != 0) {
3566 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3567 sc->sc_allmulti_flow_enabled = 0;
3568 }
3569
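	/*
	 * the nic vport context sits at offset 240 in the input
	 * mailbox, so reserve that much space ahead of it
	 */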
3570 insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3571
3572 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3573 token = mcx_cmdq_token(sc);
3574 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3575
3576 in = mcx_cmdq_in(cqe);
3577 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3578 in->cmd_op_mod = htobe16(0);
3579 in->cmd_field_select = htobe32(
3580 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3581 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3582
3583 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3584 printf(", unable to allocate modify nic vport context mailboxen\n");
3585 return (-1);
3586 }
3587 ctx = (struct mcx_nic_vport_ctx *)
3588 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3589 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3590 /*
3591 * always leave promisc-all enabled on the vport since we can't give it
3592 * a vlan list, and we're already doing multicast filtering in the flow
3593 * table.
3594 */
3595 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3596
3597 mcx_cmdq_mboxes_sign(&mxm, 1);
3598 mcx_cmdq_post(sc, cqe, 0);
3599
3600 error = mcx_cmdq_poll(sc, cqe, 1000);
3601 if (error != 0) {
3602 printf(", modify nic vport context timeout\n");
3603 goto free;
3604 }
3605 if (mcx_cmdq_verify(cqe) != 0) {
3606 printf(", modify nic vport context command corrupt\n");
3607 goto free;
3608 }
3609
3610 out = mcx_cmdq_out(cqe);
3611 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3612 printf(", modify nic vport context failed (%x, %x)\n",
3613 out->cmd_status, out->cmd_syndrome);
3614 error = -1;
3615 goto free;
3616 }
3617
3618 free:
3619 mcx_dmamem_free(sc, &mxm);
3620
3621 return (error);
3622 }
3623
3624 static int
3625 mcx_alloc_uar(struct mcx_softc *sc)
3626 {
3627 struct mcx_cmdq_entry *cqe;
3628 struct mcx_cmd_alloc_uar_in *in;
3629 struct mcx_cmd_alloc_uar_out *out;
3630 int error;
3631
3632 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3633 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3634
3635 in = mcx_cmdq_in(cqe);
3636 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3637 in->cmd_op_mod = htobe16(0);
3638
3639 mcx_cmdq_post(sc, cqe, 0);
3640
3641 error = mcx_cmdq_poll(sc, cqe, 1000);
3642 if (error != 0) {
3643 printf(", alloc uar timeout\n");
3644 return (-1);
3645 }
3646 if (mcx_cmdq_verify(cqe) != 0) {
3647 printf(", alloc uar command corrupt\n");
3648 return (-1);
3649 }
3650
3651 out = mcx_cmdq_out(cqe);
3652 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3653 printf(", alloc uar failed (%x)\n", out->cmd_status);
3654 return (-1);
3655 }
3656
3657 sc->sc_uar = be32toh(out->cmd_uar);
3658
3659 return (0);
3660 }
3661
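/*
 * Create the event queue: allocate whole pages for the ring, preset the
 * ownership bit in every entry, and pass the page addresses to the
 * firmware after the EQ context in the input mailbox.
 */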
3662 static int
3663 mcx_create_eq(struct mcx_softc *sc)
3664 {
3665 struct mcx_cmdq_entry *cqe;
3666 struct mcx_dmamem mxm;
3667 struct mcx_cmd_create_eq_in *in;
3668 struct mcx_cmd_create_eq_mb_in *mbin;
3669 struct mcx_cmd_create_eq_out *out;
3670 struct mcx_eq_entry *eqe;
3671 int error;
3672 uint64_t *pas;
3673 int insize, npages, paslen, i, token;
3674
3675 sc->sc_eq_cons = 0;
3676
3677 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3678 MCX_PAGE_SIZE);
3679 paslen = npages * sizeof(*pas);
3680 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3681
3682 if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3683 MCX_PAGE_SIZE) != 0) {
3684 printf(", unable to allocate event queue memory\n");
3685 return (-1);
3686 }
3687
3688 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3689 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3690 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3691 }
3692
3693 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3694 token = mcx_cmdq_token(sc);
3695 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3696
3697 in = mcx_cmdq_in(cqe);
3698 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3699 in->cmd_op_mod = htobe16(0);
3700
3701 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3702 &cqe->cq_input_ptr, token) != 0) {
3703 printf(", unable to allocate create eq mailboxen\n");
3704 return (-1);
3705 }
3706 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3707 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3708 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3709 mbin->cmd_event_bitmask = htobe64(
3710 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3711 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3712 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3713 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3714
3715 /* physical addresses follow the mailbox in data */
3716 pas = (uint64_t *)(mbin + 1);
3717 for (i = 0; i < npages; i++) {
3718 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
3719 (i * MCX_PAGE_SIZE));
3720 }
3721 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3722 mcx_cmdq_post(sc, cqe, 0);
3723
3724 error = mcx_cmdq_poll(sc, cqe, 1000);
3725 if (error != 0) {
3726 printf(", create eq timeout\n");
3727 goto free;
3728 }
3729 if (mcx_cmdq_verify(cqe) != 0) {
3730 printf(", create eq command corrupt\n");
3731 goto free;
3732 }
3733
3734 out = mcx_cmdq_out(cqe);
3735 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3736 printf(", create eq failed (%x, %x)\n", out->cmd_status,
3737 be32toh(out->cmd_syndrome));
3738 error = -1;
3739 goto free;
3740 }
3741
3742 sc->sc_eqn = be32toh(out->cmd_eqn);
3743 mcx_arm_eq(sc);
3744 free:
3745 mcx_dmamem_free(sc, &mxm);
3746 return (error);
3747 }
3748
3749 static int
3750 mcx_alloc_pd(struct mcx_softc *sc)
3751 {
3752 struct mcx_cmdq_entry *cqe;
3753 struct mcx_cmd_alloc_pd_in *in;
3754 struct mcx_cmd_alloc_pd_out *out;
3755 int error;
3756
3757 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3758 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3759
3760 in = mcx_cmdq_in(cqe);
3761 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3762 in->cmd_op_mod = htobe16(0);
3763
3764 mcx_cmdq_post(sc, cqe, 0);
3765
3766 error = mcx_cmdq_poll(sc, cqe, 1000);
3767 if (error != 0) {
3768 printf(", alloc pd timeout\n");
3769 return (-1);
3770 }
3771 if (mcx_cmdq_verify(cqe) != 0) {
3772 printf(", alloc pd command corrupt\n");
3773 return (-1);
3774 }
3775
3776 out = mcx_cmdq_out(cqe);
3777 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3778 printf(", alloc pd failed (%x)\n", out->cmd_status);
3779 return (-1);
3780 }
3781
3782 sc->sc_pd = be32toh(out->cmd_pd);
3783 return (0);
3784 }
3785
3786 static int
3787 mcx_alloc_tdomain(struct mcx_softc *sc)
3788 {
3789 struct mcx_cmdq_entry *cqe;
3790 struct mcx_cmd_alloc_td_in *in;
3791 struct mcx_cmd_alloc_td_out *out;
3792 int error;
3793
3794 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3795 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3796
3797 in = mcx_cmdq_in(cqe);
3798 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3799 in->cmd_op_mod = htobe16(0);
3800
3801 mcx_cmdq_post(sc, cqe, 0);
3802
3803 error = mcx_cmdq_poll(sc, cqe, 1000);
3804 if (error != 0) {
3805 printf(", alloc transport domain timeout\n");
3806 return (-1);
3807 }
3808 if (mcx_cmdq_verify(cqe) != 0) {
3809 printf(", alloc transport domain command corrupt\n");
3810 return (-1);
3811 }
3812
3813 out = mcx_cmdq_out(cqe);
3814 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3815 printf(", alloc transport domain failed (%x)\n",
3816 out->cmd_status);
3817 return (-1);
3818 }
3819
3820 sc->sc_tdomain = be32toh(out->cmd_tdomain);
3821 return (0);
3822 }
3823
3824 static int
3825 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3826 {
3827 struct mcx_dmamem mxm;
3828 struct mcx_cmdq_entry *cqe;
3829 struct mcx_cmd_query_nic_vport_context_in *in;
3830 struct mcx_cmd_query_nic_vport_context_out *out;
3831 struct mcx_nic_vport_ctx *ctx;
3832 uint8_t *addr;
3833 int error, token, i;
3834
3835 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3836 token = mcx_cmdq_token(sc);
3837 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3838
3839 in = mcx_cmdq_in(cqe);
3840 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3841 in->cmd_op_mod = htobe16(0);
3842 in->cmd_allowed_list_type = 0;
3843
3844 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3845 printf(", unable to allocate query nic vport context mailboxen\n");
3846 return (-1);
3847 }
3848 mcx_cmdq_mboxes_sign(&mxm, 1);
3849 mcx_cmdq_post(sc, cqe, 0);
3850
3851 error = mcx_cmdq_poll(sc, cqe, 1000);
3852 if (error != 0) {
3853 printf(", query nic vport context timeout\n");
3854 goto free;
3855 }
3856 if (mcx_cmdq_verify(cqe) != 0) {
3857 printf(", query nic vport context command corrupt\n");
3858 goto free;
3859 }
3860
3861 out = mcx_cmdq_out(cqe);
3862 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3863 printf(", query nic vport context failed (%x, %x)\n",
3864 out->cmd_status, out->cmd_syndrome);
3865 error = -1;
3866 goto free;
3867 }
3868
3869 ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3870 addr = (uint8_t *)&ctx->vp_perm_addr;
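	/* the MAC is in the low six bytes of the 8-byte vp_perm_addr field */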
3871 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3872 enaddr[i] = addr[i + 2];
3873 }
3874 free:
3875 mcx_dmamem_free(sc, &mxm);
3876
3877 return (error);
3878 }
3879
3880 static int
3881 mcx_query_special_contexts(struct mcx_softc *sc)
3882 {
3883 struct mcx_cmdq_entry *cqe;
3884 struct mcx_cmd_query_special_ctx_in *in;
3885 struct mcx_cmd_query_special_ctx_out *out;
3886 int error;
3887
3888 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3889 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3890
3891 in = mcx_cmdq_in(cqe);
3892 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3893 in->cmd_op_mod = htobe16(0);
3894
3895 mcx_cmdq_post(sc, cqe, 0);
3896
3897 error = mcx_cmdq_poll(sc, cqe, 1000);
3898 if (error != 0) {
3899 printf(", query special contexts timeout\n");
3900 return (-1);
3901 }
3902 if (mcx_cmdq_verify(cqe) != 0) {
3903 printf(", query special contexts command corrupt\n");
3904 return (-1);
3905 }
3906
3907 out = mcx_cmdq_out(cqe);
3908 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3909 printf(", query special contexts failed (%x)\n",
3910 out->cmd_status);
3911 return (-1);
3912 }
3913
3914 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3915 return (0);
3916 }
3917
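/*
 * The port MTU lives in the PMTU register: read it to learn the
 * hardware maximum, clamp the requested MTU against that, then write
 * the result back as the admin MTU.  sc_hardmtu is later used to
 * size receive buffers in mcx_rx_fill().
 */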
3918 static int
3919 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3920 {
3921 struct mcx_reg_pmtu pmtu;
3922 int error;
3923
3924 /* read max mtu */
3925 memset(&pmtu, 0, sizeof(pmtu));
3926 pmtu.rp_local_port = 1;
3927 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3928 sizeof(pmtu));
3929 if (error != 0) {
3930 printf(", unable to get port MTU\n");
3931 return error;
3932 }
3933
3934 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3935 pmtu.rp_admin_mtu = htobe16(mtu);
3936 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3937 sizeof(pmtu));
3938 if (error != 0) {
3939 printf(", unable to set port MTU\n");
3940 return error;
3941 }
3942
3943 sc->sc_hardmtu = mtu;
3944 return 0;
3945 }
3946
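/*
 * Create a completion queue.  The queue memory is handed to the
 * hardware as a list of page addresses following the mailbox data,
 * and every CQE starts out with the owner flag set so the whole ring
 * initially belongs to the hardware; mcx_next_cq_entry() explains
 * how ownership flips as the ring wraps.  Each CQ gets its own
 * doorbell record, MCX_CQ_DOORBELL_SIZE bytes apart in the doorbell
 * page.
 */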
3947 static int
3948 mcx_create_cq(struct mcx_softc *sc, int eqn)
3949 {
3950 struct mcx_cmdq_entry *cmde;
3951 struct mcx_cq_entry *cqe;
3952 struct mcx_cq *cq;
3953 struct mcx_dmamem mxm;
3954 struct mcx_cmd_create_cq_in *in;
3955 struct mcx_cmd_create_cq_mb_in *mbin;
3956 struct mcx_cmd_create_cq_out *out;
3957 int error;
3958 uint64_t *pas;
3959 int insize, npages, paslen, i, token;
3960
3961 if (sc->sc_num_cq >= MCX_MAX_CQS) {
3962 printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3963 return (-1);
3964 }
3965 cq = &sc->sc_cq[sc->sc_num_cq];
3966
3967 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3968 MCX_PAGE_SIZE);
3969 paslen = npages * sizeof(*pas);
3970 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3971
3972 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3973 MCX_PAGE_SIZE) != 0) {
3974 printf("%s: unable to allocate completion queue memory\n",
3975 DEVNAME(sc));
3976 return (-1);
3977 }
3978 cqe = MCX_DMA_KVA(&cq->cq_mem);
3979 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
3980 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
3981 }
3982
3983 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3984 token = mcx_cmdq_token(sc);
3985 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
3986
3987 in = mcx_cmdq_in(cmde);
3988 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
3989 in->cmd_op_mod = htobe16(0);
3990
        if (mcx_cmdq_mboxes_alloc(sc, &mxm,
            howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
            &cmde->cq_input_ptr, token) != 0) {
3993 printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
3994 error = -1;
3995 goto free;
3996 }
3997 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3998 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
3999 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
4000 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4001 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4002 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4003 MCX_CQ_MOD_COUNTER);
4004 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4005 MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4006 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4007
4008 /* physical addresses follow the mailbox in data */
4009 pas = (uint64_t *)(mbin + 1);
4010 for (i = 0; i < npages; i++) {
4011 pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
4012 }
4013 mcx_cmdq_post(sc, cmde, 0);
4014
4015 error = mcx_cmdq_poll(sc, cmde, 1000);
4016 if (error != 0) {
4017 printf("%s: create cq timeout\n", DEVNAME(sc));
4018 goto free;
4019 }
        if (mcx_cmdq_verify(cmde) != 0) {
                printf("%s: create cq command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4024
4025 out = mcx_cmdq_out(cmde);
4026 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4027 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4028 out->cmd_status, be32toh(out->cmd_syndrome));
4029 error = -1;
4030 goto free;
4031 }
4032
4033 cq->cq_n = be32toh(out->cmd_cqn);
4034 cq->cq_cons = 0;
4035 cq->cq_count = 0;
4036 cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4037 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4038 mcx_arm_cq(sc, cq);
4039 sc->sc_num_cq++;
4040
4041 free:
4042 mcx_dmamem_free(sc, &mxm);
4043 return (error);
4044 }
4045
4046 static int
4047 mcx_destroy_cq(struct mcx_softc *sc, int index)
4048 {
4049 struct mcx_cmdq_entry *cqe;
4050 struct mcx_cmd_destroy_cq_in *in;
4051 struct mcx_cmd_destroy_cq_out *out;
4052 int error;
4053 int token;
4054
4055 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4056 token = mcx_cmdq_token(sc);
4057 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4058
4059 in = mcx_cmdq_in(cqe);
4060 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4061 in->cmd_op_mod = htobe16(0);
4062 in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4063
4064 mcx_cmdq_post(sc, cqe, 0);
4065 error = mcx_cmdq_poll(sc, cqe, 1000);
4066 if (error != 0) {
4067 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4068 return error;
4069 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
                return -1;
        }
4074
4075 out = mcx_cmdq_out(cqe);
4076 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4077 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4078 out->cmd_status, be32toh(out->cmd_syndrome));
4079 return -1;
4080 }
4081
4082 sc->sc_cq[index].cq_n = 0;
4083 mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4084 sc->sc_cq[index].cq_cons = 0;
4085 sc->sc_cq[index].cq_count = 0;
4086 return 0;
4087 }
4088
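/*
 * Create the receive queue: a cyclic work queue of 16-byte entries
 * (wq_log_stride 4) completing into the given CQ, with VLAN
 * stripping disabled.  The RQ context starts 0x10 bytes into the
 * mailbox data; the page address list follows it.
 */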
4089 static int
4090 mcx_create_rq(struct mcx_softc *sc, int cqn)
4091 {
4092 struct mcx_cmdq_entry *cqe;
4093 struct mcx_dmamem mxm;
4094 struct mcx_cmd_create_rq_in *in;
4095 struct mcx_cmd_create_rq_out *out;
4096 struct mcx_rq_ctx *mbin;
4097 int error;
4098 uint64_t *pas;
4099 uint8_t *doorbell;
4100 int insize, npages, paslen, i, token;
4101
4102 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4103 MCX_PAGE_SIZE);
4104 paslen = npages * sizeof(*pas);
4105 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4106
4107 if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4108 MCX_PAGE_SIZE) != 0) {
4109 printf("%s: unable to allocate receive queue memory\n",
4110 DEVNAME(sc));
4111 return (-1);
4112 }
4113
4114 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4115 token = mcx_cmdq_token(sc);
4116 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4117
4118 in = mcx_cmdq_in(cqe);
4119 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4120 in->cmd_op_mod = htobe16(0);
4121
        if (mcx_cmdq_mboxes_alloc(sc, &mxm,
            howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
            &cqe->cq_input_ptr, token) != 0) {
4124 printf("%s: unable to allocate create rq mailboxen\n",
4125 DEVNAME(sc));
4126 error = -1;
4127 goto free;
4128 }
        mbin = (struct mcx_rq_ctx *)
            (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4130 mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4131 mbin->rq_cqn = htobe32(cqn);
4132 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4133 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4134 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4135 MCX_RQ_DOORBELL_OFFSET);
4136 mbin->rq_wq.wq_log_stride = htobe16(4);
4137 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4138
4139 /* physical addresses follow the mailbox in data */
4140 pas = (uint64_t *)(mbin + 1);
4141 for (i = 0; i < npages; i++) {
4142 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
4143 (i * MCX_PAGE_SIZE));
4144 }
4145 mcx_cmdq_post(sc, cqe, 0);
4146
4147 error = mcx_cmdq_poll(sc, cqe, 1000);
4148 if (error != 0) {
4149 printf("%s: create rq timeout\n", DEVNAME(sc));
4150 goto free;
4151 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create rq command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4156
4157 out = mcx_cmdq_out(cqe);
4158 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4159 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4160 out->cmd_status, be32toh(out->cmd_syndrome));
4161 error = -1;
4162 goto free;
4163 }
4164
4165 sc->sc_rqn = be32toh(out->cmd_rqn);
4166
4167 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4168 sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4169
4170 free:
4171 mcx_dmamem_free(sc, &mxm);
4172 return (error);
4173 }
4174
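/*
 * Queues are created in the RST state.  MODIFY_RQ packs the current
 * state into the top nibble of cmd_rq_state next to the RQ number,
 * and the mailbox carries the new state (RDY).  The send queue goes
 * through the same transition in mcx_ready_sq().
 */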
4175 static int
4176 mcx_ready_rq(struct mcx_softc *sc)
4177 {
4178 struct mcx_cmdq_entry *cqe;
4179 struct mcx_dmamem mxm;
4180 struct mcx_cmd_modify_rq_in *in;
4181 struct mcx_cmd_modify_rq_mb_in *mbin;
4182 struct mcx_cmd_modify_rq_out *out;
4183 int error;
4184 int token;
4185
4186 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4187 token = mcx_cmdq_token(sc);
4188 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4189
4190 in = mcx_cmdq_in(cqe);
4191 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4192 in->cmd_op_mod = htobe16(0);
4193 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4194
4195 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4196 printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4197 return (-1);
4198 }
4199 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4200 mbin->cmd_rq_ctx.rq_flags = htobe32(
4201 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4202
4203 mcx_cmdq_mboxes_sign(&mxm, 1);
4204 mcx_cmdq_post(sc, cqe, 0);
4205 error = mcx_cmdq_poll(sc, cqe, 1000);
4206 if (error != 0) {
4207 printf("%s: modify rq timeout\n", DEVNAME(sc));
4208 goto free;
4209 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: modify rq command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4214
4215 out = mcx_cmdq_out(cqe);
4216 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4217 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4218 out->cmd_status, be32toh(out->cmd_syndrome));
4219 error = -1;
4220 goto free;
4221 }
4222
4223 free:
4224 mcx_dmamem_free(sc, &mxm);
4225 return (error);
4226 }
4227
4228 static int
4229 mcx_destroy_rq(struct mcx_softc *sc)
4230 {
4231 struct mcx_cmdq_entry *cqe;
4232 struct mcx_cmd_destroy_rq_in *in;
4233 struct mcx_cmd_destroy_rq_out *out;
4234 int error;
4235 int token;
4236
4237 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4238 token = mcx_cmdq_token(sc);
4239 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4240
4241 in = mcx_cmdq_in(cqe);
4242 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4243 in->cmd_op_mod = htobe16(0);
4244 in->cmd_rqn = htobe32(sc->sc_rqn);
4245
4246 mcx_cmdq_post(sc, cqe, 0);
4247 error = mcx_cmdq_poll(sc, cqe, 1000);
4248 if (error != 0) {
4249 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4250 return error;
4251 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
                return -1;
        }
4256
4257 out = mcx_cmdq_out(cqe);
4258 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4259 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4260 out->cmd_status, be32toh(out->cmd_syndrome));
4261 return -1;
4262 }
4263
4264 sc->sc_rqn = 0;
4265 return 0;
4266 }
4267
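/*
 * Create a TIR (transport interface receive), the object flow table
 * entries forward packets to.  disp_type is left at zero so packets
 * go straight to the inline RQ; no RSS indirection is set up.
 */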
4268 static int
4269 mcx_create_tir(struct mcx_softc *sc)
4270 {
4271 struct mcx_cmdq_entry *cqe;
4272 struct mcx_dmamem mxm;
4273 struct mcx_cmd_create_tir_in *in;
4274 struct mcx_cmd_create_tir_mb_in *mbin;
4275 struct mcx_cmd_create_tir_out *out;
4276 int error;
4277 int token;
4278
4279 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4280 token = mcx_cmdq_token(sc);
4281 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4282
4283 in = mcx_cmdq_in(cqe);
4284 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4285 in->cmd_op_mod = htobe16(0);
4286
4287 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4288 printf("%s: unable to allocate create tir mailbox\n",
4289 DEVNAME(sc));
4290 return (-1);
4291 }
4292 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4293 /* leave disp_type = 0, so packets get sent to the inline rqn */
4294 mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4295 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4296
4297 mcx_cmdq_post(sc, cqe, 0);
4298 error = mcx_cmdq_poll(sc, cqe, 1000);
4299 if (error != 0) {
4300 printf("%s: create tir timeout\n", DEVNAME(sc));
4301 goto free;
4302 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create tir command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4307
4308 out = mcx_cmdq_out(cqe);
4309 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4310 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4311 out->cmd_status, be32toh(out->cmd_syndrome));
4312 error = -1;
4313 goto free;
4314 }
4315
4316 sc->sc_tirn = be32toh(out->cmd_tirn);
4317 free:
4318 mcx_dmamem_free(sc, &mxm);
4319 return (error);
4320 }
4321
4322 static int
4323 mcx_destroy_tir(struct mcx_softc *sc)
4324 {
4325 struct mcx_cmdq_entry *cqe;
4326 struct mcx_cmd_destroy_tir_in *in;
4327 struct mcx_cmd_destroy_tir_out *out;
4328 int error;
4329 int token;
4330
4331 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4332 token = mcx_cmdq_token(sc);
4333 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4334
4335 in = mcx_cmdq_in(cqe);
4336 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4337 in->cmd_op_mod = htobe16(0);
4338 in->cmd_tirn = htobe32(sc->sc_tirn);
4339
4340 mcx_cmdq_post(sc, cqe, 0);
4341 error = mcx_cmdq_poll(sc, cqe, 1000);
4342 if (error != 0) {
4343 printf("%s: destroy tir timeout\n", DEVNAME(sc));
4344 return error;
4345 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
                return -1;
        }
4350
4351 out = mcx_cmdq_out(cqe);
4352 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4353 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4354 out->cmd_status, be32toh(out->cmd_syndrome));
4355 return -1;
4356 }
4357
4358 sc->sc_tirn = 0;
4359 return 0;
4360 }
4361
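/*
 * Create the send queue.  Like the RQ, its backing pages are listed
 * after the context in the mailbox data.  sc_tx_doorbell points
 * 4 bytes past MCX_SQ_DOORBELL_OFFSET because, per the doorbell
 * record layout, the receive counter occupies the first 32-bit word
 * and the send counter the second.
 */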
4362 static int
4363 mcx_create_sq(struct mcx_softc *sc, int cqn)
4364 {
4365 struct mcx_cmdq_entry *cqe;
4366 struct mcx_dmamem mxm;
4367 struct mcx_cmd_create_sq_in *in;
4368 struct mcx_sq_ctx *mbin;
4369 struct mcx_cmd_create_sq_out *out;
4370 int error;
4371 uint64_t *pas;
4372 uint8_t *doorbell;
4373 int insize, npages, paslen, i, token;
4374
4375 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4376 MCX_PAGE_SIZE);
4377 paslen = npages * sizeof(*pas);
        insize = 0x10 + sizeof(struct mcx_sq_ctx) + paslen;
4379
4380 if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4381 MCX_PAGE_SIZE) != 0) {
4382 printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4383 return (-1);
4384 }
4385
4386 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4387 token = mcx_cmdq_token(sc);
        mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4390
4391 in = mcx_cmdq_in(cqe);
4392 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4393 in->cmd_op_mod = htobe16(0);
4394
        if (mcx_cmdq_mboxes_alloc(sc, &mxm,
            howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
            &cqe->cq_input_ptr, token) != 0) {
4397 printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4398 error = -1;
4399 goto free;
4400 }
        mbin = (struct mcx_sq_ctx *)
            (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4402 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4403 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4404 mbin->sq_cqn = htobe32(cqn);
4405 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4406 mbin->sq_tis_num = htobe32(sc->sc_tisn);
4407 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4408 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4409 mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4410 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4411 MCX_SQ_DOORBELL_OFFSET);
4412 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4413 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4414
4415 /* physical addresses follow the mailbox in data */
4416 pas = (uint64_t *)(mbin + 1);
4417 for (i = 0; i < npages; i++) {
4418 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4419 (i * MCX_PAGE_SIZE));
4420 }
4421 mcx_cmdq_post(sc, cqe, 0);
4422
4423 error = mcx_cmdq_poll(sc, cqe, 1000);
4424 if (error != 0) {
4425 printf("%s: create sq timeout\n", DEVNAME(sc));
4426 goto free;
4427 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create sq command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4432
4433 out = mcx_cmdq_out(cqe);
4434 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4435 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4436 out->cmd_status, be32toh(out->cmd_syndrome));
4437 error = -1;
4438 goto free;
4439 }
4440
4441 sc->sc_sqn = be32toh(out->cmd_sqn);
4442
4443 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4444 sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4445 free:
4446 mcx_dmamem_free(sc, &mxm);
4447 return (error);
4448 }
4449
4450 static int
4451 mcx_destroy_sq(struct mcx_softc *sc)
4452 {
4453 struct mcx_cmdq_entry *cqe;
4454 struct mcx_cmd_destroy_sq_in *in;
4455 struct mcx_cmd_destroy_sq_out *out;
4456 int error;
4457 int token;
4458
4459 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4460 token = mcx_cmdq_token(sc);
4461 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4462
4463 in = mcx_cmdq_in(cqe);
4464 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4465 in->cmd_op_mod = htobe16(0);
4466 in->cmd_sqn = htobe32(sc->sc_sqn);
4467
4468 mcx_cmdq_post(sc, cqe, 0);
4469 error = mcx_cmdq_poll(sc, cqe, 1000);
4470 if (error != 0) {
4471 printf("%s: destroy sq timeout\n", DEVNAME(sc));
4472 return error;
4473 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
                return -1;
        }
4478
4479 out = mcx_cmdq_out(cqe);
4480 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4481 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4482 out->cmd_status, be32toh(out->cmd_syndrome));
4483 return -1;
4484 }
4485
4486 sc->sc_sqn = 0;
4487 return 0;
4488 }
4489
4490 static int
4491 mcx_ready_sq(struct mcx_softc *sc)
4492 {
4493 struct mcx_cmdq_entry *cqe;
4494 struct mcx_dmamem mxm;
4495 struct mcx_cmd_modify_sq_in *in;
4496 struct mcx_cmd_modify_sq_mb_in *mbin;
4497 struct mcx_cmd_modify_sq_out *out;
4498 int error;
4499 int token;
4500
4501 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4502 token = mcx_cmdq_token(sc);
4503 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4504
4505 in = mcx_cmdq_in(cqe);
4506 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4507 in->cmd_op_mod = htobe16(0);
4508 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4509
4510 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4511 printf("%s: unable to allocate modify sq mailbox\n",
4512 DEVNAME(sc));
4513 return (-1);
4514 }
4515 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4516 mbin->cmd_sq_ctx.sq_flags = htobe32(
4517 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4518
4519 mcx_cmdq_mboxes_sign(&mxm, 1);
4520 mcx_cmdq_post(sc, cqe, 0);
4521 error = mcx_cmdq_poll(sc, cqe, 1000);
4522 if (error != 0) {
4523 printf("%s: modify sq timeout\n", DEVNAME(sc));
4524 goto free;
4525 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: modify sq command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4530
4531 out = mcx_cmdq_out(cqe);
4532 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4533 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4534 out->cmd_status, be32toh(out->cmd_syndrome));
4535 error = -1;
4536 goto free;
4537 }
4538
4539 free:
4540 mcx_dmamem_free(sc, &mxm);
4541 return (error);
4542 }
4543
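/*
 * Create a TIS (transport interface send), which ties the send
 * queue to the transport domain allocated earlier; mcx_create_sq()
 * references it through sq_tis_num.
 */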
4544 static int
4545 mcx_create_tis(struct mcx_softc *sc)
4546 {
4547 struct mcx_cmdq_entry *cqe;
4548 struct mcx_dmamem mxm;
4549 struct mcx_cmd_create_tis_in *in;
4550 struct mcx_cmd_create_tis_mb_in *mbin;
4551 struct mcx_cmd_create_tis_out *out;
4552 int error;
4553 int token;
4554
4555 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4556 token = mcx_cmdq_token(sc);
4557 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4558
4559 in = mcx_cmdq_in(cqe);
4560 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4561 in->cmd_op_mod = htobe16(0);
4562
4563 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4564 printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4565 return (-1);
4566 }
4567 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4568 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4569
4570 mcx_cmdq_mboxes_sign(&mxm, 1);
4571 mcx_cmdq_post(sc, cqe, 0);
4572 error = mcx_cmdq_poll(sc, cqe, 1000);
4573 if (error != 0) {
4574 printf("%s: create tis timeout\n", DEVNAME(sc));
4575 goto free;
4576 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create tis command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4581
4582 out = mcx_cmdq_out(cqe);
4583 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4584 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4585 out->cmd_status, be32toh(out->cmd_syndrome));
4586 error = -1;
4587 goto free;
4588 }
4589
4590 sc->sc_tisn = be32toh(out->cmd_tisn);
4591 free:
4592 mcx_dmamem_free(sc, &mxm);
4593 return (error);
4594 }
4595
4596 static int
4597 mcx_destroy_tis(struct mcx_softc *sc)
4598 {
4599 struct mcx_cmdq_entry *cqe;
4600 struct mcx_cmd_destroy_tis_in *in;
4601 struct mcx_cmd_destroy_tis_out *out;
4602 int error;
4603 int token;
4604
4605 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4606 token = mcx_cmdq_token(sc);
4607 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4608
4609 in = mcx_cmdq_in(cqe);
4610 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4611 in->cmd_op_mod = htobe16(0);
4612 in->cmd_tisn = htobe32(sc->sc_tisn);
4613
4614 mcx_cmdq_post(sc, cqe, 0);
4615 error = mcx_cmdq_poll(sc, cqe, 1000);
4616 if (error != 0) {
4617 printf("%s: destroy tis timeout\n", DEVNAME(sc));
4618 return error;
4619 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
                return -1;
        }
4624
4625 out = mcx_cmdq_out(cqe);
4626 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4627 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4628 out->cmd_status, be32toh(out->cmd_syndrome));
4629 return -1;
4630 }
4631
        sc->sc_tisn = 0;
4633 return 0;
4634 }
4635
4636 #if 0
4637 static int
4638 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
4639 {
4640 struct mcx_cmdq_entry *cqe;
4641 struct mcx_cmd_alloc_flow_counter_in *in;
4642 struct mcx_cmd_alloc_flow_counter_out *out;
4643 int error;
4644
4645 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4646 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4647
4648 in = mcx_cmdq_in(cqe);
4649 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
4650 in->cmd_op_mod = htobe16(0);
4651
4652 mcx_cmdq_post(sc, cqe, 0);
4653
4654 error = mcx_cmdq_poll(sc, cqe, 1000);
4655 if (error != 0) {
4656 printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
4657 return (-1);
4658 }
4659 if (mcx_cmdq_verify(cqe) != 0) {
4660 printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
4661 return (-1);
4662 }
4663
4664 out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
4665 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4666 printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
4667 out->cmd_status);
4668 return (-1);
4669 }
4670
4671 sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
4672 printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
4673
4674 return (0);
4675 }
4676 #endif
4677
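/*
 * Create the receive flow table, with room for 1 << log_size
 * entries.  It only takes effect once mcx_set_flow_table_root()
 * makes it the root table for received packets.
 */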
4678 static int
4679 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4680 {
4681 struct mcx_cmdq_entry *cqe;
4682 struct mcx_dmamem mxm;
4683 struct mcx_cmd_create_flow_table_in *in;
4684 struct mcx_cmd_create_flow_table_mb_in *mbin;
4685 struct mcx_cmd_create_flow_table_out *out;
4686 int error;
4687 int token;
4688
4689 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4690 token = mcx_cmdq_token(sc);
4691 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4692
4693 in = mcx_cmdq_in(cqe);
4694 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4695 in->cmd_op_mod = htobe16(0);
4696
4697 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4698 printf("%s: unable to allocate create flow table mailbox\n",
4699 DEVNAME(sc));
4700 return (-1);
4701 }
4702 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4703 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4704 mbin->cmd_ctx.ft_log_size = log_size;
4705
4706 mcx_cmdq_mboxes_sign(&mxm, 1);
4707 mcx_cmdq_post(sc, cqe, 0);
4708 error = mcx_cmdq_poll(sc, cqe, 1000);
4709 if (error != 0) {
4710 printf("%s: create flow table timeout\n", DEVNAME(sc));
4711 goto free;
4712 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create flow table command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4717
4718 out = mcx_cmdq_out(cqe);
4719 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4720 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4721 out->cmd_status, be32toh(out->cmd_syndrome));
4722 error = -1;
4723 goto free;
4724 }
4725
4726 sc->sc_flow_table_id = be32toh(out->cmd_table_id);
4727 free:
4728 mcx_dmamem_free(sc, &mxm);
4729 return (error);
4730 }
4731
4732 static int
4733 mcx_set_flow_table_root(struct mcx_softc *sc)
4734 {
4735 struct mcx_cmdq_entry *cqe;
4736 struct mcx_dmamem mxm;
4737 struct mcx_cmd_set_flow_table_root_in *in;
4738 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4739 struct mcx_cmd_set_flow_table_root_out *out;
4740 int error;
4741 int token;
4742
4743 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4744 token = mcx_cmdq_token(sc);
4745 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4746
4747 in = mcx_cmdq_in(cqe);
4748 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4749 in->cmd_op_mod = htobe16(0);
4750
4751 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4752 printf("%s: unable to allocate set flow table root mailbox\n",
4753 DEVNAME(sc));
4754 return (-1);
4755 }
4756 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4757 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4758 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4759
4760 mcx_cmdq_mboxes_sign(&mxm, 1);
4761 mcx_cmdq_post(sc, cqe, 0);
4762 error = mcx_cmdq_poll(sc, cqe, 1000);
4763 if (error != 0) {
4764 printf("%s: set flow table root timeout\n", DEVNAME(sc));
4765 goto free;
4766 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: set flow table root command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
4772
4773 out = mcx_cmdq_out(cqe);
4774 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4775 printf("%s: set flow table root failed (%x, %x)\n",
4776 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4777 error = -1;
4778 goto free;
4779 }
4780
4781 free:
4782 mcx_dmamem_free(sc, &mxm);
4783 return (error);
4784 }
4785
4786 static int
4787 mcx_destroy_flow_table(struct mcx_softc *sc)
4788 {
4789 struct mcx_cmdq_entry *cqe;
4790 struct mcx_dmamem mxm;
4791 struct mcx_cmd_destroy_flow_table_in *in;
4792 struct mcx_cmd_destroy_flow_table_mb_in *mb;
4793 struct mcx_cmd_destroy_flow_table_out *out;
4794 int error;
4795 int token;
4796
4797 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4798 token = mcx_cmdq_token(sc);
4799 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4800
4801 in = mcx_cmdq_in(cqe);
4802 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4803 in->cmd_op_mod = htobe16(0);
4804
4805 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4806 printf("%s: unable to allocate destroy flow table mailbox\n",
4807 DEVNAME(sc));
4808 return (-1);
4809 }
4810 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4811 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4812 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4813
4814 mcx_cmdq_mboxes_sign(&mxm, 1);
4815 mcx_cmdq_post(sc, cqe, 0);
4816 error = mcx_cmdq_poll(sc, cqe, 1000);
4817 if (error != 0) {
4818 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4819 goto free;
4820 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy flow table command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
4825
4826 out = mcx_cmdq_out(cqe);
4827 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4828 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4829 out->cmd_status, be32toh(out->cmd_syndrome));
4830 error = -1;
4831 goto free;
4832 }
4833
4834 sc->sc_flow_table_id = -1;
4835 free:
4836 mcx_dmamem_free(sc, &mxm);
4837 return (error);
4838 }
4839
4840
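/*
 * Create a flow group: a contiguous range of flow table indices
 * ([start, start + size - 1]) that share one set of match criteria.
 * The id, start and size are recorded per group so that
 * mcx_set_flow_table_entry() can address entries relative to their
 * group.
 */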
4841 static int
4842 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4843 int match_enable, struct mcx_flow_match *match)
4844 {
4845 struct mcx_cmdq_entry *cqe;
4846 struct mcx_dmamem mxm;
4847 struct mcx_cmd_create_flow_group_in *in;
4848 struct mcx_cmd_create_flow_group_mb_in *mbin;
4849 struct mcx_cmd_create_flow_group_out *out;
4850 int error;
4851 int token;
4852
4853 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4854 token = mcx_cmdq_token(sc);
4855 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4856 token);
4857
4858 in = mcx_cmdq_in(cqe);
4859 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4860 in->cmd_op_mod = htobe16(0);
4861
4862 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4863 != 0) {
4864 printf("%s: unable to allocate create flow group mailbox\n",
4865 DEVNAME(sc));
4866 return (-1);
4867 }
4868 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4869 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4870 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4871 mbin->cmd_start_flow_index = htobe32(start);
4872 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4873
4874 mbin->cmd_match_criteria_enable = match_enable;
4875 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4876
4877 mcx_cmdq_mboxes_sign(&mxm, 2);
4878 mcx_cmdq_post(sc, cqe, 0);
4879 error = mcx_cmdq_poll(sc, cqe, 1000);
4880 if (error != 0) {
4881 printf("%s: create flow group timeout\n", DEVNAME(sc));
4882 goto free;
4883 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: create flow group command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
4888
4889 out = mcx_cmdq_out(cqe);
4890 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4891 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4892 out->cmd_status, be32toh(out->cmd_syndrome));
4893 error = -1;
4894 goto free;
4895 }
4896
4897 sc->sc_flow_group_id[group] = be32toh(out->cmd_group_id);
4898 sc->sc_flow_group_size[group] = size;
4899 sc->sc_flow_group_start[group] = start;
4900
4901 free:
4902 mcx_dmamem_free(sc, &mxm);
4903 return (error);
4904 }
4905
4906 static int
4907 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4908 {
4909 struct mcx_cmdq_entry *cqe;
4910 struct mcx_dmamem mxm;
4911 struct mcx_cmd_destroy_flow_group_in *in;
4912 struct mcx_cmd_destroy_flow_group_mb_in *mb;
4913 struct mcx_cmd_destroy_flow_group_out *out;
4914 int error;
4915 int token;
4916
4917 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4918 token = mcx_cmdq_token(sc);
4919 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4920
4921 in = mcx_cmdq_in(cqe);
4922 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4923 in->cmd_op_mod = htobe16(0);
4924
4925 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4926 printf("%s: unable to allocate destroy flow group mailbox\n",
4927 DEVNAME(sc));
4928 return (-1);
4929 }
4930 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4931 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4932 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4933 mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4934
4935 mcx_cmdq_mboxes_sign(&mxm, 2);
4936 mcx_cmdq_post(sc, cqe, 0);
4937 error = mcx_cmdq_poll(sc, cqe, 1000);
4938 if (error != 0) {
4939 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4940 goto free;
4941 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: destroy flow group command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
4946
4947 out = mcx_cmdq_out(cqe);
4948 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4949 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4950 out->cmd_status, be32toh(out->cmd_syndrome));
4951 error = -1;
4952 goto free;
4953 }
4954
4955 sc->sc_flow_group_id[group] = -1;
4956 sc->sc_flow_group_size[group] = 0;
4957 free:
4958 mcx_dmamem_free(sc, &mxm);
4959 return (error);
4960 }
4961
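/*
 * Program a single flow table entry.  The flow context forwards to a
 * one-element destination list holding the TIR and, when macaddr is
 * given, matches on the destination MAC address (the match criteria
 * themselves were fixed when the group was created).
 */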
4962 static int
4963 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4964 const uint8_t *macaddr)
4965 {
4966 struct mcx_cmdq_entry *cqe;
4967 struct mcx_dmamem mxm;
4968 struct mcx_cmd_set_flow_table_entry_in *in;
4969 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4970 struct mcx_cmd_set_flow_table_entry_out *out;
4971 uint32_t *dest;
4972 int error;
4973 int token;
4974
4975 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4976 token = mcx_cmdq_token(sc);
4977 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4978 sizeof(*out), token);
4979
4980 in = mcx_cmdq_in(cqe);
4981 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4982 in->cmd_op_mod = htobe16(0);
4983
4984 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4985 != 0) {
4986 printf("%s: unable to allocate set flow table entry mailbox\n",
4987 DEVNAME(sc));
4988 return (-1);
4989 }
4990 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4991 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4992 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4993 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4994 mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4995
4996 /* flow context ends at offset 0x330, 0x130 into the second mbox */
4997 dest = (uint32_t *)
4998 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4999 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5000 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5001 *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
5002
5003 /* the only thing we match on at the moment is the dest mac address */
5004 if (macaddr != NULL) {
5005 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5006 ETHER_ADDR_LEN);
5007 }
5008
5009 mcx_cmdq_mboxes_sign(&mxm, 2);
5010 mcx_cmdq_post(sc, cqe, 0);
5011 error = mcx_cmdq_poll(sc, cqe, 1000);
5012 if (error != 0) {
5013 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5014 goto free;
5015 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: set flow table entry command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
5021
5022 out = mcx_cmdq_out(cqe);
5023 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5024 printf("%s: set flow table entry failed (%x, %x)\n",
5025 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5026 error = -1;
5027 goto free;
5028 }
5029
5030 free:
5031 mcx_dmamem_free(sc, &mxm);
5032 return (error);
5033 }
5034
5035 static int
5036 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5037 {
5038 struct mcx_cmdq_entry *cqe;
5039 struct mcx_dmamem mxm;
5040 struct mcx_cmd_delete_flow_table_entry_in *in;
5041 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5042 struct mcx_cmd_delete_flow_table_entry_out *out;
5043 int error;
5044 int token;
5045
5046 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5047 token = mcx_cmdq_token(sc);
5048 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5049 token);
5050
5051 in = mcx_cmdq_in(cqe);
5052 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5053 in->cmd_op_mod = htobe16(0);
5054
5055 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5056 printf("%s: unable to allocate delete flow table entry mailbox\n",
5057 DEVNAME(sc));
5058 return (-1);
5059 }
5060 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5061 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5062 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5063 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5064
5065 mcx_cmdq_mboxes_sign(&mxm, 2);
5066 mcx_cmdq_post(sc, cqe, 0);
5067 error = mcx_cmdq_poll(sc, cqe, 1000);
5068 if (error != 0) {
5069 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5070 goto free;
5071 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: delete flow table entry command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
5077
5078 out = mcx_cmdq_out(cqe);
5079 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5080 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5081 DEVNAME(sc), group, index, out->cmd_status,
5082 be32toh(out->cmd_syndrome));
5083 error = -1;
5084 goto free;
5085 }
5086
5087 free:
5088 mcx_dmamem_free(sc, &mxm);
5089 return (error);
5090 }
5091
5092 #if 0
5093 int
5094 mcx_dump_flow_table(struct mcx_softc *sc)
5095 {
5096 struct mcx_dmamem mxm;
5097 struct mcx_cmdq_entry *cqe;
5098 struct mcx_cmd_query_flow_table_in *in;
5099 struct mcx_cmd_query_flow_table_mb_in *mbin;
5100 struct mcx_cmd_query_flow_table_out *out;
5101 struct mcx_cmd_query_flow_table_mb_out *mbout;
5102 uint8_t token = mcx_cmdq_token(sc);
5103 int error;
5104 int i;
5105 uint8_t *dump;
5106
5107 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5108 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5109 sizeof(*out) + sizeof(*mbout) + 16, token);
5110
5111 in = mcx_cmdq_in(cqe);
5112 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
5113 in->cmd_op_mod = htobe16(0);
5114
5115 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5116 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
5117 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5118 &cqe->cq_output_ptr, token) != 0) {
5119 printf(", unable to allocate query flow table mailboxes\n");
5120 return (-1);
5121 }
5122 cqe->cq_input_ptr = cqe->cq_output_ptr;
5123
5124 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5125 mbin->cmd_table_type = 0;
5126 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5127
5128 mcx_cmdq_mboxes_sign(&mxm, 1);
5129
5130 mcx_cmdq_post(sc, cqe, 0);
5131 error = mcx_cmdq_poll(sc, cqe, 1000);
5132 if (error != 0) {
5133 printf("%s: query flow table timeout\n", DEVNAME(sc));
5134 goto free;
5135 }
5136 error = mcx_cmdq_verify(cqe);
5137 if (error != 0) {
5138 printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
5139 goto free;
5140 }
5141
5142 out = mcx_cmdq_out(cqe);
5143 switch (out->cmd_status) {
5144 case MCX_CQ_STATUS_OK:
5145 break;
5146 default:
5147 printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
5148 out->cmd_status, be32toh(out->cmd_syndrome));
5149 error = -1;
5150 goto free;
5151 }
5152
5153 mbout = (struct mcx_cmd_query_flow_table_mb_out *)
5154 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5155 dump = (uint8_t *)mbout + 8;
5156 for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
5157 printf("%.2x ", dump[i]);
5158 if (i % 16 == 15)
5159 printf("\n");
5160 }
5161 free:
5162 mcx_cq_mboxes_free(sc, &mxm);
5163 return (error);
5164 }
5165 int
5166 mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
5167 {
5168 struct mcx_dmamem mxm;
5169 struct mcx_cmdq_entry *cqe;
5170 struct mcx_cmd_query_flow_table_entry_in *in;
5171 struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
5172 struct mcx_cmd_query_flow_table_entry_out *out;
5173 struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
5174 uint8_t token = mcx_cmdq_token(sc);
5175 int error;
5176 int i;
5177 uint8_t *dump;
5178
5179 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5180 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5181 sizeof(*out) + sizeof(*mbout) + 16, token);
5182
5183 in = mcx_cmdq_in(cqe);
5184 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
5185 in->cmd_op_mod = htobe16(0);
5186
5187 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5188 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5189 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5190 &cqe->cq_output_ptr, token) != 0) {
5191 printf(", unable to allocate query flow table entry mailboxes\n");
5192 return (-1);
5193 }
5194 cqe->cq_input_ptr = cqe->cq_output_ptr;
5195
5196 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5197 mbin->cmd_table_type = 0;
5198 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5199 mbin->cmd_flow_index = htobe32(index);
5200
5201 mcx_cmdq_mboxes_sign(&mxm, 1);
5202
5203 mcx_cmdq_post(sc, cqe, 0);
5204 error = mcx_cmdq_poll(sc, cqe, 1000);
5205 if (error != 0) {
5206 printf("%s: query flow table entry timeout\n", DEVNAME(sc));
5207 goto free;
5208 }
5209 error = mcx_cmdq_verify(cqe);
5210 if (error != 0) {
5211 printf("%s: query flow table entry reply corrupt\n",
5212 DEVNAME(sc));
5213 goto free;
5214 }
5215
5216 out = mcx_cmdq_out(cqe);
5217 switch (out->cmd_status) {
5218 case MCX_CQ_STATUS_OK:
5219 break;
5220 default:
5221 printf("%s: query flow table entry failed (%x/%x)\n",
5222 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5223 error = -1;
5224 goto free;
5225 }
5226
5227 mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
5228 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5229 dump = (uint8_t *)mbout;
5230 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5231 printf("%.2x ", dump[i]);
5232 if (i % 16 == 15)
5233 printf("\n");
5234 }
5235
5236 free:
5237 mcx_cq_mboxes_free(sc, &mxm);
5238 return (error);
5239 }
5240
5241 int
mcx_dump_flow_group(struct mcx_softc *sc, int group)
5243 {
5244 struct mcx_dmamem mxm;
5245 struct mcx_cmdq_entry *cqe;
5246 struct mcx_cmd_query_flow_group_in *in;
5247 struct mcx_cmd_query_flow_group_mb_in *mbin;
5248 struct mcx_cmd_query_flow_group_out *out;
5249 struct mcx_cmd_query_flow_group_mb_out *mbout;
5250 uint8_t token = mcx_cmdq_token(sc);
5251 int error;
5252 int i;
5253 uint8_t *dump;
5254
5255 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5256 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5257 sizeof(*out) + sizeof(*mbout) + 16, token);
5258
5259 in = mcx_cmdq_in(cqe);
5260 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
5261 in->cmd_op_mod = htobe16(0);
5262
5263 CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5264 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5265 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5266 &cqe->cq_output_ptr, token) != 0) {
5267 printf(", unable to allocate query flow group mailboxes\n");
5268 return (-1);
5269 }
5270 cqe->cq_input_ptr = cqe->cq_output_ptr;
5271
5272 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5273 mbin->cmd_table_type = 0;
5274 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
        mbin->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
5276
5277 mcx_cmdq_mboxes_sign(&mxm, 1);
5278
5279 mcx_cmdq_post(sc, cqe, 0);
5280 error = mcx_cmdq_poll(sc, cqe, 1000);
5281 if (error != 0) {
5282 printf("%s: query flow group timeout\n", DEVNAME(sc));
5283 goto free;
5284 }
5285 error = mcx_cmdq_verify(cqe);
5286 if (error != 0) {
5287 printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
5288 goto free;
5289 }
5290
5291 out = mcx_cmdq_out(cqe);
5292 switch (out->cmd_status) {
5293 case MCX_CQ_STATUS_OK:
5294 break;
5295 default:
5296 printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
5297 out->cmd_status, be32toh(out->cmd_syndrome));
5298 error = -1;
5299 goto free;
5300 }
5301
5302 mbout = (struct mcx_cmd_query_flow_group_mb_out *)
5303 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5304 dump = (uint8_t *)mbout;
5305 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5306 printf("%.2x ", dump[i]);
5307 if (i % 16 == 15)
5308 printf("\n");
5309 }
5310 dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
5311 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5312 printf("%.2x ", dump[i]);
5313 if (i % 16 == 15)
5314 printf("\n");
5315 }
5316
5317 free:
5318 mcx_cq_mboxes_free(sc, &mxm);
5319 return (error);
5320 }
5321
5322 int
5323 mcx_dump_rq(struct mcx_softc *sc)
5324 {
5325 struct mcx_dmamem mxm;
5326 struct mcx_cmdq_entry *cqe;
5327 struct mcx_cmd_query_rq_in *in;
5328 struct mcx_cmd_query_rq_out *out;
5329 struct mcx_cmd_query_rq_mb_out *mbout;
5330 uint8_t token = mcx_cmdq_token(sc);
5331 int error;
5332
5333 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5334 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5335 token);
5336
5337 in = mcx_cmdq_in(cqe);
5338 in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5339 in->cmd_op_mod = htobe16(0);
5340 in->cmd_rqn = htobe32(sc->sc_rqn);
5341
5342 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5343 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5344 &cqe->cq_output_ptr, token) != 0) {
5345 printf(", unable to allocate query flow group mailboxes\n");
5346 return (-1);
5347 }
5348
5349 mcx_cmdq_mboxes_sign(&mxm, 1);
5350
5351 mcx_cmdq_post(sc, cqe, 0);
5352 error = mcx_cmdq_poll(sc, cqe, 1000);
5353 if (error != 0) {
5354 printf("%s: query rq timeout\n", DEVNAME(sc));
5355 goto free;
5356 }
5357 error = mcx_cmdq_verify(cqe);
5358 if (error != 0) {
5359 printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5360 goto free;
5361 }
5362
5363 out = mcx_cmdq_out(cqe);
5364 switch (out->cmd_status) {
5365 case MCX_CQ_STATUS_OK:
5366 break;
5367 default:
5368 printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5369 out->cmd_status, be32toh(out->cmd_syndrome));
5370 error = -1;
5371 goto free;
5372 }
5373
5374 mbout = (struct mcx_cmd_query_rq_mb_out *)
5375 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5376 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5377 DEVNAME(sc),
5378 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5379 be32toh(mbout->cmd_ctx.rq_user_index),
5380 be32toh(mbout->cmd_ctx.rq_cqn),
5381 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5382 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5383 mbout->cmd_ctx.rq_wq.wq_log_size,
5384 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5385 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5386
5387 free:
5388 mcx_cq_mboxes_free(sc, &mxm);
5389 return (error);
5390 }
5391
5392 int
5393 mcx_dump_sq(struct mcx_softc *sc)
5394 {
5395 struct mcx_dmamem mxm;
5396 struct mcx_cmdq_entry *cqe;
5397 struct mcx_cmd_query_sq_in *in;
5398 struct mcx_cmd_query_sq_out *out;
5399 struct mcx_cmd_query_sq_mb_out *mbout;
5400 uint8_t token = mcx_cmdq_token(sc);
5401 int error;
5402 int i;
5403 uint8_t *dump;
5404
5405 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5406 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5407 token);
5408
5409 in = mcx_cmdq_in(cqe);
5410 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
5411 in->cmd_op_mod = htobe16(0);
5412 in->cmd_sqn = htobe32(sc->sc_sqn);
5413
5414 CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5415 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5416 &cqe->cq_output_ptr, token) != 0) {
5417 printf(", unable to allocate query sq mailboxes\n");
5418 return (-1);
5419 }
5420
5421 mcx_cmdq_mboxes_sign(&mxm, 1);
5422
5423 mcx_cmdq_post(sc, cqe, 0);
5424 error = mcx_cmdq_poll(sc, cqe, 1000);
5425 if (error != 0) {
5426 printf("%s: query sq timeout\n", DEVNAME(sc));
5427 goto free;
5428 }
5429 error = mcx_cmdq_verify(cqe);
5430 if (error != 0) {
5431 printf("%s: query sq reply corrupt\n", DEVNAME(sc));
5432 goto free;
5433 }
5434
5435 out = mcx_cmdq_out(cqe);
5436 switch (out->cmd_status) {
5437 case MCX_CQ_STATUS_OK:
5438 break;
5439 default:
5440 printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
5441 out->cmd_status, be32toh(out->cmd_syndrome));
5442 error = -1;
5443 goto free;
5444 }
5445
5446 mbout = (struct mcx_cmd_query_sq_mb_out *)
5447 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5448 /*
5449 printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5450 DEVNAME(sc),
5451 (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5452 be32toh(mbout->cmd_ctx.rq_user_index),
5453 be32toh(mbout->cmd_ctx.rq_cqn),
5454 be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5455 mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5456 mbout->cmd_ctx.rq_wq.wq_log_size,
5457 be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5458 be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5459 */
5460 dump = (uint8_t *)mbout;
5461 for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5462 printf("%.2x ", dump[i]);
5463 if (i % 16 == 15)
5464 printf("\n");
5465 }
5466
5467 free:
5468 mcx_cq_mboxes_free(sc, &mxm);
5469 return (error);
5470 }
5471
5472 static int
5473 mcx_dump_counters(struct mcx_softc *sc)
5474 {
5475 struct mcx_dmamem mxm;
5476 struct mcx_cmdq_entry *cqe;
5477 struct mcx_cmd_query_vport_counters_in *in;
5478 struct mcx_cmd_query_vport_counters_mb_in *mbin;
5479 struct mcx_cmd_query_vport_counters_out *out;
5480 struct mcx_nic_vport_counters *counters;
5481 int error, token;
5482
5483 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5484 token = mcx_cmdq_token(sc);
5485 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5486 sizeof(*out) + sizeof(*counters), token);
5487
5488 in = mcx_cmdq_in(cqe);
5489 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5490 in->cmd_op_mod = htobe16(0);
5491
5492 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5493 printf(", unable to allocate query nic vport counters mailboxen\n");
5494 return (-1);
5495 }
5496 cqe->cq_input_ptr = cqe->cq_output_ptr;
5497
5498 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5499 mbin->cmd_clear = 0x80;
5500
5501 mcx_cmdq_mboxes_sign(&mxm, 1);
5502 mcx_cmdq_post(sc, cqe, 0);
5503
5504 error = mcx_cmdq_poll(sc, cqe, 1000);
5505 if (error != 0) {
5506 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5507 goto free;
5508 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: query nic vport counters command corrupt\n",
                    DEVNAME(sc));
                error = -1;
                goto free;
        }
5514
5515 out = mcx_cmdq_out(cqe);
5516 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5517 printf("%s: query nic vport counters failed (%x, %x)\n",
5518 DEVNAME(sc), out->cmd_status, out->cmd_syndrome);
5519 error = -1;
5520 goto free;
5521 }
5522
5523 counters = (struct mcx_nic_vport_counters *)
5524 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5525 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5526 counters->rx_ucast.packets + counters->tx_ucast.packets +
5527 counters->rx_err.packets + counters->tx_err.packets)
5528 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5529 DEVNAME(sc),
5530 be64toh(counters->tx_err.packets),
5531 be64toh(counters->rx_err.packets),
5532 be64toh(counters->tx_ucast.packets),
5533 be64toh(counters->rx_ucast.packets),
5534 be64toh(counters->tx_bcast.packets),
5535 be64toh(counters->rx_bcast.packets));
5536 free:
5537 mcx_dmamem_free(sc, &mxm);
5538
5539 return (error);
5540 }
5541
5542 static int
5543 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5544 {
5545 struct mcx_dmamem mxm;
5546 struct mcx_cmdq_entry *cqe;
5547 struct mcx_cmd_query_flow_counter_in *in;
5548 struct mcx_cmd_query_flow_counter_mb_in *mbin;
5549 struct mcx_cmd_query_flow_counter_out *out;
5550 struct mcx_counter *counters;
5551 int error, token;
5552
5553 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5554 token = mcx_cmdq_token(sc);
5555 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5556 sizeof(*counters), token);
5557
5558 in = mcx_cmdq_in(cqe);
5559 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5560 in->cmd_op_mod = htobe16(0);
5561
5562 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5563 printf(", unable to allocate query flow counter mailboxen\n");
5564 return (-1);
5565 }
5566 cqe->cq_input_ptr = cqe->cq_output_ptr;
5567 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5568 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5569 mbin->cmd_clear = 0x80;
5570
5571 mcx_cmdq_mboxes_sign(&mxm, 1);
5572 mcx_cmdq_post(sc, cqe, 0);
5573
5574 error = mcx_cmdq_poll(sc, cqe, 1000);
5575 if (error != 0) {
5576 printf("%s: query flow counter timeout\n", DEVNAME(sc));
5577 goto free;
5578 }
        if (mcx_cmdq_verify(cqe) != 0) {
                printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
                error = -1;
                goto free;
        }
5583
5584 out = mcx_cmdq_out(cqe);
5585 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5586 printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5587 out->cmd_status, out->cmd_syndrome);
5588 error = -1;
5589 goto free;
5590 }
5591
5592 counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5593 if (counters->packets)
5594 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5595 be64toh(counters->packets));
5596 free:
5597 mcx_dmamem_free(sc, &mxm);
5598
5599 return (error);
5600 }
5601
5602 #endif
5603
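/*
 * Fill empty RQ slots with fresh cluster mbufs.  Each buffer is
 * offset by ETHER_ALIGN so the IP header lands on a 4-byte boundary,
 * and its bus address, length and the reserved lkey are written into
 * the matching RQ entry.  The doorbell write at the end publishes
 * the new producer counter to the hardware.
 */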
5604 static int
5605 mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
5606 uint *prod, int bufsize, uint nslots)
5607 {
5608 struct mcx_rq_entry *rqe;
5609 struct mcx_slot *ms;
5610 struct mbuf *m;
5611 uint slot, p, fills;
5612
5613 p = *prod;
5614 slot = (p % (1 << MCX_LOG_RQ_SIZE));
5615 rqe = ring;
5616 for (fills = 0; fills < nslots; fills++) {
5617 ms = &slots[slot];
5618 #if 0
5619 m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
5620 if (m == NULL)
5621 break;
5622 #else
5623 m = NULL;
5624 MGETHDR(m, M_DONTWAIT, MT_DATA);
5625 if (m == NULL)
5626 break;
5627
5628 MCLGET(m, M_DONTWAIT);
5629 if ((m->m_flags & M_EXT) == 0) {
5630 m_freem(m);
5631 break;
5632 }
5633 #endif
5634
5635 m->m_data += ETHER_ALIGN;
5636 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
5637 if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
5638 BUS_DMA_NOWAIT) != 0) {
5639 m_freem(m);
5640 break;
5641 }
                bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
                    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5643 ms->ms_m = m;
5644
5645 rqe[slot].rqe_byte_count = htobe32(m->m_len);
5646 rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
5647 rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);
5648
5649 p++;
5650 slot++;
5651 if (slot == (1 << MCX_LOG_RQ_SIZE))
5652 slot = 0;
5653 }
5654
5655 if (fills != 0) {
5656 *sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
5657 /* barrier? */
5658 }
5659
5660 *prod = p;
5661
5662 return (nslots - fills);
5663 }
5664
5665 static int
5666 mcx_rx_fill(struct mcx_softc *sc)
5667 {
5668 u_int slots;
5669
5670 slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5671 if (slots == 0)
5672 return (1);
5673
5674 slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5675 sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5676 mcx_rxr_put(&sc->sc_rxr, slots);
5677 return (0);
5678 }
5679
5680 void
5681 mcx_refill(void *xsc)
5682 {
5683 struct mcx_softc *sc = xsc;
5684
5685 mcx_rx_fill(sc);
5686
5687 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5688 callout_schedule(&sc->sc_rx_refill, 1);
5689 }
5690
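/*
 * Count the SQ slots released by a transmit completion.  The
 * arithmetic below assumes the transmit path packs the control and
 * Ethernet segments plus the first data segment into the first slot,
 * with any remaining segments MCX_SQ_SEGS_PER_SLOT (4) to a slot;
 * (nsegs + 2) / 4 is then equivalent to howmany(nsegs - 1, 4).
 */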
5691 void
5692 mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
5693 {
5694 struct mcx_slot *ms;
5695 bus_dmamap_t map;
5696 int slot, slots;
5697
5698 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
5699
5700 ms = &sc->sc_tx_slots[slot];
5701 map = ms->ms_map;
5702 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
5703 BUS_DMASYNC_POSTWRITE);
5704
5705 slots = 1;
5706 if (map->dm_nsegs > 1)
5707 slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
5708
5709 (*txfree) += slots;
5710 bus_dmamap_unload(sc->sc_dmat, map);
5711 m_freem(ms->ms_m);
5712 ms->ms_m = NULL;
5713 }
5714
5715 static uint64_t
5716 mcx_uptime(void)
5717 {
5718 struct timespec ts;
5719
5720 nanouptime(&ts);
5721
5722 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5723 }
5724
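/*
 * Timestamp calibration.  mcx_calibrate() periodically samples the
 * device's free-running timer against nanouptime() and keeps two
 * generations of calibration records; sc_calibration_gen plus the
 * producer/consumer barriers let readers pick a consistent record
 * without taking a lock.  A CQE timestamp t would convert to an
 * uptime as
 *
 *	c_uptime + (t - c_timestamp) * c_udiff / c_tdiff
 *
 * with both deltas pre-shifted by MCX_TIMESTAMP_SHIFT so the
 * multiplication stays within 64 bits (see the #if 0 code in
 * mcx_process_rx()).
 */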
5725 static void
5726 mcx_calibrate_first(struct mcx_softc *sc)
5727 {
5728 struct mcx_calibration *c = &sc->sc_calibration[0];
5729
5730 sc->sc_calibration_gen = 0;
5731
5732 c->c_ubase = mcx_uptime();
5733 c->c_tbase = mcx_timer(sc);
5734 c->c_tdiff = 0;
5735
5736 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5737 }
5738
5739 #define MCX_TIMESTAMP_SHIFT 10
5740
5741 static void
5742 mcx_calibrate(void *arg)
5743 {
5744 struct mcx_softc *sc = arg;
5745 struct mcx_calibration *nc, *pc;
5746 unsigned int gen;
5747
5748 if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
5749 return;
5750
5751 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
5752
5753 gen = sc->sc_calibration_gen;
5754 pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5755 gen++;
5756 nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5757
5758 nc->c_uptime = pc->c_ubase;
5759 nc->c_timestamp = pc->c_tbase;
5760
5761 nc->c_ubase = mcx_uptime();
5762 nc->c_tbase = mcx_timer(sc);
5763
5764 nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
5765 nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;
5766
5767 membar_producer();
5768 sc->sc_calibration_gen = gen;
5769 }
5770
5771 static int
5772 mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
5773 struct mcx_mbufq *mq, const struct mcx_calibration *c)
5774 {
5775 struct mcx_slot *ms;
5776 struct mbuf *m;
5777 int slot;
5778
5779 slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
5780
5781 ms = &sc->sc_rx_slots[slot];
5782 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
5783 BUS_DMASYNC_POSTREAD);
5784 bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5785
5786 m = ms->ms_m;
5787 ms->ms_m = NULL;
5788
5789 m_set_rcvif(m, &sc->sc_ec.ec_if);
5790 m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
5791
5792 #if 0
5793 if (cqe->cq_rx_hash_type) {
5794 m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
5795 be32toh(cqe->cq_rx_hash);
5796 }
5797 #endif
5798
5799 #if 0
5800 if (c->c_tdiff) {
5801 uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
5802 t *= c->c_udiff;
5803 t /= c->c_tdiff;
5804
5805 m->m_pkthdr.ph_timestamp = c->c_uptime + t;
5806 SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
5807 }
5808 #endif
5809
5810 MBUFQ_ENQUEUE(mq, m);
5811
5812 return (1);
5813 }
5814
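/*
 * A CQ entry belongs to software when its owner bit matches the current
 * phase of the consumer counter: the bit the hardware writes flips on
 * every pass around the ring, and (cq_cons >> MCX_LOG_CQ_SIZE) & 1
 * tracks how many times the consumer has wrapped.
 */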
5815 static struct mcx_cq_entry *
5816 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5817 {
5818 struct mcx_cq_entry *cqe;
5819 int next;
5820
5821 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5822 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5823
5824 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5825 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5826 return (&cqe[next]);
5827 }
5828
5829 return (NULL);
5830 }
5831
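/*
 * Re-arm the CQ for another completion event: record the consumer index
 * and arm sequence number in the doorbell record in host memory, then
 * write the same value together with the CQ number to the UAR doorbell
 * register.
 */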
5832 static void
5833 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5834 {
5835 bus_size_t offset;
5836 uint32_t val;
5837 uint64_t uval;
5838
5839 /* different uar per cq? */
5840 offset = (MCX_PAGE_SIZE * sc->sc_uar);
5841 val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
5842 val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5843
5844 cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5845 cq->cq_doorbell[1] = htobe32(val);
5846
5847 uval = val;
5848 uval <<= 32;
5849 uval |= cq->cq_n;
5850 bus_space_write_8(sc->sc_memt, sc->sc_memh,
5851 offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
5852 mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
5853 BUS_SPACE_BARRIER_WRITE);
5854 }
5855
5856 void
5857 mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5858 {
5859 struct ifnet *ifp = &sc->sc_ec.ec_if;
5860 const struct mcx_calibration *c;
5861 unsigned int gen;
5862 struct mcx_cq_entry *cqe;
5863 struct mcx_mbufq mq;
5864 struct mbuf *m;
5865 int rxfree, txfree;
5866
5867 MBUFQ_INIT(&mq);
5868
5869 gen = sc->sc_calibration_gen;
5870 membar_consumer();
5871 c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5872
5873 rxfree = 0;
5874 txfree = 0;
5875 while ((cqe = mcx_next_cq_entry(sc, cq))) {
5876 uint8_t opcode;
5877 opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
5878 switch (opcode) {
5879 case MCX_CQ_ENTRY_OPCODE_REQ:
5880 mcx_process_txeof(sc, cqe, &txfree);
5881 break;
5882 case MCX_CQ_ENTRY_OPCODE_SEND:
5883 rxfree += mcx_process_rx(sc, cqe, &mq, c);
5884 break;
5885 case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
5886 case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
5887 /* uint8_t *cqp = (uint8_t *)cqe; */
5888 /* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
5889 break;
5890
5891 default:
5892 /* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
5893 break;
5894 }
5895
5896 cq->cq_cons++;
5897 }
5898
5899 cq->cq_count++;
5900 mcx_arm_cq(sc, cq);
5901
5902 if (rxfree > 0) {
5903 mcx_rxr_put(&sc->sc_rxr, rxfree);
5904 while (MBUFQ_FIRST(&mq) != NULL) {
5905 MBUFQ_DEQUEUE(&mq, m);
5906 if_percpuq_enqueue(ifp->if_percpuq, m);
5907 }
5908
5909 mcx_rx_fill(sc);
5910
5911 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5912 callout_schedule(&sc->sc_rx_refill, 1);
5913 }
5914 if (txfree > 0) {
5915 sc->sc_tx_cons += txfree;
5916 if_schedule_deferred_start(ifp);
5917 }
5918 }
5919
5920 static void
5921 mcx_arm_eq(struct mcx_softc *sc)
5922 {
5923 bus_size_t offset;
5924 uint32_t val;
5925
5926 offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5927 val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5928
5929 mcx_wr(sc, offset, val);
5930 /* barrier? */
5931 }
5932
5933 static struct mcx_eq_entry *
5934 mcx_next_eq_entry(struct mcx_softc *sc)
5935 {
5936 struct mcx_eq_entry *eqe;
5937 int next;
5938
5939 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5940 next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5941 if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5942 sc->sc_eq_cons++;
5943 return (&eqe[next]);
5944 }
5945 return (NULL);
5946 }
5947
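/*
 * Interrupt handler: drain the event queue and re-arm it. Completion
 * events carry the CQ number in eq_event_data[6] and are dispatched to
 * the matching completion queue; port changes are punted to the
 * workqueue, since handling them queries the firmware and may sleep.
 */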
5948 int
5949 mcx_intr(void *xsc)
5950 {
5951 struct mcx_softc *sc = (struct mcx_softc *)xsc;
5952 struct mcx_eq_entry *eqe;
5953 int i, cq;
5954
5955 while ((eqe = mcx_next_eq_entry(sc))) {
5956 switch (eqe->eq_event_type) {
5957 case MCX_EVENT_TYPE_COMPLETION:
5958 cq = be32toh(eqe->eq_event_data[6]);
5959 for (i = 0; i < sc->sc_num_cq; i++) {
5960 if (sc->sc_cq[i].cq_n == cq) {
5961 mcx_process_cq(sc, &sc->sc_cq[i]);
5962 break;
5963 }
5964 }
5965 break;
5966
5967 case MCX_EVENT_TYPE_LAST_WQE:
5968 /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5969 break;
5970
5971 case MCX_EVENT_TYPE_CQ_ERROR:
5972 /* printf("%s: cq error\n", DEVNAME(sc)); */
5973 break;
5974
5975 case MCX_EVENT_TYPE_CMD_COMPLETION:
5976 /* wakeup probably */
5977 break;
5978
5979 case MCX_EVENT_TYPE_PORT_CHANGE:
5980 workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
5981 break;
5982
5983 default:
5984 /* printf("%s: something happened\n", DEVNAME(sc)); */
5985 break;
5986 }
5987 }
5988 mcx_arm_eq(sc);
5989 return (1);
5990 }
5991
5992 static void
5993 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5994 int total)
5995 {
5996 struct mcx_slot *ms;
5997
5998 int i = allocated;
5999 while (i-- > 0) {
6000 ms = &slots[i];
6001 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
6002 if (ms->ms_m != NULL)
6003 m_freem(ms->ms_m);
6004 }
6005 kmem_free(slots, total * sizeof(*ms));
6006 }
6007
6008 static int
6009 mcx_init(struct ifnet *ifp)
6010 {
6011 struct mcx_softc *sc = ifp->if_softc;
6012 struct mcx_slot *ms;
6013 int i, start;
6014 struct mcx_flow_match match_crit;
6015
6016 if (ISSET(ifp->if_flags, IFF_RUNNING))
6017 mcx_stop(ifp, 0);
6018
6019 sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6020 KM_SLEEP);
6021
6022 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6023 ms = &sc->sc_rx_slots[i];
6024 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6025 sc->sc_hardmtu, 0,
6026 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6027 &ms->ms_map) != 0) {
6028 printf("%s: failed to allocate rx dma maps\n",
6029 DEVNAME(sc));
6030 goto destroy_rx_slots;
6031 }
6032 }
6033
6034 sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6035 KM_SLEEP);
6036
6037 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6038 ms = &sc->sc_tx_slots[i];
6039 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6040 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6041 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6042 &ms->ms_map) != 0) {
6043 printf("%s: failed to allocate tx dma maps\n",
6044 DEVNAME(sc));
6045 goto destroy_tx_slots;
6046 }
6047 }
6048
6049 if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6050 goto down;
6051
6052 /* send queue */
6053 if (mcx_create_tis(sc) != 0)
6054 goto down;
6055
6056 if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6057 goto down;
6058
6059 /* receive queue */
6060 if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6061 goto down;
6062
6063 if (mcx_create_tir(sc) != 0)
6064 goto down;
6065
6066 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6067 goto down;
6068
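	/*
	 * Flow table layout: one catch-all entry for promiscuous mode,
	 * one entry matching the multicast bit for ALLMULTI, then a
	 * group of full destination MAC matches holding the unicast
	 * address, the broadcast address, and up to MCX_NUM_MCAST_FLOWS
	 * multicast addresses.
	 */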
6069 /* promisc flow group */
6070 start = 0;
6071 memset(&match_crit, 0, sizeof(match_crit));
6072 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6073 0, &match_crit) != 0)
6074 goto down;
6075 sc->sc_promisc_flow_enabled = 0;
6076 start++;
6077
6078 /* all multicast flow group */
6079 match_crit.mc_dest_mac[0] = 0x01;
6080 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6081 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6082 goto down;
6083 sc->sc_allmulti_flow_enabled = 0;
6084 start++;
6085
6086 /* mac address matching flow group */
6087 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6088 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6089 (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6090 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6091 goto down;
6092
6093 /* flow table entries for unicast and broadcast */
6094 start = 0;
6095 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6096 LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6097 goto down;
6098 start++;
6099
6100 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6101 etherbroadcastaddr) != 0)
6102 goto down;
6103 start++;
6104
6105 /* multicast entries go after that */
6106 sc->sc_mcast_flow_base = start;
6107
6108 /* re-add any existing multicast flows */
6109 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6110 if (sc->sc_mcast_flows[i][0] != 0) {
6111 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6112 sc->sc_mcast_flow_base + i,
6113 sc->sc_mcast_flows[i]);
6114 }
6115 }
6116
6117 if (mcx_set_flow_table_root(sc) != 0)
6118 goto down;
6119
6120 /* start the queues */
6121 if (mcx_ready_sq(sc) != 0)
6122 goto down;
6123
6124 if (mcx_ready_rq(sc) != 0)
6125 goto down;
6126
6127 mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6128 sc->sc_rx_prod = 0;
6129 mcx_rx_fill(sc);
6130
6131 mcx_calibrate_first(sc);
6132
6133 SET(ifp->if_flags, IFF_RUNNING);
6134
6135 sc->sc_tx_cons = 0;
6136 sc->sc_tx_prod = 0;
6137 CLR(ifp->if_flags, IFF_OACTIVE);
6138 if_schedule_deferred_start(ifp);
6139
6140 return 0;
6141 destroy_tx_slots:
6142 mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6143 	sc->sc_tx_slots = NULL;
6144
6145 i = (1 << MCX_LOG_RQ_SIZE);
6146 destroy_rx_slots:
6147 mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6148 sc->sc_rx_slots = NULL;
6149 down:
6150 mcx_stop(ifp, 0);
6151 return EIO;
6152 }
6153
6154 static void
6155 mcx_stop(struct ifnet *ifp, int disable)
6156 {
6157 struct mcx_softc *sc = ifp->if_softc;
6158 int group, i;
6159
6160 CLR(ifp->if_flags, IFF_RUNNING);
6161
6162 	/*
6163 	 * delete the flow table entries first, so no new packets can
6164 	 * arrive while the rest of the datapath is being torn down
6165 	 */
6166 if (sc->sc_promisc_flow_enabled)
6167 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
6168 if (sc->sc_allmulti_flow_enabled)
6169 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
6170 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
6171 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
6172 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6173 if (sc->sc_mcast_flows[i][0] != 0) {
6174 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6175 sc->sc_mcast_flow_base + i);
6176 }
6177 }
6178
6179 callout_halt(&sc->sc_calibrate, NULL);
6180
6181 for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
6182 if (sc->sc_flow_group_id[group] != -1)
6183 mcx_destroy_flow_group(sc,
6184 sc->sc_flow_group_id[group]);
6185 }
6186
6187 if (sc->sc_flow_table_id != -1)
6188 mcx_destroy_flow_table(sc);
6189
6190 if (sc->sc_tirn != 0)
6191 mcx_destroy_tir(sc);
6192 if (sc->sc_rqn != 0)
6193 mcx_destroy_rq(sc);
6194
6195 if (sc->sc_sqn != 0)
6196 mcx_destroy_sq(sc);
6197 if (sc->sc_tisn != 0)
6198 mcx_destroy_tis(sc);
6199
6200 for (i = 0; i < sc->sc_num_cq; i++)
6201 mcx_destroy_cq(sc, i);
6202 sc->sc_num_cq = 0;
6203
6204 if (sc->sc_tx_slots != NULL) {
6205 mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
6206 (1 << MCX_LOG_SQ_SIZE));
6207 sc->sc_tx_slots = NULL;
6208 }
6209 if (sc->sc_rx_slots != NULL) {
6210 mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
6211 (1 << MCX_LOG_RQ_SIZE));
6212 sc->sc_rx_slots = NULL;
6213 }
6214 }
6215
6216 static int
6217 mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6218 {
6219 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6220 struct ifreq *ifr = (struct ifreq *)data;
6221 struct ethercom *ec = &sc->sc_ec;
6222 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6223 struct ether_multi *enm;
6224 struct ether_multistep step;
6225 int s, i, flags, error = 0;
6226
6227 s = splnet();
6228 switch (cmd) {
6229
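	/*
	 * Multicast filtering is done with individual flow table entries
	 * in the MAC group, starting at sc_mcast_flow_base. Once the
	 * MCX_NUM_MCAST_FLOWS entries are exhausted, or a multicast
	 * range is joined, fall back to IFF_ALLMULTI; sc_extra_mcast
	 * counts the overflow addresses so ALLMULTI can be cleared again
	 * once they are all deleted and no ranges remain.
	 */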
6230 case SIOCADDMULTI:
6231 if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6232 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6233 if (error != 0) {
6234 splx(s);
6235 return (error);
6236 }
6237
6238 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6239 if (sc->sc_mcast_flows[i][0] == 0) {
6240 memcpy(sc->sc_mcast_flows[i], addrlo,
6241 ETHER_ADDR_LEN);
6242 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6243 mcx_set_flow_table_entry(sc,
6244 MCX_FLOW_GROUP_MAC,
6245 sc->sc_mcast_flow_base + i,
6246 sc->sc_mcast_flows[i]);
6247 }
6248 break;
6249 }
6250 }
6251
6252 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6253 if (i == MCX_NUM_MCAST_FLOWS) {
6254 SET(ifp->if_flags, IFF_ALLMULTI);
6255 sc->sc_extra_mcast++;
6256 error = ENETRESET;
6257 }
6258
6259 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
6260 SET(ifp->if_flags, IFF_ALLMULTI);
6261 error = ENETRESET;
6262 }
6263 }
6264 }
6265 break;
6266
6267 case SIOCDELMULTI:
6268 if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6269 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6270 if (error != 0) {
6271 splx(s);
6272 return (error);
6273 }
6274
6275 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6276 if (memcmp(sc->sc_mcast_flows[i], addrlo,
6277 ETHER_ADDR_LEN) == 0) {
6278 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6279 mcx_delete_flow_table_entry(sc,
6280 MCX_FLOW_GROUP_MAC,
6281 sc->sc_mcast_flow_base + i);
6282 }
6283 sc->sc_mcast_flows[i][0] = 0;
6284 break;
6285 }
6286 }
6287
6288 if (i == MCX_NUM_MCAST_FLOWS)
6289 sc->sc_extra_mcast--;
6290
6291 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6292 sc->sc_extra_mcast == 0) {
6293 flags = 0;
6294 ETHER_LOCK(ec);
6295 ETHER_FIRST_MULTI(step, ec, enm);
6296 while (enm != NULL) {
6297 if (memcmp(enm->enm_addrlo,
6298 enm->enm_addrhi, ETHER_ADDR_LEN)) {
6299 SET(flags, IFF_ALLMULTI);
6300 break;
6301 }
6302 ETHER_NEXT_MULTI(step, enm);
6303 }
6304 ETHER_UNLOCK(ec);
6305 if (!ISSET(flags, IFF_ALLMULTI)) {
6306 CLR(ifp->if_flags, IFF_ALLMULTI);
6307 error = ENETRESET;
6308 }
6309 }
6310 }
6311 break;
6312
6313 default:
6314 error = ether_ioctl(ifp, cmd, data);
6315 }
6316
6317 if (error == ENETRESET) {
6318 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6319 (IFF_UP | IFF_RUNNING))
6320 mcx_iff(sc);
6321 error = 0;
6322 }
6323 splx(s);
6324
6325 return (error);
6326 }
6327
6328 #if 0
6329 static int
6330 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
6331 {
6332 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6333 struct mcx_reg_mcia mcia;
6334 struct mcx_reg_pmlp pmlp;
6335 int offset, error;
6336
6337 /* get module number */
6338 memset(&pmlp, 0, sizeof(pmlp));
6339 pmlp.rp_local_port = 1;
6340 error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
6341 sizeof(pmlp));
6342 if (error != 0) {
6343 printf("%s: unable to get eeprom module number\n",
6344 DEVNAME(sc));
6345 return error;
6346 }
6347
6348 for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
6349 memset(&mcia, 0, sizeof(mcia));
6350 mcia.rm_l = 0;
6351 mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
6352 MCX_PMLP_MODULE_NUM_MASK;
6353 mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */
6354 mcia.rm_page_num = sff->sff_page;
6355 mcia.rm_dev_addr = htobe16(offset);
6356 mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
6357
6358 error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
6359 &mcia, sizeof(mcia));
6360 if (error != 0) {
6361 printf("%s: unable to read eeprom at %x\n",
6362 DEVNAME(sc), offset);
6363 return error;
6364 }
6365
6366 memcpy(sff->sff_data + offset, mcia.rm_data,
6367 MCX_MCIA_EEPROM_BYTES);
6368 }
6369
6370 return 0;
6371 }
6372 #endif
6373
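/*
 * Load an outgoing packet for DMA. If the chain has too many segments
 * for the map (EFBIG), compact it with m_defrag() and retry once; any
 * other error, or a failed retry, is left to the caller, which drops
 * the packet.
 */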
6374 static int
6375 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6376 {
6377 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6378 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6379 case 0:
6380 break;
6381
6382 case EFBIG:
6383 if (m_defrag(m, M_DONTWAIT) != NULL &&
6384 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6385 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6386 break;
6387
6388 /* FALLTHROUGH */
6389 default:
6390 return (1);
6391 }
6392
6393 ms->ms_m = m;
6394 return (0);
6395 }
6396
6397 static void
6398 mcx_start(struct ifnet *ifp)
6399 {
6400 struct mcx_softc *sc = ifp->if_softc;
6401 struct mcx_sq_entry *sq, *sqe;
6402 struct mcx_sq_entry_seg *sqs;
6403 struct mcx_slot *ms;
6404 bus_dmamap_t map;
6405 struct mbuf *m;
6406 u_int idx, free, used;
6407 uint64_t *bf;
6408 size_t bf_base;
6409 int i, seg, nseg;
6410
6411 bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
6412
6413 idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
6414 free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;
6415
6416 used = 0;
6417 bf = NULL;
6418 sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);
6419
6420 for (;;) {
6421 if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
6422 SET(ifp->if_flags, IFF_OACTIVE);
6423 break;
6424 }
6425
6426 IFQ_DEQUEUE(&ifp->if_snd, m);
6427 if (m == NULL) {
6428 break;
6429 }
6430
6431 sqe = sq + idx;
6432 ms = &sc->sc_tx_slots[idx];
6433 memset(sqe, 0, sizeof(*sqe));
6434
6435 /* ctrl segment */
6436 sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
6437 ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
6438 /* always generate a completion event */
6439 sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
6440
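		/*
		 * The first MCX_SQ_INLINE_SIZE (18) bytes of the packet,
		 * enough for an ethernet header plus a VLAN tag, travel
		 * inline in the eth segment; the DMA mapping below then
		 * covers only the remainder of the chain.
		 */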
6441 /* eth segment */
6442 sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
6443 m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
6444 m_adj(m, MCX_SQ_INLINE_SIZE);
6445
6446 if (mcx_load_mbuf(sc, ms, m) != 0) {
6447 m_freem(m);
6448 if_statinc(ifp, if_oerrors);
6449 continue;
6450 }
6451 bf = (uint64_t *)sqe;
6452
6453 if (ifp->if_bpf != NULL)
6454 bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
6455 MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
6456
6457 map = ms->ms_map;
6458 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6459 BUS_DMASYNC_PREWRITE);
6460
6461 sqe->sqe_ds_sq_num =
6462 htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
6463 (map->dm_nsegs + 3));
6464
6465 /* data segment - first wqe has one segment */
6466 sqs = sqe->sqe_segs;
6467 seg = 0;
6468 nseg = 1;
6469 for (i = 0; i < map->dm_nsegs; i++) {
6470 if (seg == nseg) {
6471 /* next slot */
6472 idx++;
6473 if (idx == (1 << MCX_LOG_SQ_SIZE))
6474 idx = 0;
6475 sc->sc_tx_prod++;
6476 used++;
6477
6478 sqs = (struct mcx_sq_entry_seg *)(sq + idx);
6479 seg = 0;
6480 nseg = MCX_SQ_SEGS_PER_SLOT;
6481 }
6482 sqs[seg].sqs_byte_count =
6483 htobe32(map->dm_segs[i].ds_len);
6484 sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
6485 sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
6486 seg++;
6487 }
6488
6489 idx++;
6490 if (idx == (1 << MCX_LOG_SQ_SIZE))
6491 idx = 0;
6492 sc->sc_tx_prod++;
6493 used++;
6494 }
6495
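	/*
	 * Ring the doorbell once for the whole batch: the producer
	 * counter must be visible in the doorbell record before the
	 * blue flame write, hence the membar_sync() in between. Writes
	 * alternate between the two blue flame buffers (sc_bf_offset is
	 * toggled by sc_bf_size) so a new write never lands in a buffer
	 * the device may still be reading.
	 */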
6496 if (used) {
6497 *sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);
6498
6499 membar_sync();
6500
6501 /*
6502 * write the first 64 bits of the last sqe we produced
6503 * to the blue flame buffer
6504 */
6505 bus_space_write_8(sc->sc_memt, sc->sc_memh,
6506 bf_base + sc->sc_bf_offset, *bf);
6507 /* next write goes to the other buffer */
6508 sc->sc_bf_offset ^= sc->sc_bf_size;
6509
6510 membar_sync();
6511 }
6512 }
6513
6514 static void
6515 mcx_watchdog(struct ifnet *ifp)
6516 {
6517 }
6518
6519 static void
6520 mcx_media_add_types(struct mcx_softc *sc)
6521 {
6522 struct mcx_reg_ptys ptys;
6523 int i;
6524 uint32_t proto_cap;
6525
6526 memset(&ptys, 0, sizeof(ptys));
6527 ptys.rp_local_port = 1;
6528 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6529 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6530 sizeof(ptys)) != 0) {
6531 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6532 return;
6533 }
6534
6535 proto_cap = be32toh(ptys.rp_eth_proto_cap);
6536 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6537 if ((proto_cap & (1U << i)) && (mcx_eth_cap_map[i] != 0))
6538 ifmedia_add(&sc->sc_media, IFM_ETHER |
6539 mcx_eth_cap_map[i], 0, NULL);
6540 }
6541 }
6542
6543 static void
6544 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6545 {
6546 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6547 struct mcx_reg_ptys ptys;
6548 int i;
6549 uint32_t /* proto_cap, */ proto_oper;
6550 uint64_t media_oper;
6551
6552 memset(&ptys, 0, sizeof(ptys));
6553 ptys.rp_local_port = 1;
6554 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6555
6556 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6557 sizeof(ptys)) != 0) {
6558 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6559 return;
6560 }
6561
6562 /* proto_cap = be32toh(ptys.rp_eth_proto_cap); */
6563 proto_oper = be32toh(ptys.rp_eth_proto_oper);
6564
6565 media_oper = 0;
6566 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6567 if (proto_oper & (1U << i)) {
6568 media_oper = mcx_eth_cap_map[i];
6569 }
6570 }
6571
6572 ifmr->ifm_status = IFM_AVALID;
6573 /* not sure if this is the right thing to check, maybe paos? */
6574 if (proto_oper != 0) {
6575 ifmr->ifm_status |= IFM_ACTIVE;
6576 ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6577 /* txpause, rxpause, duplex? */
6578 }
6579 }
6580
6581 static int
6582 mcx_media_change(struct ifnet *ifp)
6583 {
6584 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6585 struct mcx_reg_ptys ptys;
6586 struct mcx_reg_paos paos;
6587 uint32_t media;
6588 int i, error;
6589
6590 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6591 return EINVAL;
6592
6593 error = 0;
6594
6595 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6596 /* read ptys to get supported media */
6597 memset(&ptys, 0, sizeof(ptys));
6598 ptys.rp_local_port = 1;
6599 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6600 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6601 &ptys, sizeof(ptys)) != 0) {
6602 printf("%s: unable to read port type/speed\n",
6603 DEVNAME(sc));
6604 return EIO;
6605 }
6606
6607 media = be32toh(ptys.rp_eth_proto_cap);
6608 } else {
6609 /* map media type */
6610 media = 0;
6611 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6612 if (mcx_eth_cap_map[i] ==
6613 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6614 				media = (1U << i);
6615 break;
6616 }
6617 }
6618 }
6619
6620 /* disable the port */
6621 memset(&paos, 0, sizeof(paos));
6622 paos.rp_local_port = 1;
6623 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6624 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6625 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6626 sizeof(paos)) != 0) {
6627 printf("%s: unable to set port state to down\n", DEVNAME(sc));
6628 return EIO;
6629 }
6630
6631 memset(&ptys, 0, sizeof(ptys));
6632 ptys.rp_local_port = 1;
6633 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6634 ptys.rp_eth_proto_admin = htobe32(media);
6635 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6636 sizeof(ptys)) != 0) {
6637 printf("%s: unable to set port media type/speed\n",
6638 DEVNAME(sc));
6639 error = EIO;
6640 }
6641
6642 /* re-enable the port to start negotiation */
6643 memset(&paos, 0, sizeof(paos));
6644 paos.rp_local_port = 1;
6645 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6646 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6647 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6648 sizeof(paos)) != 0) {
6649 printf("%s: unable to set port state to up\n", DEVNAME(sc));
6650 error = EIO;
6651 }
6652
6653 return error;
6654 }
6655
6656 static void
6657 mcx_port_change(struct work *wk, void *xsc)
6658 {
6659 struct mcx_softc *sc = xsc;
6660 struct ifnet *ifp = &sc->sc_ec.ec_if;
6661 struct mcx_reg_paos paos;
6662 int link_state = LINK_STATE_DOWN;
6663 	struct ifmediareq ifmr = { .ifm_active = 0 }; /* may be left unset below */
6664
6665 memset(&paos, 0, sizeof(paos));
6666 paos.rp_local_port = 1;
6667 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6668 sizeof(paos)) == 0) {
6669 if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6670 link_state = LINK_STATE_UP;
6671 mcx_media_status(ifp, &ifmr);
6672 ifp->if_baudrate = ifmedia_baudrate(ifmr.ifm_active);
6673 }
6674
6675 if (link_state != ifp->if_link_state) {
6676 if_link_state_change(ifp, link_state);
6677 }
6678 }
6679
6680
6681 static inline uint32_t
6682 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6683 {
6684 uint32_t word;
6685
6686 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6687
6688 return (be32toh(word));
6689 }
6690
6691 static inline void
6692 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6693 {
6694 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6695 }
6696
6697 static inline void
6698 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
6699 {
6700 bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
6701 }
6702
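/*
 * Read the 64-bit free-running internal timer. The two halves cannot be
 * read atomically, so read high, then low, then high again, and retry
 * until the high word is stable; that guarantees the low word came from
 * the same high-word period.
 */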
6703 static uint64_t
6704 mcx_timer(struct mcx_softc *sc)
6705 {
6706 uint32_t hi, lo, ni;
6707
6708 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6709 for (;;) {
6710 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6711 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6712 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6713
6714 if (ni == hi)
6715 break;
6716
6717 hi = ni;
6718 }
6719
6720 return (((uint64_t)hi << 32) | (uint64_t)lo);
6721 }
6722
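/*
 * Single-segment DMA memory, used for queues, doorbells and the command
 * interface: the usual create/alloc/map/load sequence, unwound in
 * reverse by the gotos on failure, and zeroed before first use.
 */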
6723 static int
6724 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6725 bus_size_t size, u_int align)
6726 {
6727 mxm->mxm_size = size;
6728
6729 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6730 mxm->mxm_size, 0,
6731 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6732 &mxm->mxm_map) != 0)
6733 return (1);
6734 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6735 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6736 BUS_DMA_WAITOK) != 0)
6737 goto destroy;
6738 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6739 mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6740 goto free;
6741 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6742 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6743 goto unmap;
6744
6745 mcx_dmamem_zero(mxm);
6746
6747 return (0);
6748 unmap:
6749 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6750 free:
6751 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6752 destroy:
6753 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6754 return (1);
6755 }
6756
6757 static void
6758 mcx_dmamem_zero(struct mcx_dmamem *mxm)
6759 {
6760 memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
6761 }
6762
6763 static void
6764 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
6765 {
6766 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
6767 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6768 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6769 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6770 }
6771
6772 static int
6773 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6774 {
6775 bus_dma_segment_t *segs;
6776 bus_size_t len = pages * MCX_PAGE_SIZE;
6777 size_t seglen;
6778
6779 segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
6780 seglen = sizeof(*segs) * pages;
6781
6782 if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6783 segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6784 goto free_segs;
6785
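	/*
	 * bus_dmamem_alloc() may satisfy the request with fewer, larger
	 * contiguous segments than the per-page worst case allocated
	 * above; if so, shrink the segment array to the actual count so
	 * mcx_hwmem_free() can size the kmem_free() by mhm_seg_count.
	 */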
6786 if (mhm->mhm_seg_count < pages) {
6787 size_t nseglen;
6788
6789 mhm->mhm_segs = kmem_alloc(
6790 sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
6791
6792 nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6793
6794 memcpy(mhm->mhm_segs, segs, nseglen);
6795
6796 kmem_free(segs, seglen);
6797
6798 segs = mhm->mhm_segs;
6799 seglen = nseglen;
6800 } else
6801 mhm->mhm_segs = segs;
6802
6803 if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6804 MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6805 &mhm->mhm_map) != 0)
6806 goto free_dmamem;
6807
6808 if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6809 mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6810 goto destroy;
6811
6812 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6813 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6814
6815 mhm->mhm_npages = pages;
6816
6817 return (0);
6818
6819 destroy:
6820 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6821 free_dmamem:
6822 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6823 free_segs:
6824 kmem_free(segs, seglen);
6825 mhm->mhm_segs = NULL;
6826
6827 return (-1);
6828 }
6829
6830 static void
6831 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
6832 {
6833 if (mhm->mhm_npages == 0)
6834 return;
6835
6836 bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6837 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
6838
6839 bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
6840 bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6841 bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6842 kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
6843
6844 mhm->mhm_npages = 0;
6845 }
6846