/*	$NetBSD: if_mcx.c,v 1.11 2020/02/29 18:07:57 thorpej Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.33 2019/09/12 04:23:59 jmatthew Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
20
21 #ifdef _KERNEL_OPT
22 #include "opt_net_mpsafe.h"
23 #endif
24
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/socket.h>
31 #include <sys/device.h>
32 #include <sys/pool.h>
33 #include <sys/queue.h>
34 #include <sys/callout.h>
35 #include <sys/workqueue.h>
36 #include <sys/atomic.h>
37 #include <sys/kmem.h>
38 #include <sys/bus.h>
39
40 #include <machine/intr.h>
41
42 #include <net/if.h>
43 #include <net/if_dl.h>
44 #include <net/if_ether.h>
45 #include <net/if_media.h>
46
47 #include <net/bpf.h>
48
49 #include <netinet/in.h>
50
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pcidevs.h>
54
#ifdef NET_MPSAFE
#define MCX_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define MCX_MAX_NINTR	1

/* shorthand for bus_dmamap_sync() in both directions at once */
#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

/* firmware version words at the start of BAR 0 */
#define MCX_FW_VER			0x0000
#define MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v)		((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define MCX_CMDIF(_v)			((_v) >> 16)

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

#define MCX_MAX_CQS			2 /* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE			6 /* one page */
#define MCX_LOG_CQ_SIZE			11
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT \
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
/* parenthesized so the expansion is safe inside larger expressions */
#define MCX_SQ_MAX_SEGMENTS \
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))

#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE		18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET		0
#define MCX_CQ_DOORBELL_SIZE		16
#define MCX_RQ_DOORBELL_OFFSET		64
#define MCX_SQ_DOORBELL_OFFSET		64

#define MCX_WQ_DOORBELL_MASK		0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

/* command queue configuration registers */
#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

#define MCX_STATE			0x01fc
#define MCX_STATE_MASK			(1U << 31)
/* 1U: left-shifting a signed 1 into the sign bit is undefined behaviour */
#define MCX_STATE_INITIALIZING		(1U << 31)
#define MCX_STATE_READY			(0 << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

/* ACCESS_REG operation modifiers */
#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

/* port register identifiers for ACCESS_REG */
#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MCIA			0x9014

/* PTYS ethernet protocol capability bits */
#define MCX_ETHER_CAP_SGMII		(1 << 0)
#define MCX_ETHER_CAP_1000_KX		(1 << 1)
#define MCX_ETHER_CAP_10G_CX4		(1 << 2)
#define MCX_ETHER_CAP_10G_KX4		(1 << 3)
#define MCX_ETHER_CAP_10G_KR		(1 << 4)
#define MCX_ETHER_CAP_20G_KR2		(1 << 5)
#define MCX_ETHER_CAP_40G_CR4		(1 << 6)
#define MCX_ETHER_CAP_40G_KR4		(1 << 7)
#define MCX_ETHER_CAP_56G_R4		(1 << 8)
#define MCX_ETHER_CAP_10G_CR		(1 << 12)
#define MCX_ETHER_CAP_10G_SR		(1 << 13)
#define MCX_ETHER_CAP_10G_LR		(1 << 14)
#define MCX_ETHER_CAP_40G_SR4		(1 << 15)
#define MCX_ETHER_CAP_40G_LR4		(1 << 16)
#define MCX_ETHER_CAP_50G_SR2		(1 << 18)
#define MCX_ETHER_CAP_100G_CR4		(1 << 20)
#define MCX_ETHER_CAP_100G_SR4		(1 << 21)
#define MCX_ETHER_CAP_100G_KR4		(1 << 22)
#define MCX_ETHER_CAP_100G_LR4		(1 << 23)
#define MCX_ETHER_CAP_100_TX		(1 << 24)
#define MCX_ETHER_CAP_1000_T		(1 << 25)
#define MCX_ETHER_CAP_10G_T		(1 << 26)
#define MCX_ETHER_CAP_25G_CR		(1 << 27)
#define MCX_ETHER_CAP_25G_KR		(1 << 28)
#define MCX_ETHER_CAP_25G_SR		(1 << 29)
#define MCX_ETHER_CAP_50G_CR2		(1 << 30)
/* 1U: left-shifting a signed 1 into the sign bit is undefined behaviour */
#define MCX_ETHER_CAP_50G_KR2		(1U << 31)

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE			32

/* command queue opcodes */
#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

/* queue (rq/sq) states */
#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

/* bytes of command input/output carried inline in a cmdq entry */
#define MCX_CMDQ_INLINE_DATASIZE	16
264
265 struct mcx_cmdq_entry {
266 uint8_t cq_type;
267 #define MCX_CMDQ_TYPE_PCIE 0x7
268 uint8_t cq_reserved0[3];
269
270 uint32_t cq_input_length;
271 uint64_t cq_input_ptr;
272 uint8_t cq_input_data[MCX_CMDQ_INLINE_DATASIZE];
273
274 uint8_t cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
275 uint64_t cq_output_ptr;
276 uint32_t cq_output_length;
277
278 uint8_t cq_token;
279 uint8_t cq_signature;
280 uint8_t cq_reserved1[1];
281 uint8_t cq_status;
282 #define MCX_CQ_STATUS_SHIFT 1
283 #define MCX_CQ_STATUS_MASK (0x7f << MCX_CQ_STATUS_SHIFT)
284 #define MCX_CQ_STATUS_OK (0x00 << MCX_CQ_STATUS_SHIFT)
285 #define MCX_CQ_STATUS_INT_ERR (0x01 << MCX_CQ_STATUS_SHIFT)
286 #define MCX_CQ_STATUS_BAD_OPCODE (0x02 << MCX_CQ_STATUS_SHIFT)
287 #define MCX_CQ_STATUS_BAD_PARAM (0x03 << MCX_CQ_STATUS_SHIFT)
288 #define MCX_CQ_STATUS_BAD_SYS_STATE (0x04 << MCX_CQ_STATUS_SHIFT)
289 #define MCX_CQ_STATUS_BAD_RESOURCE (0x05 << MCX_CQ_STATUS_SHIFT)
290 #define MCX_CQ_STATUS_RESOURCE_BUSY (0x06 << MCX_CQ_STATUS_SHIFT)
291 #define MCX_CQ_STATUS_EXCEED_LIM (0x08 << MCX_CQ_STATUS_SHIFT)
292 #define MCX_CQ_STATUS_BAD_RES_STATE (0x09 << MCX_CQ_STATUS_SHIFT)
293 #define MCX_CQ_STATUS_BAD_INDEX (0x0a << MCX_CQ_STATUS_SHIFT)
294 #define MCX_CQ_STATUS_NO_RESOURCES (0x0f << MCX_CQ_STATUS_SHIFT)
295 #define MCX_CQ_STATUS_BAD_INPUT_LEN (0x50 << MCX_CQ_STATUS_SHIFT)
296 #define MCX_CQ_STATUS_BAD_OUTPUT_LEN (0x51 << MCX_CQ_STATUS_SHIFT)
297 #define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
298 (0x10 << MCX_CQ_STATUS_SHIFT)
299 #define MCX_CQ_STATUS_BAD_SIZE (0x40 << MCX_CQ_STATUS_SHIFT)
300 #define MCX_CQ_STATUS_OWN_MASK 0x1
301 #define MCX_CQ_STATUS_OWN_SW 0x0
302 #define MCX_CQ_STATUS_OWN_HW 0x1
303 } __packed __aligned(8);
304
305 #define MCX_CMDQ_MAILBOX_DATASIZE 512
306
307 struct mcx_cmdq_mailbox {
308 uint8_t mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
309 uint8_t mb_reserved0[48];
310 uint64_t mb_next_ptr;
311 uint32_t mb_block_number;
312 uint8_t mb_reserved1[1];
313 uint8_t mb_token;
314 uint8_t mb_ctrl_signature;
315 uint8_t mb_signature;
316 } __packed __aligned(8);
317
318 #define MCX_CMDQ_MAILBOX_ALIGN (1 << 10)
319 #define MCX_CMDQ_MAILBOX_SIZE roundup(sizeof(struct mcx_cmdq_mailbox), \
320 MCX_CMDQ_MAILBOX_ALIGN)
/*
 * command mailbox structures
 */
324
325 struct mcx_cmd_enable_hca_in {
326 uint16_t cmd_opcode;
327 uint8_t cmd_reserved0[4];
328 uint16_t cmd_op_mod;
329 uint8_t cmd_reserved1[2];
330 uint16_t cmd_function_id;
331 uint8_t cmd_reserved2[4];
332 } __packed __aligned(4);
333
334 struct mcx_cmd_enable_hca_out {
335 uint8_t cmd_status;
336 uint8_t cmd_reserved0[3];
337 uint32_t cmd_syndrome;
338 uint8_t cmd_reserved1[4];
339 } __packed __aligned(4);
340
341 struct mcx_cmd_init_hca_in {
342 uint16_t cmd_opcode;
343 uint8_t cmd_reserved0[4];
344 uint16_t cmd_op_mod;
345 uint8_t cmd_reserved1[8];
346 } __packed __aligned(4);
347
348 struct mcx_cmd_init_hca_out {
349 uint8_t cmd_status;
350 uint8_t cmd_reserved0[3];
351 uint32_t cmd_syndrome;
352 uint8_t cmd_reserved1[8];
353 } __packed __aligned(4);
354
355 struct mcx_cmd_teardown_hca_in {
356 uint16_t cmd_opcode;
357 uint8_t cmd_reserved0[4];
358 uint16_t cmd_op_mod;
359 uint8_t cmd_reserved1[2];
360 #define MCX_CMD_TEARDOWN_HCA_GRACEFUL 0x0
361 #define MCX_CMD_TEARDOWN_HCA_PANIC 0x1
362 uint16_t cmd_profile;
363 uint8_t cmd_reserved2[4];
364 } __packed __aligned(4);
365
366 struct mcx_cmd_teardown_hca_out {
367 uint8_t cmd_status;
368 uint8_t cmd_reserved0[3];
369 uint32_t cmd_syndrome;
370 uint8_t cmd_reserved1[8];
371 } __packed __aligned(4);
372
373 struct mcx_cmd_access_reg_in {
374 uint16_t cmd_opcode;
375 uint8_t cmd_reserved0[4];
376 uint16_t cmd_op_mod;
377 uint8_t cmd_reserved1[2];
378 uint16_t cmd_register_id;
379 uint32_t cmd_argument;
380 } __packed __aligned(4);
381
382 struct mcx_cmd_access_reg_out {
383 uint8_t cmd_status;
384 uint8_t cmd_reserved0[3];
385 uint32_t cmd_syndrome;
386 uint8_t cmd_reserved1[8];
387 } __packed __aligned(4);
388
389 struct mcx_reg_pmtu {
390 uint8_t rp_reserved1;
391 uint8_t rp_local_port;
392 uint8_t rp_reserved2[2];
393 uint16_t rp_max_mtu;
394 uint8_t rp_reserved3[2];
395 uint16_t rp_admin_mtu;
396 uint8_t rp_reserved4[2];
397 uint16_t rp_oper_mtu;
398 uint8_t rp_reserved5[2];
399 } __packed __aligned(4);
400
401 struct mcx_reg_ptys {
402 uint8_t rp_reserved1;
403 uint8_t rp_local_port;
404 uint8_t rp_reserved2;
405 uint8_t rp_proto_mask;
406 #define MCX_REG_PTYS_PROTO_MASK_ETH (1 << 2)
407 uint8_t rp_reserved3[8];
408 uint32_t rp_eth_proto_cap;
409 uint8_t rp_reserved4[8];
410 uint32_t rp_eth_proto_admin;
411 uint8_t rp_reserved5[8];
412 uint32_t rp_eth_proto_oper;
413 uint8_t rp_reserved6[24];
414 } __packed __aligned(4);
415
416 struct mcx_reg_paos {
417 uint8_t rp_reserved1;
418 uint8_t rp_local_port;
419 uint8_t rp_admin_status;
420 #define MCX_REG_PAOS_ADMIN_STATUS_UP 1
421 #define MCX_REG_PAOS_ADMIN_STATUS_DOWN 2
422 #define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE 3
423 #define MCX_REG_PAOS_ADMIN_STATUS_DISABLED 4
424 uint8_t rp_oper_status;
425 #define MCX_REG_PAOS_OPER_STATUS_UP 1
426 #define MCX_REG_PAOS_OPER_STATUS_DOWN 2
427 #define MCX_REG_PAOS_OPER_STATUS_FAILED 4
428 uint8_t rp_admin_state_update;
429 #define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN (1 << 7)
430 uint8_t rp_reserved2[11];
431 } __packed __aligned(4);
432
433 struct mcx_reg_pfcc {
434 uint8_t rp_reserved1;
435 uint8_t rp_local_port;
436 uint8_t rp_reserved2[3];
437 uint8_t rp_prio_mask_tx;
438 uint8_t rp_reserved3;
439 uint8_t rp_prio_mask_rx;
440 uint8_t rp_pptx_aptx;
441 uint8_t rp_pfctx;
442 uint8_t rp_fctx_dis;
443 uint8_t rp_reserved4;
444 uint8_t rp_pprx_aprx;
445 uint8_t rp_pfcrx;
446 uint8_t rp_reserved5[2];
447 uint16_t rp_dev_stall_min;
448 uint16_t rp_dev_stall_crit;
449 uint8_t rp_reserved6[12];
450 } __packed __aligned(4);
451
452 #define MCX_PMLP_MODULE_NUM_MASK 0xff
453 struct mcx_reg_pmlp {
454 uint8_t rp_rxtx;
455 uint8_t rp_local_port;
456 uint8_t rp_reserved0;
457 uint8_t rp_width;
458 uint32_t rp_lane0_mapping;
459 uint32_t rp_lane1_mapping;
460 uint32_t rp_lane2_mapping;
461 uint32_t rp_lane3_mapping;
462 uint8_t rp_reserved1[44];
463 } __packed __aligned(4);
464
465 #define MCX_MCIA_EEPROM_BYTES 32
466 struct mcx_reg_mcia {
467 uint8_t rm_l;
468 uint8_t rm_module;
469 uint8_t rm_reserved0;
470 uint8_t rm_status;
471 uint8_t rm_i2c_addr;
472 uint8_t rm_page_num;
473 uint16_t rm_dev_addr;
474 uint16_t rm_reserved1;
475 uint16_t rm_size;
476 uint32_t rm_reserved2;
477 uint8_t rm_data[48];
478 } __packed __aligned(4);
479
480 struct mcx_cmd_query_issi_in {
481 uint16_t cmd_opcode;
482 uint8_t cmd_reserved0[4];
483 uint16_t cmd_op_mod;
484 uint8_t cmd_reserved1[8];
485 } __packed __aligned(4);
486
487 struct mcx_cmd_query_issi_il_out {
488 uint8_t cmd_status;
489 uint8_t cmd_reserved0[3];
490 uint32_t cmd_syndrome;
491 uint8_t cmd_reserved1[2];
492 uint16_t cmd_current_issi;
493 uint8_t cmd_reserved2[4];
494 } __packed __aligned(4);
495
496 CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);
497
498 struct mcx_cmd_query_issi_mb_out {
499 uint8_t cmd_reserved2[16];
500 uint8_t cmd_supported_issi[80]; /* very big endian */
501 } __packed __aligned(4);
502
503 CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
504
505 struct mcx_cmd_set_issi_in {
506 uint16_t cmd_opcode;
507 uint8_t cmd_reserved0[4];
508 uint16_t cmd_op_mod;
509 uint8_t cmd_reserved1[2];
510 uint16_t cmd_current_issi;
511 uint8_t cmd_reserved2[4];
512 } __packed __aligned(4);
513
514 CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);
515
516 struct mcx_cmd_set_issi_out {
517 uint8_t cmd_status;
518 uint8_t cmd_reserved0[3];
519 uint32_t cmd_syndrome;
520 uint8_t cmd_reserved1[8];
521 } __packed __aligned(4);
522
523 CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);
524
525 struct mcx_cmd_query_pages_in {
526 uint16_t cmd_opcode;
527 uint8_t cmd_reserved0[4];
528 uint16_t cmd_op_mod;
529 #define MCX_CMD_QUERY_PAGES_BOOT 0x01
530 #define MCX_CMD_QUERY_PAGES_INIT 0x02
531 #define MCX_CMD_QUERY_PAGES_REGULAR 0x03
532 uint8_t cmd_reserved1[8];
533 } __packed __aligned(4);
534
535 struct mcx_cmd_query_pages_out {
536 uint8_t cmd_status;
537 uint8_t cmd_reserved0[3];
538 uint32_t cmd_syndrome;
539 uint8_t cmd_reserved1[2];
540 uint16_t cmd_func_id;
541 uint32_t cmd_num_pages;
542 } __packed __aligned(4);
543
544 struct mcx_cmd_manage_pages_in {
545 uint16_t cmd_opcode;
546 uint8_t cmd_reserved0[4];
547 uint16_t cmd_op_mod;
548 #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
549 0x00
550 #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
551 0x01
552 #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
553 0x02
554 uint8_t cmd_reserved1[2];
555 uint16_t cmd_func_id;
556 uint32_t cmd_input_num_entries;
557 } __packed __aligned(4);
558
559 CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);
560
561 struct mcx_cmd_manage_pages_out {
562 uint8_t cmd_status;
563 uint8_t cmd_reserved0[3];
564 uint32_t cmd_syndrome;
565 uint32_t cmd_output_num_entries;
566 uint8_t cmd_reserved1[4];
567 } __packed __aligned(4);
568
569 CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
570
571 struct mcx_cmd_query_hca_cap_in {
572 uint16_t cmd_opcode;
573 uint8_t cmd_reserved0[4];
574 uint16_t cmd_op_mod;
575 #define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0)
576 #define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0)
577 #define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1)
578 #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1)
579 #define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1)
580 uint8_t cmd_reserved1[8];
581 } __packed __aligned(4);
582
583 struct mcx_cmd_query_hca_cap_out {
584 uint8_t cmd_status;
585 uint8_t cmd_reserved0[3];
586 uint32_t cmd_syndrome;
587 uint8_t cmd_reserved1[8];
588 } __packed __aligned(4);
589
590 #define MCX_HCA_CAP_LEN 0x1000
591 #define MCX_HCA_CAP_NMAILBOXES \
592 (MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
593
594 #if __GNUC_PREREQ__(4, 3)
595 #define __counter__ __COUNTER__
596 #else
597 #define __counter__ __LINE__
598 #endif
599
600 #define __token(_tok, _num) _tok##_num
601 #define _token(_tok, _num) __token(_tok, _num)
602 #define __reserved__ _token(__reserved, __counter__)
603
604 struct mcx_cap_device {
605 uint8_t reserved0[16];
606
607 uint8_t log_max_srq_sz;
608 uint8_t log_max_qp_sz;
609 uint8_t __reserved__[1];
610 uint8_t log_max_qp; /* 5 bits */
611 #define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f
612
613 uint8_t __reserved__[1];
614 uint8_t log_max_srq; /* 5 bits */
615 #define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f
616 uint8_t __reserved__[2];
617
618 uint8_t __reserved__[1];
619 uint8_t log_max_cq_sz;
620 uint8_t __reserved__[1];
621 uint8_t log_max_cq; /* 5 bits */
622 #define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f
623
624 uint8_t log_max_eq_sz;
625 uint8_t log_max_mkey; /* 6 bits */
626 #define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f
627 uint8_t __reserved__[1];
628 uint8_t log_max_eq; /* 4 bits */
629 #define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f
630
631 uint8_t max_indirection;
632 uint8_t log_max_mrw_sz; /* 7 bits */
633 #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f
634 uint8_t teardown_log_max_msf_list_size;
635 #define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80
636 #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
637 0x3f
638 uint8_t log_max_klm_list_size; /* 6 bits */
639 #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
640 0x3f
641
642 uint8_t __reserved__[1];
643 uint8_t log_max_ra_req_dc; /* 6 bits */
644 #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f
645 uint8_t __reserved__[1];
646 uint8_t log_max_ra_res_dc; /* 6 bits */
647 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
648 0x3f
649
650 uint8_t __reserved__[1];
651 uint8_t log_max_ra_req_qp; /* 6 bits */
652 #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
653 0x3f
654 uint8_t __reserved__[1];
655 uint8_t log_max_ra_res_qp; /* 6 bits */
656 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
657 0x3f
658
659 uint8_t flags1;
660 #define MCX_CAP_DEVICE_END_PAD 0x80
661 #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40
662 #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
663 0x20
664 #define MCX_CAP_DEVICE_START_PAD 0x10
665 #define MCX_CAP_DEVICE_128BYTE_CACHELINE \
666 0x08
667 uint8_t __reserved__[1];
668 uint16_t gid_table_size;
669
670 uint16_t flags2;
671 #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000
672 #define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000
673 #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
674 0x2000
675 #define MCX_CAP_DEVICE_DEBUG 0x1000
676 #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
677 0x8000
678 #define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000
679 #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK 0x03ff
680 uint16_t pkey_table_size;
681
682 uint8_t flags3;
683 #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
684 0x80
685 #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
686 0x40
687 #define MCX_CAP_DEVICE_IB_VIRTUAL 0x20
688 #define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10
689 #define MCX_CAP_DEVICE_ETS 0x04
690 #define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02
691 #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
692 0x01
693 uint8_t local_ca_ack_delay; /* 5 bits */
694 #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
695 0x1f
696 uint8_t port_type;
697 #define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
698 0x80
699 #define MCX_CAP_DEVICE_PORT_TYPE 0x03
700 uint8_t num_ports;
701
702 uint8_t snapshot_log_max_msg;
703 #define MCX_CAP_DEVICE_SNAPSHOT 0x80
704 #define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f
705 uint8_t max_tc; /* 4 bits */
706 #define MCX_CAP_DEVICE_MAX_TC 0x0f
707 uint8_t flags4;
708 #define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80
709 #define MCX_CAP_DEVICE_DCBX 0x40
710 #define MCX_CAP_DEVICE_ROL_S 0x02
711 #define MCX_CAP_DEVICE_ROL_G 0x01
712 uint8_t wol;
713 #define MCX_CAP_DEVICE_WOL_S 0x40
714 #define MCX_CAP_DEVICE_WOL_G 0x20
715 #define MCX_CAP_DEVICE_WOL_A 0x10
716 #define MCX_CAP_DEVICE_WOL_B 0x08
717 #define MCX_CAP_DEVICE_WOL_M 0x04
718 #define MCX_CAP_DEVICE_WOL_U 0x02
719 #define MCX_CAP_DEVICE_WOL_P 0x01
720
721 uint16_t stat_rate_support;
722 uint8_t __reserved__[1];
723 uint8_t cqe_version; /* 4 bits */
724 #define MCX_CAP_DEVICE_CQE_VERSION 0x0f
725
726 uint32_t flags5;
727 #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
728 0x80000000
729 #define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000
730 #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
731 0x10000000
732 #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
733 0x08000000
734 #define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000
735 #define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000
736 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
737 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
738 #define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000
739 #define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000
740 #define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800
741 #define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400
742 #define MCX_CAP_DEVICE_SHO 0x00000100
743 #define MCX_CAP_DEVICE_TPH 0x00000080
744 #define MCX_CAP_DEVICE_RF 0x00000040
745 #define MCX_CAP_DEVICE_DCT 0x00000020
746 #define MCX_CAP_DEVICE_QOS 0x00000010
747 #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS 0x00000008
748 #define MCX_CAP_DEVICE_ROCE 0x00000004
749 #define MCX_CAP_DEVICE_ATOMIC 0x00000002
750
751 uint32_t flags6;
752 #define MCX_CAP_DEVICE_CQ_OI 0x80000000
753 #define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000
754 #define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000
755 #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
756 0x10000000
757 #define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000
758 #define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000
759 #define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000
760 #define MCX_CAP_DEVICE_PG 0x01000000
761 #define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000
762 #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
763 0x00400000
764 #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
765 0x00200000
766 #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
767 0x00100000
768 #define MCX_CAP_DEVICE_CD 0x00080000
769 #define MCX_CAP_DEVICE_ATM 0x00040000
770 #define MCX_CAP_DEVICE_APM 0x00020000
771 #define MCX_CAP_DEVICE_IMAICL 0x00010000
772 #define MCX_CAP_DEVICE_QKV 0x00000200
773 #define MCX_CAP_DEVICE_PKV 0x00000100
774 #define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080
775 #define MCX_CAP_DEVICE_XRC 0x00000008
776 #define MCX_CAP_DEVICE_UD 0x00000004
777 #define MCX_CAP_DEVICE_UC 0x00000002
778 #define MCX_CAP_DEVICE_RC 0x00000001
779
780 uint8_t uar_flags;
781 #define MCX_CAP_DEVICE_UAR_4K 0x80
782 uint8_t uar_sz; /* 6 bits */
783 #define MCX_CAP_DEVICE_UAR_SZ 0x3f
784 uint8_t __reserved__[1];
785 uint8_t log_pg_sz;
786
787 uint8_t flags7;
788 #define MCX_CAP_DEVICE_BF 0x80
789 #define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
790 #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
791 0x20
792 uint8_t log_bf_reg_size; /* 5 bits */
793 #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
794 uint8_t __reserved__[2];
795
796 uint16_t num_of_diagnostic_counters;
797 uint16_t max_wqe_sz_sq;
798
799 uint8_t __reserved__[2];
800 uint16_t max_wqe_sz_rq;
801
802 uint8_t __reserved__[2];
803 uint16_t max_wqe_sz_sq_dc;
804
805 uint32_t max_qp_mcg; /* 25 bits */
806 #define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff
807
808 uint8_t __reserved__[3];
809 uint8_t log_max_mcq;
810
811 uint8_t log_max_transport_domain; /* 5 bits */
812 #define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
813 0x1f
814 uint8_t log_max_pd; /* 5 bits */
815 #define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
816 uint8_t __reserved__[1];
817 uint8_t log_max_xrcd; /* 5 bits */
818 #define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f
819
820 uint8_t __reserved__[2];
821 uint16_t max_flow_counter;
822
823 uint8_t log_max_rq; /* 5 bits */
824 #define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
825 uint8_t log_max_sq; /* 5 bits */
826 #define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
827 uint8_t log_max_tir; /* 5 bits */
828 #define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
829 uint8_t log_max_tis; /* 5 bits */
830 #define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f
831
832 uint8_t flags8;
833 #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
834 0x80
835 #define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
836 uint8_t log_max_rqt; /* 5 bits */
837 #define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
838 uint8_t log_max_rqt_size; /* 5 bits */
839 #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
840 uint8_t log_max_tis_per_sq; /* 5 bits */
841 #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
842 0x1f
843 } __packed __aligned(8);
844
845 CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
846 CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
847 CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
848 CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
849 CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
850 CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
851 CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
852
853 struct mcx_cmd_set_driver_version_in {
854 uint16_t cmd_opcode;
855 uint8_t cmd_reserved0[4];
856 uint16_t cmd_op_mod;
857 uint8_t cmd_reserved1[8];
858 } __packed __aligned(4);
859
860 struct mcx_cmd_set_driver_version_out {
861 uint8_t cmd_status;
862 uint8_t cmd_reserved0[3];
863 uint32_t cmd_syndrome;
864 uint8_t cmd_reserved1[8];
865 } __packed __aligned(4);
866
867 struct mcx_cmd_set_driver_version {
868 uint8_t cmd_driver_version[64];
869 } __packed __aligned(8);
870
871 struct mcx_cmd_modify_nic_vport_context_in {
872 uint16_t cmd_opcode;
873 uint8_t cmd_reserved0[4];
874 uint16_t cmd_op_mod;
875 uint8_t cmd_reserved1[4];
876 uint32_t cmd_field_select;
877 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR 0x04
878 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC 0x10
879 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU 0x40
880 } __packed __aligned(4);
881
882 struct mcx_cmd_modify_nic_vport_context_out {
883 uint8_t cmd_status;
884 uint8_t cmd_reserved0[3];
885 uint32_t cmd_syndrome;
886 uint8_t cmd_reserved1[8];
887 } __packed __aligned(4);
888
889 struct mcx_cmd_query_nic_vport_context_in {
890 uint16_t cmd_opcode;
891 uint8_t cmd_reserved0[4];
892 uint16_t cmd_op_mod;
893 uint8_t cmd_reserved1[4];
894 uint8_t cmd_allowed_list_type;
895 uint8_t cmd_reserved2[3];
896 } __packed __aligned(4);
897
898 struct mcx_cmd_query_nic_vport_context_out {
899 uint8_t cmd_status;
900 uint8_t cmd_reserved0[3];
901 uint32_t cmd_syndrome;
902 uint8_t cmd_reserved1[8];
903 } __packed __aligned(4);
904
905 struct mcx_nic_vport_ctx {
906 uint32_t vp_min_wqe_inline_mode;
907 uint8_t vp_reserved0[32];
908 uint32_t vp_mtu;
909 uint8_t vp_reserved1[200];
910 uint16_t vp_flags;
911 #define MCX_NIC_VPORT_CTX_LIST_UC_MAC (0)
912 #define MCX_NIC_VPORT_CTX_LIST_MC_MAC (1 << 24)
913 #define MCX_NIC_VPORT_CTX_LIST_VLAN (2 << 24)
914 #define MCX_NIC_VPORT_CTX_PROMISC_ALL (1 << 13)
915 #define MCX_NIC_VPORT_CTX_PROMISC_MCAST (1 << 14)
916 #define MCX_NIC_VPORT_CTX_PROMISC_UCAST (1 << 15)
917 uint16_t vp_allowed_list_size;
918 uint64_t vp_perm_addr;
919 uint8_t vp_reserved2[4];
920 /* allowed list follows */
921 } __packed __aligned(4);
922
923 struct mcx_counter {
924 uint64_t packets;
925 uint64_t octets;
926 } __packed __aligned(4);
927
928 struct mcx_nic_vport_counters {
929 struct mcx_counter rx_err;
930 struct mcx_counter tx_err;
931 uint8_t reserved0[64]; /* 0x30 */
932 struct mcx_counter rx_bcast;
933 struct mcx_counter tx_bcast;
934 struct mcx_counter rx_ucast;
935 struct mcx_counter tx_ucast;
936 struct mcx_counter rx_mcast;
937 struct mcx_counter tx_mcast;
938 uint8_t reserved1[0x210 - 0xd0];
939 } __packed __aligned(4);
940
941 struct mcx_cmd_query_vport_counters_in {
942 uint16_t cmd_opcode;
943 uint8_t cmd_reserved0[4];
944 uint16_t cmd_op_mod;
945 uint8_t cmd_reserved1[8];
946 } __packed __aligned(4);
947
948 struct mcx_cmd_query_vport_counters_mb_in {
949 uint8_t cmd_reserved0[8];
950 uint8_t cmd_clear;
951 uint8_t cmd_reserved1[7];
952 } __packed __aligned(4);
953
954 struct mcx_cmd_query_vport_counters_out {
955 uint8_t cmd_status;
956 uint8_t cmd_reserved0[3];
957 uint32_t cmd_syndrome;
958 uint8_t cmd_reserved1[8];
959 } __packed __aligned(4);
960
961 struct mcx_cmd_query_flow_counter_in {
962 uint16_t cmd_opcode;
963 uint8_t cmd_reserved0[4];
964 uint16_t cmd_op_mod;
965 uint8_t cmd_reserved1[8];
966 } __packed __aligned(4);
967
968 struct mcx_cmd_query_flow_counter_mb_in {
969 uint8_t cmd_reserved0[8];
970 uint8_t cmd_clear;
971 uint8_t cmd_reserved1[5];
972 uint16_t cmd_flow_counter_id;
973 } __packed __aligned(4);
974
975 struct mcx_cmd_query_flow_counter_out {
976 uint8_t cmd_status;
977 uint8_t cmd_reserved0[3];
978 uint32_t cmd_syndrome;
979 uint8_t cmd_reserved1[8];
980 } __packed __aligned(4);
981
982 struct mcx_cmd_alloc_uar_in {
983 uint16_t cmd_opcode;
984 uint8_t cmd_reserved0[4];
985 uint16_t cmd_op_mod;
986 uint8_t cmd_reserved1[8];
987 } __packed __aligned(4);
988
989 struct mcx_cmd_alloc_uar_out {
990 uint8_t cmd_status;
991 uint8_t cmd_reserved0[3];
992 uint32_t cmd_syndrome;
993 uint32_t cmd_uar;
994 uint8_t cmd_reserved1[4];
995 } __packed __aligned(4);
996
997 struct mcx_cmd_query_special_ctx_in {
998 uint16_t cmd_opcode;
999 uint8_t cmd_reserved0[4];
1000 uint16_t cmd_op_mod;
1001 uint8_t cmd_reserved1[8];
1002 } __packed __aligned(4);
1003
1004 struct mcx_cmd_query_special_ctx_out {
1005 uint8_t cmd_status;
1006 uint8_t cmd_reserved0[3];
1007 uint32_t cmd_syndrome;
1008 uint8_t cmd_reserved1[4];
1009 uint32_t cmd_resd_lkey;
1010 } __packed __aligned(4);
1011
/*
 * Event queue context, embedded in the CREATE_EQ mailbox.  Several
 * logical fields are packed into 32-bit words; the *_SHIFT/*_MASK
 * macros below describe the bitfield positions.
 */
struct mcx_eq_ctx {
	uint32_t	eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t	eq_reserved1;
	uint32_t	eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t	eq_uar_size;	/* UAR page (low 24 bits) + log EQ size */
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t	eq_reserved2;
	uint8_t		eq_reserved3[3];
	uint8_t		eq_intr;	/* interrupt vector number */
	uint32_t	eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t	eq_reserved4[3];
	uint32_t	eq_consumer_counter;	/* low 24 bits valid */
	uint32_t	eq_producer_counter;	/* low 24 bits valid */
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t	eq_reserved5[4];
} __packed __aligned(4);

/* the hardware-defined EQ context is exactly 64 bytes */
CTASSERT(sizeof(struct mcx_eq_ctx) == 64);
1046
/* CREATE_EQ command layouts. */
struct mcx_cmd_create_eq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx cmd_eq_ctx;
	uint8_t		cmd_reserved0[8];
	/* bitmask of event types the EQ should receive, by type number */
	uint64_t	cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t		cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_eqn;	/* assigned event queue number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);
1075
/*
 * One 64-byte entry in the event queue ring.  Ownership of an entry
 * alternates between hardware and driver via the eq_owner bit.
 */
struct mcx_eq_entry {
	uint8_t		eq_reserved1;
	uint8_t		eq_event_type;	/* MCX_EVENT_TYPE_* */
	uint8_t		eq_reserved2;
	uint8_t		eq_event_sub_type;

	uint8_t		eq_reserved3[28];
	uint32_t	eq_event_data[7];	/* type-dependent payload */
	uint8_t		eq_reserved4[2];
	uint8_t		eq_signature;
	uint8_t		eq_owner;	/* ownership toggle bit */
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
1091
/* ALLOC_PD: allocate a protection domain. */
struct mcx_cmd_alloc_pd_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_pd;		/* allocated protection domain id */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

/* ALLOC_TRANSPORT_DOMAIN layouts. */
struct mcx_cmd_alloc_td_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_tdomain;	/* allocated transport domain id */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);
1121
/*
 * TIR (Transport Interface Receive) and TIS (Transport Interface Send)
 * create/destroy command layouts.  A TIR steers received packets to an
 * RQ; a TIS is the send-side counterpart bound to an SQ.
 */
struct mcx_cmd_create_tir_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t		cmd_reserved0[20];
	uint32_t	cmd_disp_type;	/* dispatch type in top nibble */
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t		cmd_reserved1[8];
	uint32_t	cmd_lro;
	uint8_t		cmd_reserved2[8];
	uint32_t	cmd_inline_rqn;	/* target RQ for inline dispatch */
	uint32_t	cmd_indir_table;
	uint32_t	cmd_tdomain;
	uint8_t		cmd_rx_hash_key[40];	/* RSS Toeplitz key */
	uint32_t	cmd_rx_hash_sel_outer;
	uint32_t	cmd_rx_hash_sel_inner;
	uint8_t		cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_tirn;	/* assigned TIR number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_tirn;
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t		cmd_reserved[16];
	uint32_t	cmd_prio;
	uint8_t		cmd_reserved1[32];
	uint32_t	cmd_tdomain;
	uint8_t		cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_tisn;	/* assigned TIS number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_tisn;
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1205
/* Completion queue context, embedded in the CREATE_CQ mailbox. */
struct mcx_cq_ctx {
	uint32_t	cq_status;
	uint32_t	cq_reserved1;
	uint32_t	cq_page_offset;
	uint32_t	cq_uar_size;	/* UAR page (low 24 bits) + log CQ size */
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t	cq_period_max_count;	/* moderation period/count */
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t	cq_eqn;		/* EQ that receives this CQ's events */
	uint32_t	cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t	cq_reserved2;
	uint32_t	cq_last_notified;
	uint32_t	cq_last_solicit;
	uint32_t	cq_consumer_counter;
	uint32_t	cq_producer_counter;
	uint8_t		cq_reserved3[8];
	uint64_t	cq_doorbell;	/* DMA address of the CQ doorbell record */
} __packed __aligned(4);

/* the hardware-defined CQ context is exactly 64 bytes */
CTASSERT(sizeof(struct mcx_cq_ctx) == 64);
1228
/* CREATE_CQ / DESTROY_CQ command layouts. */
struct mcx_cmd_create_cq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx cmd_cq_ctx;
	uint8_t		cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_cqn;	/* assigned completion queue number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_cqn;
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1263
/*
 * One 64-byte completion queue entry, shared by rx and tx completions.
 * NOTE(review): the repeated `__reserved__` member name must be a
 * uniquifying macro defined earlier in this file — plain C would reject
 * duplicate member names; verify against the full file.
 */
struct mcx_cq_entry {
	uint32_t	__reserved__;
	uint32_t	cq_lro;
	uint32_t	cq_lro_ack_seq_num;
	uint32_t	cq_rx_hash;	/* RSS hash of the received packet */
	uint8_t		cq_rx_hash_type;
	uint8_t		cq_ml_path;
	uint16_t	__reserved__;
	uint32_t	cq_checksum;
	uint32_t	__reserved__;
	uint32_t	cq_flags;
	uint32_t	cq_lro_srqn;
	uint32_t	__reserved__[2];
	uint32_t	cq_byte_cnt;	/* received byte count (rx) */
	uint64_t	cq_timestamp;	/* chip timestamp, see mcx_calibration */
	uint8_t		cq_rx_drops;
	uint8_t		cq_flow_tag[3];
	uint16_t	cq_wqe_count;
	uint8_t		cq_signature;
	/* low bits: owner/solicited; format and opcode packed above them */
	uint8_t		cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);
1303
/* CQ doorbell record: consumer index updates and arm requests. */
struct mcx_cq_doorbell {
	uint32_t	db_update_ci;	/* consumer index after processing */
	uint32_t	db_arm_ci;	/* arm command + sequence + index */
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);

/* Work queue context, embedded in both the SQ and RQ contexts. */
struct mcx_wq_ctx {
	uint8_t		wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t		wq_reserved0[5];
	uint16_t	wq_lwm;
	uint32_t	wq_pd;		/* protection domain */
	uint32_t	wq_uar_page;
	uint64_t	wq_doorbell;	/* DMA address of the doorbell record */
	uint32_t	wq_hw_counter;
	uint32_t	wq_sw_counter;
	uint16_t	wq_log_stride;	/* log2 of entry stride */
	uint8_t		wq_log_page_sz;
	uint8_t		wq_log_size;	/* log2 of queue size */
	uint8_t		wq_reserved1[156];
} __packed __aligned(4);

/* the hardware-defined WQ context is exactly 192 bytes */
CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
1330
/* Send queue context, carried in CREATE_SQ / MODIFY_SQ mailboxes. */
struct mcx_sq_ctx {
	uint32_t	sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
/*
 * NOTE(review): despite the _SHIFT suffix this is a bit mask, not a
 * shift count (matches the upstream OpenBSD driver) — rename with care.
 */
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
	uint32_t	sq_user_index;
	uint32_t	sq_cqn;		/* CQ receiving send completions */
	uint32_t	sq_reserved1[5];
	uint32_t	sq_tis_lst_sz;	/* number of TIS entries, high 16 bits */
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t	sq_reserved2[2];
	uint32_t	sq_tis_num;	/* TIS this SQ is bound to */
	struct mcx_wq_ctx sq_wq;
} __packed __aligned(4);

/* One scatter/gather data segment of a send WQE. */
struct mcx_sq_entry_seg {
	uint32_t	sqs_byte_count;
	uint32_t	sqs_lkey;	/* memory key covering sqs_addr */
	uint64_t	sqs_addr;	/* DMA address of the data */
} __packed __aligned(4);
1353
1354 struct mcx_sq_entry {
1355 /* control segment */
1356 uint32_t sqe_opcode_index;
1357 #define MCX_SQE_WQE_INDEX_SHIFT 8
1358 #define MCX_SQE_WQE_OPCODE_NOP 0x00
1359 #define MCX_SQE_WQE_OPCODE_SEND 0x0a
1360 uint32_t sqe_ds_sq_num;
1361 #define MCX_SQE_SQ_NUM_SHIFT 8
1362 uint32_t sqe_signature;
1363 #define MCX_SQE_SIGNATURE_SHIFT 24
1364 #define MCX_SQE_SOLICITED_EVENT 0x02
1365 #define MCX_SQE_CE_CQE_ON_ERR 0x00
1366 #define MCX_SQE_CE_CQE_FIRST_ERR 0x04
1367 #define MCX_SQE_CE_CQE_ALWAYS 0x08
1368 #define MCX_SQE_CE_CQE_SOLICIT 0x0C
1369 #define MCX_SQE_FM_NO_FENCE 0x00
1370 #define MCX_SQE_FM_SMALL_FENCE 0x40
1371 uint32_t sqe_mkey;
1372
1373 /* ethernet segment */
1374 uint32_t sqe_reserved1;
1375 uint32_t sqe_mss_csum;
1376 #define MCX_SQE_L4_CSUM (1 << 31)
1377 #define MCX_SQE_L3_CSUM (1 << 30)
1378 uint32_t sqe_reserved2;
1379 uint16_t sqe_inline_header_size;
1380 uint16_t sqe_inline_headers[9];
1381
1382 /* data segment */
1383 struct mcx_sq_entry_seg sqe_segs[1];
1384 } __packed __aligned(64);
1385
1386 CTASSERT(sizeof(struct mcx_sq_entry) == 64);
1387
/* CREATE_SQ / MODIFY_SQ / DESTROY_SQ command layouts. */
struct mcx_cmd_create_sq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_sqn;	/* assigned send queue number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_sq_state;	/* current state + SQ number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t	cmd_modify_hi;	/* field-select bitmask, high word */
	uint32_t	cmd_modify_lo;
	uint8_t		cmd_reserved0[8];
	struct mcx_sq_ctx cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_sqn;
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1439
1440
/* Receive queue context, carried in CREATE_RQ / MODIFY_RQ mailboxes. */
struct mcx_rq_ctx {
	uint32_t	rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t	rq_user_index;
	uint32_t	rq_cqn;		/* CQ receiving rx completions */
	uint32_t	rq_reserved1;
	uint32_t	rq_rmpn;
	uint32_t	rq_reserved2[7];
	struct mcx_wq_ctx rq_wq;
} __packed __aligned(4);

/* One receive descriptor: a single buffer posted to the RQ. */
struct mcx_rq_entry {
	uint32_t	rqe_byte_count;
	uint32_t	rqe_lkey;	/* memory key covering rqe_addr */
	uint64_t	rqe_addr;	/* DMA address of the rx buffer */
} __packed __aligned(16);
1461
/* CREATE_RQ / MODIFY_RQ / DESTROY_RQ command layouts. */
struct mcx_cmd_create_rq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_rqn;	/* assigned receive queue number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_rq_state;	/* current state + RQ number */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t	cmd_modify_hi;	/* field-select bitmask, high word */
	uint32_t	cmd_modify_lo;
	uint8_t		cmd_reserved0[8];
	struct mcx_rq_ctx cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint32_t	cmd_rqn;
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1513
/* Flow table create/destroy and SET_FLOW_TABLE_ROOT command layouts. */
struct mcx_cmd_create_flow_table_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t		ft_miss_action;	/* what to do on no match */
	uint8_t		ft_level;	/* level in the flow table hierarchy */
	uint8_t		ft_reserved0;
	uint8_t		ft_log_size;	/* log2 of number of entries */
	uint32_t	ft_table_miss_id;	/* next table on miss */
	uint8_t		ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t		cmd_table_type;	/* rx/tx table type selector */
	uint8_t		cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_table_id;	/* assigned flow table id */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint8_t		cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;	/* table to install as root */
	uint8_t		cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1585
/*
 * Flow match criteria / match value, 512 bytes on the wire.  Used both
 * as the per-group match mask and as the per-entry match value.
 */
struct mcx_flow_match {
	/* outer headers */
	uint8_t		mc_src_mac[6];
	uint16_t	mc_ethertype;
	uint8_t		mc_dest_mac[6];
	uint16_t	mc_first_vlan;
	uint8_t		mc_ip_proto;
	uint8_t		mc_ip_dscp_ecn;
	uint8_t		mc_vlan_flags;
	uint8_t		mc_tcp_flags;
	uint16_t	mc_tcp_sport;
	uint16_t	mc_tcp_dport;
	uint32_t	mc_reserved0;
	uint16_t	mc_udp_sport;
	uint16_t	mc_udp_dport;
	uint8_t		mc_src_ip[16];	/* v4 addresses are mapped into v6 */
	uint8_t		mc_dest_ip[16];

	/* misc parameters */
	uint8_t		mc_reserved1[8];
	uint16_t	mc_second_vlan;
	uint8_t		mc_reserved2[2];
	uint8_t		mc_second_vlan_flags;
	uint8_t		mc_reserved3[15];
	uint32_t	mc_outer_ipv6_flow_label;
	uint8_t		mc_reserved4[32];

	uint8_t		mc_reserved[384];	/* inner headers etc., unused */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);
1617
/* CREATE_FLOW_GROUP command layouts and the flow context. */
struct mcx_cmd_create_flow_group_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_group_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint8_t		cmd_reserved1[4];
	uint32_t	cmd_start_flow_index;	/* first table entry of the group */
	uint8_t		cmd_reserved2[4];
	uint32_t	cmd_end_flow_index;	/* last table entry of the group */
	uint8_t		cmd_reserved3[23];
	uint8_t		cmd_match_criteria_enable;	/* which criteria sets apply */
#define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
#define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
#define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
	struct mcx_flow_match cmd_match_criteria;
	uint8_t		cmd_reserved4[448];
} __packed __aligned(4);

struct mcx_cmd_create_flow_group_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint32_t	cmd_group_id;	/* assigned flow group id */
	uint8_t		cmd_reserved1[4];
} __packed __aligned(4);

/* Per-entry flow context, embedded in SET_FLOW_TABLE_ENTRY. */
struct mcx_flow_ctx {
	uint8_t		fc_reserved0[4];
	uint32_t	fc_group_id;
	uint32_t	fc_flow_tag;
	uint32_t	fc_action;	/* OR of the ACTION_* flags below */
#define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
#define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
#define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
#define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
	uint32_t	fc_dest_list_size;
	uint32_t	fc_counter_list_size;
	uint8_t		fc_reserved1[40];
	struct mcx_flow_match fc_match_value;
	uint8_t		fc_reserved2[192];
} __packed __aligned(4);

/* destination types for the destination list following the flow ctx */
#define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
#define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
1668
/* DESTROY_FLOW_GROUP and SET_FLOW_TABLE_ENTRY command layouts. */
struct mcx_cmd_destroy_flow_group_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint32_t	cmd_group_id;
	uint8_t		cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint32_t	cmd_modify_enable_mask;
	uint8_t		cmd_reserved1[4];
	uint32_t	cmd_flow_index;	/* entry slot within the table */
	uint8_t		cmd_reserved2[28];
	struct mcx_flow_ctx cmd_flow_ctx;
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1715
/* QUERY/DELETE_FLOW_TABLE_ENTRY command layouts. */
struct mcx_cmd_query_flow_table_entry_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint8_t		cmd_reserved1[8];
	uint32_t	cmd_flow_index;
	uint8_t		cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_out {
	uint8_t		cmd_reserved0[48];
	struct mcx_flow_ctx cmd_flow_ctx;	/* the entry's flow context */
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint8_t		cmd_reserved1[8];
	uint32_t	cmd_flow_index;
	uint8_t		cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1766
/* QUERY_FLOW_GROUP / QUERY_FLOW_TABLE / ALLOC_FLOW_COUNTER layouts. */
struct mcx_cmd_query_flow_group_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint32_t	cmd_group_id;
	uint8_t		cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_out {
	uint8_t		cmd_reserved0[12];
	uint32_t	cmd_start_flow_index;
	uint8_t		cmd_reserved1[4];
	uint32_t	cmd_end_flow_index;
	uint8_t		cmd_reserved2[20];
	uint32_t	cmd_match_criteria_enable;
	uint8_t		cmd_match_criteria[512];	/* raw mcx_flow_match bytes */
	uint8_t		cmd_reserved4[448];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_in {
	uint8_t		cmd_table_type;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_table_id;
	uint8_t		cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_out {
	uint8_t		cmd_reserved0[4];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_alloc_flow_counter_in {
	uint16_t	cmd_opcode;
	uint8_t		cmd_reserved0[4];
	uint16_t	cmd_op_mod;
	uint8_t		cmd_reserved1[8];
} __packed __aligned(4);
1832
1833 struct mcx_cmd_query_rq_in {
1834 uint16_t cmd_opcode;
1835 uint8_t cmd_reserved0[4];
1836 uint16_t cmd_op_mod;
1837 uint32_t cmd_rqn;
1838 uint8_t cmd_reserved1[4];
1839 } __packed __aligned(4);
1840
1841 struct mcx_cmd_query_rq_out {
1842 uint8_t cmd_status;
1843 uint8_t cmd_reserved0[3];
1844 uint32_t cmd_syndrome;
1845 uint8_t cmd_reserved1[8];
1846 } __packed __aligned(4);
1847
1848 struct mcx_cmd_query_rq_mb_out {
1849 uint8_t cmd_reserved0[16];
1850 struct mcx_rq_ctx cmd_ctx;
1851 };
1852
1853 struct mcx_cmd_query_sq_in {
1854 uint16_t cmd_opcode;
1855 uint8_t cmd_reserved0[4];
1856 uint16_t cmd_op_mod;
1857 uint32_t cmd_sqn;
1858 uint8_t cmd_reserved1[4];
1859 } __packed __aligned(4);
1860
1861 struct mcx_cmd_query_sq_out {
1862 uint8_t cmd_status;
1863 uint8_t cmd_reserved0[3];
1864 uint32_t cmd_syndrome;
1865 uint8_t cmd_reserved1[8];
1866 } __packed __aligned(4);
1867
1868 struct mcx_cmd_query_sq_mb_out {
1869 uint8_t cmd_reserved0[16];
1870 struct mcx_sq_ctx cmd_ctx;
1871 };
1872
struct mcx_cmd_alloc_flow_counter_out {
	uint8_t		cmd_status;
	uint8_t		cmd_reserved0[3];
	uint32_t	cmd_syndrome;
	uint8_t		cmd_reserved1[2];
	uint16_t	cmd_flow_counter_id;	/* allocated counter id */
	uint8_t		cmd_reserved2[4];
} __packed __aligned(4);

/* WQ doorbell record shared by the RQ (recv) and SQ (send) counters. */
struct mcx_wq_doorbell {
	uint32_t	db_recv_counter;
	uint32_t	db_send_counter;
} __packed __aligned(8);
1886
/*
 * A single contiguous DMA-able allocation (see mcx_dmamem_alloc).
 * The MCX_DMA_* macros below give the map, device address, kernel
 * address and length of such an allocation.
 */
struct mcx_dmamem {
	bus_dmamap_t		mxm_map;
	bus_dma_segment_t	mxm_seg;
	int			mxm_nsegs;
	size_t			mxm_size;
	void			*mxm_kva;	/* kernel mapping of the memory */
};
#define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
#define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
#define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
#define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)

/* Page-granular memory handed to the firmware (boot/init/regular pages). */
struct mcx_hwmem {
	bus_dmamap_t		mhm_map;
	bus_dma_segment_t	*mhm_segs;
	unsigned int		mhm_seg_count;
	unsigned int		mhm_npages;
};

/* One rx or tx ring slot: the dma map and the mbuf it carries. */
struct mcx_slot {
	bus_dmamap_t		ms_map;
	struct mbuf		*ms_m;
};
1910
/* Driver-side state for one completion queue. */
struct mcx_cq {
	int			cq_n;		/* hardware CQ number */
	struct mcx_dmamem	cq_mem;		/* the CQ entry ring */
	uint32_t		*cq_doorbell;	/* pointer into sc_doorbell_mem */
	uint32_t		cq_cons;	/* consumer index */
	uint32_t		cq_count;	/* arm sequence counter */
};

/*
 * State for converting chip timestamps to system time.  Two copies are
 * kept in sc_calibration[] and swapped by generation so readers never
 * see a half-updated set.
 */
struct mcx_calibration {
	uint64_t		c_timestamp;	/* previous mcx chip time */
	uint64_t		c_uptime;	/* previous kernel nanouptime */
	uint64_t		c_tbase;	/* mcx chip time */
	uint64_t		c_ubase;	/* kernel nanouptime */
	uint64_t		c_tdiff;
	uint64_t		c_udiff;
};

/* calibration intervals in seconds: fast at first, then steady-state */
#define MCX_CALIBRATE_FIRST		2
#define MCX_CALIBRATE_NORMAL		30

/* Accounting of rx buffers currently posted to the hardware. */
struct mcx_rxring {
	u_int			rxr_total;	/* ring capacity */
	u_int			rxr_inuse;	/* buffers currently posted */
};
1935
MBUFQ_HEAD(mcx_mbufq);

/* Per-device driver state. */
struct mcx_softc {
	device_t		sc_dev;
	struct ethercom		sc_ec;
	struct ifmedia		sc_media;
	uint64_t		sc_media_status;
	uint64_t		sc_media_active;

	/* PCI attachment */
	pci_chipset_tag_t	sc_pc;
	pci_intr_handle_t	*sc_intrs;
	void			*sc_ihs[MCX_MAX_NINTR];
	pcitag_t		sc_tag;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;

	/* firmware command queue */
	struct mcx_dmamem	sc_cmdq_mem;
	unsigned int		sc_cmdq_mask;
	unsigned int		sc_cmdq_size;

	unsigned int		sc_cmdq_token;	/* matches replies to commands */

	/* pages given to the firmware at each init stage */
	struct mcx_hwmem	sc_boot_pages;
	struct mcx_hwmem	sc_init_pages;
	struct mcx_hwmem	sc_regular_pages;

	/* resources allocated from the firmware */
	int			sc_uar;		/* user access region index */
	int			sc_pd;		/* protection domain */
	int			sc_tdomain;	/* transport domain */
	uint32_t		sc_lkey;	/* reserved lkey for DMA */

	struct mcx_dmamem	sc_doorbell_mem;

	/* event queue */
	int			sc_eqn;
	int			sc_eq_cons;
	struct mcx_dmamem	sc_eq_mem;
	int			sc_hardmtu;

	struct workqueue	*sc_workq;
	struct work		sc_port_change;

	/* receive flow steering state */
	int			sc_flow_table_id;
#define MCX_FLOW_GROUP_PROMISC		 0
#define MCX_FLOW_GROUP_ALLMULTI		 1
#define MCX_FLOW_GROUP_MAC		 2
#define MCX_NUM_FLOW_GROUPS		 3
	int			sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
	int			sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
	int			sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
	int			sc_promisc_flow_enabled;
	int			sc_allmulti_flow_enabled;
	int			sc_mcast_flow_base;
	int			sc_extra_mcast;
	uint8_t			sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];

	/* timestamp calibration, double-buffered by generation */
	struct mcx_calibration	sc_calibration[2];
	unsigned int		sc_calibration_gen;
	callout_t		sc_calibrate;

	struct mcx_cq		sc_cq[MCX_MAX_CQS];
	int			sc_num_cq;

	/* rx */
	int			sc_tirn;
	int			sc_rqn;
	struct mcx_dmamem	sc_rq_mem;
	struct mcx_slot		*sc_rx_slots;
	uint32_t		*sc_rx_doorbell;

	uint32_t		sc_rx_prod;
	callout_t		sc_rx_refill;
	struct mcx_rxring	sc_rxr;

	/* tx */
	int			sc_tisn;
	int			sc_sqn;
	struct mcx_dmamem	sc_sq_mem;
	struct mcx_slot		*sc_tx_slots;
	uint32_t		*sc_tx_doorbell;
	int			sc_bf_size;	/* blueflame doorbell area size */
	int			sc_bf_offset;

	uint32_t		sc_tx_cons;
	uint32_t		sc_tx_prod;

	uint64_t		sc_last_cq_db;
	uint64_t		sc_last_srq_db;
};
#define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2028
2029 static int mcx_match(device_t, cfdata_t, void *);
2030 static void mcx_attach(device_t, device_t, void *);
2031
2032 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2033 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2034 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2035 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2036
2037 static int mcx_version(struct mcx_softc *);
2038 static int mcx_init_wait(struct mcx_softc *);
2039 static int mcx_enable_hca(struct mcx_softc *);
2040 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2041 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2042 int);
2043 static int mcx_issi(struct mcx_softc *);
2044 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2045 static int mcx_hca_max_caps(struct mcx_softc *);
2046 static int mcx_hca_set_caps(struct mcx_softc *);
2047 static int mcx_init_hca(struct mcx_softc *);
2048 static int mcx_set_driver_version(struct mcx_softc *);
2049 static int mcx_iff(struct mcx_softc *);
2050 static int mcx_alloc_uar(struct mcx_softc *);
2051 static int mcx_alloc_pd(struct mcx_softc *);
2052 static int mcx_alloc_tdomain(struct mcx_softc *);
2053 static int mcx_create_eq(struct mcx_softc *);
2054 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2055 static int mcx_query_special_contexts(struct mcx_softc *);
2056 static int mcx_set_port_mtu(struct mcx_softc *, int);
2057 static int mcx_create_cq(struct mcx_softc *, int);
2058 static int mcx_destroy_cq(struct mcx_softc *, int);
2059 static int mcx_create_sq(struct mcx_softc *, int);
2060 static int mcx_destroy_sq(struct mcx_softc *);
2061 static int mcx_ready_sq(struct mcx_softc *);
2062 static int mcx_create_rq(struct mcx_softc *, int);
2063 static int mcx_destroy_rq(struct mcx_softc *);
2064 static int mcx_ready_rq(struct mcx_softc *);
2065 static int mcx_create_tir(struct mcx_softc *);
2066 static int mcx_destroy_tir(struct mcx_softc *);
2067 static int mcx_create_tis(struct mcx_softc *);
2068 static int mcx_destroy_tis(struct mcx_softc *);
2069 static int mcx_create_flow_table(struct mcx_softc *, int);
2070 static int mcx_set_flow_table_root(struct mcx_softc *);
2071 static int mcx_destroy_flow_table(struct mcx_softc *);
2072 static int mcx_create_flow_group(struct mcx_softc *, int, int,
2073 int, int, struct mcx_flow_match *);
2074 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2075 static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2076 const uint8_t *);
2077 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2078
2079 #if 0
2080 static int mcx_dump_flow_table(struct mcx_softc *);
2081 static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2082 static int mcx_dump_flow_group(struct mcx_softc *);
2083 static int mcx_dump_rq(struct mcx_softc *);
2084 static int mcx_dump_sq(struct mcx_softc *);
2085 #endif
2086
2087
2088 /*
2089 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2090 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2091 */
2092 static void mcx_refill(void *);
2093 static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2094 struct mcx_mbufq *, const struct mcx_calibration *);
2095 static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2096 int *);
2097 static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2098
2099 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2100 static void mcx_arm_eq(struct mcx_softc *);
2101 static int mcx_intr(void *);
2102
2103 static int mcx_init(struct ifnet *);
2104 static void mcx_stop(struct ifnet *, int);
2105 static int mcx_ioctl(struct ifnet *, u_long, void *);
2106 static void mcx_start(struct ifnet *);
2107 static void mcx_watchdog(struct ifnet *);
2108 static void mcx_media_add_types(struct mcx_softc *);
2109 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2110 static int mcx_media_change(struct ifnet *);
2111 #if 0
2112 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2113 #endif
2114 static void mcx_port_change(struct work *, void *);
2115
2116 static void mcx_calibrate_first(struct mcx_softc *);
2117 static void mcx_calibrate(void *);
2118
2119 static inline uint32_t
2120 mcx_rd(struct mcx_softc *, bus_size_t);
2121 static inline void
2122 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2123 static inline void
2124 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2125
2126 static uint64_t mcx_timer(struct mcx_softc *);
2127
2128 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2129 bus_size_t, u_int align);
2130 static void mcx_dmamem_zero(struct mcx_dmamem *);
2131 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2132
2133 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2134 unsigned int);
2135 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2136
/* Autoconfiguration glue: match/attach only; no detach or activate hooks. */
CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2138
/* PCI vendor/product pairs this driver attaches to. */
static const struct {
    pci_vendor_id_t     vendor;
    pci_product_id_t    product;
} mcx_devices[] = {
    { PCI_VENDOR_MELLANOX,  PCI_PRODUCT_MELLANOX_MT27700 },
    { PCI_VENDOR_MELLANOX,  PCI_PRODUCT_MELLANOX_MT27710 },
    { PCI_VENDOR_MELLANOX,  PCI_PRODUCT_MELLANOX_MT27800 },
    { PCI_VENDOR_MELLANOX,  PCI_PRODUCT_MELLANOX_MT28800 },
};
2148
/*
 * Map from a capability bit index (presumably the port's ethernet
 * protocol capability bitmask -- confirm against the PRM) to an
 * ifmedia subtype.  A zero entry means that bit has no ifmedia
 * mapping.  The order is part of the hardware interface; do not
 * reorder or remove entries.
 */
static const uint64_t mcx_eth_cap_map[] = {
    IFM_1000_SGMII,
    IFM_1000_KX,
    IFM_10G_CX4,
    IFM_10G_KX4,
    IFM_10G_KR,
    IFM_20G_KR2,
    IFM_40G_CR4,
    IFM_40G_KR4,
    IFM_56G_R4,
    0,
    0,
    0,
    IFM_10G_CR1,
    IFM_10G_SR,
    IFM_10G_LR,
    IFM_40G_SR4,
    IFM_40G_LR4,
    0,
    IFM_50G_SR2,
    0,
    IFM_100G_CR4,
    IFM_100G_SR4,
    IFM_100G_KR4,
    IFM_100G_LR4,
    IFM_100_TX,
    IFM_1000_T,
    IFM_10G_T,
    IFM_25G_CR,
    IFM_25G_KR,
    IFM_25G_SR,
    IFM_50G_CR2,
    IFM_50G_KR2
};
2183
2184 static int
2185 mcx_match(device_t parent, cfdata_t cf, void *aux)
2186 {
2187 struct pci_attach_args *pa = aux;
2188 int n;
2189
2190 for (n = 0; n < __arraycount(mcx_devices); n++) {
2191 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2192 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2193 return 1;
2194 }
2195
2196 return 0;
2197 }
2198
/*
 * Attach: map the HCA BAR, bring up the firmware command queue, then
 * walk the device through its init sequence (ISSI negotiation, page
 * donation, capabilities, UAR/PD/transport-domain allocation), set up
 * the single MSI-X vector and event queue, fetch the MAC address, and
 * attach the ethernet interface.  Failures unwind through the labels
 * at the bottom in reverse order of setup.
 */
void
mcx_attach(device_t parent, device_t self, void *aux)
{
    struct mcx_softc *sc = device_private(self);
    struct ifnet *ifp = &sc->sc_ec.ec_if;
    struct pci_attach_args *pa = aux;
    uint8_t enaddr[ETHER_ADDR_LEN];
    int counts[PCI_INTR_TYPE_SIZE];
    char intrbuf[PCI_INTRSTR_LEN];
    pcireg_t memtype;
    uint32_t r;
    unsigned int cq_stride;
    unsigned int cq_size;
    const char *intrstr;
    int i;

    sc->sc_dev = self;
    sc->sc_pc = pa->pa_pc;
    sc->sc_tag = pa->pa_tag;
    /* prefer 64-bit DMA when the bus supports it */
    if (pci_dma64_available(pa))
        sc->sc_dmat = pa->pa_dmat64;
    else
        sc->sc_dmat = pa->pa_dmat;

    /* Map the PCI memory space */
    memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
    if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
        0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
        NULL, &sc->sc_mems)) {
        aprint_error(": unable to map register memory\n");
        return;
    }

    pci_aprint_devinfo(pa, "Ethernet controller");

    if (mcx_version(sc) != 0) {
        /* error printed by mcx_version */
        goto unmap;
    }

    /* command queue geometry is advertised in the low address register */
    r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
    cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
    cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
    if (cq_size > MCX_MAX_CQE) {
        aprint_error_dev(self,
            "command queue size overflow %u\n", cq_size);
        goto unmap;
    }
    if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
        aprint_error_dev(self,
            "command queue entry size underflow %u\n", cq_stride);
        goto unmap;
    }
    /* all command queue entries must fit in one page */
    if (cq_stride * cq_size > MCX_PAGE_SIZE) {
        aprint_error_dev(self, "command queue page overflow\n");
        goto unmap;
    }

    if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
        MCX_PAGE_SIZE) != 0) {
        aprint_error_dev(self, "unable to allocate doorbell memory\n");
        goto unmap;
    }

    if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
        MCX_PAGE_SIZE) != 0) {
        aprint_error_dev(self, "unable to allocate command queue\n");
        goto dbfree;
    }

    /* tell the firmware where the command queue lives: high word first */
    mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
    mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
    mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
    mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);

    if (mcx_init_wait(sc) != 0) {
        aprint_error_dev(self, "timeout waiting for init\n");
        goto cqfree;
    }

    sc->sc_cmdq_mask = cq_size - 1;
    sc->sc_cmdq_size = cq_stride;

    if (mcx_enable_hca(sc) != 0) {
        /* error printed by mcx_enable_hca */
        goto cqfree;
    }

    if (mcx_issi(sc) != 0) {
        /* error printed by mcx_issi */
        goto teardown;
    }

    if (mcx_pages(sc, &sc->sc_boot_pages,
        htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
        /* error printed by mcx_pages */
        goto teardown;
    }

    if (mcx_hca_max_caps(sc) != 0) {
        /* error printed by mcx_hca_max_caps */
        goto teardown;
    }

    if (mcx_hca_set_caps(sc) != 0) {
        /* error printed by mcx_hca_set_caps */
        goto teardown;
    }

    if (mcx_pages(sc, &sc->sc_init_pages,
        htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
        /* error printed by mcx_pages */
        goto teardown;
    }

    if (mcx_init_hca(sc) != 0) {
        /* error printed by mcx_init_hca */
        goto teardown;
    }

    if (mcx_pages(sc, &sc->sc_regular_pages,
        htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
        /* error printed by mcx_pages */
        goto teardown;
    }

    /* apparently not necessary? */
    if (mcx_set_driver_version(sc) != 0) {
        /* error printed by mcx_set_driver_version */
        goto teardown;
    }

    if (mcx_iff(sc) != 0) { /* modify nic vport context */
        /* error printed by mcx_iff? */
        goto teardown;
    }

    if (mcx_alloc_uar(sc) != 0) {
        /* error printed by mcx_alloc_uar */
        goto teardown;
    }

    if (mcx_alloc_pd(sc) != 0) {
        /* error printed by mcx_alloc_pd */
        goto teardown;
    }

    if (mcx_alloc_tdomain(sc) != 0) {
        /* error printed by mcx_alloc_tdomain */
        goto teardown;
    }

    /*
     * PRM makes no mention of msi interrupts, just legacy and msi-x.
     * mellanox support tells me legacy interrupts are not supported,
     * so we're stuck with just msi-x.
     */
    counts[PCI_INTR_TYPE_MSIX] = 1;
    counts[PCI_INTR_TYPE_MSI] = 0;
    counts[PCI_INTR_TYPE_INTX] = 0;
    if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
        aprint_error_dev(self, "unable to allocate interrupt\n");
        goto teardown;
    }
    KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);

#ifdef MCX_MPSAFE
    pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
#endif

    intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
        sizeof(intrbuf));
    sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
        IPL_NET, mcx_intr, sc, DEVNAME(sc));
    if (sc->sc_ihs[0] == NULL) {
        aprint_error_dev(self, "unable to establish interrupt%s%s\n",
            intrstr ? " at " : "",
            intrstr ? intrstr : "");
        goto teardown;
    }

    aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

    if (mcx_create_eq(sc) != 0) {
        /* error printed by mcx_create_eq */
        goto teardown;
    }

    if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
        /* error printed by mcx_query_nic_vport_context */
        goto teardown;
    }

    if (mcx_query_special_contexts(sc) != 0) {
        /* error printed by mcx_query_special_contexts */
        goto teardown;
    }

    if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
        /* error printed by mcx_set_port_mtu */
        goto teardown;
    }

    aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
        ether_sprintf(enaddr));

    strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
#ifdef MCX_MPSAFE
    ifp->if_extflags = IFEF_MPSAFE;
#endif
    ifp->if_init = mcx_init;
    ifp->if_stop = mcx_stop;
    ifp->if_ioctl = mcx_ioctl;
    ifp->if_start = mcx_start;
    ifp->if_watchdog = mcx_watchdog;
    ifp->if_mtu = sc->sc_hardmtu;
    IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
    IFQ_SET_READY(&ifp->if_snd);

    sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

    sc->sc_ec.ec_ifmedia = &sc->sc_media;
    ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
        mcx_media_status);
    mcx_media_add_types(sc);
    ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

    if_attach(ifp);
    if_deferred_start_init(ifp, NULL);

    ether_ifattach(ifp, enaddr);

    callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
    callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
    callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
    callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);

    if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
        PRI_NONE, IPL_NET, 0) != 0) {
        aprint_error_dev(self, "couldn't create port change workq\n");
        /*
         * NOTE(review): this path does not disestablish the interrupt,
         * detach the ifnet, or destroy the callouts set up above --
         * verify whether the leak is acceptable during attach.
         */
        goto teardown;
    }

    /* run once by hand to pick up the initial link state */
    mcx_port_change(&sc->sc_port_change, sc);

    sc->sc_flow_table_id = -1;
    for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
        sc->sc_flow_group_id[i] = -1;
        sc->sc_flow_group_size[i] = 0;
        sc->sc_flow_group_start[i] = 0;
    }
    sc->sc_extra_mcast = 0;
    memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
    return;

teardown:
    mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
    /* error printed by mcx_teardown_hca, and we're already unwinding */
cqfree:
    /* hand the command queue address back, flagged as disabled */
    mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
    mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
    mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
        MCX_CMDQ_INTERFACE_DISABLED);
    mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);

    mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
    mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
    mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);

    mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
dbfree:
    mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
unmap:
    bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
    sc->sc_mems = 0;
}
2478
2479 static void
2480 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2481 {
2482 rxr->rxr_total = hwm;
2483 rxr->rxr_inuse = 0;
2484 }
2485
2486 static u_int
2487 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2488 {
2489 const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2490
2491 rxr->rxr_inuse += taken;
2492
2493 return taken;
2494 }
2495
2496 static void
2497 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2498 {
2499 rxr->rxr_inuse -= n;
2500 }
2501
/* Number of ring slots currently claimed. */
static u_int
mcx_rxr_inuse(struct mcx_rxring *rxr)
{
    return rxr->rxr_inuse;
}
2507
2508 static int
2509 mcx_version(struct mcx_softc *sc)
2510 {
2511 uint32_t fw0, fw1;
2512 uint16_t cmdif;
2513
2514 fw0 = mcx_rd(sc, MCX_FW_VER);
2515 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2516
2517 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2518 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2519
2520 cmdif = MCX_CMDIF(fw1);
2521 if (cmdif != MCX_CMD_IF_SUPPORTED) {
2522 aprint_error_dev(sc->sc_dev,
2523 "unsupported command interface %u\n", cmdif);
2524 return (-1);
2525 }
2526
2527 return (0);
2528 }
2529
/*
 * Wait for the firmware to report ready in the state register.
 * Polls every 1ms for up to 2 seconds.  Returns 0 when ready,
 * -1 on timeout.
 */
static int
mcx_init_wait(struct mcx_softc *sc)
{
    unsigned int i;
    uint32_t r;

    for (i = 0; i < 2000; i++) {
        r = mcx_rd(sc, MCX_STATE);
        if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
            return (0);

        delay(1000);
        /* force the next read to go to the device */
        mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
            BUS_SPACE_BARRIER_READ);
    }

    return (-1);
}
2548
/*
 * Busy-wait for the firmware to hand a command queue entry back to
 * software, polling every 1ms for up to 'msec' milliseconds.
 * Returns 0 on completion, ETIMEDOUT otherwise.
 */
static uint8_t
mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int msec)
{
    unsigned int i;

    for (i = 0; i < msec; i++) {
        /* pick up the firmware's writes to the queue memory */
        bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
            0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);

        if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
            MCX_CQ_STATUS_OWN_SW) {
            /*
             * once the event queue exists, run the interrupt
             * handler by hand to process any events the
             * command may have generated.
             */
            if (sc->sc_eqn != 0)
                mcx_intr(sc);
            return (0);
        }

        delay(1000);
    }

    return (ETIMEDOUT);
}
2571
/* Fold both 32-bit halves of a 64-bit value into the running xor. */
static uint32_t
mcx_mix_u64(uint32_t xor, uint64_t u64)
{
    return xor ^ (uint32_t)(u64 >> 32) ^ (uint32_t)u64;
}
2580
/* Fold a 32-bit value into the running xor. */
static uint32_t
mcx_mix_u32(uint32_t xor, uint32_t u32)
{
    return xor ^ u32;
}
2588
/* Fold a single byte into the running xor. */
static uint32_t
mcx_mix_u8(uint32_t xor, uint8_t u8)
{
    return xor ^ u8;
}
2596
/* Fold the 32-bit xor accumulator down to a single byte. */
static uint8_t
mcx_mix_done(uint32_t xor)
{
    uint32_t folded = xor;

    folded ^= folded >> 16;
    folded ^= folded >> 8;

    return folded;
}
2605
/*
 * xor-checksum a buffer: accumulate the buffer's 32-bit words into a
 * running xor seeded with 0xff, then fold the result to one byte.
 * 'len' is in bytes and is expected to be a multiple of 4.
 */
static uint8_t
mcx_xor(const void *buf, size_t len)
{
    const uint32_t *words = buf;
    const size_t nwords = len / sizeof(*words);
    uint32_t acc = 0xff;
    size_t n;

    for (n = 0; n < nwords; n++)
        acc ^= words[n];

    return mcx_mix_done(acc);
}
2620
2621 static uint8_t
2622 mcx_cmdq_token(struct mcx_softc *sc)
2623 {
2624 uint8_t token;
2625
2626 do {
2627 token = ++sc->sc_cmdq_token;
2628 } while (token == 0);
2629
2630 return (token);
2631 }
2632
/*
 * Prepare a command queue entry: zero the whole slot, record input and
 * output lengths (big-endian), tag it with the caller's token, and
 * mark it owned by hardware so the firmware will process it once the
 * doorbell rings.
 */
static void
mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    uint32_t ilen, uint32_t olen, uint8_t token)
{
    /* clear the full hardware stride, not just sizeof(*cqe) */
    memset(cqe, 0, sc->sc_cmdq_size);

    cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
    be32enc(&cqe->cq_input_length, ilen);
    be32enc(&cqe->cq_output_length, olen);
    cqe->cq_token = token;
    cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
}
2645
/* Sign a command queue entry with its xor-based checksum. */
static void
mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
{
    cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
}
2651
/*
 * Verify the signature of a completed entry.  The actual check is
 * deliberately disabled (left commented out); this always reports
 * success.
 */
static int
mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
{
    /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
    return (0);
}
2658
2659 static void *
2660 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2661 {
2662 return (&cqe->cq_input_data);
2663 }
2664
2665 static void *
2666 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2667 {
2668 return (&cqe->cq_output_data);
2669 }
2670
/*
 * Hand a prepared entry to the firmware: sign it, flush the queue
 * memory to the device, then ring the doorbell for the given slot.
 */
static void
mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
    unsigned int slot)
{
    mcx_cmdq_sign(cqe);

    /* make the entry visible to the device before the doorbell write */
    bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
        0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);

    mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
}
2682
/*
 * Issue ENABLE_HCA for function 0.  Returns 0 on success, -1 on
 * timeout, corrupt reply, or command failure (the error messages
 * continue an aprint line, hence the leading comma).
 */
static int
mcx_enable_hca(struct mcx_softc *sc)
{
    struct mcx_cmdq_entry *cqe;
    struct mcx_cmd_enable_hca_in *in;
    struct mcx_cmd_enable_hca_out *out;
    int error;
    uint8_t status;

    cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
    mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
    in->cmd_op_mod = htobe16(0);
    in->cmd_function_id = htobe16(0);

    mcx_cmdq_post(sc, cqe, 0);

    error = mcx_cmdq_poll(sc, cqe, 1000);
    if (error != 0) {
        printf(", hca enable timeout\n");
        return (-1);
    }
    if (mcx_cmdq_verify(cqe) != 0) {
        printf(", hca enable command corrupt\n");
        return (-1);
    }

    /* the first output byte is the command status */
    status = cqe->cq_output_data[0];
    if (status != MCX_CQ_STATUS_OK) {
        printf(", hca enable failed (%x)\n", status);
        return (-1);
    }

    return (0);
}
2720
/*
 * Issue TEARDOWN_HCA with the given profile (already big-endian, e.g.
 * htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL)).  Returns 0 on success,
 * -1 on timeout, corrupt reply, or command failure.
 */
static int
mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
{
    struct mcx_cmdq_entry *cqe;
    struct mcx_cmd_teardown_hca_in *in;
    struct mcx_cmd_teardown_hca_out *out;
    int error;
    uint8_t status;

    cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
    mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
    in->cmd_op_mod = htobe16(0);
    /* profile is stored unconverted; callers pass it in network order */
    in->cmd_profile = profile;

    mcx_cmdq_post(sc, cqe, 0);

    error = mcx_cmdq_poll(sc, cqe, 1000);
    if (error != 0) {
        printf(", hca teardown timeout\n");
        return (-1);
    }
    if (mcx_cmdq_verify(cqe) != 0) {
        printf(", hca teardown command corrupt\n");
        return (-1);
    }

    status = cqe->cq_output_data[0];
    if (status != MCX_CQ_STATUS_OK) {
        printf(", hca teardown failed (%x)\n", status);
        return (-1);
    }

    return (0);
}
2758
/*
 * Allocate nmb command mailboxes as one zeroed DMA area and link them
 * into a chain: *ptr (the cqe's input/output pointer, or a previous
 * mailbox's next pointer) is patched with the bus address of the first
 * mailbox, and each mailbox's next pointer leads to the following one;
 * the last next pointer stays zero from the memset.  All mailboxes
 * carry the command token.  Returns 0 or the dmamem allocation error.
 */
static int
mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    unsigned int nmb, uint64_t *ptr, uint8_t token)
{
    uint8_t *kva;
    uint64_t dva;
    int i;
    int error;

    error = mcx_dmamem_alloc(sc, mxm,
        nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
    if (error != 0)
        return (error);

    mcx_dmamem_zero(mxm);

    dva = MCX_DMA_DVA(mxm);
    kva = MCX_DMA_KVA(mxm);
    for (i = 0; i < nmb; i++) {
        struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;

        /* patch the cqe or mbox pointing at this one */
        be64enc(ptr, dva);

        /* fill in this mbox */
        be32enc(&mbox->mb_block_number, i);
        mbox->mb_token = token;

        /* move to the next one */
        ptr = &mbox->mb_next_ptr;

        dva += MCX_CMDQ_MAILBOX_SIZE;
        kva += MCX_CMDQ_MAILBOX_SIZE;
    }

    return (0);
}
2796
2797 static uint32_t
2798 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2799 {
2800 uint32_t xor = 0xff;
2801
2802 /* only 3 fields get set, so mix them directly */
2803 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2804 xor = mcx_mix_u32(xor, mb->mb_block_number);
2805 xor = mcx_mix_u8(xor, mb->mb_token);
2806
2807 return (mcx_mix_done(xor));
2808 }
2809
2810 static void
2811 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2812 {
2813 uint8_t *kva;
2814 int i;
2815
2816 kva = MCX_DMA_KVA(mxm);
2817
2818 for (i = 0; i < nmb; i++) {
2819 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2820 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2821 mb->mb_ctrl_signature = sig;
2822 mb->mb_signature = sig ^
2823 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2824
2825 kva += MCX_CMDQ_MAILBOX_SIZE;
2826 }
2827 }
2828
/* bus_dmamap_sync() wrapper covering the whole mailbox DMA area. */
static void
mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
{
    bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
        0, MCX_DMA_LEN(mxm), ops);
}
2835
2836 static struct mcx_cmdq_mailbox *
2837 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2838 {
2839 uint8_t *kva;
2840
2841 kva = MCX_DMA_KVA(mxm);
2842 kva += i * MCX_CMDQ_MAILBOX_SIZE;
2843
2844 return ((struct mcx_cmdq_mailbox *)kva);
2845 }
2846
2847 static inline void *
2848 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2849 {
2850 return (&mb->mb_data);
2851 }
2852
2853 static void
2854 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2855 void *b, size_t len)
2856 {
2857 uint8_t *buf = b;
2858 struct mcx_cmdq_mailbox *mb;
2859 int i;
2860
2861 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2862 for (i = 0; i < nmb; i++) {
2863
2864 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2865
2866 if (sizeof(mb->mb_data) >= len)
2867 break;
2868
2869 buf += sizeof(mb->mb_data);
2870 len -= sizeof(mb->mb_data);
2871 mb++;
2872 }
2873 }
2874
2875 static void
2876 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2877 {
2878 uint8_t *buf = b;
2879 struct mcx_cmdq_mailbox *mb;
2880 int i;
2881
2882 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2883 for (i = 0; i < nmb; i++) {
2884 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2885
2886 if (sizeof(mb->mb_data) >= len)
2887 break;
2888
2889 buf += sizeof(mb->mb_data);
2890 len -= sizeof(mb->mb_data);
2891 mb++;
2892 }
2893 }
2894
/* Free a mailbox chain allocated by mcx_cmdq_mboxes_alloc(). */
static void
mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
    mcx_dmamem_free(sc, mxm);
}
2900
#if 0
/* Debug helper: dump one command queue entry to the console. */
static void
mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
{
    unsigned int i;

    printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
        be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));

    printf(", idata ");
    for (i = 0; i < sizeof(cqe->cq_input_data); i++)
        printf("%02x", cqe->cq_input_data[i]);

    printf(", odata ");
    for (i = 0; i < sizeof(cqe->cq_output_data); i++)
        printf("%02x", cqe->cq_output_data[i]);

    printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
        be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
        cqe->cq_token, cqe->cq_signature, cqe->cq_status);
}

/* Debug helper: hex-dump the data areas of num mailboxes. */
static void
mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
{
    int i, j;
    uint8_t *d;

    for (i = 0; i < num; i++) {
        struct mcx_cmdq_mailbox *mbox;
        mbox = mcx_cq_mbox(mboxes, i);

        d = mcx_cq_mbox_data(mbox);
        for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
            if (j != 0 && (j % 16 == 0))
                printf("\n");
            printf("%.2x ", d[j]);
        }
    }
}
#endif
2942
2943 static int
2944 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2945 int len)
2946 {
2947 struct mcx_dmamem mxm;
2948 struct mcx_cmdq_entry *cqe;
2949 struct mcx_cmd_access_reg_in *in;
2950 struct mcx_cmd_access_reg_out *out;
2951 uint8_t token = mcx_cmdq_token(sc);
2952 int error, nmb;
2953
2954 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2955 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2956 token);
2957
2958 in = mcx_cmdq_in(cqe);
2959 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2960 in->cmd_op_mod = htobe16(op);
2961 in->cmd_register_id = htobe16(reg);
2962
2963 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2964 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2965 printf(", unable to allocate access reg mailboxen\n");
2966 return (-1);
2967 }
2968 cqe->cq_input_ptr = cqe->cq_output_ptr;
2969 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2970 mcx_cmdq_mboxes_sign(&mxm, nmb);
2971 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2972
2973 mcx_cmdq_post(sc, cqe, 0);
2974 error = mcx_cmdq_poll(sc, cqe, 1000);
2975 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2976
2977 if (error != 0) {
2978 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2979 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2980 goto free;
2981 }
2982 error = mcx_cmdq_verify(cqe);
2983 if (error != 0) {
2984 printf("%s: access reg (%s %x) reply corrupt\n",
2985 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
2986 reg);
2987 goto free;
2988 }
2989
2990 out = mcx_cmdq_out(cqe);
2991 if (out->cmd_status != MCX_CQ_STATUS_OK) {
2992 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2993 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2994 reg, out->cmd_status, out->cmd_syndrome);
2995 error = -1;
2996 goto free;
2997 }
2998
2999 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3000 free:
3001 mcx_dmamem_free(sc, &mxm);
3002
3003 return (error);
3004 }
3005
/*
 * Issue SET_ISSI, switching the firmware to interface step sequence
 * id MCX_ISSI.  The caller provides the cqe and slot so the entry
 * left over from QUERY_ISSI can be reused.  Returns 0 on success,
 * -1 on timeout, corrupt reply, or command failure.
 */
static int
mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
{
    struct mcx_cmd_set_issi_in *in;
    struct mcx_cmd_set_issi_out *out;
    uint8_t status;

    mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
    in->cmd_op_mod = htobe16(0);
    in->cmd_current_issi = htobe16(MCX_ISSI);

    mcx_cmdq_post(sc, cqe, slot);
    if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
        return (-1);
    if (mcx_cmdq_verify(cqe) != 0)
        return (-1);

    status = cqe->cq_output_data[0];
    if (status != MCX_CQ_STATUS_OK)
        return (-1);

    return (0);
}
3032
/*
 * Negotiate the ISSI (interface step sequence id) with the firmware:
 * query the supported set, and switch to MCX_ISSI when it is offered.
 * Firmware that does not know the command (BAD_OPCODE) or does not
 * support MCX_ISSI is silently left on ISSI 0; only hard failures
 * return -1.
 */
static int
mcx_issi(struct mcx_softc *sc)
{
    struct mcx_dmamem mxm;
    struct mcx_cmdq_entry *cqe;
    struct mcx_cmd_query_issi_in *in;
    struct mcx_cmd_query_issi_il_out *out;
    struct mcx_cmd_query_issi_mb_out *mb;
    uint8_t token = mcx_cmdq_token(sc);
    uint8_t status;
    int error;

    cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
    mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
    in->cmd_op_mod = htobe16(0);

    CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
    if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
        &cqe->cq_output_ptr, token) != 0) {
        printf(", unable to allocate query issi mailbox\n");
        return (-1);
    }
    mcx_cmdq_mboxes_sign(&mxm, 1);

    mcx_cmdq_post(sc, cqe, 0);
    error = mcx_cmdq_poll(sc, cqe, 1000);
    if (error != 0) {
        printf(", query issi timeout\n");
        goto free;
    }
    error = mcx_cmdq_verify(cqe);
    if (error != 0) {
        printf(", query issi reply corrupt\n");
        goto free;
    }

    status = cqe->cq_output_data[0];
    switch (status) {
    case MCX_CQ_STATUS_OK:
        break;
    case MCX_CQ_STATUS_BAD_OPCODE:
        /* use ISSI 0 */
        goto free;
    default:
        printf(", query issi failed (%x)\n", status);
        error = -1;
        goto free;
    }

    out = mcx_cmdq_out(cqe);
    if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
        /* use ISSI 1 */
        goto free;
    }

    /* don't need to read cqe anymore, can be used for SET ISSI */

    mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
    CTASSERT(MCX_ISSI < NBBY);
    /* XXX math is hard */
    if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
        /* use ISSI 0 */
        goto free;
    }

    if (mcx_set_issi(sc, cqe, 0) != 0) {
        /* ignore the error, just use ISSI 0 */
    } else {
        /* use ISSI 1 */
    }

free:
    mcx_cq_mboxes_free(sc, &mxm);
    return (error);
}
3111
/*
 * Ask the firmware how many pages it wants for the given init phase.
 * 'type' is already big-endian (callers pass htobe16(MCX_CMD_QUERY_-
 * PAGES_*)), so it is stored into cmd_op_mod unconverted.  On success,
 * *func_id gets the firmware function id as-is (it is later passed
 * straight back in MANAGE_PAGES) and *npages the page count in host
 * order.  Returns 0 on success, -1 on failure.
 */
static int
mcx_query_pages(struct mcx_softc *sc, uint16_t type,
    uint32_t *npages, uint16_t *func_id)
{
    struct mcx_cmdq_entry *cqe;
    struct mcx_cmd_query_pages_in *in;
    struct mcx_cmd_query_pages_out *out;

    cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
    mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
    in->cmd_op_mod = type;

    mcx_cmdq_post(sc, cqe, 0);
    if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
        printf(", query pages timeout\n");
        return (-1);
    }
    if (mcx_cmdq_verify(cqe) != 0) {
        printf(", query pages reply corrupt\n");
        return (-1);
    }

    out = mcx_cmdq_out(cqe);
    if (out->cmd_status != MCX_CQ_STATUS_OK) {
        printf(", query pages failed (%x)\n", out->cmd_status);
        return (-1);
    }

    *func_id = out->cmd_func_id;
    *npages = be32dec(&out->cmd_num_pages);

    return (0);
}
3148
/*
 * Iterator over the physical segments of a loaded DMA map, used to
 * hand out page-sized bus addresses in order.
 */
struct bus_dma_iter {
    bus_dmamap_t        i_map;      /* the map being walked */
    bus_size_t      i_offset;   /* byte offset into current segment */
    unsigned int        i_index;    /* current segment index */
};
3154
3155 static void
3156 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3157 {
3158 i->i_map = map;
3159 i->i_offset = 0;
3160 i->i_index = 0;
3161 }
3162
3163 static bus_addr_t
3164 bus_dma_iter_addr(struct bus_dma_iter *i)
3165 {
3166 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3167 }
3168
/*
 * Advance the iterator by size bytes, stepping across segment
 * boundaries as needed.  The caller must not advance past the end of
 * the map; there is no bounds check on the segment walk.
 */
static void
bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
{
    bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
    bus_size_t diff;

    do {
        /* room left in the current segment */
        diff = seg->ds_len - i->i_offset;
        if (size < diff)
            break;

        size -= diff;

        seg++;

        i->i_offset = 0;
        i->i_index++;
    } while (size > 0);

    /* whatever is left fits in the (possibly new) current segment */
    i->i_offset += size;
}
3190
/*
 * Donate the pages in mhm to the firmware via MANAGE_PAGES: the bus
 * address of each page is written big-endian into the command
 * mailboxes, walking the hwmem DMA map page by page.  func_id comes
 * straight from QUERY_PAGES.  Returns 0 on success, -1 on failure.
 */
static int
mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
{
    struct mcx_dmamem mxm;
    struct mcx_cmdq_entry *cqe;
    struct mcx_cmd_manage_pages_in *in;
    struct mcx_cmd_manage_pages_out *out;
    unsigned int paslen, nmb, i, j, npages;
    struct bus_dma_iter iter;
    uint64_t *pas;
    uint8_t status;
    uint8_t token = mcx_cmdq_token(sc);
    int error;

    npages = mhm->mhm_npages;

    /* one 64-bit physical address per page */
    paslen = sizeof(*pas) * npages;
    nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);

    cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
    mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);

    in = mcx_cmdq_in(cqe);
    in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
    in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
    in->cmd_func_id = func_id;
    be32enc(&in->cmd_input_num_entries, npages);

    if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
        &cqe->cq_input_ptr, token) != 0) {
        printf(", unable to allocate manage pages mailboxen\n");
        return (-1);
    }

    /* fill each mailbox with as many page addresses as fit */
    bus_dma_iter_init(&iter, mhm->mhm_map);
    for (i = 0; i < nmb; i++) {
        unsigned int lim;

        pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
        lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);

        for (j = 0; j < lim; j++) {
            be64enc(&pas[j], bus_dma_iter_addr(&iter));
            bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
        }

        npages -= lim;
    }

    mcx_cmdq_mboxes_sign(&mxm, nmb);

    mcx_cmdq_post(sc, cqe, 0);
    error = mcx_cmdq_poll(sc, cqe, 1000);
    if (error != 0) {
        printf(", manage pages timeout\n");
        goto free;
    }
    error = mcx_cmdq_verify(cqe);
    if (error != 0) {
        printf(", manage pages reply corrupt\n");
        goto free;
    }

    status = cqe->cq_output_data[0];
    if (status != MCX_CQ_STATUS_OK) {
        printf(", manage pages failed (%x)\n", status);
        error = -1;
        goto free;
    }

free:
    mcx_dmamem_free(sc, &mxm);

    return (error);
}
3266
/*
 * Ask the firmware how many pages of the given type it wants, then
 * allocate and donate them.  A request for zero pages succeeds
 * trivially.  Returns 0 on success, -1 on failure (with the hwmem
 * released again if donation failed).
 */
static int
mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
{
    uint32_t npages;
    uint16_t func_id;

    if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
        /* error printed by mcx_query_pages */
        return (-1);
    }

    if (npages == 0)
        return (0);

    if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
        printf(", unable to allocate hwmem\n");
        return (-1);
    }

    if (mcx_add_pages(sc, mhm, func_id) != 0) {
        printf(", unable to add hwmem\n");
        mcx_hwmem_free(sc, mhm);
        return (-1);
    }

    return (0);
}
3298
/*
 * Read the device's maximum HCA capabilities (QUERY_HCA_CAP with the
 * "max" op_mod) and record what the driver needs: verify that the
 * firmware's minimum page shift fits the system PAGE_SHIFT, and save
 * half of the blueflame register size.  Error messages continue an
 * attach-time printf line.  Returns 0 on success, -1 or the
 * command-queue error on failure.
 */
static int
mcx_hca_max_caps(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_hca_cap_in *in;
	struct mcx_cmd_query_hca_cap_out *out;
	struct mcx_cmdq_mailbox *mb;
	struct mcx_cap_device *hca;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
	    MCX_CMD_QUERY_HCA_CAP_DEVICE);

	/* the capability structure comes back through output mailboxes */
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query hca caps mailboxen\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	/* sync the mailboxes back before reading them, even on timeout */
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);

	if (error != 0) {
		printf(", query hca caps timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query hca caps reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", query hca caps failed (%x)\n", status);
		error = -1;
		goto free;
	}

	/* the device capability struct starts at the first mailbox's data */
	mb = mcx_cq_mbox(&mxm, 0);
	hca = mcx_cq_mbox_data(mb);

	if (hca->log_pg_sz > PAGE_SHIFT) {
		printf(", minimum system page shift %u is too large\n",
		    hca->log_pg_sz);
		error = -1;
		goto free;
	}
	/*
	 * blueflame register is split into two buffers, and we must alternate
	 * between the two of them.
	 */
	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3370
3371 static int
3372 mcx_hca_set_caps(struct mcx_softc *sc)
3373 {
3374 struct mcx_dmamem mxm;
3375 struct mcx_cmdq_entry *cqe;
3376 struct mcx_cmd_query_hca_cap_in *in;
3377 struct mcx_cmd_query_hca_cap_out *out;
3378 struct mcx_cmdq_mailbox *mb;
3379 struct mcx_cap_device *hca;
3380 uint8_t status;
3381 uint8_t token = mcx_cmdq_token(sc);
3382 int error;
3383
3384 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3385 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3386 token);
3387
3388 in = mcx_cmdq_in(cqe);
3389 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3390 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3391 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3392
3393 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3394 &cqe->cq_output_ptr, token) != 0) {
3395 printf(", unable to allocate manage pages mailboxen\n");
3396 return (-1);
3397 }
3398 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3399 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3400
3401 mcx_cmdq_post(sc, cqe, 0);
3402 error = mcx_cmdq_poll(sc, cqe, 1000);
3403 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3404
3405 if (error != 0) {
3406 printf(", query hca caps timeout\n");
3407 goto free;
3408 }
3409 error = mcx_cmdq_verify(cqe);
3410 if (error != 0) {
3411 printf(", query hca caps reply corrupt\n");
3412 goto free;
3413 }
3414
3415 status = cqe->cq_output_data[0];
3416 if (status != MCX_CQ_STATUS_OK) {
3417 printf(", query hca caps failed (%x)\n", status);
3418 error = -1;
3419 goto free;
3420 }
3421
3422 mb = mcx_cq_mbox(&mxm, 0);
3423 hca = mcx_cq_mbox_data(mb);
3424
3425 hca->log_pg_sz = PAGE_SHIFT;
3426
3427 free:
3428 mcx_dmamem_free(sc, &mxm);
3429
3430 return (error);
3431 }
3432
3433
3434 static int
3435 mcx_init_hca(struct mcx_softc *sc)
3436 {
3437 struct mcx_cmdq_entry *cqe;
3438 struct mcx_cmd_init_hca_in *in;
3439 struct mcx_cmd_init_hca_out *out;
3440 int error;
3441 uint8_t status;
3442
3443 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3444 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3445
3446 in = mcx_cmdq_in(cqe);
3447 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3448 in->cmd_op_mod = htobe16(0);
3449
3450 mcx_cmdq_post(sc, cqe, 0);
3451
3452 error = mcx_cmdq_poll(sc, cqe, 1000);
3453 if (error != 0) {
3454 printf(", hca init timeout\n");
3455 return (-1);
3456 }
3457 if (mcx_cmdq_verify(cqe) != 0) {
3458 printf(", hca init command corrupt\n");
3459 return (-1);
3460 }
3461
3462 status = cqe->cq_output_data[0];
3463 if (status != MCX_CQ_STATUS_OK) {
3464 printf(", hca init failed (%x)\n", status);
3465 return (-1);
3466 }
3467
3468 return (0);
3469 }
3470
3471 static int
3472 mcx_set_driver_version(struct mcx_softc *sc)
3473 {
3474 struct mcx_dmamem mxm;
3475 struct mcx_cmdq_entry *cqe;
3476 struct mcx_cmd_set_driver_version_in *in;
3477 struct mcx_cmd_set_driver_version_out *out;
3478 int error;
3479 int token;
3480 uint8_t status;
3481
3482 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3483 token = mcx_cmdq_token(sc);
3484 mcx_cmdq_init(sc, cqe, sizeof(*in) +
3485 sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3486
3487 in = mcx_cmdq_in(cqe);
3488 in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3489 in->cmd_op_mod = htobe16(0);
3490
3491 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3492 &cqe->cq_input_ptr, token) != 0) {
3493 printf(", unable to allocate set driver version mailboxen\n");
3494 return (-1);
3495 }
3496 strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3497 "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3498
3499 mcx_cmdq_mboxes_sign(&mxm, 1);
3500 mcx_cmdq_post(sc, cqe, 0);
3501
3502 error = mcx_cmdq_poll(sc, cqe, 1000);
3503 if (error != 0) {
3504 printf(", set driver version timeout\n");
3505 goto free;
3506 }
3507 if (mcx_cmdq_verify(cqe) != 0) {
3508 printf(", set driver version command corrupt\n");
3509 goto free;
3510 }
3511
3512 status = cqe->cq_output_data[0];
3513 if (status != MCX_CQ_STATUS_OK) {
3514 printf(", set driver version failed (%x)\n", status);
3515 error = -1;
3516 goto free;
3517 }
3518
3519 free:
3520 mcx_dmamem_free(sc, &mxm);
3521
3522 return (error);
3523 }
3524
3525 static int
3526 mcx_iff(struct mcx_softc *sc)
3527 {
3528 struct ifnet *ifp = &sc->sc_ec.ec_if;
3529 struct mcx_dmamem mxm;
3530 struct mcx_cmdq_entry *cqe;
3531 struct mcx_cmd_modify_nic_vport_context_in *in;
3532 struct mcx_cmd_modify_nic_vport_context_out *out;
3533 struct mcx_nic_vport_ctx *ctx;
3534 int error;
3535 int token;
3536 int insize;
3537
3538 /* enable or disable the promisc flow */
3539 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3540 if (sc->sc_promisc_flow_enabled == 0) {
3541 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3542 0, NULL);
3543 sc->sc_promisc_flow_enabled = 1;
3544 }
3545 } else if (sc->sc_promisc_flow_enabled != 0) {
3546 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3547 sc->sc_promisc_flow_enabled = 0;
3548 }
3549
3550 /* enable or disable the all-multicast flow */
3551 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3552 if (sc->sc_allmulti_flow_enabled == 0) {
3553 uint8_t mcast[ETHER_ADDR_LEN];
3554
3555 memset(mcast, 0, sizeof(mcast));
3556 mcast[0] = 0x01;
3557 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3558 0, mcast);
3559 sc->sc_allmulti_flow_enabled = 1;
3560 }
3561 } else if (sc->sc_allmulti_flow_enabled != 0) {
3562 mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3563 sc->sc_allmulti_flow_enabled = 0;
3564 }
3565
3566 insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3567
3568 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3569 token = mcx_cmdq_token(sc);
3570 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3571
3572 in = mcx_cmdq_in(cqe);
3573 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3574 in->cmd_op_mod = htobe16(0);
3575 in->cmd_field_select = htobe32(
3576 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3577 MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3578
3579 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3580 printf(", unable to allocate modify nic vport context mailboxen\n");
3581 return (-1);
3582 }
3583 ctx = (struct mcx_nic_vport_ctx *)
3584 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3585 ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3586 /*
3587 * always leave promisc-all enabled on the vport since we can't give it
3588 * a vlan list, and we're already doing multicast filtering in the flow
3589 * table.
3590 */
3591 ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3592
3593 mcx_cmdq_mboxes_sign(&mxm, 1);
3594 mcx_cmdq_post(sc, cqe, 0);
3595
3596 error = mcx_cmdq_poll(sc, cqe, 1000);
3597 if (error != 0) {
3598 printf(", modify nic vport context timeout\n");
3599 goto free;
3600 }
3601 if (mcx_cmdq_verify(cqe) != 0) {
3602 printf(", modify nic vport context command corrupt\n");
3603 goto free;
3604 }
3605
3606 out = mcx_cmdq_out(cqe);
3607 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3608 printf(", modify nic vport context failed (%x, %x)\n",
3609 out->cmd_status, out->cmd_syndrome);
3610 error = -1;
3611 goto free;
3612 }
3613
3614 free:
3615 mcx_dmamem_free(sc, &mxm);
3616
3617 return (error);
3618 }
3619
3620 static int
3621 mcx_alloc_uar(struct mcx_softc *sc)
3622 {
3623 struct mcx_cmdq_entry *cqe;
3624 struct mcx_cmd_alloc_uar_in *in;
3625 struct mcx_cmd_alloc_uar_out *out;
3626 int error;
3627
3628 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3629 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3630
3631 in = mcx_cmdq_in(cqe);
3632 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3633 in->cmd_op_mod = htobe16(0);
3634
3635 mcx_cmdq_post(sc, cqe, 0);
3636
3637 error = mcx_cmdq_poll(sc, cqe, 1000);
3638 if (error != 0) {
3639 printf(", alloc uar timeout\n");
3640 return (-1);
3641 }
3642 if (mcx_cmdq_verify(cqe) != 0) {
3643 printf(", alloc uar command corrupt\n");
3644 return (-1);
3645 }
3646
3647 out = mcx_cmdq_out(cqe);
3648 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3649 printf(", alloc uar failed (%x)\n", out->cmd_status);
3650 return (-1);
3651 }
3652
3653 sc->sc_uar = be32toh(out->cmd_uar);
3654
3655 return (0);
3656 }
3657
3658 static int
3659 mcx_create_eq(struct mcx_softc *sc)
3660 {
3661 struct mcx_cmdq_entry *cqe;
3662 struct mcx_dmamem mxm;
3663 struct mcx_cmd_create_eq_in *in;
3664 struct mcx_cmd_create_eq_mb_in *mbin;
3665 struct mcx_cmd_create_eq_out *out;
3666 struct mcx_eq_entry *eqe;
3667 int error;
3668 uint64_t *pas;
3669 int insize, npages, paslen, i, token;
3670
3671 sc->sc_eq_cons = 0;
3672
3673 npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3674 MCX_PAGE_SIZE);
3675 paslen = npages * sizeof(*pas);
3676 insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3677
3678 if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3679 MCX_PAGE_SIZE) != 0) {
3680 printf(", unable to allocate event queue memory\n");
3681 return (-1);
3682 }
3683
3684 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3685 for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3686 eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3687 }
3688
3689 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3690 token = mcx_cmdq_token(sc);
3691 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3692
3693 in = mcx_cmdq_in(cqe);
3694 in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3695 in->cmd_op_mod = htobe16(0);
3696
3697 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3698 &cqe->cq_input_ptr, token) != 0) {
3699 printf(", unable to allocate create eq mailboxen\n");
3700 return (-1);
3701 }
3702 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3703 mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3704 (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3705 mbin->cmd_event_bitmask = htobe64(
3706 (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3707 (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3708 (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3709 (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3710
3711 /* physical addresses follow the mailbox in data */
3712 pas = (uint64_t *)(mbin + 1);
3713 for (i = 0; i < npages; i++) {
3714 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
3715 (i * MCX_PAGE_SIZE));
3716 }
3717 mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3718 mcx_cmdq_post(sc, cqe, 0);
3719
3720 error = mcx_cmdq_poll(sc, cqe, 1000);
3721 if (error != 0) {
3722 printf(", create eq timeout\n");
3723 goto free;
3724 }
3725 if (mcx_cmdq_verify(cqe) != 0) {
3726 printf(", create eq command corrupt\n");
3727 goto free;
3728 }
3729
3730 out = mcx_cmdq_out(cqe);
3731 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3732 printf(", create eq failed (%x, %x)\n", out->cmd_status,
3733 be32toh(out->cmd_syndrome));
3734 error = -1;
3735 goto free;
3736 }
3737
3738 sc->sc_eqn = be32toh(out->cmd_eqn);
3739 mcx_arm_eq(sc);
3740 free:
3741 mcx_dmamem_free(sc, &mxm);
3742 return (error);
3743 }
3744
3745 static int
3746 mcx_alloc_pd(struct mcx_softc *sc)
3747 {
3748 struct mcx_cmdq_entry *cqe;
3749 struct mcx_cmd_alloc_pd_in *in;
3750 struct mcx_cmd_alloc_pd_out *out;
3751 int error;
3752
3753 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3754 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3755
3756 in = mcx_cmdq_in(cqe);
3757 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3758 in->cmd_op_mod = htobe16(0);
3759
3760 mcx_cmdq_post(sc, cqe, 0);
3761
3762 error = mcx_cmdq_poll(sc, cqe, 1000);
3763 if (error != 0) {
3764 printf(", alloc pd timeout\n");
3765 return (-1);
3766 }
3767 if (mcx_cmdq_verify(cqe) != 0) {
3768 printf(", alloc pd command corrupt\n");
3769 return (-1);
3770 }
3771
3772 out = mcx_cmdq_out(cqe);
3773 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3774 printf(", alloc pd failed (%x)\n", out->cmd_status);
3775 return (-1);
3776 }
3777
3778 sc->sc_pd = be32toh(out->cmd_pd);
3779 return (0);
3780 }
3781
3782 static int
3783 mcx_alloc_tdomain(struct mcx_softc *sc)
3784 {
3785 struct mcx_cmdq_entry *cqe;
3786 struct mcx_cmd_alloc_td_in *in;
3787 struct mcx_cmd_alloc_td_out *out;
3788 int error;
3789
3790 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3791 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3792
3793 in = mcx_cmdq_in(cqe);
3794 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3795 in->cmd_op_mod = htobe16(0);
3796
3797 mcx_cmdq_post(sc, cqe, 0);
3798
3799 error = mcx_cmdq_poll(sc, cqe, 1000);
3800 if (error != 0) {
3801 printf(", alloc transport domain timeout\n");
3802 return (-1);
3803 }
3804 if (mcx_cmdq_verify(cqe) != 0) {
3805 printf(", alloc transport domain command corrupt\n");
3806 return (-1);
3807 }
3808
3809 out = mcx_cmdq_out(cqe);
3810 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3811 printf(", alloc transport domain failed (%x)\n",
3812 out->cmd_status);
3813 return (-1);
3814 }
3815
3816 sc->sc_tdomain = be32toh(out->cmd_tdomain);
3817 return (0);
3818 }
3819
3820 static int
3821 mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3822 {
3823 struct mcx_dmamem mxm;
3824 struct mcx_cmdq_entry *cqe;
3825 struct mcx_cmd_query_nic_vport_context_in *in;
3826 struct mcx_cmd_query_nic_vport_context_out *out;
3827 struct mcx_nic_vport_ctx *ctx;
3828 uint8_t *addr;
3829 int error, token, i;
3830
3831 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3832 token = mcx_cmdq_token(sc);
3833 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3834
3835 in = mcx_cmdq_in(cqe);
3836 in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3837 in->cmd_op_mod = htobe16(0);
3838 in->cmd_allowed_list_type = 0;
3839
3840 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3841 printf(", unable to allocate query nic vport context mailboxen\n");
3842 return (-1);
3843 }
3844 mcx_cmdq_mboxes_sign(&mxm, 1);
3845 mcx_cmdq_post(sc, cqe, 0);
3846
3847 error = mcx_cmdq_poll(sc, cqe, 1000);
3848 if (error != 0) {
3849 printf(", query nic vport context timeout\n");
3850 goto free;
3851 }
3852 if (mcx_cmdq_verify(cqe) != 0) {
3853 printf(", query nic vport context command corrupt\n");
3854 goto free;
3855 }
3856
3857 out = mcx_cmdq_out(cqe);
3858 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3859 printf(", query nic vport context failed (%x, %x)\n",
3860 out->cmd_status, out->cmd_syndrome);
3861 error = -1;
3862 goto free;
3863 }
3864
3865 ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3866 addr = (uint8_t *)&ctx->vp_perm_addr;
3867 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3868 enaddr[i] = addr[i + 2];
3869 }
3870 free:
3871 mcx_dmamem_free(sc, &mxm);
3872
3873 return (error);
3874 }
3875
3876 static int
3877 mcx_query_special_contexts(struct mcx_softc *sc)
3878 {
3879 struct mcx_cmdq_entry *cqe;
3880 struct mcx_cmd_query_special_ctx_in *in;
3881 struct mcx_cmd_query_special_ctx_out *out;
3882 int error;
3883
3884 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3885 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3886
3887 in = mcx_cmdq_in(cqe);
3888 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3889 in->cmd_op_mod = htobe16(0);
3890
3891 mcx_cmdq_post(sc, cqe, 0);
3892
3893 error = mcx_cmdq_poll(sc, cqe, 1000);
3894 if (error != 0) {
3895 printf(", query special contexts timeout\n");
3896 return (-1);
3897 }
3898 if (mcx_cmdq_verify(cqe) != 0) {
3899 printf(", query special contexts command corrupt\n");
3900 return (-1);
3901 }
3902
3903 out = mcx_cmdq_out(cqe);
3904 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3905 printf(", query special contexts failed (%x)\n",
3906 out->cmd_status);
3907 return (-1);
3908 }
3909
3910 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3911 return (0);
3912 }
3913
3914 static int
3915 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3916 {
3917 struct mcx_reg_pmtu pmtu;
3918 int error;
3919
3920 /* read max mtu */
3921 memset(&pmtu, 0, sizeof(pmtu));
3922 pmtu.rp_local_port = 1;
3923 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3924 sizeof(pmtu));
3925 if (error != 0) {
3926 printf(", unable to get port MTU\n");
3927 return error;
3928 }
3929
3930 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3931 pmtu.rp_admin_mtu = htobe16(mtu);
3932 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3933 sizeof(pmtu));
3934 if (error != 0) {
3935 printf(", unable to set port MTU\n");
3936 return error;
3937 }
3938
3939 sc->sc_hardmtu = mtu;
3940 return 0;
3941 }
3942
3943 static int
3944 mcx_create_cq(struct mcx_softc *sc, int eqn)
3945 {
3946 struct mcx_cmdq_entry *cmde;
3947 struct mcx_cq_entry *cqe;
3948 struct mcx_cq *cq;
3949 struct mcx_dmamem mxm;
3950 struct mcx_cmd_create_cq_in *in;
3951 struct mcx_cmd_create_cq_mb_in *mbin;
3952 struct mcx_cmd_create_cq_out *out;
3953 int error;
3954 uint64_t *pas;
3955 int insize, npages, paslen, i, token;
3956
3957 if (sc->sc_num_cq >= MCX_MAX_CQS) {
3958 printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3959 return (-1);
3960 }
3961 cq = &sc->sc_cq[sc->sc_num_cq];
3962
3963 npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3964 MCX_PAGE_SIZE);
3965 paslen = npages * sizeof(*pas);
3966 insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3967
3968 if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3969 MCX_PAGE_SIZE) != 0) {
3970 printf("%s: unable to allocate completion queue memory\n",
3971 DEVNAME(sc));
3972 return (-1);
3973 }
3974 cqe = MCX_DMA_KVA(&cq->cq_mem);
3975 for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
3976 cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
3977 }
3978
3979 cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3980 token = mcx_cmdq_token(sc);
3981 mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
3982
3983 in = mcx_cmdq_in(cmde);
3984 in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
3985 in->cmd_op_mod = htobe16(0);
3986
3987 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3988 &cmde->cq_input_ptr, token) != 0) {
3989 printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
3990 error = -1;
3991 goto free;
3992 }
3993 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3994 mbin->cmd_cq_ctx.cq_uar_size = htobe32(
3995 (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
3996 mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
3997 mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
3998 (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
3999 MCX_CQ_MOD_COUNTER);
4000 mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4001 MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4002 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4003
4004 /* physical addresses follow the mailbox in data */
4005 pas = (uint64_t *)(mbin + 1);
4006 for (i = 0; i < npages; i++) {
4007 pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
4008 }
4009 mcx_cmdq_post(sc, cmde, 0);
4010
4011 error = mcx_cmdq_poll(sc, cmde, 1000);
4012 if (error != 0) {
4013 printf("%s: create cq timeout\n", DEVNAME(sc));
4014 goto free;
4015 }
4016 if (mcx_cmdq_verify(cmde) != 0) {
4017 printf("%s: create cq command corrupt\n", DEVNAME(sc));
4018 goto free;
4019 }
4020
4021 out = mcx_cmdq_out(cmde);
4022 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4023 printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4024 out->cmd_status, be32toh(out->cmd_syndrome));
4025 error = -1;
4026 goto free;
4027 }
4028
4029 cq->cq_n = be32toh(out->cmd_cqn);
4030 cq->cq_cons = 0;
4031 cq->cq_count = 0;
4032 cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4033 MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4034 mcx_arm_cq(sc, cq);
4035 sc->sc_num_cq++;
4036
4037 free:
4038 mcx_dmamem_free(sc, &mxm);
4039 return (error);
4040 }
4041
4042 static int
4043 mcx_destroy_cq(struct mcx_softc *sc, int index)
4044 {
4045 struct mcx_cmdq_entry *cqe;
4046 struct mcx_cmd_destroy_cq_in *in;
4047 struct mcx_cmd_destroy_cq_out *out;
4048 int error;
4049 int token;
4050
4051 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4052 token = mcx_cmdq_token(sc);
4053 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4054
4055 in = mcx_cmdq_in(cqe);
4056 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4057 in->cmd_op_mod = htobe16(0);
4058 in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4059
4060 mcx_cmdq_post(sc, cqe, 0);
4061 error = mcx_cmdq_poll(sc, cqe, 1000);
4062 if (error != 0) {
4063 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4064 return error;
4065 }
4066 if (mcx_cmdq_verify(cqe) != 0) {
4067 printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4068 return error;
4069 }
4070
4071 out = mcx_cmdq_out(cqe);
4072 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4073 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4074 out->cmd_status, be32toh(out->cmd_syndrome));
4075 return -1;
4076 }
4077
4078 sc->sc_cq[index].cq_n = 0;
4079 mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4080 sc->sc_cq[index].cq_cons = 0;
4081 sc->sc_cq[index].cq_count = 0;
4082 return 0;
4083 }
4084
4085 static int
4086 mcx_create_rq(struct mcx_softc *sc, int cqn)
4087 {
4088 struct mcx_cmdq_entry *cqe;
4089 struct mcx_dmamem mxm;
4090 struct mcx_cmd_create_rq_in *in;
4091 struct mcx_cmd_create_rq_out *out;
4092 struct mcx_rq_ctx *mbin;
4093 int error;
4094 uint64_t *pas;
4095 uint8_t *doorbell;
4096 int insize, npages, paslen, i, token;
4097
4098 npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4099 MCX_PAGE_SIZE);
4100 paslen = npages * sizeof(*pas);
4101 insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4102
4103 if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4104 MCX_PAGE_SIZE) != 0) {
4105 printf("%s: unable to allocate receive queue memory\n",
4106 DEVNAME(sc));
4107 return (-1);
4108 }
4109
4110 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4111 token = mcx_cmdq_token(sc);
4112 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4113
4114 in = mcx_cmdq_in(cqe);
4115 in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4116 in->cmd_op_mod = htobe16(0);
4117
4118 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4119 &cqe->cq_input_ptr, token) != 0) {
4120 printf("%s: unable to allocate create rq mailboxen\n",
4121 DEVNAME(sc));
4122 error = -1;
4123 goto free;
4124 }
4125 mbin = (struct mcx_rq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4126 mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4127 mbin->rq_cqn = htobe32(cqn);
4128 mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4129 mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4130 mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4131 MCX_RQ_DOORBELL_OFFSET);
4132 mbin->rq_wq.wq_log_stride = htobe16(4);
4133 mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4134
4135 /* physical addresses follow the mailbox in data */
4136 pas = (uint64_t *)(mbin + 1);
4137 for (i = 0; i < npages; i++) {
4138 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
4139 (i * MCX_PAGE_SIZE));
4140 }
4141 mcx_cmdq_post(sc, cqe, 0);
4142
4143 error = mcx_cmdq_poll(sc, cqe, 1000);
4144 if (error != 0) {
4145 printf("%s: create rq timeout\n", DEVNAME(sc));
4146 goto free;
4147 }
4148 if (mcx_cmdq_verify(cqe) != 0) {
4149 printf("%s: create rq command corrupt\n", DEVNAME(sc));
4150 goto free;
4151 }
4152
4153 out = mcx_cmdq_out(cqe);
4154 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4155 printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4156 out->cmd_status, be32toh(out->cmd_syndrome));
4157 error = -1;
4158 goto free;
4159 }
4160
4161 sc->sc_rqn = be32toh(out->cmd_rqn);
4162
4163 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4164 sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4165
4166 free:
4167 mcx_dmamem_free(sc, &mxm);
4168 return (error);
4169 }
4170
4171 static int
4172 mcx_ready_rq(struct mcx_softc *sc)
4173 {
4174 struct mcx_cmdq_entry *cqe;
4175 struct mcx_dmamem mxm;
4176 struct mcx_cmd_modify_rq_in *in;
4177 struct mcx_cmd_modify_rq_mb_in *mbin;
4178 struct mcx_cmd_modify_rq_out *out;
4179 int error;
4180 int token;
4181
4182 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4183 token = mcx_cmdq_token(sc);
4184 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4185
4186 in = mcx_cmdq_in(cqe);
4187 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4188 in->cmd_op_mod = htobe16(0);
4189 in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4190
4191 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4192 printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4193 return (-1);
4194 }
4195 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4196 mbin->cmd_rq_ctx.rq_flags = htobe32(
4197 MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4198
4199 mcx_cmdq_mboxes_sign(&mxm, 1);
4200 mcx_cmdq_post(sc, cqe, 0);
4201 error = mcx_cmdq_poll(sc, cqe, 1000);
4202 if (error != 0) {
4203 printf("%s: modify rq timeout\n", DEVNAME(sc));
4204 goto free;
4205 }
4206 if (mcx_cmdq_verify(cqe) != 0) {
4207 printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4208 goto free;
4209 }
4210
4211 out = mcx_cmdq_out(cqe);
4212 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4213 printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4214 out->cmd_status, be32toh(out->cmd_syndrome));
4215 error = -1;
4216 goto free;
4217 }
4218
4219 free:
4220 mcx_dmamem_free(sc, &mxm);
4221 return (error);
4222 }
4223
4224 static int
4225 mcx_destroy_rq(struct mcx_softc *sc)
4226 {
4227 struct mcx_cmdq_entry *cqe;
4228 struct mcx_cmd_destroy_rq_in *in;
4229 struct mcx_cmd_destroy_rq_out *out;
4230 int error;
4231 int token;
4232
4233 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4234 token = mcx_cmdq_token(sc);
4235 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4236
4237 in = mcx_cmdq_in(cqe);
4238 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4239 in->cmd_op_mod = htobe16(0);
4240 in->cmd_rqn = htobe32(sc->sc_rqn);
4241
4242 mcx_cmdq_post(sc, cqe, 0);
4243 error = mcx_cmdq_poll(sc, cqe, 1000);
4244 if (error != 0) {
4245 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4246 return error;
4247 }
4248 if (mcx_cmdq_verify(cqe) != 0) {
4249 printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4250 return error;
4251 }
4252
4253 out = mcx_cmdq_out(cqe);
4254 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4255 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4256 out->cmd_status, be32toh(out->cmd_syndrome));
4257 return -1;
4258 }
4259
4260 sc->sc_rqn = 0;
4261 return 0;
4262 }
4263
4264 static int
4265 mcx_create_tir(struct mcx_softc *sc)
4266 {
4267 struct mcx_cmdq_entry *cqe;
4268 struct mcx_dmamem mxm;
4269 struct mcx_cmd_create_tir_in *in;
4270 struct mcx_cmd_create_tir_mb_in *mbin;
4271 struct mcx_cmd_create_tir_out *out;
4272 int error;
4273 int token;
4274
4275 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4276 token = mcx_cmdq_token(sc);
4277 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4278
4279 in = mcx_cmdq_in(cqe);
4280 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4281 in->cmd_op_mod = htobe16(0);
4282
4283 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4284 printf("%s: unable to allocate create tir mailbox\n",
4285 DEVNAME(sc));
4286 return (-1);
4287 }
4288 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4289 /* leave disp_type = 0, so packets get sent to the inline rqn */
4290 mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4291 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4292
4293 mcx_cmdq_post(sc, cqe, 0);
4294 error = mcx_cmdq_poll(sc, cqe, 1000);
4295 if (error != 0) {
4296 printf("%s: create tir timeout\n", DEVNAME(sc));
4297 goto free;
4298 }
4299 if (mcx_cmdq_verify(cqe) != 0) {
4300 printf("%s: create tir command corrupt\n", DEVNAME(sc));
4301 goto free;
4302 }
4303
4304 out = mcx_cmdq_out(cqe);
4305 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4306 printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4307 out->cmd_status, be32toh(out->cmd_syndrome));
4308 error = -1;
4309 goto free;
4310 }
4311
4312 sc->sc_tirn = be32toh(out->cmd_tirn);
4313 free:
4314 mcx_dmamem_free(sc, &mxm);
4315 return (error);
4316 }
4317
4318 static int
4319 mcx_destroy_tir(struct mcx_softc *sc)
4320 {
4321 struct mcx_cmdq_entry *cqe;
4322 struct mcx_cmd_destroy_tir_in *in;
4323 struct mcx_cmd_destroy_tir_out *out;
4324 int error;
4325 int token;
4326
4327 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4328 token = mcx_cmdq_token(sc);
4329 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4330
4331 in = mcx_cmdq_in(cqe);
4332 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4333 in->cmd_op_mod = htobe16(0);
4334 in->cmd_tirn = htobe32(sc->sc_tirn);
4335
4336 mcx_cmdq_post(sc, cqe, 0);
4337 error = mcx_cmdq_poll(sc, cqe, 1000);
4338 if (error != 0) {
4339 printf("%s: destroy tir timeout\n", DEVNAME(sc));
4340 return error;
4341 }
4342 if (mcx_cmdq_verify(cqe) != 0) {
4343 printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4344 return error;
4345 }
4346
4347 out = mcx_cmdq_out(cqe);
4348 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4349 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4350 out->cmd_status, be32toh(out->cmd_syndrome));
4351 return -1;
4352 }
4353
4354 sc->sc_tirn = 0;
4355 return 0;
4356 }
4357
4358 static int
4359 mcx_create_sq(struct mcx_softc *sc, int cqn)
4360 {
4361 struct mcx_cmdq_entry *cqe;
4362 struct mcx_dmamem mxm;
4363 struct mcx_cmd_create_sq_in *in;
4364 struct mcx_sq_ctx *mbin;
4365 struct mcx_cmd_create_sq_out *out;
4366 int error;
4367 uint64_t *pas;
4368 uint8_t *doorbell;
4369 int insize, npages, paslen, i, token;
4370
4371 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4372 MCX_PAGE_SIZE);
4373 paslen = npages * sizeof(*pas);
4374 insize = sizeof(struct mcx_sq_ctx) + paslen;
4375
4376 if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4377 MCX_PAGE_SIZE) != 0) {
4378 printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4379 return (-1);
4380 }
4381
4382 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4383 token = mcx_cmdq_token(sc);
4384 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
4385 token);
4386
4387 in = mcx_cmdq_in(cqe);
4388 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4389 in->cmd_op_mod = htobe16(0);
4390
4391 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4392 &cqe->cq_input_ptr, token) != 0) {
4393 printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4394 error = -1;
4395 goto free;
4396 }
4397 mbin = (struct mcx_sq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4398 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4399 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4400 mbin->sq_cqn = htobe32(cqn);
4401 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4402 mbin->sq_tis_num = htobe32(sc->sc_tisn);
4403 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4404 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4405 mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4406 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4407 MCX_SQ_DOORBELL_OFFSET);
4408 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4409 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4410
4411 /* physical addresses follow the mailbox in data */
4412 pas = (uint64_t *)(mbin + 1);
4413 for (i = 0; i < npages; i++) {
4414 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4415 (i * MCX_PAGE_SIZE));
4416 }
4417 mcx_cmdq_post(sc, cqe, 0);
4418
4419 error = mcx_cmdq_poll(sc, cqe, 1000);
4420 if (error != 0) {
4421 printf("%s: create sq timeout\n", DEVNAME(sc));
4422 goto free;
4423 }
4424 if (mcx_cmdq_verify(cqe) != 0) {
4425 printf("%s: create sq command corrupt\n", DEVNAME(sc));
4426 goto free;
4427 }
4428
4429 out = mcx_cmdq_out(cqe);
4430 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4431 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4432 out->cmd_status, be32toh(out->cmd_syndrome));
4433 error = -1;
4434 goto free;
4435 }
4436
4437 sc->sc_sqn = be32toh(out->cmd_sqn);
4438
4439 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4440 sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4441 free:
4442 mcx_dmamem_free(sc, &mxm);
4443 return (error);
4444 }
4445
4446 static int
4447 mcx_destroy_sq(struct mcx_softc *sc)
4448 {
4449 struct mcx_cmdq_entry *cqe;
4450 struct mcx_cmd_destroy_sq_in *in;
4451 struct mcx_cmd_destroy_sq_out *out;
4452 int error;
4453 int token;
4454
4455 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4456 token = mcx_cmdq_token(sc);
4457 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4458
4459 in = mcx_cmdq_in(cqe);
4460 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4461 in->cmd_op_mod = htobe16(0);
4462 in->cmd_sqn = htobe32(sc->sc_sqn);
4463
4464 mcx_cmdq_post(sc, cqe, 0);
4465 error = mcx_cmdq_poll(sc, cqe, 1000);
4466 if (error != 0) {
4467 printf("%s: destroy sq timeout\n", DEVNAME(sc));
4468 return error;
4469 }
4470 if (mcx_cmdq_verify(cqe) != 0) {
4471 printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4472 return error;
4473 }
4474
4475 out = mcx_cmdq_out(cqe);
4476 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4477 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4478 out->cmd_status, be32toh(out->cmd_syndrome));
4479 return -1;
4480 }
4481
4482 sc->sc_sqn = 0;
4483 return 0;
4484 }
4485
4486 static int
4487 mcx_ready_sq(struct mcx_softc *sc)
4488 {
4489 struct mcx_cmdq_entry *cqe;
4490 struct mcx_dmamem mxm;
4491 struct mcx_cmd_modify_sq_in *in;
4492 struct mcx_cmd_modify_sq_mb_in *mbin;
4493 struct mcx_cmd_modify_sq_out *out;
4494 int error;
4495 int token;
4496
4497 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4498 token = mcx_cmdq_token(sc);
4499 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4500
4501 in = mcx_cmdq_in(cqe);
4502 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4503 in->cmd_op_mod = htobe16(0);
4504 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4505
4506 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4507 printf("%s: unable to allocate modify sq mailbox\n",
4508 DEVNAME(sc));
4509 return (-1);
4510 }
4511 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4512 mbin->cmd_sq_ctx.sq_flags = htobe32(
4513 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4514
4515 mcx_cmdq_mboxes_sign(&mxm, 1);
4516 mcx_cmdq_post(sc, cqe, 0);
4517 error = mcx_cmdq_poll(sc, cqe, 1000);
4518 if (error != 0) {
4519 printf("%s: modify sq timeout\n", DEVNAME(sc));
4520 goto free;
4521 }
4522 if (mcx_cmdq_verify(cqe) != 0) {
4523 printf("%s: modify sq command corrupt\n", DEVNAME(sc));
4524 goto free;
4525 }
4526
4527 out = mcx_cmdq_out(cqe);
4528 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4529 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4530 out->cmd_status, be32toh(out->cmd_syndrome));
4531 error = -1;
4532 goto free;
4533 }
4534
4535 free:
4536 mcx_dmamem_free(sc, &mxm);
4537 return (error);
4538 }
4539
4540 static int
4541 mcx_create_tis(struct mcx_softc *sc)
4542 {
4543 struct mcx_cmdq_entry *cqe;
4544 struct mcx_dmamem mxm;
4545 struct mcx_cmd_create_tis_in *in;
4546 struct mcx_cmd_create_tis_mb_in *mbin;
4547 struct mcx_cmd_create_tis_out *out;
4548 int error;
4549 int token;
4550
4551 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4552 token = mcx_cmdq_token(sc);
4553 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4554
4555 in = mcx_cmdq_in(cqe);
4556 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4557 in->cmd_op_mod = htobe16(0);
4558
4559 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4560 printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4561 return (-1);
4562 }
4563 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4564 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4565
4566 mcx_cmdq_mboxes_sign(&mxm, 1);
4567 mcx_cmdq_post(sc, cqe, 0);
4568 error = mcx_cmdq_poll(sc, cqe, 1000);
4569 if (error != 0) {
4570 printf("%s: create tis timeout\n", DEVNAME(sc));
4571 goto free;
4572 }
4573 if (mcx_cmdq_verify(cqe) != 0) {
4574 printf("%s: create tis command corrupt\n", DEVNAME(sc));
4575 goto free;
4576 }
4577
4578 out = mcx_cmdq_out(cqe);
4579 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4580 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4581 out->cmd_status, be32toh(out->cmd_syndrome));
4582 error = -1;
4583 goto free;
4584 }
4585
4586 sc->sc_tisn = be32toh(out->cmd_tisn);
4587 free:
4588 mcx_dmamem_free(sc, &mxm);
4589 return (error);
4590 }
4591
4592 static int
4593 mcx_destroy_tis(struct mcx_softc *sc)
4594 {
4595 struct mcx_cmdq_entry *cqe;
4596 struct mcx_cmd_destroy_tis_in *in;
4597 struct mcx_cmd_destroy_tis_out *out;
4598 int error;
4599 int token;
4600
4601 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4602 token = mcx_cmdq_token(sc);
4603 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4604
4605 in = mcx_cmdq_in(cqe);
4606 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4607 in->cmd_op_mod = htobe16(0);
4608 in->cmd_tisn = htobe32(sc->sc_tisn);
4609
4610 mcx_cmdq_post(sc, cqe, 0);
4611 error = mcx_cmdq_poll(sc, cqe, 1000);
4612 if (error != 0) {
4613 printf("%s: destroy tis timeout\n", DEVNAME(sc));
4614 return error;
4615 }
4616 if (mcx_cmdq_verify(cqe) != 0) {
4617 printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4618 return error;
4619 }
4620
4621 out = mcx_cmdq_out(cqe);
4622 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4623 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4624 out->cmd_status, be32toh(out->cmd_syndrome));
4625 return -1;
4626 }
4627
4628 sc->sc_tirn = 0;
4629 return 0;
4630 }
4631
#if 0
/*
 * Allocate a hardware flow counter and record its id in
 * sc->sc_flow_counter_id[i].  Compiled out; kept as a debugging aid.
 */
static int
mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_alloc_flow_counter_in *in;
	struct mcx_cmd_alloc_flow_counter_out *out;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
		return (-1);
	}

	/* output fits in the command queue entry; no output mailbox used */
	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
		    out->cmd_status);
		return (-1);
	}

	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);

	return (0);
}
#endif
4673
4674 static int
4675 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4676 {
4677 struct mcx_cmdq_entry *cqe;
4678 struct mcx_dmamem mxm;
4679 struct mcx_cmd_create_flow_table_in *in;
4680 struct mcx_cmd_create_flow_table_mb_in *mbin;
4681 struct mcx_cmd_create_flow_table_out *out;
4682 int error;
4683 int token;
4684
4685 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4686 token = mcx_cmdq_token(sc);
4687 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4688
4689 in = mcx_cmdq_in(cqe);
4690 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4691 in->cmd_op_mod = htobe16(0);
4692
4693 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4694 printf("%s: unable to allocate create flow table mailbox\n",
4695 DEVNAME(sc));
4696 return (-1);
4697 }
4698 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4699 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4700 mbin->cmd_ctx.ft_log_size = log_size;
4701
4702 mcx_cmdq_mboxes_sign(&mxm, 1);
4703 mcx_cmdq_post(sc, cqe, 0);
4704 error = mcx_cmdq_poll(sc, cqe, 1000);
4705 if (error != 0) {
4706 printf("%s: create flow table timeout\n", DEVNAME(sc));
4707 goto free;
4708 }
4709 if (mcx_cmdq_verify(cqe) != 0) {
4710 printf("%s: create flow table command corrupt\n", DEVNAME(sc));
4711 goto free;
4712 }
4713
4714 out = mcx_cmdq_out(cqe);
4715 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4716 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4717 out->cmd_status, be32toh(out->cmd_syndrome));
4718 error = -1;
4719 goto free;
4720 }
4721
4722 sc->sc_flow_table_id = be32toh(out->cmd_table_id);
4723 free:
4724 mcx_dmamem_free(sc, &mxm);
4725 return (error);
4726 }
4727
4728 static int
4729 mcx_set_flow_table_root(struct mcx_softc *sc)
4730 {
4731 struct mcx_cmdq_entry *cqe;
4732 struct mcx_dmamem mxm;
4733 struct mcx_cmd_set_flow_table_root_in *in;
4734 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4735 struct mcx_cmd_set_flow_table_root_out *out;
4736 int error;
4737 int token;
4738
4739 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4740 token = mcx_cmdq_token(sc);
4741 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4742
4743 in = mcx_cmdq_in(cqe);
4744 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4745 in->cmd_op_mod = htobe16(0);
4746
4747 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4748 printf("%s: unable to allocate set flow table root mailbox\n",
4749 DEVNAME(sc));
4750 return (-1);
4751 }
4752 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4753 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4754 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4755
4756 mcx_cmdq_mboxes_sign(&mxm, 1);
4757 mcx_cmdq_post(sc, cqe, 0);
4758 error = mcx_cmdq_poll(sc, cqe, 1000);
4759 if (error != 0) {
4760 printf("%s: set flow table root timeout\n", DEVNAME(sc));
4761 goto free;
4762 }
4763 if (mcx_cmdq_verify(cqe) != 0) {
4764 printf("%s: set flow table root command corrupt\n",
4765 DEVNAME(sc));
4766 goto free;
4767 }
4768
4769 out = mcx_cmdq_out(cqe);
4770 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4771 printf("%s: set flow table root failed (%x, %x)\n",
4772 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4773 error = -1;
4774 goto free;
4775 }
4776
4777 free:
4778 mcx_dmamem_free(sc, &mxm);
4779 return (error);
4780 }
4781
4782 static int
4783 mcx_destroy_flow_table(struct mcx_softc *sc)
4784 {
4785 struct mcx_cmdq_entry *cqe;
4786 struct mcx_dmamem mxm;
4787 struct mcx_cmd_destroy_flow_table_in *in;
4788 struct mcx_cmd_destroy_flow_table_mb_in *mb;
4789 struct mcx_cmd_destroy_flow_table_out *out;
4790 int error;
4791 int token;
4792
4793 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4794 token = mcx_cmdq_token(sc);
4795 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4796
4797 in = mcx_cmdq_in(cqe);
4798 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4799 in->cmd_op_mod = htobe16(0);
4800
4801 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4802 printf("%s: unable to allocate destroy flow table mailbox\n",
4803 DEVNAME(sc));
4804 return (-1);
4805 }
4806 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4807 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4808 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4809
4810 mcx_cmdq_mboxes_sign(&mxm, 1);
4811 mcx_cmdq_post(sc, cqe, 0);
4812 error = mcx_cmdq_poll(sc, cqe, 1000);
4813 if (error != 0) {
4814 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4815 goto free;
4816 }
4817 if (mcx_cmdq_verify(cqe) != 0) {
4818 printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
4819 goto free;
4820 }
4821
4822 out = mcx_cmdq_out(cqe);
4823 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4824 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4825 out->cmd_status, be32toh(out->cmd_syndrome));
4826 error = -1;
4827 goto free;
4828 }
4829
4830 sc->sc_flow_table_id = -1;
4831 free:
4832 mcx_dmamem_free(sc, &mxm);
4833 return (error);
4834 }
4835
4836
4837 static int
4838 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4839 int match_enable, struct mcx_flow_match *match)
4840 {
4841 struct mcx_cmdq_entry *cqe;
4842 struct mcx_dmamem mxm;
4843 struct mcx_cmd_create_flow_group_in *in;
4844 struct mcx_cmd_create_flow_group_mb_in *mbin;
4845 struct mcx_cmd_create_flow_group_out *out;
4846 int error;
4847 int token;
4848
4849 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4850 token = mcx_cmdq_token(sc);
4851 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4852 token);
4853
4854 in = mcx_cmdq_in(cqe);
4855 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4856 in->cmd_op_mod = htobe16(0);
4857
4858 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4859 != 0) {
4860 printf("%s: unable to allocate create flow group mailbox\n",
4861 DEVNAME(sc));
4862 return (-1);
4863 }
4864 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4865 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4866 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4867 mbin->cmd_start_flow_index = htobe32(start);
4868 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4869
4870 mbin->cmd_match_criteria_enable = match_enable;
4871 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4872
4873 mcx_cmdq_mboxes_sign(&mxm, 2);
4874 mcx_cmdq_post(sc, cqe, 0);
4875 error = mcx_cmdq_poll(sc, cqe, 1000);
4876 if (error != 0) {
4877 printf("%s: create flow group timeout\n", DEVNAME(sc));
4878 goto free;
4879 }
4880 if (mcx_cmdq_verify(cqe) != 0) {
4881 printf("%s: create flow group command corrupt\n", DEVNAME(sc));
4882 goto free;
4883 }
4884
4885 out = mcx_cmdq_out(cqe);
4886 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4887 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4888 out->cmd_status, be32toh(out->cmd_syndrome));
4889 error = -1;
4890 goto free;
4891 }
4892
4893 sc->sc_flow_group_id[group] = be32toh(out->cmd_group_id);
4894 sc->sc_flow_group_size[group] = size;
4895 sc->sc_flow_group_start[group] = start;
4896
4897 free:
4898 mcx_dmamem_free(sc, &mxm);
4899 return (error);
4900 }
4901
4902 static int
4903 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4904 {
4905 struct mcx_cmdq_entry *cqe;
4906 struct mcx_dmamem mxm;
4907 struct mcx_cmd_destroy_flow_group_in *in;
4908 struct mcx_cmd_destroy_flow_group_mb_in *mb;
4909 struct mcx_cmd_destroy_flow_group_out *out;
4910 int error;
4911 int token;
4912
4913 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4914 token = mcx_cmdq_token(sc);
4915 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4916
4917 in = mcx_cmdq_in(cqe);
4918 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4919 in->cmd_op_mod = htobe16(0);
4920
4921 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4922 printf("%s: unable to allocate destroy flow group mailbox\n",
4923 DEVNAME(sc));
4924 return (-1);
4925 }
4926 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4927 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4928 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4929 mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4930
4931 mcx_cmdq_mboxes_sign(&mxm, 2);
4932 mcx_cmdq_post(sc, cqe, 0);
4933 error = mcx_cmdq_poll(sc, cqe, 1000);
4934 if (error != 0) {
4935 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4936 goto free;
4937 }
4938 if (mcx_cmdq_verify(cqe) != 0) {
4939 printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
4940 goto free;
4941 }
4942
4943 out = mcx_cmdq_out(cqe);
4944 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4945 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4946 out->cmd_status, be32toh(out->cmd_syndrome));
4947 error = -1;
4948 goto free;
4949 }
4950
4951 sc->sc_flow_group_id[group] = -1;
4952 sc->sc_flow_group_size[group] = 0;
4953 free:
4954 mcx_dmamem_free(sc, &mxm);
4955 return (error);
4956 }
4957
4958 static int
4959 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4960 const uint8_t *macaddr)
4961 {
4962 struct mcx_cmdq_entry *cqe;
4963 struct mcx_dmamem mxm;
4964 struct mcx_cmd_set_flow_table_entry_in *in;
4965 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4966 struct mcx_cmd_set_flow_table_entry_out *out;
4967 uint32_t *dest;
4968 int error;
4969 int token;
4970
4971 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4972 token = mcx_cmdq_token(sc);
4973 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4974 sizeof(*out), token);
4975
4976 in = mcx_cmdq_in(cqe);
4977 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4978 in->cmd_op_mod = htobe16(0);
4979
4980 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4981 != 0) {
4982 printf("%s: unable to allocate set flow table entry mailbox\n",
4983 DEVNAME(sc));
4984 return (-1);
4985 }
4986 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4987 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4988 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4989 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4990 mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4991
4992 /* flow context ends at offset 0x330, 0x130 into the second mbox */
4993 dest = (uint32_t *)
4994 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4995 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
4996 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
4997 *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
4998
4999 /* the only thing we match on at the moment is the dest mac address */
5000 if (macaddr != NULL) {
5001 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5002 ETHER_ADDR_LEN);
5003 }
5004
5005 mcx_cmdq_mboxes_sign(&mxm, 2);
5006 mcx_cmdq_post(sc, cqe, 0);
5007 error = mcx_cmdq_poll(sc, cqe, 1000);
5008 if (error != 0) {
5009 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5010 goto free;
5011 }
5012 if (mcx_cmdq_verify(cqe) != 0) {
5013 printf("%s: set flow table entry command corrupt\n",
5014 DEVNAME(sc));
5015 goto free;
5016 }
5017
5018 out = mcx_cmdq_out(cqe);
5019 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5020 printf("%s: set flow table entry failed (%x, %x)\n",
5021 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5022 error = -1;
5023 goto free;
5024 }
5025
5026 free:
5027 mcx_dmamem_free(sc, &mxm);
5028 return (error);
5029 }
5030
5031 static int
5032 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5033 {
5034 struct mcx_cmdq_entry *cqe;
5035 struct mcx_dmamem mxm;
5036 struct mcx_cmd_delete_flow_table_entry_in *in;
5037 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5038 struct mcx_cmd_delete_flow_table_entry_out *out;
5039 int error;
5040 int token;
5041
5042 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5043 token = mcx_cmdq_token(sc);
5044 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5045 token);
5046
5047 in = mcx_cmdq_in(cqe);
5048 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5049 in->cmd_op_mod = htobe16(0);
5050
5051 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5052 printf("%s: unable to allocate delete flow table entry mailbox\n",
5053 DEVNAME(sc));
5054 return (-1);
5055 }
5056 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5057 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5058 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5059 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5060
5061 mcx_cmdq_mboxes_sign(&mxm, 2);
5062 mcx_cmdq_post(sc, cqe, 0);
5063 error = mcx_cmdq_poll(sc, cqe, 1000);
5064 if (error != 0) {
5065 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5066 goto free;
5067 }
5068 if (mcx_cmdq_verify(cqe) != 0) {
5069 printf("%s: delete flow table entry command corrupt\n",
5070 DEVNAME(sc));
5071 goto free;
5072 }
5073
5074 out = mcx_cmdq_out(cqe);
5075 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5076 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5077 DEVNAME(sc), group, index, out->cmd_status,
5078 be32toh(out->cmd_syndrome));
5079 error = -1;
5080 goto free;
5081 }
5082
5083 free:
5084 mcx_dmamem_free(sc, &mxm);
5085 return (error);
5086 }
5087
#if 0
/*
 * Debug helper (compiled out): query the flow table and hexdump its
 * context to the console.
 */
int
mcx_dump_flow_table(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_in *in;
	struct mcx_cmd_query_flow_table_mb_in *mbin;
	struct mcx_cmd_query_flow_table_out *out;
	struct mcx_cmd_query_flow_table_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table mailboxes\n");
		return (-1);
	}
	/* same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* table context starts 8 bytes into the output mailbox */
	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout + 8;
	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
/*
 * Debug helper (in the surrounding #if 0 block): query a single flow
 * table entry by index and hexdump the first output mailbox.
 */
int
mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_entry_in *in;
	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_query_flow_table_entry_out *out;
	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table entry mailboxes\n");
		return (-1);
	}
	/* same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	mbin->cmd_flow_index = htobe32(index);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table entry reply corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table entry failed (%x/%x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5236
/*
 * Debug helper (in the surrounding #if 0 block): query a flow group and
 * hexdump both output mailboxes.
 *
 * NOTE(review): this disabled code reads sc->sc_flow_group_id without
 * an index, while the live code uses sc_flow_group_id[group] (see
 * mcx_create_flow_group); it likely predates that change and would need
 * updating before being re-enabled.
 */
int
mcx_dump_flow_group(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_group_in *in;
	struct mcx_cmd_query_flow_group_mb_in *mbin;
	struct mcx_cmd_query_flow_group_out *out;
	struct mcx_cmd_query_flow_group_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}
	/* same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5317
/*
 * Debug helper (in the surrounding #if 0 block): query the receive
 * queue and print a summary of its context (state, user index, cqn,
 * work queue geometry and counters).
 */
int
mcx_dump_rq(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_rq_in *in;
	struct mcx_cmd_query_rq_out *out;
	struct mcx_cmd_query_rq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_rqn = htobe32(sc->sc_rqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		/* message text copied from mcx_dump_flow_group */
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query rq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_rq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
	    DEVNAME(sc),
	    (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
	    be32toh(mbout->cmd_ctx.rq_user_index),
	    be32toh(mbout->cmd_ctx.rq_cqn),
	    be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
	    mbout->cmd_ctx.rq_wq.wq_log_size,
	    be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
	    be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5387
/*
 * Debug helper (in the surrounding #if 0 block): query the send queue
 * and hexdump the first output mailbox.
 */
int
mcx_dump_sq(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_sq_in *in;
	struct mcx_cmd_query_sq_out *out;
	struct mcx_cmd_query_sq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_sqn = htobe32(sc->sc_sqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query sq mailboxes\n");
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query sq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_sq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* summary printf kept disabled; copied from mcx_dump_rq */
	/*
	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
	    DEVNAME(sc),
	    (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
	    be32toh(mbout->cmd_ctx.rq_user_index),
	    be32toh(mbout->cmd_ctx.rq_cqn),
	    be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
	    mbout->cmd_ctx.rq_wq.wq_log_size,
	    be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
	    be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
	*/
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5467
5468 static int
5469 mcx_dump_counters(struct mcx_softc *sc)
5470 {
5471 struct mcx_dmamem mxm;
5472 struct mcx_cmdq_entry *cqe;
5473 struct mcx_cmd_query_vport_counters_in *in;
5474 struct mcx_cmd_query_vport_counters_mb_in *mbin;
5475 struct mcx_cmd_query_vport_counters_out *out;
5476 struct mcx_nic_vport_counters *counters;
5477 int error, token;
5478
5479 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5480 token = mcx_cmdq_token(sc);
5481 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5482 sizeof(*out) + sizeof(*counters), token);
5483
5484 in = mcx_cmdq_in(cqe);
5485 in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5486 in->cmd_op_mod = htobe16(0);
5487
5488 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5489 printf(", unable to allocate query nic vport counters mailboxen\n");
5490 return (-1);
5491 }
5492 cqe->cq_input_ptr = cqe->cq_output_ptr;
5493
5494 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5495 mbin->cmd_clear = 0x80;
5496
5497 mcx_cmdq_mboxes_sign(&mxm, 1);
5498 mcx_cmdq_post(sc, cqe, 0);
5499
5500 error = mcx_cmdq_poll(sc, cqe, 1000);
5501 if (error != 0) {
5502 printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5503 goto free;
5504 }
5505 if (mcx_cmdq_verify(cqe) != 0) {
5506 printf("%s: query nic vport counters command corrupt\n",
5507 DEVNAME(sc));
5508 goto free;
5509 }
5510
5511 out = mcx_cmdq_out(cqe);
5512 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5513 printf("%s: query nic vport counters failed (%x, %x)\n",
5514 DEVNAME(sc), out->cmd_status, out->cmd_syndrome);
5515 error = -1;
5516 goto free;
5517 }
5518
5519 counters = (struct mcx_nic_vport_counters *)
5520 (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5521 if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5522 counters->rx_ucast.packets + counters->tx_ucast.packets +
5523 counters->rx_err.packets + counters->tx_err.packets)
5524 printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5525 DEVNAME(sc),
5526 be64toh(counters->tx_err.packets),
5527 be64toh(counters->rx_err.packets),
5528 be64toh(counters->tx_ucast.packets),
5529 be64toh(counters->rx_ucast.packets),
5530 be64toh(counters->tx_bcast.packets),
5531 be64toh(counters->rx_bcast.packets));
5532 free:
5533 mcx_dmamem_free(sc, &mxm);
5534
5535 return (error);
5536 }
5537
5538 static int
5539 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5540 {
5541 struct mcx_dmamem mxm;
5542 struct mcx_cmdq_entry *cqe;
5543 struct mcx_cmd_query_flow_counter_in *in;
5544 struct mcx_cmd_query_flow_counter_mb_in *mbin;
5545 struct mcx_cmd_query_flow_counter_out *out;
5546 struct mcx_counter *counters;
5547 int error, token;
5548
5549 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5550 token = mcx_cmdq_token(sc);
5551 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5552 sizeof(*counters), token);
5553
5554 in = mcx_cmdq_in(cqe);
5555 in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5556 in->cmd_op_mod = htobe16(0);
5557
5558 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5559 printf(", unable to allocate query flow counter mailboxen\n");
5560 return (-1);
5561 }
5562 cqe->cq_input_ptr = cqe->cq_output_ptr;
5563 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5564 mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5565 mbin->cmd_clear = 0x80;
5566
5567 mcx_cmdq_mboxes_sign(&mxm, 1);
5568 mcx_cmdq_post(sc, cqe, 0);
5569
5570 error = mcx_cmdq_poll(sc, cqe, 1000);
5571 if (error != 0) {
5572 printf("%s: query flow counter timeout\n", DEVNAME(sc));
5573 goto free;
5574 }
5575 if (mcx_cmdq_verify(cqe) != 0) {
5576 printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
5577 goto free;
5578 }
5579
5580 out = mcx_cmdq_out(cqe);
5581 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5582 printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5583 out->cmd_status, out->cmd_syndrome);
5584 error = -1;
5585 goto free;
5586 }
5587
5588 counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5589 if (counters->packets)
5590 printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5591 be64toh(counters->packets));
5592 free:
5593 mcx_dmamem_free(sc, &mxm);
5594
5595 return (error);
5596 }
5597
5598 #endif
5599
/*
 * Allocate and DMA-load mbuf clusters for up to "nslots" receive
 * descriptors starting at *prod, write the descriptors, and ring the
 * rx doorbell.  *prod is advanced past the slots actually filled.
 * Returns the number of requested slots that could NOT be filled.
 * Note: "bufsize" is only used by the disabled MCLGETI path below.
 */
static int
mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
    uint *prod, int bufsize, uint nslots)
{
	struct mcx_rq_entry *rqe;
	struct mcx_slot *ms;
	struct mbuf *m;
	uint slot, p, fills;

	p = *prod;
	slot = (p % (1 << MCX_LOG_RQ_SIZE));
	rqe = ring;
	for (fills = 0; fills < nslots; fills++) {
		ms = &slots[slot];
#if 0
		m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
		if (m == NULL)
			break;
#else
		/* get a packet header mbuf plus a cluster for the buffer */
		m = NULL;
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			break;

		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			break;
		}
#endif

		/* shift the payload so the IP header ends up aligned */
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		ms->ms_m = m;

		/* write the receive descriptor for this slot */
		rqe[slot].rqe_byte_count = htobe32(m->m_len);
		rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);

		p++;
		slot++;
		if (slot == (1 << MCX_LOG_RQ_SIZE))
			slot = 0;
	}

	if (fills != 0) {
		/* publish the new producer index to the hardware */
		*sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
		/* barrier? */
	}

	*prod = p;

	return (nslots - fills);
}
5660
5661 static int
5662 mcx_rx_fill(struct mcx_softc *sc)
5663 {
5664 u_int slots;
5665
5666 slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5667 if (slots == 0)
5668 return (1);
5669
5670 slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5671 sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5672 mcx_rxr_put(&sc->sc_rxr, slots);
5673 return (0);
5674 }
5675
5676 void
5677 mcx_refill(void *xsc)
5678 {
5679 struct mcx_softc *sc = xsc;
5680
5681 mcx_rx_fill(sc);
5682
5683 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5684 callout_schedule(&sc->sc_rx_refill, 1);
5685 }
5686
/*
 * Handle a transmit completion: sync and unload the DMA map, free the
 * transmitted mbuf, and add the number of send-queue slots the packet
 * occupied to *txfree.
 */
void
mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
{
	struct mcx_slot *ms;
	bus_dmamap_t map;
	int slot, slots;

	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);

	ms = &sc->sc_tx_slots[slot];
	map = ms->ms_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * the first slot holds one data segment; extra segments are
	 * packed MCX_SQ_SEGS_PER_SLOT to a slot (mirrors mcx_start()).
	 */
	slots = 1;
	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;

	(*txfree) += slots;
	bus_dmamap_unload(sc->sc_dmat, map);
	m_freem(ms->ms_m);
	ms->ms_m = NULL;
}
5710
5711 static uint64_t
5712 mcx_uptime(void)
5713 {
5714 struct timespec ts;
5715
5716 nanouptime(&ts);
5717
5718 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5719 }
5720
5721 static void
5722 mcx_calibrate_first(struct mcx_softc *sc)
5723 {
5724 struct mcx_calibration *c = &sc->sc_calibration[0];
5725
5726 sc->sc_calibration_gen = 0;
5727
5728 c->c_ubase = mcx_uptime();
5729 c->c_tbase = mcx_timer(sc);
5730 c->c_tdiff = 0;
5731
5732 callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5733 }
5734
5735 #define MCX_TIMESTAMP_SHIFT 10
5736
/*
 * Periodic callout: take a fresh uptime/device-timer sample pair and
 * publish the interval deltas.  Two calibration entries alternate;
 * readers select a stable one via sc_calibration_gen (see
 * mcx_process_cq()), so ordering of the stores below matters.
 */
static void
mcx_calibrate(void *arg)
{
	struct mcx_softc *sc = arg;
	struct mcx_calibration *nc, *pc;
	unsigned int gen;

	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
		return;

	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);

	/* pc is the entry readers may currently use; nc is the spare */
	gen = sc->sc_calibration_gen;
	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
	gen++;
	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	/* the previous base becomes the start of the new interval */
	nc->c_uptime = pc->c_ubase;
	nc->c_timestamp = pc->c_tbase;

	nc->c_ubase = mcx_uptime();
	nc->c_tbase = mcx_timer(sc);

	/* scale down so the later multiply/divide stays in range */
	nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
	nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;

	/* publish the new entry before bumping the generation */
	membar_producer();
	sc->sc_calibration_gen = gen;
}
5766
/*
 * Handle a receive completion: detach the filled mbuf from its ring
 * slot, set its length from the completion entry, and append it to mq
 * for later delivery to the stack.  Returns the number of ring slots
 * released (always 1).  The calibration argument is only used by the
 * currently-disabled timestamp code below.
 */
static int
mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
    struct mcx_mbufq *mq, const struct mcx_calibration *c)
{
	struct mcx_slot *ms;
	struct mbuf *m;
	int slot;

	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);

	ms = &sc->sc_rx_slots[slot];
	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

	/* take ownership of the mbuf away from the slot */
	m = ms->ms_m;
	ms->ms_m = NULL;

	m_set_rcvif(m, &sc->sc_ec.ec_if);
	m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);

#if 0
	if (cqe->cq_rx_hash_type) {
		m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
		    be32toh(cqe->cq_rx_hash);
	}
#endif

#if 0
	if (c->c_tdiff) {
		uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
		t *= c->c_udiff;
		t /= c->c_tdiff;

		m->m_pkthdr.ph_timestamp = c->c_uptime + t;
		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
	}
#endif

	MBUFQ_ENQUEUE(mq, m);

	return (1);
}
5810
5811 static struct mcx_cq_entry *
5812 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5813 {
5814 struct mcx_cq_entry *cqe;
5815 int next;
5816
5817 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5818 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5819
5820 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5821 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5822 return (&cqe[next]);
5823 }
5824
5825 return (NULL);
5826 }
5827
/*
 * Re-arm a completion queue: update the CQ doorbell record in memory
 * and then ring the UAR doorbell register so the device raises an
 * event for the next completion.
 */
static void
mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	bus_size_t offset;
	uint32_t val;
	uint64_t uval;

	/* different uar per cq? */
	offset = (MCX_PAGE_SIZE * sc->sc_uar);
	/* arm command sequence number plus current consumer index */
	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);

	/* in-memory doorbell record: consumer index, then arm word */
	cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
	cq->cq_doorbell[1] = htobe32(val);

	/* UAR write: arm word in the high half, CQ number in the low */
	uval = val;
	uval <<= 32;
	uval |= cq->cq_n;
	bus_space_write_8(sc->sc_memt, sc->sc_memh,
	    offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
	mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);
}
5851
/*
 * Drain a completion queue: dispatch tx and rx completions, re-arm the
 * queue, deliver received packets to the stack, refill the rx ring,
 * and credit freed tx slots back to the send path.
 */
void
mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	const struct mcx_calibration *c;
	unsigned int gen;
	struct mcx_cq_entry *cqe;
	struct mcx_mbufq mq;
	struct mbuf *m;
	int rxfree, txfree;

	MBUFQ_INIT(&mq);

	/*
	 * pick up a stable calibration entry; the consumer barrier
	 * pairs with the producer barrier in mcx_calibrate()
	 */
	gen = sc->sc_calibration_gen;
	membar_consumer();
	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	rxfree = 0;
	txfree = 0;
	while ((cqe = mcx_next_cq_entry(sc, cq))) {
		uint8_t opcode;
		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
		switch (opcode) {
		case MCX_CQ_ENTRY_OPCODE_REQ:
			/* transmit completion */
			mcx_process_txeof(sc, cqe, &txfree);
			break;
		case MCX_CQ_ENTRY_OPCODE_SEND:
			/* receive completion; mbufs collect on mq */
			rxfree += mcx_process_rx(sc, cqe, &mq, c);
			break;
		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
			/* uint8_t *cqp = (uint8_t *)cqe; */
			/* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
			break;

		default:
			/* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
			break;
		}

		cq->cq_cons++;
	}

	/* request another event for the next batch of completions */
	cq->cq_count++;
	mcx_arm_cq(sc, cq);

	if (rxfree > 0) {
		mcx_rxr_put(&sc->sc_rxr, rxfree);
		while (MBUFQ_FIRST(&mq) != NULL) {
			MBUFQ_DEQUEUE(&mq, m);
			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		/* replace the buffers we just handed up the stack */
		mcx_rx_fill(sc);

		/* ring completely empty: retry from the callout later */
		if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
			callout_schedule(&sc->sc_rx_refill, 1);
	}
	if (txfree > 0) {
		sc->sc_tx_cons += txfree;
		if_schedule_deferred_start(ifp);
	}
}
5915
5916 static void
5917 mcx_arm_eq(struct mcx_softc *sc)
5918 {
5919 bus_size_t offset;
5920 uint32_t val;
5921
5922 offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5923 val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5924
5925 mcx_wr(sc, offset, val);
5926 /* barrier? */
5927 }
5928
5929 static struct mcx_eq_entry *
5930 mcx_next_eq_entry(struct mcx_softc *sc)
5931 {
5932 struct mcx_eq_entry *eqe;
5933 int next;
5934
5935 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5936 next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5937 if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5938 sc->sc_eq_cons++;
5939 return (&eqe[next]);
5940 }
5941 return (NULL);
5942 }
5943
/*
 * Interrupt handler: drain the event queue, dispatching completion
 * events to the matching completion queue and port-change events to
 * the workqueue, then re-arm the EQ.  Always claims the interrupt.
 */
int
mcx_intr(void *xsc)
{
	struct mcx_softc *sc = (struct mcx_softc *)xsc;
	struct mcx_eq_entry *eqe;
	int i, cq;

	while ((eqe = mcx_next_eq_entry(sc))) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_COMPLETION:
			/* event data word 6 carries the CQ number */
			cq = be32toh(eqe->eq_event_data[6]);
			for (i = 0; i < sc->sc_num_cq; i++) {
				if (sc->sc_cq[i].cq_n == cq) {
					mcx_process_cq(sc, &sc->sc_cq[i]);
					break;
				}
			}
			break;

		case MCX_EVENT_TYPE_LAST_WQE:
			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CQ_ERROR:
			/* printf("%s: cq error\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CMD_COMPLETION:
			/* wakeup probably */
			break;

		case MCX_EVENT_TYPE_PORT_CHANGE:
			/* defer to process context; register access sleeps */
			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
			break;

		default:
			/* printf("%s: something happened\n", DEVNAME(sc)); */
			break;
		}
	}
	mcx_arm_eq(sc);
	return (1);
}
5987
5988 static void
5989 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5990 int total)
5991 {
5992 struct mcx_slot *ms;
5993
5994 int i = allocated;
5995 while (i-- > 0) {
5996 ms = &slots[i];
5997 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
5998 if (ms->ms_m != NULL)
5999 m_freem(ms->ms_m);
6000 }
6001 kmem_free(slots, total * sizeof(*ms));
6002 }
6003
6004 static int
6005 mcx_init(struct ifnet *ifp)
6006 {
6007 struct mcx_softc *sc = ifp->if_softc;
6008 struct mcx_slot *ms;
6009 int i, start;
6010 struct mcx_flow_match match_crit;
6011
6012 if (ISSET(ifp->if_flags, IFF_RUNNING))
6013 mcx_stop(ifp, 0);
6014
6015 sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6016 KM_SLEEP);
6017
6018 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6019 ms = &sc->sc_rx_slots[i];
6020 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6021 sc->sc_hardmtu, 0,
6022 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6023 &ms->ms_map) != 0) {
6024 printf("%s: failed to allocate rx dma maps\n",
6025 DEVNAME(sc));
6026 goto destroy_rx_slots;
6027 }
6028 }
6029
6030 sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6031 KM_SLEEP);
6032
6033 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6034 ms = &sc->sc_tx_slots[i];
6035 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6036 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6037 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6038 &ms->ms_map) != 0) {
6039 printf("%s: failed to allocate tx dma maps\n",
6040 DEVNAME(sc));
6041 goto destroy_tx_slots;
6042 }
6043 }
6044
6045 if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6046 goto down;
6047
6048 /* send queue */
6049 if (mcx_create_tis(sc) != 0)
6050 goto down;
6051
6052 if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6053 goto down;
6054
6055 /* receive queue */
6056 if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6057 goto down;
6058
6059 if (mcx_create_tir(sc) != 0)
6060 goto down;
6061
6062 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6063 goto down;
6064
6065 /* promisc flow group */
6066 start = 0;
6067 memset(&match_crit, 0, sizeof(match_crit));
6068 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6069 0, &match_crit) != 0)
6070 goto down;
6071 sc->sc_promisc_flow_enabled = 0;
6072 start++;
6073
6074 /* all multicast flow group */
6075 match_crit.mc_dest_mac[0] = 0x01;
6076 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6077 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6078 goto down;
6079 sc->sc_allmulti_flow_enabled = 0;
6080 start++;
6081
6082 /* mac address matching flow group */
6083 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6084 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6085 (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6086 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6087 goto down;
6088
6089 /* flow table entries for unicast and broadcast */
6090 start = 0;
6091 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6092 LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6093 goto down;
6094 start++;
6095
6096 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6097 etherbroadcastaddr) != 0)
6098 goto down;
6099 start++;
6100
6101 /* multicast entries go after that */
6102 sc->sc_mcast_flow_base = start;
6103
6104 /* re-add any existing multicast flows */
6105 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6106 if (sc->sc_mcast_flows[i][0] != 0) {
6107 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6108 sc->sc_mcast_flow_base + i,
6109 sc->sc_mcast_flows[i]);
6110 }
6111 }
6112
6113 if (mcx_set_flow_table_root(sc) != 0)
6114 goto down;
6115
6116 /* start the queues */
6117 if (mcx_ready_sq(sc) != 0)
6118 goto down;
6119
6120 if (mcx_ready_rq(sc) != 0)
6121 goto down;
6122
6123 mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6124 sc->sc_rx_prod = 0;
6125 mcx_rx_fill(sc);
6126
6127 mcx_calibrate_first(sc);
6128
6129 SET(ifp->if_flags, IFF_RUNNING);
6130
6131 sc->sc_tx_cons = 0;
6132 sc->sc_tx_prod = 0;
6133 CLR(ifp->if_flags, IFF_OACTIVE);
6134 if_schedule_deferred_start(ifp);
6135
6136 return 0;
6137 destroy_tx_slots:
6138 mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6139 sc->sc_rx_slots = NULL;
6140
6141 i = (1 << MCX_LOG_RQ_SIZE);
6142 destroy_rx_slots:
6143 mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6144 sc->sc_rx_slots = NULL;
6145 down:
6146 mcx_stop(ifp, 0);
6147 return EIO;
6148 }
6149
/*
 * Bring the interface down: remove flow table entries first so no new
 * packets arrive, stop calibration, destroy flow groups/table, tear
 * down the queue objects in reverse creation order, and free the
 * rx/tx slot arrays.  The "disable" argument is unused.
 */
static void
mcx_stop(struct ifnet *ifp, int disable)
{
	struct mcx_softc *sc = ifp->if_softc;
	int group, i;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * delete flow table entries first, so no packets can arrive
	 * after the barriers
	 */
	if (sc->sc_promisc_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
	if (sc->sc_allmulti_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
	/* entries 0 and 1 are the unicast and broadcast addresses */
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i);
		}
	}

	callout_halt(&sc->sc_calibrate, NULL);

	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
		if (sc->sc_flow_group_id[group] != -1)
			mcx_destroy_flow_group(sc,
			    sc->sc_flow_group_id[group]);
	}

	if (sc->sc_flow_table_id != -1)
		mcx_destroy_flow_table(sc);

	/* tear down in the reverse of the order mcx_init() created them */
	if (sc->sc_tirn != 0)
		mcx_destroy_tir(sc);
	if (sc->sc_rqn != 0)
		mcx_destroy_rq(sc);

	if (sc->sc_sqn != 0)
		mcx_destroy_sq(sc);
	if (sc->sc_tisn != 0)
		mcx_destroy_tis(sc);

	for (i = 0; i < sc->sc_num_cq; i++)
		mcx_destroy_cq(sc, i);
	sc->sc_num_cq = 0;

	if (sc->sc_tx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
		    (1 << MCX_LOG_SQ_SIZE));
		sc->sc_tx_slots = NULL;
	}
	if (sc->sc_rx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
		    (1 << MCX_LOG_RQ_SIZE));
		sc->sc_rx_slots = NULL;
	}
}
6211
/*
 * Interface ioctl handler.  Multicast add/remove is handled here
 * directly because each group address gets its own hardware flow
 * table entry (up to MCX_NUM_MCAST_FLOWS); when the table overflows
 * or a range is requested, the interface falls back to ALLMULTI.
 * Everything else is passed to ether_ioctl().
 */
static int
mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ethercom *ec = &sc->sc_ec;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	struct ether_multi *enm;
	struct ether_multistep step;
	int s, i, flags, error = 0;

	s = splnet();
	switch (cmd) {

	case SIOCADDMULTI:
		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			/* remember the address in the first free slot */
			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (sc->sc_mcast_flows[i][0] == 0) {
					memcpy(sc->sc_mcast_flows[i], addrlo,
					    ETHER_ADDR_LEN);
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_set_flow_table_entry(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i,
						    sc->sc_mcast_flows[i]);
					}
					break;
				}
			}

			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
				/* table full: count it and go allmulti */
				if (i == MCX_NUM_MCAST_FLOWS) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					sc->sc_extra_mcast++;
					error = ENETRESET;
				}

				/* address ranges can't be matched exactly */
				if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			/* remove the matching flow entry, if we had one */
			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (memcmp(sc->sc_mcast_flows[i], addrlo,
				    ETHER_ADDR_LEN) == 0) {
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_delete_flow_table_entry(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i);
					}
					sc->sc_mcast_flows[i][0] = 0;
					break;
				}
			}

			/* it wasn't in the table, so it was an overflow */
			if (i == MCX_NUM_MCAST_FLOWS)
				sc->sc_extra_mcast--;

			/*
			 * last overflow gone: leave allmulti unless a
			 * range membership still requires it
			 */
			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_extra_mcast == 0) {
				flags = 0;
				ETHER_LOCK(ec);
				ETHER_FIRST_MULTI(step, ec, enm);
				while (enm != NULL) {
					if (memcmp(enm->enm_addrlo,
					    enm->enm_addrhi, ETHER_ADDR_LEN)) {
						SET(flags, IFF_ALLMULTI);
						break;
					}
					ETHER_NEXT_MULTI(step, enm);
				}
				ETHER_UNLOCK(ec);
				if (!ISSET(flags, IFF_ALLMULTI)) {
					CLR(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	/* reprogram the receive filter if membership changed while up */
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			mcx_iff(sc);
		error = 0;
	}
	splx(s);

	return (error);
}
6323
6324 #if 0
/*
 * (Currently disabled via #if 0.)  Read a 256-byte SFF EEPROM page
 * from the transceiver module via the MCIA register, in
 * MCX_MCIA_EEPROM_BYTES chunks.  Returns 0 on success or the
 * register-access error.
 */
static int
mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_mcia mcia;
	struct mcx_reg_pmlp pmlp;
	int offset, error;

	/* get module number */
	memset(&pmlp, 0, sizeof(pmlp));
	pmlp.rp_local_port = 1;
	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
	    sizeof(pmlp));
	if (error != 0) {
		printf("%s: unable to get eeprom module number\n",
		    DEVNAME(sc));
		return error;
	}

	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
		memset(&mcia, 0, sizeof(mcia));
		mcia.rm_l = 0;
		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
		    MCX_PMLP_MODULE_NUM_MASK;
		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
		mcia.rm_page_num = sff->sff_page;
		mcia.rm_dev_addr = htobe16(offset);
		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);

		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
		    &mcia, sizeof(mcia));
		if (error != 0) {
			printf("%s: unable to read eeprom at %x\n",
			    DEVNAME(sc), offset);
			return error;
		}

		memcpy(sff->sff_data + offset, mcia.rm_data,
		    MCX_MCIA_EEPROM_BYTES);
	}

	return 0;
}
6368 #endif
6369
6370 static int
6371 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6372 {
6373 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6374 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6375 case 0:
6376 break;
6377
6378 case EFBIG:
6379 if (m_defrag(m, M_DONTWAIT) != NULL &&
6380 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6381 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6382 break;
6383
6384 /* FALLTHROUGH */
6385 default:
6386 return (1);
6387 }
6388
6389 ms->ms_m = m;
6390 return (0);
6391 }
6392
/*
 * Transmit start routine: dequeue packets, build send queue entries
 * (inline ethernet header plus DMA segment descriptors, possibly
 * spanning multiple SQ slots), then ring the doorbell and write the
 * first 8 bytes of the last SQE to the blue flame buffer.
 */
static void
mcx_start(struct ifnet *ifp)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_sq_entry *sq, *sqe;
	struct mcx_sq_entry_seg *sqs;
	struct mcx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used;
	uint64_t *bf;
	size_t bf_base;
	int i, seg, nseg;

	bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;

	idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
	free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;

	used = 0;
	bf = NULL;
	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);

	for (;;) {
		/* stop while a worst-case packet might not fit */
		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		sqe = sq + idx;
		ms = &sc->sc_tx_slots[idx];
		memset(sqe, 0, sizeof(*sqe));

		/* ctrl segment */
		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
		    ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
		/* always generate a completion event */
		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);

		/* eth segment */
		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
		m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
		m_adj(m, MCX_SQ_INLINE_SIZE);

		if (mcx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}
		/* remember the latest SQE for the blue flame write below */
		bf = (uint64_t *)sqe;

		if (ifp->if_bpf != NULL)
			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* descriptor count: ctrl + eth + inline + data segments */
		sqe->sqe_ds_sq_num =
		    htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
		    (map->dm_nsegs + 3));

		/* data segment - first wqe has one segment */
		sqs = sqe->sqe_segs;
		seg = 0;
		nseg = 1;
		for (i = 0; i < map->dm_nsegs; i++) {
			if (seg == nseg) {
				/* next slot */
				idx++;
				if (idx == (1 << MCX_LOG_SQ_SIZE))
					idx = 0;
				sc->sc_tx_prod++;
				used++;

				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
				seg = 0;
				nseg = MCX_SQ_SEGS_PER_SLOT;
			}
			sqs[seg].sqs_byte_count =
			    htobe32(map->dm_segs[i].ds_len);
			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
			seg++;
		}

		idx++;
		if (idx == (1 << MCX_LOG_SQ_SIZE))
			idx = 0;
		sc->sc_tx_prod++;
		used++;
	}

	if (used) {
		/* publish the new producer index, then kick the hardware */
		*sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);

		membar_sync();

		/*
		 * write the first 64 bits of the last sqe we produced
		 * to the blue flame buffer
		 */
		bus_space_write_8(sc->sc_memt, sc->sc_memh,
		    bf_base + sc->sc_bf_offset, *bf);
		/* next write goes to the other buffer */
		sc->sc_bf_offset ^= sc->sc_bf_size;

		membar_sync();
	}
}
6509
static void
mcx_watchdog(struct ifnet *ifp)
{
	/* nothing to do; no tx timeout handling is implemented yet */
}
6514
6515 static void
6516 mcx_media_add_types(struct mcx_softc *sc)
6517 {
6518 struct mcx_reg_ptys ptys;
6519 int i;
6520 uint32_t proto_cap;
6521
6522 memset(&ptys, 0, sizeof(ptys));
6523 ptys.rp_local_port = 1;
6524 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6525 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6526 sizeof(ptys)) != 0) {
6527 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6528 return;
6529 }
6530
6531 proto_cap = be32toh(ptys.rp_eth_proto_cap);
6532 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6533 if ((proto_cap & (1U << i)) && (mcx_eth_cap_map[i] != 0))
6534 ifmedia_add(&sc->sc_media, IFM_ETHER |
6535 mcx_eth_cap_map[i], 0, NULL);
6536 }
6537 }
6538
6539 static void
6540 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6541 {
6542 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6543 struct mcx_reg_ptys ptys;
6544 int i;
6545 uint32_t /* proto_cap, */ proto_oper;
6546 uint64_t media_oper;
6547
6548 memset(&ptys, 0, sizeof(ptys));
6549 ptys.rp_local_port = 1;
6550 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6551
6552 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6553 sizeof(ptys)) != 0) {
6554 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6555 return;
6556 }
6557
6558 /* proto_cap = be32toh(ptys.rp_eth_proto_cap); */
6559 proto_oper = be32toh(ptys.rp_eth_proto_oper);
6560
6561 media_oper = 0;
6562 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6563 if (proto_oper & (1U << i)) {
6564 media_oper = mcx_eth_cap_map[i];
6565 }
6566 }
6567
6568 ifmr->ifm_status = IFM_AVALID;
6569 /* not sure if this is the right thing to check, maybe paos? */
6570 if (proto_oper != 0) {
6571 ifmr->ifm_status |= IFM_ACTIVE;
6572 ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6573 /* txpause, rxpause, duplex? */
6574 }
6575 }
6576
6577 static int
6578 mcx_media_change(struct ifnet *ifp)
6579 {
6580 struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6581 struct mcx_reg_ptys ptys;
6582 struct mcx_reg_paos paos;
6583 uint32_t media;
6584 int i, error;
6585
6586 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6587 return EINVAL;
6588
6589 error = 0;
6590
6591 if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6592 /* read ptys to get supported media */
6593 memset(&ptys, 0, sizeof(ptys));
6594 ptys.rp_local_port = 1;
6595 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6596 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6597 &ptys, sizeof(ptys)) != 0) {
6598 printf("%s: unable to read port type/speed\n",
6599 DEVNAME(sc));
6600 return EIO;
6601 }
6602
6603 media = be32toh(ptys.rp_eth_proto_cap);
6604 } else {
6605 /* map media type */
6606 media = 0;
6607 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6608 if (mcx_eth_cap_map[i] ==
6609 IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6610 media = (1 << i);
6611 break;
6612 }
6613 }
6614 }
6615
6616 /* disable the port */
6617 memset(&paos, 0, sizeof(paos));
6618 paos.rp_local_port = 1;
6619 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6620 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6621 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6622 sizeof(paos)) != 0) {
6623 printf("%s: unable to set port state to down\n", DEVNAME(sc));
6624 return EIO;
6625 }
6626
6627 memset(&ptys, 0, sizeof(ptys));
6628 ptys.rp_local_port = 1;
6629 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6630 ptys.rp_eth_proto_admin = htobe32(media);
6631 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6632 sizeof(ptys)) != 0) {
6633 printf("%s: unable to set port media type/speed\n",
6634 DEVNAME(sc));
6635 error = EIO;
6636 }
6637
6638 /* re-enable the port to start negotiation */
6639 memset(&paos, 0, sizeof(paos));
6640 paos.rp_local_port = 1;
6641 paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6642 paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6643 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6644 sizeof(paos)) != 0) {
6645 printf("%s: unable to set port state to up\n", DEVNAME(sc));
6646 error = EIO;
6647 }
6648
6649 return error;
6650 }
6651
6652 static void
6653 mcx_port_change(struct work *wk, void *xsc)
6654 {
6655 struct mcx_softc *sc = xsc;
6656 struct ifnet *ifp = &sc->sc_ec.ec_if;
6657 struct mcx_reg_paos paos;
6658 int link_state = LINK_STATE_DOWN;
6659 struct ifmediareq ifmr;
6660
6661 memset(&paos, 0, sizeof(paos));
6662 paos.rp_local_port = 1;
6663 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6664 sizeof(paos)) == 0) {
6665 if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6666 link_state = LINK_STATE_UP;
6667 mcx_media_status(ifp, &ifmr);
6668 ifp->if_baudrate = ifmedia_baudrate(ifmr.ifm_active);
6669 }
6670
6671 if (link_state != ifp->if_link_state) {
6672 if_link_state_change(ifp, link_state);
6673 }
6674 }
6675
6676
6677 static inline uint32_t
6678 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6679 {
6680 uint32_t word;
6681
6682 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6683
6684 return (be32toh(word));
6685 }
6686
6687 static inline void
6688 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6689 {
6690 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6691 }
6692
/*
 * Issue a bus-space barrier of flags f over the l-byte register
 * window starting at offset r.
 */
static inline void
mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
}
6698
6699 static uint64_t
6700 mcx_timer(struct mcx_softc *sc)
6701 {
6702 uint32_t hi, lo, ni;
6703
6704 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6705 for (;;) {
6706 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6707 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6708 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6709
6710 if (ni == hi)
6711 break;
6712
6713 hi = ni;
6714 }
6715
6716 return (((uint64_t)hi << 32) | (uint64_t)lo);
6717 }
6718
/*
 * Allocate, kernel-map, and DMA-load a region of "size" bytes aligned
 * to "align", described by mxm.  A single DMA segment is requested, so
 * the region is physically contiguous.  The region is zeroed before
 * return.  Returns 0 on success, 1 on failure (not an errno).
 */
static int
mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	mcx_dmamem_zero(mxm);

	return (0);
/* error unwind: release only what was acquired, in reverse order */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
6752
/*
 * Zero the entire kernel-mapped extent of a mcx_dmamem region.
 */
static void
mcx_dmamem_zero(struct mcx_dmamem *mxm)
{
	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
}
6758
/*
 * Tear down a region set up by mcx_dmamem_alloc(), in reverse order
 * of acquisition: unload the map, unmap the kva, free the memory,
 * then destroy the map.
 */
static void
mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
6767
/*
 * Allocate "pages" device pages of DMA memory for the HCA to own
 * (never kernel-mapped).  Up to "pages" segments are requested; if the
 * allocator returns fewer, the oversized segment array is replaced by
 * a right-sized copy.  Returns 0 on success, -1 on failure.
 */
static int
mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
{
	bus_dma_segment_t *segs;
	bus_size_t len = pages * MCX_PAGE_SIZE;
	size_t seglen;

	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
	seglen = sizeof(*segs) * pages;

	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
		goto free_segs;

	if (mhm->mhm_seg_count < pages) {
		size_t nseglen;

		/* shrink: copy the used segments into a tight array */
		mhm->mhm_segs = kmem_alloc(
		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);

		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;

		memcpy(mhm->mhm_segs, segs, nseglen);

		kmem_free(segs, seglen);

		/* from here on, segs/seglen alias the kept array */
		segs = mhm->mhm_segs;
		seglen = nseglen;
	} else
		mhm->mhm_segs = segs;

	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
	    &mhm->mhm_map) != 0)
		goto free_dmamem;

	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);

	mhm->mhm_npages = pages;

	return (0);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
free_dmamem:
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
free_segs:
	/* segs points at whichever segment array is current */
	kmem_free(segs, seglen);
	mhm->mhm_segs = NULL;

	return (-1);
}
6825
/*
 * Release HCA-owned pages set up by mcx_hwmem_alloc().  Safe to call
 * on a zeroed/never-allocated mhm (mhm_npages == 0 is a no-op).
 */
static void
mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
{
	if (mhm->mhm_npages == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);

	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
	kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);

	/* mark as freed so a second call is a no-op */
	mhm->mhm_npages = 0;
}
6842