if_mcx.c revision 1.9 1 /* $NetBSD: if_mcx.c,v 1.9 2019/11/29 15:17:14 msaitoh Exp $ */
2 /* $OpenBSD: if_mcx.c,v 1.33 2019/09/12 04:23:59 jmatthew Exp $ */
3
4 /*
5 * Copyright (c) 2017 David Gwynne <dlg (at) openbsd.org>
6 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #ifdef _KERNEL_OPT
22 #include "opt_net_mpsafe.h"
23 #endif
24
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/socket.h>
31 #include <sys/device.h>
32 #include <sys/pool.h>
33 #include <sys/queue.h>
34 #include <sys/callout.h>
35 #include <sys/workqueue.h>
36 #include <sys/atomic.h>
37 #include <sys/kmem.h>
38 #include <sys/bus.h>
39
40 #include <machine/intr.h>
41
42 #include <net/if.h>
43 #include <net/if_dl.h>
44 #include <net/if_ether.h>
45 #include <net/if_media.h>
46
47 #include <net/bpf.h>
48
49 #include <netinet/in.h>
50
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pcidevs.h>
54
55 #ifdef NET_MPSAFE
56 #define MCX_MPSAFE 1
57 #define CALLOUT_FLAGS CALLOUT_MPSAFE
58 #else
59 #define CALLOUT_FLAGS 0
60 #endif
61
62 #define MCX_MAX_NINTR 1
63
64 #define BUS_DMASYNC_PRERW (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
65 #define BUS_DMASYNC_POSTRW (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
66
67 #define MCX_HCA_BAR PCI_MAPREG_START /* BAR 0 */
68
69 #define MCX_FW_VER 0x0000
70 #define MCX_FW_VER_MAJOR(_v) ((_v) & 0xffff)
71 #define MCX_FW_VER_MINOR(_v) ((_v) >> 16)
72 #define MCX_CMDIF_FW_SUBVER 0x0004
73 #define MCX_FW_VER_SUBMINOR(_v) ((_v) & 0xffff)
74 #define MCX_CMDIF(_v) ((_v) >> 16)
75
76 #define MCX_ISSI 1 /* as per the PRM */
77 #define MCX_CMD_IF_SUPPORTED 5
78
79 #define MCX_HARDMTU 9500
80
81 #define MCX_MAX_CQS 2 /* rq, sq */
82
83 /* queue sizes */
84 #define MCX_LOG_EQ_SIZE 6 /* one page */
85 #define MCX_LOG_CQ_SIZE 11
86 #define MCX_LOG_RQ_SIZE 10
87 #define MCX_LOG_SQ_SIZE 11
88
89 /* completion event moderation - about 10khz, or 90% of the cq */
90 #define MCX_CQ_MOD_PERIOD 50
91 #define MCX_CQ_MOD_COUNTER (((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
92
93 #define MCX_LOG_SQ_ENTRY_SIZE 6
94 #define MCX_SQ_ENTRY_MAX_SLOTS 4
95 #define MCX_SQ_SEGS_PER_SLOT \
96 (sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
97 #define MCX_SQ_MAX_SEGMENTS \
98 1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)
99
100 #define MCX_LOG_FLOW_TABLE_SIZE 5
101 #define MCX_NUM_STATIC_FLOWS 4 /* promisc, allmulti, ucast, bcast */
102 #define MCX_NUM_MCAST_FLOWS \
103 ((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
104
105 #define MCX_SQ_INLINE_SIZE 18
106
107 /* doorbell offsets */
108 #define MCX_CQ_DOORBELL_OFFSET 0
109 #define MCX_CQ_DOORBELL_SIZE 16
110 #define MCX_RQ_DOORBELL_OFFSET 64
111 #define MCX_SQ_DOORBELL_OFFSET 64
112
113 #define MCX_WQ_DOORBELL_MASK 0xffff
114
115 /* uar registers */
116 #define MCX_UAR_CQ_DOORBELL 0x20
117 #define MCX_UAR_EQ_DOORBELL_ARM 0x40
118 #define MCX_UAR_EQ_DOORBELL 0x48
119 #define MCX_UAR_BF 0x800
120
121 #define MCX_CMDQ_ADDR_HI 0x0010
122 #define MCX_CMDQ_ADDR_LO 0x0014
123 #define MCX_CMDQ_ADDR_NMASK 0xfff
124 #define MCX_CMDQ_LOG_SIZE(_v) ((_v) >> 4 & 0xf)
125 #define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf)
126 #define MCX_CMDQ_INTERFACE_MASK (0x3 << 8)
127 #define MCX_CMDQ_INTERFACE_FULL_DRIVER (0x0 << 8)
128 #define MCX_CMDQ_INTERFACE_DISABLED (0x1 << 8)
129
130 #define MCX_CMDQ_DOORBELL 0x0018
131
132 #define MCX_STATE 0x01fc
133 #define MCX_STATE_MASK (1U << 31)
134 #define MCX_STATE_INITIALIZING (1 << 31)
135 #define MCX_STATE_READY (0 << 31)
136 #define MCX_STATE_INTERFACE_MASK (0x3 << 24)
137 #define MCX_STATE_INTERFACE_FULL_DRIVER (0x0 << 24)
138 #define MCX_STATE_INTERFACE_DISABLED (0x1 << 24)
139
140 #define MCX_INTERNAL_TIMER 0x1000
141 #define MCX_INTERNAL_TIMER_H 0x1000
142 #define MCX_INTERNAL_TIMER_L 0x1004
143
144 #define MCX_CLEAR_INT 0x100c
145
146 #define MCX_REG_OP_WRITE 0
147 #define MCX_REG_OP_READ 1
148
149 #define MCX_REG_PMLP 0x5002
150 #define MCX_REG_PMTU 0x5003
151 #define MCX_REG_PTYS 0x5004
152 #define MCX_REG_PAOS 0x5006
153 #define MCX_REG_PFCC 0x5007
154 #define MCX_REG_PPCNT 0x5008
155 #define MCX_REG_MCIA 0x9014
156
157 #define MCX_ETHER_CAP_SGMII (1 << 0)
158 #define MCX_ETHER_CAP_1000_KX (1 << 1)
159 #define MCX_ETHER_CAP_10G_CX4 (1 << 2)
160 #define MCX_ETHER_CAP_10G_KX4 (1 << 3)
161 #define MCX_ETHER_CAP_10G_KR (1 << 4)
162 #define MCX_ETHER_CAP_20G_KR2 (1 << 5)
163 #define MCX_ETHER_CAP_40G_CR4 (1 << 6)
164 #define MCX_ETHER_CAP_40G_KR4 (1 << 7)
165 #define MCX_ETHER_CAP_56G_R4 (1 << 8)
166 #define MCX_ETHER_CAP_10G_CR (1 << 12)
167 #define MCX_ETHER_CAP_10G_SR (1 << 13)
168 #define MCX_ETHER_CAP_10G_LR (1 << 14)
169 #define MCX_ETHER_CAP_40G_SR4 (1 << 15)
170 #define MCX_ETHER_CAP_40G_LR4 (1 << 16)
171 #define MCX_ETHER_CAP_50G_SR2 (1 << 18)
172 #define MCX_ETHER_CAP_100G_CR4 (1 << 20)
173 #define MCX_ETHER_CAP_100G_SR4 (1 << 21)
174 #define MCX_ETHER_CAP_100G_KR4 (1 << 22)
175 #define MCX_ETHER_CAP_100G_LR4 (1 << 23)
176 #define MCX_ETHER_CAP_100_TX (1 << 24)
177 #define MCX_ETHER_CAP_1000_T (1 << 25)
178 #define MCX_ETHER_CAP_10G_T (1 << 26)
179 #define MCX_ETHER_CAP_25G_CR (1 << 27)
180 #define MCX_ETHER_CAP_25G_KR (1 << 28)
181 #define MCX_ETHER_CAP_25G_SR (1 << 29)
182 #define MCX_ETHER_CAP_50G_CR2 (1 << 30)
183 #define MCX_ETHER_CAP_50G_KR2 (1 << 31)
184
185 #define MCX_PAGE_SHIFT 12
186 #define MCX_PAGE_SIZE (1 << MCX_PAGE_SHIFT)
187 #define MCX_MAX_CQE 32
188
189 #define MCX_CMD_QUERY_HCA_CAP 0x100
190 #define MCX_CMD_QUERY_ADAPTER 0x101
191 #define MCX_CMD_INIT_HCA 0x102
192 #define MCX_CMD_TEARDOWN_HCA 0x103
193 #define MCX_CMD_ENABLE_HCA 0x104
194 #define MCX_CMD_DISABLE_HCA 0x105
195 #define MCX_CMD_QUERY_PAGES 0x107
196 #define MCX_CMD_MANAGE_PAGES 0x108
197 #define MCX_CMD_SET_HCA_CAP 0x109
198 #define MCX_CMD_QUERY_ISSI 0x10a
199 #define MCX_CMD_SET_ISSI 0x10b
200 #define MCX_CMD_SET_DRIVER_VERSION \
201 0x10d
202 #define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
203 0x203
204 #define MCX_CMD_CREATE_EQ 0x301
205 #define MCX_CMD_DESTROY_EQ 0x302
206 #define MCX_CMD_CREATE_CQ 0x400
207 #define MCX_CMD_DESTROY_CQ 0x401
208 #define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
209 0x754
210 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
211 0x755
212 #define MCX_CMD_QUERY_VPORT_COUNTERS \
213 0x770
214 #define MCX_CMD_ALLOC_PD 0x800
215 #define MCX_CMD_ALLOC_UAR 0x802
216 #define MCX_CMD_ACCESS_REG 0x805
217 #define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
218 0x816
219 #define MCX_CMD_CREATE_TIR 0x900
220 #define MCX_CMD_DESTROY_TIR 0x902
221 #define MCX_CMD_CREATE_SQ 0x904
222 #define MCX_CMD_MODIFY_SQ 0x905
223 #define MCX_CMD_DESTROY_SQ 0x906
224 #define MCX_CMD_QUERY_SQ 0x907
225 #define MCX_CMD_CREATE_RQ 0x908
226 #define MCX_CMD_MODIFY_RQ 0x909
227 #define MCX_CMD_DESTROY_RQ 0x90a
228 #define MCX_CMD_QUERY_RQ 0x90b
229 #define MCX_CMD_CREATE_TIS 0x912
230 #define MCX_CMD_DESTROY_TIS 0x914
231 #define MCX_CMD_SET_FLOW_TABLE_ROOT \
232 0x92f
233 #define MCX_CMD_CREATE_FLOW_TABLE \
234 0x930
235 #define MCX_CMD_DESTROY_FLOW_TABLE \
236 0x931
237 #define MCX_CMD_QUERY_FLOW_TABLE \
238 0x932
239 #define MCX_CMD_CREATE_FLOW_GROUP \
240 0x933
241 #define MCX_CMD_DESTROY_FLOW_GROUP \
242 0x934
243 #define MCX_CMD_QUERY_FLOW_GROUP \
244 0x935
245 #define MCX_CMD_SET_FLOW_TABLE_ENTRY \
246 0x936
247 #define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
248 0x937
249 #define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
250 0x938
251 #define MCX_CMD_ALLOC_FLOW_COUNTER \
252 0x939
253 #define MCX_CMD_QUERY_FLOW_COUNTER \
254 0x93b
255
256 #define MCX_QUEUE_STATE_RST 0
257 #define MCX_QUEUE_STATE_RDY 1
258 #define MCX_QUEUE_STATE_ERR 3
259
260 #define MCX_FLOW_TABLE_TYPE_RX 0
261 #define MCX_FLOW_TABLE_TYPE_TX 1
262
263 #define MCX_CMDQ_INLINE_DATASIZE 16
264
/*
 * Command queue entry layout.  This structure is shared with the device
 * over DMA, so it is __packed and the field order/offsets must not change.
 * Commands whose input or output exceeds MCX_CMDQ_INLINE_DATASIZE bytes
 * chain external mailboxes through cq_input_ptr/cq_output_ptr.
 */
struct mcx_cmdq_entry {
	uint8_t  cq_type;		/* entry type; only PCIe used here */
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t  cq_reserved0[3];

	uint32_t cq_input_length;	/* total input length, in bytes */
	uint64_t cq_input_ptr;		/* bus address of first input mailbox */
	uint8_t  cq_input_data[MCX_CMDQ_INLINE_DATASIZE]; /* inline input */

	uint8_t  cq_output_data[MCX_CMDQ_INLINE_DATASIZE]; /* inline output */
	uint64_t cq_output_ptr;		/* bus address of first output mailbox */
	uint32_t cq_output_length;	/* total output length, in bytes */

	uint8_t  cq_token;		/* ties the entry to its mailboxes */
	uint8_t  cq_signature;
	uint8_t  cq_reserved1[1];
	/*
	 * cq_status packs a 7-bit delivery status (bits 7:1) and the
	 * ownership bit (bit 0): SW writes the entry then hands it to HW.
	 */
	uint8_t  cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM	(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX	(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
304
#define MCX_CMDQ_MAILBOX_DATASIZE	512

/*
 * External command mailbox.  Mailboxes carry command data that does not
 * fit inline in the command queue entry; they are chained via mb_next_ptr
 * and matched to their entry by mb_token.  DMA-shared layout: __packed,
 * field order fixed.
 */
struct mcx_cmdq_mailbox {
	uint8_t  mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t  mb_reserved0[48];
	uint64_t mb_next_ptr;		/* bus address of next mailbox, or 0 */
	uint32_t mb_block_number;	/* position within the chain */
	uint8_t  mb_reserved1[1];
	uint8_t  mb_token;		/* must match the cmdq entry's token */
	uint8_t  mb_ctrl_signature;
	uint8_t  mb_signature;
} __packed __aligned(8);

/* mailboxes are allocated on 1KB boundaries */
#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
/*
 * command mailbox structures
 */
324
325 struct mcx_cmd_enable_hca_in {
326 uint16_t cmd_opcode;
327 uint8_t cmd_reserved0[4];
328 uint16_t cmd_op_mod;
329 uint8_t cmd_reserved1[2];
330 uint16_t cmd_function_id;
331 uint8_t cmd_reserved2[4];
332 } __packed __aligned(4);
333
334 struct mcx_cmd_enable_hca_out {
335 uint8_t cmd_status;
336 uint8_t cmd_reserved0[3];
337 uint32_t cmd_syndrome;
338 uint8_t cmd_reserved1[4];
339 } __packed __aligned(4);
340
341 struct mcx_cmd_init_hca_in {
342 uint16_t cmd_opcode;
343 uint8_t cmd_reserved0[4];
344 uint16_t cmd_op_mod;
345 uint8_t cmd_reserved1[8];
346 } __packed __aligned(4);
347
348 struct mcx_cmd_init_hca_out {
349 uint8_t cmd_status;
350 uint8_t cmd_reserved0[3];
351 uint32_t cmd_syndrome;
352 uint8_t cmd_reserved1[8];
353 } __packed __aligned(4);
354
/*
 * TEARDOWN_HCA input: cmd_profile selects a graceful teardown or the
 * faster panic-time variant.
 */
struct mcx_cmd_teardown_hca_in {
	uint16_t cmd_opcode;
	uint8_t  cmd_reserved0[4];
	uint16_t cmd_op_mod;
	uint8_t  cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t cmd_profile;
	uint8_t  cmd_reserved2[4];
} __packed __aligned(4);
365
366 struct mcx_cmd_teardown_hca_out {
367 uint8_t cmd_status;
368 uint8_t cmd_reserved0[3];
369 uint32_t cmd_syndrome;
370 uint8_t cmd_reserved1[8];
371 } __packed __aligned(4);
372
373 struct mcx_cmd_access_reg_in {
374 uint16_t cmd_opcode;
375 uint8_t cmd_reserved0[4];
376 uint16_t cmd_op_mod;
377 uint8_t cmd_reserved1[2];
378 uint16_t cmd_register_id;
379 uint32_t cmd_argument;
380 } __packed __aligned(4);
381
382 struct mcx_cmd_access_reg_out {
383 uint8_t cmd_status;
384 uint8_t cmd_reserved0[3];
385 uint32_t cmd_syndrome;
386 uint8_t cmd_reserved1[8];
387 } __packed __aligned(4);
388
/*
 * PMTU register: per-port MTU.  rp_max_mtu is read-only; the driver
 * programs rp_admin_mtu and the device reports the result in
 * rp_oper_mtu.  Accessed via the ACCESS_REG command (MCX_REG_PMTU).
 */
struct mcx_reg_pmtu {
	uint8_t  rp_reserved1;
	uint8_t  rp_local_port;
	uint8_t  rp_reserved2[2];
	uint16_t rp_max_mtu;
	uint8_t  rp_reserved3[2];
	uint16_t rp_admin_mtu;
	uint8_t  rp_reserved4[2];
	uint16_t rp_oper_mtu;
	uint8_t  rp_reserved5[2];
} __packed __aligned(4);
400
/*
 * PTYS register: port type and speed.  The three rp_eth_proto_* words
 * are bitmasks of MCX_ETHER_CAP_* values: what the port can do, what
 * the administrator allows, and what is currently negotiated.
 */
struct mcx_reg_ptys {
	uint8_t  rp_reserved1;
	uint8_t  rp_local_port;
	uint8_t  rp_reserved2;
	uint8_t  rp_proto_mask;		/* which protocol layer to query */
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t  rp_reserved3[8];
	uint32_t rp_eth_proto_cap;	/* supported link modes */
	uint8_t  rp_reserved4[8];
	uint32_t rp_eth_proto_admin;	/* administratively enabled modes */
	uint8_t  rp_reserved5[8];
	uint32_t rp_eth_proto_oper;	/* currently operating mode */
	uint8_t  rp_reserved6[24];
} __packed __aligned(4);
415
/*
 * PAOS register: port administrative and operational status (i.e. admin
 * up/down versus actual link state).  rp_admin_status is only applied
 * when the ADMIN_STATE_UPDATE_EN bit is set in rp_admin_state_update.
 */
struct mcx_reg_paos {
	uint8_t  rp_reserved1;
	uint8_t  rp_local_port;
	uint8_t  rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t  rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED	4
	uint8_t  rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t  rp_reserved2[11];
} __packed __aligned(4);
432
433 struct mcx_reg_pfcc {
434 uint8_t rp_reserved1;
435 uint8_t rp_local_port;
436 uint8_t rp_reserved2[3];
437 uint8_t rp_prio_mask_tx;
438 uint8_t rp_reserved3;
439 uint8_t rp_prio_mask_rx;
440 uint8_t rp_pptx_aptx;
441 uint8_t rp_pfctx;
442 uint8_t rp_fctx_dis;
443 uint8_t rp_reserved4;
444 uint8_t rp_pprx_aprx;
445 uint8_t rp_pfcrx;
446 uint8_t rp_reserved5[2];
447 uint16_t rp_dev_stall_min;
448 uint16_t rp_dev_stall_crit;
449 uint8_t rp_reserved6[12];
450 } __packed __aligned(4);
451
452 #define MCX_PMLP_MODULE_NUM_MASK 0xff
453 struct mcx_reg_pmlp {
454 uint8_t rp_rxtx;
455 uint8_t rp_local_port;
456 uint8_t rp_reserved0;
457 uint8_t rp_width;
458 uint32_t rp_lane0_mapping;
459 uint32_t rp_lane1_mapping;
460 uint32_t rp_lane2_mapping;
461 uint32_t rp_lane3_mapping;
462 uint8_t rp_reserved1[44];
463 } __packed __aligned(4);
464
/* bytes fetched per MCIA access; NB: rm_data itself is 48 bytes */
#define MCX_MCIA_EEPROM_BYTES	32

/*
 * MCIA register: cable/module EEPROM access over I2C.
 * NOTE(review): field names suggest rm_dev_addr/rm_page_num select the
 * EEPROM offset and page, and rm_size the byte count returned in
 * rm_data — confirm against the PRM before relying on this.
 */
struct mcx_reg_mcia {
	uint8_t  rm_l;
	uint8_t  rm_module;		/* module number (see PMLP mapping) */
	uint8_t  rm_reserved0;
	uint8_t  rm_status;
	uint8_t  rm_i2c_addr;
	uint8_t  rm_page_num;
	uint16_t rm_dev_addr;
	uint16_t rm_reserved1;
	uint16_t rm_size;
	uint32_t rm_reserved2;
	uint8_t  rm_data[48];
} __packed __aligned(4);
479
480 struct mcx_cmd_query_issi_in {
481 uint16_t cmd_opcode;
482 uint8_t cmd_reserved0[4];
483 uint16_t cmd_op_mod;
484 uint8_t cmd_reserved1[8];
485 } __packed __aligned(4);
486
487 struct mcx_cmd_query_issi_il_out {
488 uint8_t cmd_status;
489 uint8_t cmd_reserved0[3];
490 uint32_t cmd_syndrome;
491 uint8_t cmd_reserved1[2];
492 uint16_t cmd_current_issi;
493 uint8_t cmd_reserved2[4];
494 } __packed __aligned(4);
495
496 CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);
497
498 struct mcx_cmd_query_issi_mb_out {
499 uint8_t cmd_reserved2[16];
500 uint8_t cmd_supported_issi[80]; /* very big endian */
501 } __packed __aligned(4);
502
503 CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);
504
505 struct mcx_cmd_set_issi_in {
506 uint16_t cmd_opcode;
507 uint8_t cmd_reserved0[4];
508 uint16_t cmd_op_mod;
509 uint8_t cmd_reserved1[2];
510 uint16_t cmd_current_issi;
511 uint8_t cmd_reserved2[4];
512 } __packed __aligned(4);
513
514 CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);
515
516 struct mcx_cmd_set_issi_out {
517 uint8_t cmd_status;
518 uint8_t cmd_reserved0[3];
519 uint32_t cmd_syndrome;
520 uint8_t cmd_reserved1[8];
521 } __packed __aligned(4);
522
523 CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);
524
525 struct mcx_cmd_query_pages_in {
526 uint16_t cmd_opcode;
527 uint8_t cmd_reserved0[4];
528 uint16_t cmd_op_mod;
529 #define MCX_CMD_QUERY_PAGES_BOOT 0x01
530 #define MCX_CMD_QUERY_PAGES_INIT 0x02
531 #define MCX_CMD_QUERY_PAGES_REGULAR 0x03
532 uint8_t cmd_reserved1[8];
533 } __packed __aligned(4);
534
535 struct mcx_cmd_query_pages_out {
536 uint8_t cmd_status;
537 uint8_t cmd_reserved0[3];
538 uint32_t cmd_syndrome;
539 uint8_t cmd_reserved1[2];
540 uint16_t cmd_func_id;
541 uint32_t cmd_num_pages;
542 } __packed __aligned(4);
543
544 struct mcx_cmd_manage_pages_in {
545 uint16_t cmd_opcode;
546 uint8_t cmd_reserved0[4];
547 uint16_t cmd_op_mod;
548 #define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
549 0x00
550 #define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
551 0x01
552 #define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
553 0x02
554 uint8_t cmd_reserved1[2];
555 uint16_t cmd_func_id;
556 uint32_t cmd_input_num_entries;
557 } __packed __aligned(4);
558
559 CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);
560
561 struct mcx_cmd_manage_pages_out {
562 uint8_t cmd_status;
563 uint8_t cmd_reserved0[3];
564 uint32_t cmd_syndrome;
565 uint32_t cmd_output_num_entries;
566 uint8_t cmd_reserved1[4];
567 } __packed __aligned(4);
568
569 CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
570
571 struct mcx_cmd_query_hca_cap_in {
572 uint16_t cmd_opcode;
573 uint8_t cmd_reserved0[4];
574 uint16_t cmd_op_mod;
575 #define MCX_CMD_QUERY_HCA_CAP_MAX (0x0 << 0)
576 #define MCX_CMD_QUERY_HCA_CAP_CURRENT (0x1 << 0)
577 #define MCX_CMD_QUERY_HCA_CAP_DEVICE (0x0 << 1)
578 #define MCX_CMD_QUERY_HCA_CAP_OFFLOAD (0x1 << 1)
579 #define MCX_CMD_QUERY_HCA_CAP_FLOW (0x7 << 1)
580 uint8_t cmd_reserved1[8];
581 } __packed __aligned(4);
582
583 struct mcx_cmd_query_hca_cap_out {
584 uint8_t cmd_status;
585 uint8_t cmd_reserved0[3];
586 uint32_t cmd_syndrome;
587 uint8_t cmd_reserved1[8];
588 } __packed __aligned(4);
589
590 #define MCX_HCA_CAP_LEN 0x1000
591 #define MCX_HCA_CAP_NMAILBOXES \
592 (MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
593
594 #if __GNUC_PREREQ__(4, 3)
595 #define __counter__ __COUNTER__
596 #else
597 #define __counter__ __LINE__
598 #endif
599
600 #define __token(_tok, _num) _tok##_num
601 #define _token(_tok, _num) __token(_tok, _num)
602 #define __reserved__ _token(__reserved, __counter__)
603
604 struct mcx_cap_device {
605 uint8_t reserved0[16];
606
607 uint8_t log_max_srq_sz;
608 uint8_t log_max_qp_sz;
609 uint8_t __reserved__[1];
610 uint8_t log_max_qp; /* 5 bits */
611 #define MCX_CAP_DEVICE_LOG_MAX_QP 0x1f
612
613 uint8_t __reserved__[1];
614 uint8_t log_max_srq; /* 5 bits */
615 #define MCX_CAP_DEVICE_LOG_MAX_SRQ 0x1f
616 uint8_t __reserved__[2];
617
618 uint8_t __reserved__[1];
619 uint8_t log_max_cq_sz;
620 uint8_t __reserved__[1];
621 uint8_t log_max_cq; /* 5 bits */
622 #define MCX_CAP_DEVICE_LOG_MAX_CQ 0x1f
623
624 uint8_t log_max_eq_sz;
625 uint8_t log_max_mkey; /* 6 bits */
626 #define MCX_CAP_DEVICE_LOG_MAX_MKEY 0x3f
627 uint8_t __reserved__[1];
628 uint8_t log_max_eq; /* 4 bits */
629 #define MCX_CAP_DEVICE_LOG_MAX_EQ 0x0f
630
631 uint8_t max_indirection;
632 uint8_t log_max_mrw_sz; /* 7 bits */
633 #define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ 0x7f
634 uint8_t teardown_log_max_msf_list_size;
635 #define MCX_CAP_DEVICE_FORCE_TEARDOWN 0x80
636 #define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
637 0x3f
638 uint8_t log_max_klm_list_size; /* 6 bits */
639 #define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
640 0x3f
641
642 uint8_t __reserved__[1];
643 uint8_t log_max_ra_req_dc; /* 6 bits */
644 #define MCX_CAP_DEVICE_LOG_MAX_REQ_DC 0x3f
645 uint8_t __reserved__[1];
646 uint8_t log_max_ra_res_dc; /* 6 bits */
647 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
648 0x3f
649
650 uint8_t __reserved__[1];
651 uint8_t log_max_ra_req_qp; /* 6 bits */
652 #define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
653 0x3f
654 uint8_t __reserved__[1];
655 uint8_t log_max_ra_res_qp; /* 6 bits */
656 #define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
657 0x3f
658
659 uint8_t flags1;
660 #define MCX_CAP_DEVICE_END_PAD 0x80
661 #define MCX_CAP_DEVICE_CC_QUERY_ALLOWED 0x40
662 #define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
663 0x20
664 #define MCX_CAP_DEVICE_START_PAD 0x10
665 #define MCX_CAP_DEVICE_128BYTE_CACHELINE \
666 0x08
667 uint8_t __reserved__[1];
668 uint16_t gid_table_size;
669
670 uint16_t flags2;
671 #define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT 0x8000
672 #define MCX_CAP_DEVICE_VPORT_COUNTERS 0x4000
673 #define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
674 0x2000
675 #define MCX_CAP_DEVICE_DEBUG 0x1000
676 #define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
677 0x8000
678 #define MCX_CAP_DEVICE_RQ_DELAY_DROP 0x4000
679 #define MCX_CAP_DEVICe_MAX_QP_CNT_MASK 0x03ff
680 uint16_t pkey_table_size;
681
682 uint8_t flags3;
683 #define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
684 0x80
685 #define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
686 0x40
687 #define MCX_CAP_DEVICE_IB_VIRTUAL 0x20
688 #define MCX_CAP_DEVICE_ETH_VIRTUAL 0x10
689 #define MCX_CAP_DEVICE_ETS 0x04
690 #define MCX_CAP_DEVICE_NIC_FLOW_TABLE 0x02
691 #define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
692 0x01
693 uint8_t local_ca_ack_delay; /* 5 bits */
694 #define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
695 0x1f
696 uint8_t port_type;
697 #define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
698 0x80
699 #define MCX_CAP_DEVICE_PORT_TYPE 0x03
700 uint8_t num_ports;
701
702 uint8_t snapshot_log_max_msg;
703 #define MCX_CAP_DEVICE_SNAPSHOT 0x80
704 #define MCX_CAP_DEVICE_LOG_MAX_MSG 0x1f
705 uint8_t max_tc; /* 4 bits */
706 #define MCX_CAP_DEVICE_MAX_TC 0x0f
707 uint8_t flags4;
708 #define MCX_CAP_DEVICE_TEMP_WARN_EVENT 0x80
709 #define MCX_CAP_DEVICE_DCBX 0x40
710 #define MCX_CAP_DEVICE_ROL_S 0x02
711 #define MCX_CAP_DEVICE_ROL_G 0x01
712 uint8_t wol;
713 #define MCX_CAP_DEVICE_WOL_S 0x40
714 #define MCX_CAP_DEVICE_WOL_G 0x20
715 #define MCX_CAP_DEVICE_WOL_A 0x10
716 #define MCX_CAP_DEVICE_WOL_B 0x08
717 #define MCX_CAP_DEVICE_WOL_M 0x04
718 #define MCX_CAP_DEVICE_WOL_U 0x02
719 #define MCX_CAP_DEVICE_WOL_P 0x01
720
721 uint16_t stat_rate_support;
722 uint8_t __reserved__[1];
723 uint8_t cqe_version; /* 4 bits */
724 #define MCX_CAP_DEVICE_CQE_VERSION 0x0f
725
726 uint32_t flags5;
727 #define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
728 0x80000000
729 #define MCX_CAP_DEVICE_STRIDING_RQ 0x40000000
730 #define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
731 0x10000000
732 #define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
733 0x08000000
734 #define MCX_CAP_DEVICE_DC_CONNECT_CP 0x00040000
735 #define MCX_CAP_DEVICE_DC_CNAK_DRACE 0x00020000
736 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
737 #define MCX_CAP_DEVICE_DRAIN_SIGERR 0x00010000
738 #define MCX_CAP_DEVICE_CMDIF_CHECKSUM 0x0000c000
739 #define MCX_CAP_DEVICE_SIGERR_QCE 0x00002000
740 #define MCX_CAP_DEVICE_WQ_SIGNATURE 0x00000800
741 #define MCX_CAP_DEVICE_SCTR_DATA_CQE 0x00000400
742 #define MCX_CAP_DEVICE_SHO 0x00000100
743 #define MCX_CAP_DEVICE_TPH 0x00000080
744 #define MCX_CAP_DEVICE_RF 0x00000040
745 #define MCX_CAP_DEVICE_DCT 0x00000020
746 #define MCX_CAP_DEVICE_QOS 0x00000010
747 #define MCX_CAP_DEVICe_ETH_NET_OFFLOADS 0x00000008
748 #define MCX_CAP_DEVICE_ROCE 0x00000004
749 #define MCX_CAP_DEVICE_ATOMIC 0x00000002
750
751 uint32_t flags6;
752 #define MCX_CAP_DEVICE_CQ_OI 0x80000000
753 #define MCX_CAP_DEVICE_CQ_RESIZE 0x40000000
754 #define MCX_CAP_DEVICE_CQ_MODERATION 0x20000000
755 #define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
756 0x10000000
757 #define MCX_CAP_DEVICE_CQ_INVALIDATE 0x08000000
758 #define MCX_CAP_DEVICE_RESERVED_AT_255 0x04000000
759 #define MCX_CAP_DEVICE_CQ_EQ_REMAP 0x02000000
760 #define MCX_CAP_DEVICE_PG 0x01000000
761 #define MCX_CAP_DEVICE_BLOCK_LB_MC 0x00800000
762 #define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
763 0x00400000
764 #define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
765 0x00200000
766 #define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
767 0x00100000
768 #define MCX_CAP_DEVICE_CD 0x00080000
769 #define MCX_CAP_DEVICE_ATM 0x00040000
770 #define MCX_CAP_DEVICE_APM 0x00020000
771 #define MCX_CAP_DEVICE_IMAICL 0x00010000
772 #define MCX_CAP_DEVICE_QKV 0x00000200
773 #define MCX_CAP_DEVICE_PKV 0x00000100
774 #define MCX_CAP_DEVICE_SET_DETH_SQPN 0x00000080
775 #define MCX_CAP_DEVICE_XRC 0x00000008
776 #define MCX_CAP_DEVICE_UD 0x00000004
777 #define MCX_CAP_DEVICE_UC 0x00000002
778 #define MCX_CAP_DEVICE_RC 0x00000001
779
780 uint8_t uar_flags;
781 #define MCX_CAP_DEVICE_UAR_4K 0x80
782 uint8_t uar_sz; /* 6 bits */
783 #define MCX_CAP_DEVICE_UAR_SZ 0x3f
784 uint8_t __reserved__[1];
785 uint8_t log_pg_sz;
786
787 uint8_t flags7;
788 #define MCX_CAP_DEVICE_BF 0x80
789 #define MCX_CAP_DEVICE_DRIVER_VERSION 0x40
790 #define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
791 0x20
792 uint8_t log_bf_reg_size; /* 5 bits */
793 #define MCX_CAP_DEVICE_LOG_BF_REG_SIZE 0x1f
794 uint8_t __reserved__[2];
795
796 uint16_t num_of_diagnostic_counters;
797 uint16_t max_wqe_sz_sq;
798
799 uint8_t __reserved__[2];
800 uint16_t max_wqe_sz_rq;
801
802 uint8_t __reserved__[2];
803 uint16_t max_wqe_sz_sq_dc;
804
805 uint32_t max_qp_mcg; /* 25 bits */
806 #define MCX_CAP_DEVICE_MAX_QP_MCG 0x1ffffff
807
808 uint8_t __reserved__[3];
809 uint8_t log_max_mcq;
810
811 uint8_t log_max_transport_domain; /* 5 bits */
812 #define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
813 0x1f
814 uint8_t log_max_pd; /* 5 bits */
815 #define MCX_CAP_DEVICE_LOG_MAX_PD 0x1f
816 uint8_t __reserved__[1];
817 uint8_t log_max_xrcd; /* 5 bits */
818 #define MCX_CAP_DEVICE_LOG_MAX_XRCD 0x1f
819
820 uint8_t __reserved__[2];
821 uint16_t max_flow_counter;
822
823 uint8_t log_max_rq; /* 5 bits */
824 #define MCX_CAP_DEVICE_LOG_MAX_RQ 0x1f
825 uint8_t log_max_sq; /* 5 bits */
826 #define MCX_CAP_DEVICE_LOG_MAX_SQ 0x1f
827 uint8_t log_max_tir; /* 5 bits */
828 #define MCX_CAP_DEVICE_LOG_MAX_TIR 0x1f
829 uint8_t log_max_tis; /* 5 bits */
830 #define MCX_CAP_DEVICE_LOG_MAX_TIS 0x1f
831
832 uint8_t flags8;
833 #define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
834 0x80
835 #define MCX_CAP_DEVICE_LOG_MAX_RMP 0x1f
836 uint8_t log_max_rqt; /* 5 bits */
837 #define MCX_CAP_DEVICE_LOG_MAX_RQT 0x1f
838 uint8_t log_max_rqt_size; /* 5 bits */
839 #define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE 0x1f
840 uint8_t log_max_tis_per_sq; /* 5 bits */
841 #define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
842 0x1f
843 } __packed __aligned(8);
844
845 CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
846 CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
847 CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
848 CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
849 CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
850 CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
851 CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
852
853 struct mcx_cmd_set_driver_version_in {
854 uint16_t cmd_opcode;
855 uint8_t cmd_reserved0[4];
856 uint16_t cmd_op_mod;
857 uint8_t cmd_reserved1[8];
858 } __packed __aligned(4);
859
860 struct mcx_cmd_set_driver_version_out {
861 uint8_t cmd_status;
862 uint8_t cmd_reserved0[3];
863 uint32_t cmd_syndrome;
864 uint8_t cmd_reserved1[8];
865 } __packed __aligned(4);
866
867 struct mcx_cmd_set_driver_version {
868 uint8_t cmd_driver_version[64];
869 } __packed __aligned(8);
870
871 struct mcx_cmd_modify_nic_vport_context_in {
872 uint16_t cmd_opcode;
873 uint8_t cmd_reserved0[4];
874 uint16_t cmd_op_mod;
875 uint8_t cmd_reserved1[4];
876 uint32_t cmd_field_select;
877 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR 0x04
878 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC 0x10
879 #define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU 0x40
880 } __packed __aligned(4);
881
882 struct mcx_cmd_modify_nic_vport_context_out {
883 uint8_t cmd_status;
884 uint8_t cmd_reserved0[3];
885 uint32_t cmd_syndrome;
886 uint8_t cmd_reserved1[8];
887 } __packed __aligned(4);
888
889 struct mcx_cmd_query_nic_vport_context_in {
890 uint16_t cmd_opcode;
891 uint8_t cmd_reserved0[4];
892 uint16_t cmd_op_mod;
893 uint8_t cmd_reserved1[4];
894 uint8_t cmd_allowed_list_type;
895 uint8_t cmd_reserved2[3];
896 } __packed __aligned(4);
897
898 struct mcx_cmd_query_nic_vport_context_out {
899 uint8_t cmd_status;
900 uint8_t cmd_reserved0[3];
901 uint32_t cmd_syndrome;
902 uint8_t cmd_reserved1[8];
903 } __packed __aligned(4);
904
/*
 * NIC vport context, used with QUERY/MODIFY_NIC_VPORT_CONTEXT.  Holds
 * the port MTU, permanent MAC address, promiscuous-mode flags, and an
 * allowed-address list (entries follow the fixed part).  The LIST_*
 * values select which kind of list follows; PROMISC_* are flag bits.
 */
struct mcx_nic_vport_ctx {
	uint32_t vp_min_wqe_inline_mode;
	uint8_t  vp_reserved0[32];
	uint32_t vp_mtu;
	uint8_t  vp_reserved1[200];
	uint16_t vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC		(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC		(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN		(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL		(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST	(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST	(1 << 15)
	uint16_t vp_allowed_list_size;	/* number of entries that follow */
	uint64_t vp_perm_addr;		/* permanent MAC address */
	uint8_t  vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);
922
923 struct mcx_counter {
924 uint64_t packets;
925 uint64_t octets;
926 } __packed __aligned(4);
927
928 struct mcx_nic_vport_counters {
929 struct mcx_counter rx_err;
930 struct mcx_counter tx_err;
931 uint8_t reserved0[64]; /* 0x30 */
932 struct mcx_counter rx_bcast;
933 struct mcx_counter tx_bcast;
934 struct mcx_counter rx_ucast;
935 struct mcx_counter tx_ucast;
936 struct mcx_counter rx_mcast;
937 struct mcx_counter tx_mcast;
938 uint8_t reserved1[0x210 - 0xd0];
939 } __packed __aligned(4);
940
941 struct mcx_cmd_query_vport_counters_in {
942 uint16_t cmd_opcode;
943 uint8_t cmd_reserved0[4];
944 uint16_t cmd_op_mod;
945 uint8_t cmd_reserved1[8];
946 } __packed __aligned(4);
947
948 struct mcx_cmd_query_vport_counters_mb_in {
949 uint8_t cmd_reserved0[8];
950 uint8_t cmd_clear;
951 uint8_t cmd_reserved1[7];
952 } __packed __aligned(4);
953
954 struct mcx_cmd_query_vport_counters_out {
955 uint8_t cmd_status;
956 uint8_t cmd_reserved0[3];
957 uint32_t cmd_syndrome;
958 uint8_t cmd_reserved1[8];
959 } __packed __aligned(4);
960
961 struct mcx_cmd_query_flow_counter_in {
962 uint16_t cmd_opcode;
963 uint8_t cmd_reserved0[4];
964 uint16_t cmd_op_mod;
965 uint8_t cmd_reserved1[8];
966 } __packed __aligned(4);
967
968 struct mcx_cmd_query_flow_counter_mb_in {
969 uint8_t cmd_reserved0[8];
970 uint8_t cmd_clear;
971 uint8_t cmd_reserved1[5];
972 uint16_t cmd_flow_counter_id;
973 } __packed __aligned(4);
974
975 struct mcx_cmd_query_flow_counter_out {
976 uint8_t cmd_status;
977 uint8_t cmd_reserved0[3];
978 uint32_t cmd_syndrome;
979 uint8_t cmd_reserved1[8];
980 } __packed __aligned(4);
981
982 struct mcx_cmd_alloc_uar_in {
983 uint16_t cmd_opcode;
984 uint8_t cmd_reserved0[4];
985 uint16_t cmd_op_mod;
986 uint8_t cmd_reserved1[8];
987 } __packed __aligned(4);
988
989 struct mcx_cmd_alloc_uar_out {
990 uint8_t cmd_status;
991 uint8_t cmd_reserved0[3];
992 uint32_t cmd_syndrome;
993 uint32_t cmd_uar;
994 uint8_t cmd_reserved1[4];
995 } __packed __aligned(4);
996
997 struct mcx_cmd_query_special_ctx_in {
998 uint16_t cmd_opcode;
999 uint8_t cmd_reserved0[4];
1000 uint16_t cmd_op_mod;
1001 uint8_t cmd_reserved1[8];
1002 } __packed __aligned(4);
1003
1004 struct mcx_cmd_query_special_ctx_out {
1005 uint8_t cmd_status;
1006 uint8_t cmd_reserved0[3];
1007 uint32_t cmd_syndrome;
1008 uint8_t cmd_reserved1[4];
1009 uint32_t cmd_resd_lkey;
1010 } __packed __aligned(4);
1011
/*
 * Event queue context, 64 bytes (see the CTASSERT below).  Passed to the
 * device in CREATE_EQ; eq_status packs the EQ state, overrun-ignore (OI)
 * and event-coalescing (EC) bits plus a 4-bit status code.
 */
struct mcx_eq_ctx {
	uint32_t eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT	28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t eq_reserved1;
	uint32_t eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t eq_uar_size;		/* UAR page (low 24 bits) + log size */
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t eq_reserved2;
	uint8_t  eq_reserved3[3];
	uint8_t  eq_intr;		/* interrupt vector number */
	uint32_t eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t eq_reserved4[3];
	uint32_t eq_consumer_counter;
	uint32_t eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK	0xffffff
	uint32_t eq_reserved5[4];
} __packed __aligned(4);
1044
1045 CTASSERT(sizeof(struct mcx_eq_ctx) == 64);
1046
/* CREATE_EQ: inline input. */
struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_EQ: mailbox input — EQ context plus the event subscription mask. */
struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;	/* one bit per event type below */
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;	/* event queue number assigned by fw */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* Event queue entry; hardware layout, 64 bytes. */
struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;	/* MCX_EVENT_TYPE_* */
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;	/* ownership bit, initialised to MCX_EQ_ENTRY_OWNER_INIT */
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
1091
/* ALLOC_PD: allocate a protection domain. */
struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;		/* protection domain id */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* ALLOC_TRANSPORT_DOMAIN. */
struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;	/* transport domain id */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* CREATE_TIR: transport interface receive object (rx side). */
struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;	/* RQ to deliver to (inline dispatch) */
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;	/* TIR number */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_TIS: transport interface send object (tx side). */
struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;	/* TIS number */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1205
/*
 * Completion queue context, passed to CREATE_CQ inside the mailbox.
 * Hardware layout; must be exactly 64 bytes (CTASSERT below).
 */
struct mcx_cq_ctx {
	uint32_t		cq_status;
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;	/* interrupt moderation */
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;		/* EQ to report completions to */
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;	/* DMA address of the doorbell record */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

/* CREATE_CQ: inline input. */
struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;	/* completion queue number */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1263
1264 struct mcx_cq_entry {
1265 uint32_t __reserved__;
1266 uint32_t cq_lro;
1267 uint32_t cq_lro_ack_seq_num;
1268 uint32_t cq_rx_hash;
1269 uint8_t cq_rx_hash_type;
1270 uint8_t cq_ml_path;
1271 uint16_t __reserved__;
1272 uint32_t cq_checksum;
1273 uint32_t __reserved__;
1274 uint32_t cq_flags;
1275 uint32_t cq_lro_srqn;
1276 uint32_t __reserved__[2];
1277 uint32_t cq_byte_cnt;
1278 uint64_t cq_timestamp;
1279 uint8_t cq_rx_drops;
1280 uint8_t cq_flow_tag[3];
1281 uint16_t cq_wqe_count;
1282 uint8_t cq_signature;
1283 uint8_t cq_opcode_owner;
1284 #define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
1285 #define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
1286 #define MCX_CQ_ENTRY_FORMAT_SHIFT 2
1287 #define MCX_CQ_ENTRY_OPCODE_SHIFT 4
1288
1289 #define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
1290 #define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
1291 #define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
1292 #define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3
1293
1294 #define MCX_CQ_ENTRY_OPCODE_REQ 0
1295 #define MCX_CQ_ENTRY_OPCODE_SEND 2
1296 #define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
1297 #define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
1298 #define MCX_CQ_ENTRY_OPCODE_INVALID 15
1299
1300 } __packed __aligned(4);
1301
1302 CTASSERT(sizeof(struct mcx_cq_entry) == 64);
1303
/* Completion queue doorbell record (8-byte aligned for a single write). */
struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;	/* consumer index update */
	uint32_t		 db_arm_ci;	/* arm request: sn/cmd/ci fields below */
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);

/*
 * Work queue context, embedded in both the SQ and RQ contexts.
 * Hardware layout; must be exactly 0xC0 bytes (CTASSERT below).
 */
struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;		/* protection domain */
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;	/* DMA address of the doorbell record */
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

/* Send queue context, used by CREATE_SQ/MODIFY_SQ. */
struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
	/* NOTE(review): named _SHIFT but defined as a bit mask — confirm intent */
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;		/* CQ receiving tx completions */
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

/* Scatter/gather segment of a send WQE. */
struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;	/* DMA address of the data */
} __packed __aligned(4);
1353
1354 struct mcx_sq_entry {
1355 /* control segment */
1356 uint32_t sqe_opcode_index;
1357 #define MCX_SQE_WQE_INDEX_SHIFT 8
1358 #define MCX_SQE_WQE_OPCODE_NOP 0x00
1359 #define MCX_SQE_WQE_OPCODE_SEND 0x0a
1360 uint32_t sqe_ds_sq_num;
1361 #define MCX_SQE_SQ_NUM_SHIFT 8
1362 uint32_t sqe_signature;
1363 #define MCX_SQE_SIGNATURE_SHIFT 24
1364 #define MCX_SQE_SOLICITED_EVENT 0x02
1365 #define MCX_SQE_CE_CQE_ON_ERR 0x00
1366 #define MCX_SQE_CE_CQE_FIRST_ERR 0x04
1367 #define MCX_SQE_CE_CQE_ALWAYS 0x08
1368 #define MCX_SQE_CE_CQE_SOLICIT 0x0C
1369 #define MCX_SQE_FM_NO_FENCE 0x00
1370 #define MCX_SQE_FM_SMALL_FENCE 0x40
1371 uint32_t sqe_mkey;
1372
1373 /* ethernet segment */
1374 uint32_t sqe_reserved1;
1375 uint32_t sqe_mss_csum;
1376 #define MCX_SQE_L4_CSUM (1 << 31)
1377 #define MCX_SQE_L3_CSUM (1 << 30)
1378 uint32_t sqe_reserved2;
1379 uint16_t sqe_inline_header_size;
1380 uint16_t sqe_inline_headers[9];
1381
1382 /* data segment */
1383 struct mcx_sq_entry_seg sqe_segs[1];
1384 } __packed __aligned(64);
1385
1386 CTASSERT(sizeof(struct mcx_sq_entry) == 64);
1387
/* CREATE_SQ: inline input (SQ context goes in the mailbox). */
struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;	/* send queue number */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_SQ: state transition (e.g. ready the queue). */
struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;	/* field-select bitmask */
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);


/* Receive queue context, used by CREATE_RQ/MODIFY_RQ. */
struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;		/* CQ receiving rx completions */
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

/* Receive WQE: a single scatter entry. */
struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;	/* DMA address of the rx buffer */
} __packed __aligned(16);
1461
/* CREATE_RQ: inline input (RQ context goes in the mailbox). */
struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;	/* receive queue number */
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* MODIFY_RQ: state transition (e.g. ready the queue). */
struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;	/* field-select bitmask */
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1513
/* CREATE_FLOW_TABLE: inline input. */
struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* Flow table parameters, shared by create and query. */
struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;	/* what to do on no match */
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;	/* log2 of table capacity */
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[7];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ROOT: make a table the root of rx steering. */
struct mcx_cmd_set_flow_table_root_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[56];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_root_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1585
/*
 * Flow match criteria/values, used both as the match mask of a flow
 * group and the match value of a flow table entry.  Hardware layout;
 * must be exactly 512 bytes (CTASSERT below).
 */
struct mcx_flow_match {
	/* outer headers */
	uint8_t			mc_src_mac[6];
	uint16_t		mc_ethertype;
	uint8_t			mc_dest_mac[6];
	uint16_t		mc_first_vlan;
	uint8_t			mc_ip_proto;
	uint8_t			mc_ip_dscp_ecn;
	uint8_t			mc_vlan_flags;
	uint8_t			mc_tcp_flags;
	uint16_t		mc_tcp_sport;
	uint16_t		mc_tcp_dport;
	uint32_t		mc_reserved0;
	uint16_t		mc_udp_sport;
	uint16_t		mc_udp_dport;
	uint8_t			mc_src_ip[16];	/* 16 bytes: holds v4 or v6 */
	uint8_t			mc_dest_ip[16];

	/* misc parameters */
	uint8_t			mc_reserved1[8];
	uint16_t		mc_second_vlan;
	uint8_t			mc_reserved2[2];
	uint8_t			mc_second_vlan_flags;
	uint8_t			mc_reserved3[15];
	uint32_t		mc_outer_ipv6_flow_label;
	uint8_t			mc_reserved4[32];

	uint8_t			mc_reserved[384];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_flow_match) == 512);
1617
/* CREATE_FLOW_GROUP: inline input. */
struct mcx_cmd_create_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* CREATE_FLOW_GROUP: mailbox input — index range plus match mask. */
struct mcx_cmd_create_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_start_flow_index;	/* first table slot owned by the group */
	uint8_t			cmd_reserved2[4];
	uint32_t		cmd_end_flow_index;	/* last table slot owned by the group */
	uint8_t			cmd_reserved3[23];
	uint8_t			cmd_match_criteria_enable;
#define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
#define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
#define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
	struct mcx_flow_match	cmd_match_criteria;
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);

struct mcx_cmd_create_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

/* Flow table entry context: match values plus the action to take. */
struct mcx_flow_ctx {
	uint8_t			fc_reserved0[4];
	uint32_t		fc_group_id;
	uint32_t		fc_flow_tag;
	uint32_t		fc_action;
#define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
#define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
#define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
#define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
	uint32_t		fc_dest_list_size;
	uint32_t		fc_counter_list_size;
	uint8_t			fc_reserved1[40];
	struct mcx_flow_match	fc_match_value;
	uint8_t			fc_reserved2[192];
} __packed __aligned(4);

/* Destination entry types appended after the flow context. */
#define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
#define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
1668
/* DESTROY_FLOW_GROUP. */
struct mcx_cmd_destroy_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_destroy_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* SET_FLOW_TABLE_ENTRY: install/update one entry. */
struct mcx_cmd_set_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_modify_enable_mask;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_flow_index;	/* slot within the table */
	uint8_t			cmd_reserved2[28];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

struct mcx_cmd_set_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1715
/* QUERY_FLOW_TABLE_ENTRY (used by the #if 0 dump helpers below). */
struct mcx_cmd_query_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_entry_mb_out {
	uint8_t			cmd_reserved0[48];
	struct mcx_flow_ctx	cmd_flow_ctx;
} __packed __aligned(4);

/* DELETE_FLOW_TABLE_ENTRY. */
struct mcx_cmd_delete_flow_table_entry_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_flow_index;
	uint8_t			cmd_reserved2[28];
} __packed __aligned(4);

struct mcx_cmd_delete_flow_table_entry_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

/* QUERY_FLOW_GROUP. */
struct mcx_cmd_query_flow_group_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint32_t		cmd_group_id;
	uint8_t			cmd_reserved1[36];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_group_mb_out {
	uint8_t			cmd_reserved0[12];
	uint32_t		cmd_start_flow_index;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_end_flow_index;
	uint8_t			cmd_reserved2[20];
	uint32_t		cmd_match_criteria_enable;
	uint8_t			cmd_match_criteria[512];	/* raw mcx_flow_match bytes */
	uint8_t			cmd_reserved4[448];
} __packed __aligned(4);
1799
/* QUERY_FLOW_TABLE. */
struct mcx_cmd_query_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_in {
	uint8_t			cmd_table_type;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_table_id;
	uint8_t			cmd_reserved1[40];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_table_mb_out {
	uint8_t			cmd_reserved0[4];
	struct mcx_flow_table_ctx cmd_ctx;
} __packed __aligned(4);

/* ALLOC_FLOW_COUNTER: inline input (output struct is further below). */
struct mcx_cmd_alloc_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);
1832
1833 struct mcx_cmd_query_rq_in {
1834 uint16_t cmd_opcode;
1835 uint8_t cmd_reserved0[4];
1836 uint16_t cmd_op_mod;
1837 uint32_t cmd_rqn;
1838 uint8_t cmd_reserved1[4];
1839 } __packed __aligned(4);
1840
1841 struct mcx_cmd_query_rq_out {
1842 uint8_t cmd_status;
1843 uint8_t cmd_reserved0[3];
1844 uint32_t cmd_syndrome;
1845 uint8_t cmd_reserved1[8];
1846 } __packed __aligned(4);
1847
1848 struct mcx_cmd_query_rq_mb_out {
1849 uint8_t cmd_reserved0[16];
1850 struct mcx_rq_ctx cmd_ctx;
1851 };
1852
1853 struct mcx_cmd_query_sq_in {
1854 uint16_t cmd_opcode;
1855 uint8_t cmd_reserved0[4];
1856 uint16_t cmd_op_mod;
1857 uint32_t cmd_sqn;
1858 uint8_t cmd_reserved1[4];
1859 } __packed __aligned(4);
1860
1861 struct mcx_cmd_query_sq_out {
1862 uint8_t cmd_status;
1863 uint8_t cmd_reserved0[3];
1864 uint32_t cmd_syndrome;
1865 uint8_t cmd_reserved1[8];
1866 } __packed __aligned(4);
1867
1868 struct mcx_cmd_query_sq_mb_out {
1869 uint8_t cmd_reserved0[16];
1870 struct mcx_sq_ctx cmd_ctx;
1871 };
1872
/* ALLOC_FLOW_COUNTER: inline output with the new counter id. */
struct mcx_cmd_alloc_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_flow_counter_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

/* Work queue doorbell record: rx and tx counters, written together. */
struct mcx_wq_doorbell {
	uint32_t		 db_recv_counter;
	uint32_t		 db_send_counter;
} __packed __aligned(8);
1886
/* A DMA-able memory allocation: single segment, mapped into KVA. */
struct mcx_dmamem {
	bus_dmamap_t		mxm_map;
	bus_dma_segment_t	mxm_seg;
	int			mxm_nsegs;
	size_t			mxm_size;
	void			*mxm_kva;
};
/* Accessors for the map, device address, kernel address and length. */
#define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
#define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
#define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
#define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)

/* Pages given to the device (see mcx_pages()); may span many segments. */
struct mcx_hwmem {
	bus_dmamap_t		mhm_map;
	bus_dma_segment_t	*mhm_segs;
	unsigned int		mhm_seg_count;
	unsigned int		mhm_npages;
};

/* Per-descriptor state: a DMA map and the mbuf currently attached. */
struct mcx_slot {
	bus_dmamap_t		ms_map;
	struct mbuf		*ms_m;
};

/* Driver-side completion queue state. */
struct mcx_cq {
	int			cq_n;		/* CQ number assigned by firmware */
	struct mcx_dmamem	cq_mem;
	uint32_t		*cq_doorbell;
	uint32_t		cq_cons;	/* consumer index */
	uint32_t		cq_count;
};

/* Correlation of device timer and system clock, for rx timestamps. */
struct mcx_calibration {
	uint64_t		c_timestamp;	/* previous mcx chip time */
	uint64_t		c_uptime;	/* previous kernel nanouptime */
	uint64_t		c_tbase;	/* mcx chip time */
	uint64_t		c_ubase;	/* kernel nanouptime */
	uint64_t		c_tdiff;
	uint64_t		c_udiff;
};

/* Calibration intervals (presumably seconds, via sc_calibrate callout). */
#define MCX_CALIBRATE_FIRST		2
#define MCX_CALIBRATE_NORMAL	30

/* Rx buffer accounting: total ring size vs buffers currently posted. */
struct mcx_rxring {
	u_int			rxr_total;
	u_int			rxr_inuse;
};

MBUFQ_HEAD(mcx_mbufq);
1937
/* Per-device driver state. */
struct mcx_softc {
	device_t		sc_dev;
	struct ethercom		sc_ec;
	struct ifmedia		sc_media;
	uint64_t		sc_media_status;
	uint64_t		sc_media_active;

	/* PCI attachment */
	pci_chipset_tag_t	sc_pc;
	pci_intr_handle_t	*sc_intrs;
	void			*sc_ihs[MCX_MAX_NINTR];
	pcitag_t		sc_tag;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;

	/* firmware command queue */
	struct mcx_dmamem	sc_cmdq_mem;
	unsigned int		sc_cmdq_mask;
	unsigned int		sc_cmdq_size;

	unsigned int		sc_cmdq_token;	/* matches commands to completions */

	/* memory handed to the firmware at the various init stages */
	struct mcx_hwmem	sc_boot_pages;
	struct mcx_hwmem	sc_init_pages;
	struct mcx_hwmem	sc_regular_pages;

	/* ids of firmware-allocated objects */
	int			sc_uar;
	int			sc_pd;
	int			sc_tdomain;
	uint32_t		sc_lkey;	/* reserved lkey from QUERY_SPECIAL_CONTEXTS */

	struct mcx_dmamem	sc_doorbell_mem;

	/* event queue */
	int			sc_eqn;
	int			sc_eq_cons;
	struct mcx_dmamem	sc_eq_mem;
	int			sc_hardmtu;

	struct workqueue	*sc_workq;
	struct work		sc_port_change;	/* deferred link state handling */

	/* rx flow steering table and its groups */
	int			sc_flow_table_id;
#define MCX_FLOW_GROUP_PROMISC		 0
#define MCX_FLOW_GROUP_ALLMULTI		 1
#define MCX_FLOW_GROUP_MAC		 2
#define MCX_NUM_FLOW_GROUPS		 3
	int			sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
	int			sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
	int			sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
	int			sc_promisc_flow_enabled;
	int			sc_allmulti_flow_enabled;
	int			sc_mcast_flow_base;
	int			sc_extra_mcast;
	uint8_t			sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];

	/* timestamp calibration: two generations, see sc_calibration_gen */
	struct mcx_calibration	sc_calibration[2];
	unsigned int		sc_calibration_gen;
	callout_t		sc_calibrate;

	struct mcx_cq		sc_cq[MCX_MAX_CQS];
	int			sc_num_cq;

	/* rx */
	int			sc_tirn;
	int			sc_rqn;
	struct mcx_dmamem	sc_rq_mem;
	struct mcx_slot		*sc_rx_slots;
	uint32_t		*sc_rx_doorbell;

	uint32_t		sc_rx_prod;
	callout_t		sc_rx_refill;
	struct mcx_rxring	sc_rxr;

	/* tx */
	int			sc_tisn;
	int			sc_sqn;
	struct mcx_dmamem	sc_sq_mem;
	struct mcx_slot		*sc_tx_slots;
	uint32_t		*sc_tx_doorbell;
	int			sc_bf_size;	/* blueflame region size/offset */
	int			sc_bf_offset;

	uint32_t		sc_tx_cons;
	uint32_t		sc_tx_prod;

	uint64_t		sc_last_cq_db;
	uint64_t		sc_last_srq_db;
};
#define DEVNAME(_sc)	device_xname((_sc)->sc_dev)
2028
2029 static int mcx_match(device_t, cfdata_t, void *);
2030 static void mcx_attach(device_t, device_t, void *);
2031
2032 static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2033 static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2034 static void mcx_rxr_put(struct mcx_rxring *, u_int);
2035 static u_int mcx_rxr_inuse(struct mcx_rxring *);
2036
2037 static int mcx_version(struct mcx_softc *);
2038 static int mcx_init_wait(struct mcx_softc *);
2039 static int mcx_enable_hca(struct mcx_softc *);
2040 static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2041 static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2042 int);
2043 static int mcx_issi(struct mcx_softc *);
2044 static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2045 static int mcx_hca_max_caps(struct mcx_softc *);
2046 static int mcx_hca_set_caps(struct mcx_softc *);
2047 static int mcx_init_hca(struct mcx_softc *);
2048 static int mcx_set_driver_version(struct mcx_softc *);
2049 static int mcx_iff(struct mcx_softc *);
2050 static int mcx_alloc_uar(struct mcx_softc *);
2051 static int mcx_alloc_pd(struct mcx_softc *);
2052 static int mcx_alloc_tdomain(struct mcx_softc *);
2053 static int mcx_create_eq(struct mcx_softc *);
2054 static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2055 static int mcx_query_special_contexts(struct mcx_softc *);
2056 static int mcx_set_port_mtu(struct mcx_softc *, int);
2057 static int mcx_create_cq(struct mcx_softc *, int);
2058 static int mcx_destroy_cq(struct mcx_softc *, int);
2059 static int mcx_create_sq(struct mcx_softc *, int);
2060 static int mcx_destroy_sq(struct mcx_softc *);
2061 static int mcx_ready_sq(struct mcx_softc *);
2062 static int mcx_create_rq(struct mcx_softc *, int);
2063 static int mcx_destroy_rq(struct mcx_softc *);
2064 static int mcx_ready_rq(struct mcx_softc *);
2065 static int mcx_create_tir(struct mcx_softc *);
2066 static int mcx_destroy_tir(struct mcx_softc *);
2067 static int mcx_create_tis(struct mcx_softc *);
2068 static int mcx_destroy_tis(struct mcx_softc *);
2069 static int mcx_create_flow_table(struct mcx_softc *, int);
2070 static int mcx_set_flow_table_root(struct mcx_softc *);
2071 static int mcx_destroy_flow_table(struct mcx_softc *);
2072 static int mcx_create_flow_group(struct mcx_softc *, int, int,
2073 int, int, struct mcx_flow_match *);
2074 static int mcx_destroy_flow_group(struct mcx_softc *, int);
2075 static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2076 const uint8_t *);
2077 static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2078
2079 #if 0
2080 static int mcx_dump_flow_table(struct mcx_softc *);
2081 static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2082 static int mcx_dump_flow_group(struct mcx_softc *);
2083 static int mcx_dump_rq(struct mcx_softc *);
2084 static int mcx_dump_sq(struct mcx_softc *);
2085 #endif
2086
2087
2088 /*
2089 static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2090 static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2091 */
2092 static void mcx_refill(void *);
2093 static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2094 struct mcx_mbufq *, const struct mcx_calibration *);
2095 static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2096 int *);
2097 static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2098
2099 static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2100 static void mcx_arm_eq(struct mcx_softc *);
2101 static int mcx_intr(void *);
2102
2103 static int mcx_init(struct ifnet *);
2104 static void mcx_stop(struct ifnet *, int);
2105 static int mcx_ioctl(struct ifnet *, u_long, void *);
2106 static void mcx_start(struct ifnet *);
2107 static void mcx_watchdog(struct ifnet *);
2108 static void mcx_media_add_types(struct mcx_softc *);
2109 static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2110 static int mcx_media_change(struct ifnet *);
2111 #if 0
2112 static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2113 #endif
2114 static void mcx_port_change(struct work *, void *);
2115
2116 static void mcx_calibrate_first(struct mcx_softc *);
2117 static void mcx_calibrate(void *);
2118
2119 static inline uint32_t
2120 mcx_rd(struct mcx_softc *, bus_size_t);
2121 static inline void
2122 mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2123 static inline void
2124 mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2125
2126 static uint64_t mcx_timer(struct mcx_softc *);
2127
2128 static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2129 bus_size_t, u_int align);
2130 static void mcx_dmamem_zero(struct mcx_dmamem *);
2131 static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2132
2133 static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2134 unsigned int);
2135 static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2136
/* autoconfiguration glue: match/attach only, no detach or activate hooks */
CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2138
/*
 * PCI vendor/product pairs this driver attaches to (Mellanox
 * ConnectX family parts; see mcx_match below).
 */
static const struct {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} mcx_devices[] = {
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
};
2148
/*
 * Map from a protocol capability bit position to the corresponding
 * ifmedia word; entries that are 0 are capability bits with no
 * ifmedia equivalent.  The ORDER of entries is the protocol bit
 * number, so this table must not be reordered.
 * NOTE(review): assumed to mirror the PTYS eth_proto_capability
 * bit layout from the device register -- confirm against the PRM.
 */
static const uint64_t mcx_eth_cap_map[] = {
	IFM_1000_SGMII,		/* bit 0 */
	IFM_1000_KX,
	IFM_10G_CX4,
	IFM_10G_KX4,
	IFM_10G_KR,
	IFM_20G_KR2,
	IFM_40G_CR4,
	IFM_40G_KR4,
	IFM_56G_R4,
	0,			/* bits 9-11: no ifmedia type */
	0,
	0,
	IFM_10G_CR1,
	IFM_10G_SR,
	IFM_10G_LR,
	IFM_40G_SR4,
	IFM_40G_LR4,
	0,
	IFM_50G_SR2,
	0,
	IFM_100G_CR4,
	IFM_100G_SR4,
	IFM_100G_KR4,
	IFM_100G_LR4,
	IFM_100_TX,
	IFM_1000_T,
	IFM_10G_T,
	IFM_25G_CR,
	IFM_25G_KR,
	IFM_25G_SR,
	IFM_50G_CR2,
	IFM_50G_KR2		/* bit 31 */
};
2183
2184 static int
2185 mcx_match(device_t parent, cfdata_t cf, void *aux)
2186 {
2187 struct pci_attach_args *pa = aux;
2188 int n;
2189
2190 for (n = 0; n < __arraycount(mcx_devices); n++) {
2191 if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2192 PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2193 return 1;
2194 }
2195
2196 return 0;
2197 }
2198
/*
 * Autoconf attach: map BAR 0, bring up the firmware command queue,
 * walk the initialisation sequence (ENABLE_HCA, ISSI negotiation,
 * page handout, capability query/set, INIT_HCA), allocate the
 * per-device objects later queue creation needs (UAR, PD, transport
 * domain), establish the MSI-X interrupt and event queue, and attach
 * the network interface.  Failures unwind through the labels at the
 * bottom in reverse order of setup.
 */
void
mcx_attach(device_t parent, device_t self, void *aux)
{
	struct mcx_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct pci_attach_args *pa = aux;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int counts[PCI_INTR_TYPE_SIZE];
	char intrbuf[PCI_INTRSTR_LEN];
	pcireg_t memtype;
	uint32_t r;
	unsigned int cq_stride;
	unsigned int cq_size;
	const char *intrstr;
	int i;

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* Map the PCI memory space */
	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
	    0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
	    NULL, &sc->sc_mems)) {
		aprint_error(": unable to map register memory\n");
		return;
	}

	pci_aprint_devinfo(pa, "Ethernet controller");

	if (mcx_version(sc) != 0) {
		/* error printed by mcx_version */
		goto unmap;
	}

	/* the device advertises the command queue geometry in a register */
	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
	if (cq_size > MCX_MAX_CQE) {
		aprint_error_dev(self,
		    "command queue size overflow %u\n", cq_size);
		goto unmap;
	}
	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
		aprint_error_dev(self,
		    "command queue entry size underflow %u\n", cq_stride);
		goto unmap;
	}
	/* the whole command queue must fit in the single page we allocate */
	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
		aprint_error_dev(self, "command queue page overflow\n");
		goto unmap;
	}

	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE) != 0) {
		aprint_error_dev(self, "unable to allocate doorbell memory\n");
		goto unmap;
	}

	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE) != 0) {
		aprint_error_dev(self, "unable to allocate command queue\n");
		goto dbfree;
	}

	/* point the device at the command queue; write high word first */
	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);

	if (mcx_init_wait(sc) != 0) {
		aprint_error_dev(self, "timeout waiting for init\n");
		goto cqfree;
	}

	sc->sc_cmdq_mask = cq_size - 1;
	sc->sc_cmdq_size = cq_stride;

	if (mcx_enable_hca(sc) != 0) {
		/* error printed by mcx_enable_hca */
		goto cqfree;
	}

	if (mcx_issi(sc) != 0) {
		/* error printed by mcx_issi */
		goto teardown;
	}

	if (mcx_pages(sc, &sc->sc_boot_pages,
	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
		/* error printed by mcx_pages */
		goto teardown;
	}

	if (mcx_hca_max_caps(sc) != 0) {
		/* error printed by mcx_hca_max_caps */
		goto teardown;
	}

	if (mcx_hca_set_caps(sc) != 0) {
		/* error printed by mcx_hca_set_caps */
		goto teardown;
	}

	if (mcx_pages(sc, &sc->sc_init_pages,
	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
		/* error printed by mcx_pages */
		goto teardown;
	}

	if (mcx_init_hca(sc) != 0) {
		/* error printed by mcx_init_hca */
		goto teardown;
	}

	if (mcx_pages(sc, &sc->sc_regular_pages,
	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
		/* error printed by mcx_pages */
		goto teardown;
	}

	/* apparently not necessary? */
	if (mcx_set_driver_version(sc) != 0) {
		/* error printed by mcx_set_driver_version */
		goto teardown;
	}

	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
		/* error printed by mcx_iff? */
		goto teardown;
	}

	if (mcx_alloc_uar(sc) != 0) {
		/* error printed by mcx_alloc_uar */
		goto teardown;
	}

	if (mcx_alloc_pd(sc) != 0) {
		/* error printed by mcx_alloc_pd */
		goto teardown;
	}

	if (mcx_alloc_tdomain(sc) != 0) {
		/* error printed by mcx_alloc_tdomain */
		goto teardown;
	}

	/*
	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
	 * mellanox support tells me legacy interrupts are not supported,
	 * so we're stuck with just msi-x.
	 */
	counts[PCI_INTR_TYPE_MSIX] = 1;
	counts[PCI_INTR_TYPE_MSI] = 0;
	counts[PCI_INTR_TYPE_INTX] = 0;
	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
		aprint_error_dev(self, "unable to allocate interrupt\n");
		goto teardown;
	}
	KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);

#ifdef MCX_MPSAFE
	pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
#endif

	intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
	    IPL_NET, mcx_intr, sc, DEVNAME(sc));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "unable to establish interrupt%s%s\n",
		    intrstr ? " at " : "",
		    intrstr ? intrstr : "");
		goto teardown;
	}

	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * NOTE(review): failures from here on unwind to "teardown"
	 * without disestablishing the interrupt or destroying the EQ;
	 * verify whether the resulting leak on the error path matters.
	 */
	if (mcx_create_eq(sc) != 0) {
		/* error printed by mcx_create_eq */
		goto teardown;
	}

	if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
		/* error printed by mcx_query_nic_vport_context */
		goto teardown;
	}

	if (mcx_query_special_contexts(sc) != 0) {
		/* error printed by mcx_query_special_contexts */
		goto teardown;
	}

	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
		/* error printed by mcx_set_port_mtu */
		goto teardown;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/* set up the network interface */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
#ifdef MCX_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_init = mcx_init;
	ifp->if_stop = mcx_stop;
	ifp->if_ioctl = mcx_ioctl;
	ifp->if_start = mcx_start;
	ifp->if_watchdog = mcx_watchdog;
	ifp->if_mtu = sc->sc_hardmtu;
	IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
	    mcx_media_status);
	mcx_media_add_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
	callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);

	if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
	    PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "couldn't create port change workq\n");
		goto teardown;
	}

	/* run once by hand to pick up the initial link state */
	mcx_port_change(&sc->sc_port_change, sc);

	sc->sc_flow_table_id = -1;
	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
		sc->sc_flow_group_id[i] = -1;
		sc->sc_flow_group_size[i] = 0;
		sc->sc_flow_group_start[i] = 0;
	}
	sc->sc_extra_mcast = 0;
	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
	return;

teardown:
	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
	/* error printed by mcx_teardown_hca, and we're already unwinding */
cqfree:
	/* disable the command queue interface before freeing its memory */
	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
	    MCX_CMDQ_INTERFACE_DISABLED);
	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);

	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);

	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
dbfree:
	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}
2475
2476 static void
2477 mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2478 {
2479 rxr->rxr_total = hwm;
2480 rxr->rxr_inuse = 0;
2481 }
2482
2483 static u_int
2484 mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2485 {
2486 const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2487
2488 rxr->rxr_inuse += taken;
2489
2490 return taken;
2491 }
2492
2493 static void
2494 mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2495 {
2496 rxr->rxr_inuse -= n;
2497 }
2498
2499 static u_int
2500 mcx_rxr_inuse(struct mcx_rxring *rxr)
2501 {
2502 return rxr->rxr_inuse;
2503 }
2504
2505 static int
2506 mcx_version(struct mcx_softc *sc)
2507 {
2508 uint32_t fw0, fw1;
2509 uint16_t cmdif;
2510
2511 fw0 = mcx_rd(sc, MCX_FW_VER);
2512 fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2513
2514 aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2515 MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2516
2517 cmdif = MCX_CMDIF(fw1);
2518 if (cmdif != MCX_CMD_IF_SUPPORTED) {
2519 aprint_error_dev(sc->sc_dev,
2520 "unsupported command interface %u\n", cmdif);
2521 return (-1);
2522 }
2523
2524 return (0);
2525 }
2526
2527 static int
2528 mcx_init_wait(struct mcx_softc *sc)
2529 {
2530 unsigned int i;
2531 uint32_t r;
2532
2533 for (i = 0; i < 2000; i++) {
2534 r = mcx_rd(sc, MCX_STATE);
2535 if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2536 return (0);
2537
2538 delay(1000);
2539 mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2540 BUS_SPACE_BARRIER_READ);
2541 }
2542
2543 return (-1);
2544 }
2545
2546 static uint8_t
2547 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2548 unsigned int msec)
2549 {
2550 unsigned int i;
2551
2552 for (i = 0; i < msec; i++) {
2553 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2554 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2555
2556 if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2557 MCX_CQ_STATUS_OWN_SW) {
2558 if (sc->sc_eqn != 0)
2559 mcx_intr(sc);
2560 return (0);
2561 }
2562
2563 delay(1000);
2564 }
2565
2566 return (ETIMEDOUT);
2567 }
2568
2569 static uint32_t
2570 mcx_mix_u64(uint32_t xor, uint64_t u64)
2571 {
2572 xor ^= u64 >> 32;
2573 xor ^= u64;
2574
2575 return (xor);
2576 }
2577
2578 static uint32_t
2579 mcx_mix_u32(uint32_t xor, uint32_t u32)
2580 {
2581 xor ^= u32;
2582
2583 return (xor);
2584 }
2585
2586 static uint32_t
2587 mcx_mix_u8(uint32_t xor, uint8_t u8)
2588 {
2589 xor ^= u8;
2590
2591 return (xor);
2592 }
2593
2594 static uint8_t
2595 mcx_mix_done(uint32_t xor)
2596 {
2597 xor ^= xor >> 16;
2598 xor ^= xor >> 8;
2599
2600 return (xor);
2601 }
2602
/*
 * Compute the one-byte xor signature over a buffer, treating it as an
 * array of 32-bit words (any trailing partial word is ignored).  The
 * accumulator starts at 0xff and is folded down to a byte at the end,
 * exactly as mcx_mix_done() does.
 */
static uint8_t
mcx_xor(const void *buf, size_t len)
{
	const uint32_t *words = buf;
	uint32_t acc = 0xff;
	size_t i, n;

	n = len / sizeof(*words);
	for (i = 0; i < n; i++)
		acc ^= words[i];

	/* fold to a single byte (inline of mcx_mix_done) */
	acc ^= acc >> 16;
	acc ^= acc >> 8;

	return ((uint8_t)acc);
}
2617
2618 static uint8_t
2619 mcx_cmdq_token(struct mcx_softc *sc)
2620 {
2621 uint8_t token;
2622
2623 do {
2624 token = ++sc->sc_cmdq_token;
2625 } while (token == 0);
2626
2627 return (token);
2628 }
2629
2630 static void
2631 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2632 uint32_t ilen, uint32_t olen, uint8_t token)
2633 {
2634 memset(cqe, 0, sc->sc_cmdq_size);
2635
2636 cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2637 be32enc(&cqe->cq_input_length, ilen);
2638 be32enc(&cqe->cq_output_length, olen);
2639 cqe->cq_token = token;
2640 cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2641 }
2642
2643 static void
2644 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2645 {
2646 cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2647 }
2648
/*
 * Verify the signature of a completed command queue entry.
 * The real check is intentionally disabled (left in the comment
 * below); this always reports success.
 */
static int
mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
{
	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
	return (0);
}
2655
2656 static void *
2657 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2658 {
2659 return (&cqe->cq_input_data);
2660 }
2661
2662 static void *
2663 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2664 {
2665 return (&cqe->cq_output_data);
2666 }
2667
2668 static void
2669 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2670 unsigned int slot)
2671 {
2672 mcx_cmdq_sign(cqe);
2673
2674 bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2675 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2676
2677 mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2678 }
2679
2680 static int
2681 mcx_enable_hca(struct mcx_softc *sc)
2682 {
2683 struct mcx_cmdq_entry *cqe;
2684 struct mcx_cmd_enable_hca_in *in;
2685 struct mcx_cmd_enable_hca_out *out;
2686 int error;
2687 uint8_t status;
2688
2689 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2690 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2691
2692 in = mcx_cmdq_in(cqe);
2693 in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2694 in->cmd_op_mod = htobe16(0);
2695 in->cmd_function_id = htobe16(0);
2696
2697 mcx_cmdq_post(sc, cqe, 0);
2698
2699 error = mcx_cmdq_poll(sc, cqe, 1000);
2700 if (error != 0) {
2701 printf(", hca enable timeout\n");
2702 return (-1);
2703 }
2704 if (mcx_cmdq_verify(cqe) != 0) {
2705 printf(", hca enable command corrupt\n");
2706 return (-1);
2707 }
2708
2709 status = cqe->cq_output_data[0];
2710 if (status != MCX_CQ_STATUS_OK) {
2711 printf(", hca enable failed (%x)\n", status);
2712 return (-1);
2713 }
2714
2715 return (0);
2716 }
2717
2718 static int
2719 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2720 {
2721 struct mcx_cmdq_entry *cqe;
2722 struct mcx_cmd_teardown_hca_in *in;
2723 struct mcx_cmd_teardown_hca_out *out;
2724 int error;
2725 uint8_t status;
2726
2727 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2728 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2729
2730 in = mcx_cmdq_in(cqe);
2731 in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2732 in->cmd_op_mod = htobe16(0);
2733 in->cmd_profile = profile;
2734
2735 mcx_cmdq_post(sc, cqe, 0);
2736
2737 error = mcx_cmdq_poll(sc, cqe, 1000);
2738 if (error != 0) {
2739 printf(", hca teardown timeout\n");
2740 return (-1);
2741 }
2742 if (mcx_cmdq_verify(cqe) != 0) {
2743 printf(", hca teardown command corrupt\n");
2744 return (-1);
2745 }
2746
2747 status = cqe->cq_output_data[0];
2748 if (status != MCX_CQ_STATUS_OK) {
2749 printf(", hca teardown failed (%x)\n", status);
2750 return (-1);
2751 }
2752
2753 return (0);
2754 }
2755
/*
 * Allocate nmb command mailboxes as one contiguous DMA allocation and
 * chain them together.  *ptr is the big-endian pointer field (in the
 * command queue entry, or in the previous mailbox) that gets patched
 * to the DMA address of each mailbox in turn, so the device can walk
 * the chain.  Each mailbox records its block number and the command
 * token.  Returns 0 on success or the mcx_dmamem_alloc error.
 */
static int
mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    unsigned int nmb, uint64_t *ptr, uint8_t token)
{
	uint8_t *kva;
	uint64_t dva;
	int i;
	int error;

	error = mcx_dmamem_alloc(sc, mxm,
	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
	if (error != 0)
		return (error);

	mcx_dmamem_zero(mxm);

	dva = MCX_DMA_DVA(mxm);
	kva = MCX_DMA_KVA(mxm);
	for (i = 0; i < nmb; i++) {
		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;

		/* patch the cqe or mbox pointing at this one */
		be64enc(ptr, dva);

		/* fill in this mbox */
		be32enc(&mbox->mb_block_number, i);
		mbox->mb_token = token;

		/* move to the next one */
		ptr = &mbox->mb_next_ptr;

		dva += MCX_CMDQ_MAILBOX_SIZE;
		kva += MCX_CMDQ_MAILBOX_SIZE;
	}

	return (0);
}
2793
2794 static uint32_t
2795 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2796 {
2797 uint32_t xor = 0xff;
2798
2799 /* only 3 fields get set, so mix them directly */
2800 xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2801 xor = mcx_mix_u32(xor, mb->mb_block_number);
2802 xor = mcx_mix_u8(xor, mb->mb_token);
2803
2804 return (mcx_mix_done(xor));
2805 }
2806
2807 static void
2808 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2809 {
2810 uint8_t *kva;
2811 int i;
2812
2813 kva = MCX_DMA_KVA(mxm);
2814
2815 for (i = 0; i < nmb; i++) {
2816 struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2817 uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2818 mb->mb_ctrl_signature = sig;
2819 mb->mb_signature = sig ^
2820 mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2821
2822 kva += MCX_CMDQ_MAILBOX_SIZE;
2823 }
2824 }
2825
/* Sync the whole mailbox chain with the given bus_dma ops. */
static void
mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
	    0, MCX_DMA_LEN(mxm), ops);
}
2832
2833 static struct mcx_cmdq_mailbox *
2834 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2835 {
2836 uint8_t *kva;
2837
2838 kva = MCX_DMA_KVA(mxm);
2839 kva += i * MCX_CMDQ_MAILBOX_SIZE;
2840
2841 return ((struct mcx_cmdq_mailbox *)kva);
2842 }
2843
/* Data area of a mailbox. */
static inline void *
mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
{
	return (&mb->mb_data);
}
2849
2850 static void
2851 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2852 void *b, size_t len)
2853 {
2854 uint8_t *buf = b;
2855 struct mcx_cmdq_mailbox *mb;
2856 int i;
2857
2858 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2859 for (i = 0; i < nmb; i++) {
2860
2861 memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2862
2863 if (sizeof(mb->mb_data) >= len)
2864 break;
2865
2866 buf += sizeof(mb->mb_data);
2867 len -= sizeof(mb->mb_data);
2868 mb++;
2869 }
2870 }
2871
2872 static void
2873 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2874 {
2875 uint8_t *buf = b;
2876 struct mcx_cmdq_mailbox *mb;
2877 int i;
2878
2879 mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2880 for (i = 0; i < nmb; i++) {
2881 memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2882
2883 if (sizeof(mb->mb_data) >= len)
2884 break;
2885
2886 buf += sizeof(mb->mb_data);
2887 len -= sizeof(mb->mb_data);
2888 mb++;
2889 }
2890 }
2891
/* Release a mailbox chain allocated by mcx_cmdq_mboxes_alloc(). */
static void
mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	mcx_dmamem_free(sc, mxm);
}
2897
2898 #if 0
/*
 * Debug helper (currently compiled out): dump every field of a
 * command queue entry in hex.
 * NOTE(review): "%016llx" with be64dec()'s uint64_t result assumes
 * uint64_t is unsigned long long; use PRIx64 if this is re-enabled.
 */
static void
mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
{
	unsigned int i;

	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
	    be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));

	printf(", idata ");
	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
		printf("%02x", cqe->cq_input_data[i]);

	printf(", odata ");
	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
		printf("%02x", cqe->cq_output_data[i]);

	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
	    be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
}
2919
/*
 * Debug helper (currently compiled out): hex-dump the data area of
 * each mailbox in a chain, 16 bytes per line.
 */
static void
mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
{
	int i, j;
	uint8_t *d;

	for (i = 0; i < num; i++) {
		struct mcx_cmdq_mailbox *mbox;
		mbox = mcx_cq_mbox(mboxes, i);

		d = mcx_cq_mbox_data(mbox);
		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
			if (j != 0 && (j % 16 == 0))
				printf("\n");
			printf("%.2x ", d[j]);
		}
	}
}
2938 #endif
2939
2940 static int
2941 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2942 int len)
2943 {
2944 struct mcx_dmamem mxm;
2945 struct mcx_cmdq_entry *cqe;
2946 struct mcx_cmd_access_reg_in *in;
2947 struct mcx_cmd_access_reg_out *out;
2948 uint8_t token = mcx_cmdq_token(sc);
2949 int error, nmb;
2950
2951 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2952 mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2953 token);
2954
2955 in = mcx_cmdq_in(cqe);
2956 in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2957 in->cmd_op_mod = htobe16(op);
2958 in->cmd_register_id = htobe16(reg);
2959
2960 nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2961 if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2962 printf(", unable to allocate access reg mailboxen\n");
2963 return (-1);
2964 }
2965 cqe->cq_input_ptr = cqe->cq_output_ptr;
2966 mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2967 mcx_cmdq_mboxes_sign(&mxm, nmb);
2968 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2969
2970 mcx_cmdq_post(sc, cqe, 0);
2971 error = mcx_cmdq_poll(sc, cqe, 1000);
2972 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2973
2974 if (error != 0) {
2975 printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2976 (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2977 goto free;
2978 }
2979 error = mcx_cmdq_verify(cqe);
2980 if (error != 0) {
2981 printf("%s: access reg (%s %x) reply corrupt\n",
2982 (op == MCX_REG_OP_WRITE ? "write" : "read"), DEVNAME(sc),
2983 reg);
2984 goto free;
2985 }
2986
2987 out = mcx_cmdq_out(cqe);
2988 if (out->cmd_status != MCX_CQ_STATUS_OK) {
2989 printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2990 DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2991 reg, out->cmd_status, out->cmd_syndrome);
2992 error = -1;
2993 goto free;
2994 }
2995
2996 mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
2997 free:
2998 mcx_dmamem_free(sc, &mxm);
2999
3000 return (error);
3001 }
3002
3003 static int
3004 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
3005 {
3006 struct mcx_cmd_set_issi_in *in;
3007 struct mcx_cmd_set_issi_out *out;
3008 uint8_t status;
3009
3010 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3011
3012 in = mcx_cmdq_in(cqe);
3013 in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3014 in->cmd_op_mod = htobe16(0);
3015 in->cmd_current_issi = htobe16(MCX_ISSI);
3016
3017 mcx_cmdq_post(sc, cqe, slot);
3018 if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3019 return (-1);
3020 if (mcx_cmdq_verify(cqe) != 0)
3021 return (-1);
3022
3023 status = cqe->cq_output_data[0];
3024 if (status != MCX_CQ_STATUS_OK)
3025 return (-1);
3026
3027 return (0);
3028 }
3029
/*
 * Negotiate the ISSI (interface step sequence) with the firmware.
 * Query the supported set; old firmware that rejects the query, or
 * firmware that does not support MCX_ISSI, leaves us on ISSI 0.
 * Otherwise try SET_ISSI; on success ISSI 1 is in effect.  All of the
 * "use ISSI 0/1" paths leave error at 0, so only a hard command
 * failure returns non-zero.
 */
static int
mcx_issi(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_issi_in *in;
	struct mcx_cmd_query_issi_il_out *out;
	struct mcx_cmd_query_issi_mb_out *mb;
	uint8_t token = mcx_cmdq_token(sc);
	uint8_t status;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query issi mailbox\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", query issi timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query issi reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	switch (status) {
	case MCX_CQ_STATUS_OK:
		break;
	case MCX_CQ_STATUS_BAD_OPCODE:
		/* firmware predates ISSI; use ISSI 0 */
		goto free;
	default:
		printf(", query issi failed (%x)\n", status);
		error = -1;
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
		/* already at our level; use ISSI 1 */
		goto free;
	}

	/* don't need to read cqe anymore, can be used for SET ISSI */

	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	CTASSERT(MCX_ISSI < NBBY);
	/*
	 * XXX math is hard
	 * NOTE(review): byte 79 is the last byte of the 80-byte
	 * supported-ISSI bitmap; assumed to hold the lowest-numbered
	 * ISSI bits (big-endian layout) -- confirm against the PRM.
	 */
	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
		/* device can't do MCX_ISSI; use ISSI 0 */
		goto free;
	}

	if (mcx_set_issi(sc, cqe, 0) != 0) {
		/* ignore the error, just use ISSI 0 */
	} else {
		/* use ISSI 1 */
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
3108
3109 static int
3110 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3111 uint32_t *npages, uint16_t *func_id)
3112 {
3113 struct mcx_cmdq_entry *cqe;
3114 struct mcx_cmd_query_pages_in *in;
3115 struct mcx_cmd_query_pages_out *out;
3116
3117 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3118 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3119
3120 in = mcx_cmdq_in(cqe);
3121 in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3122 in->cmd_op_mod = type;
3123
3124 mcx_cmdq_post(sc, cqe, 0);
3125 if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3126 printf(", query pages timeout\n");
3127 return (-1);
3128 }
3129 if (mcx_cmdq_verify(cqe) != 0) {
3130 printf(", query pages reply corrupt\n");
3131 return (-1);
3132 }
3133
3134 out = mcx_cmdq_out(cqe);
3135 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3136 printf(", query pages failed (%x)\n", out->cmd_status);
3137 return (-1);
3138 }
3139
3140 *func_id = out->cmd_func_id;
3141 *npages = be32dec(&out->cmd_num_pages);
3142
3143 return (0);
3144 }
3145
/*
 * Iterator over the physical segments of a loaded DMA map, used below
 * to hand page-sized chunks of hwmem to the firmware.
 */
struct bus_dma_iter {
	bus_dmamap_t		i_map;		/* map being walked */
	bus_size_t		i_offset;	/* offset within current segment */
	unsigned int		i_index;	/* current segment index */
};
3151
3152 static void
3153 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3154 {
3155 i->i_map = map;
3156 i->i_offset = 0;
3157 i->i_index = 0;
3158 }
3159
3160 static bus_addr_t
3161 bus_dma_iter_addr(struct bus_dma_iter *i)
3162 {
3163 return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3164 }
3165
/*
 * Advance the iterator by size bytes, stepping across segment
 * boundaries: whole segments are consumed while size reaches past the
 * remainder of the current one, and whatever is left is added to the
 * offset within the segment we land in.  The caller must not advance
 * past the end of the map.
 */
static void
bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
{
	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
	bus_size_t diff;

	do {
		/* bytes remaining in the current segment */
		diff = seg->ds_len - i->i_offset;
		if (size < diff)
			break;

		size -= diff;

		seg++;

		i->i_offset = 0;
		i->i_index++;
	} while (size > 0);

	i->i_offset += size;
}
3187
/*
 * Hand the pages backing mhm to the firmware with a
 * MANAGE_PAGES(ALLOC_SUCCESS) command.  The physical address of every
 * page is written big-endian into a chain of mailboxes, walking the
 * hwmem DMA map one MCX_PAGE_SIZE step at a time.  Returns 0 on
 * success, -1 on failure; the mailboxes are freed either way.
 */
static int
mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_manage_pages_in *in;
	struct mcx_cmd_manage_pages_out *out;
	unsigned int paslen, nmb, i, j, npages;
	struct bus_dma_iter iter;
	uint64_t *pas;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	npages = mhm->mhm_npages;

	/* one 64-bit address per page, spread over however many mailboxes */
	paslen = sizeof(*pas) * npages;
	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
	in->cmd_func_id = func_id;
	be32enc(&in->cmd_input_num_entries, npages);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
	    &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate manage pages mailboxen\n");
		return (-1);
	}

	/* fill each mailbox with as many page addresses as fit */
	bus_dma_iter_init(&iter, mhm->mhm_map);
	for (i = 0; i < nmb; i++) {
		unsigned int lim;

		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
		lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);

		for (j = 0; j < lim; j++) {
			be64enc(&pas[j], bus_dma_iter_addr(&iter));
			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
		}

		npages -= lim;
	}

	mcx_cmdq_mboxes_sign(&mxm, nmb);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", manage pages timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", manage pages reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", manage pages failed (%x)\n", status);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3263
3264 static int
3265 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3266 {
3267 uint32_t npages;
3268 uint16_t func_id;
3269
3270 if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3271 /* error printed by mcx_query_pages */
3272 return (-1);
3273 }
3274
3275 if (npages == 0)
3276 return (0);
3277
3278 if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3279 printf(", unable to allocate hwmem\n");
3280 return (-1);
3281 }
3282
3283 if (mcx_add_pages(sc, mhm, func_id) != 0) {
3284 printf(", unable to add hwmem\n");
3285 goto free;
3286 }
3287
3288 return (0);
3289
3290 free:
3291 mcx_hwmem_free(sc, mhm);
3292
3293 return (-1);
3294 }
3295
/*
 * Read the device's maximum HCA capabilities (QUERY_HCA_CAP with the
 * "max" op_mod) into command mailboxes and extract what the driver
 * needs: a check that the device's minimum page shift fits within the
 * host PAGE_SHIFT, and the blueflame register size.
 * Returns 0 on success, -1 or the command-queue error on failure.
 */
static int
mcx_hca_max_caps(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_hca_cap_in *in;
	struct mcx_cmd_query_hca_cap_out *out;
	struct mcx_cmdq_mailbox *mb;
	struct mcx_cap_device *hca;
	uint8_t status;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
	    MCX_CMD_QUERY_HCA_CAP_DEVICE);

	/* the capability structure is too big for the cqe's inline data */
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query hca caps mailboxen\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	/* sync the mailboxes back before reading the device's reply */
	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);

	if (error != 0) {
		printf(", query hca caps timeout\n");
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf(", query hca caps reply corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", query hca caps failed (%x)\n", status);
		error = -1;
		goto free;
	}

	/* the device capability structure starts in the first mailbox */
	mb = mcx_cq_mbox(&mxm, 0);
	hca = mcx_cq_mbox_data(mb);

	/*
	 * The device's minimum supported page shift must not exceed the
	 * host page shift, or we cannot give it usable pages.
	 */
	if (hca->log_pg_sz > PAGE_SHIFT) {
		printf(", minimum system page shift %u is too large\n",
		    hca->log_pg_sz);
		error = -1;
		goto free;
	}
	/*
	 * blueflame register is split into two buffers, and we must alternate
	 * between the two of them.
	 */
	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3367
3368 static int
3369 mcx_hca_set_caps(struct mcx_softc *sc)
3370 {
3371 struct mcx_dmamem mxm;
3372 struct mcx_cmdq_entry *cqe;
3373 struct mcx_cmd_query_hca_cap_in *in;
3374 struct mcx_cmd_query_hca_cap_out *out;
3375 struct mcx_cmdq_mailbox *mb;
3376 struct mcx_cap_device *hca;
3377 uint8_t status;
3378 uint8_t token = mcx_cmdq_token(sc);
3379 int error;
3380
3381 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3382 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3383 token);
3384
3385 in = mcx_cmdq_in(cqe);
3386 in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3387 in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3388 MCX_CMD_QUERY_HCA_CAP_DEVICE);
3389
3390 if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3391 &cqe->cq_output_ptr, token) != 0) {
3392 printf(", unable to allocate manage pages mailboxen\n");
3393 return (-1);
3394 }
3395 mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3396 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3397
3398 mcx_cmdq_post(sc, cqe, 0);
3399 error = mcx_cmdq_poll(sc, cqe, 1000);
3400 mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3401
3402 if (error != 0) {
3403 printf(", query hca caps timeout\n");
3404 goto free;
3405 }
3406 error = mcx_cmdq_verify(cqe);
3407 if (error != 0) {
3408 printf(", query hca caps reply corrupt\n");
3409 goto free;
3410 }
3411
3412 status = cqe->cq_output_data[0];
3413 if (status != MCX_CQ_STATUS_OK) {
3414 printf(", query hca caps failed (%x)\n", status);
3415 error = -1;
3416 goto free;
3417 }
3418
3419 mb = mcx_cq_mbox(&mxm, 0);
3420 hca = mcx_cq_mbox_data(mb);
3421
3422 hca->log_pg_sz = PAGE_SHIFT;
3423
3424 free:
3425 mcx_dmamem_free(sc, &mxm);
3426
3427 return (error);
3428 }
3429
3430
3431 static int
3432 mcx_init_hca(struct mcx_softc *sc)
3433 {
3434 struct mcx_cmdq_entry *cqe;
3435 struct mcx_cmd_init_hca_in *in;
3436 struct mcx_cmd_init_hca_out *out;
3437 int error;
3438 uint8_t status;
3439
3440 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3441 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3442
3443 in = mcx_cmdq_in(cqe);
3444 in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3445 in->cmd_op_mod = htobe16(0);
3446
3447 mcx_cmdq_post(sc, cqe, 0);
3448
3449 error = mcx_cmdq_poll(sc, cqe, 1000);
3450 if (error != 0) {
3451 printf(", hca init timeout\n");
3452 return (-1);
3453 }
3454 if (mcx_cmdq_verify(cqe) != 0) {
3455 printf(", hca init command corrupt\n");
3456 return (-1);
3457 }
3458
3459 status = cqe->cq_output_data[0];
3460 if (status != MCX_CQ_STATUS_OK) {
3461 printf(", hca init failed (%x)\n", status);
3462 return (-1);
3463 }
3464
3465 return (0);
3466 }
3467
/*
 * Report the driver name/version string to the firmware via
 * SET_DRIVER_VERSION. The string is purely informational (it shows up
 * in firmware diagnostics); failure here is reported to the caller.
 * Returns 0 on success, -1 or the command-queue error on failure.
 */
static int
mcx_set_driver_version(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_set_driver_version_in *in;
	struct mcx_cmd_set_driver_version_out *out;
	int error;
	int token;
	uint8_t status;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	/* the version string travels in a mailbox appended to the input */
	mcx_cmdq_init(sc, cqe, sizeof(*in) +
	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
	    &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate set driver version mailboxen\n");
		return (-1);
	}
	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", set driver version timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", set driver version command corrupt\n");
		goto free;
	}

	status = cqe->cq_output_data[0];
	if (status != MCX_CQ_STATUS_OK) {
		printf(", set driver version failed (%x)\n", status);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3521
/*
 * Apply the interface's filter flags: install or remove the promisc
 * and all-multicast flow table entries to match IFF_PROMISC and
 * IFF_ALLMULTI, then push the hardware MTU and promisc setting into
 * the NIC vport context. Returns 0 on success, -1 or the
 * command-queue error on failure.
 */
static int
mcx_iff(struct mcx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_modify_nic_vport_context_in *in;
	struct mcx_cmd_modify_nic_vport_context_out *out;
	struct mcx_nic_vport_ctx *ctx;
	int error;
	int token;
	int insize;

	/* enable or disable the promisc flow */
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		if (sc->sc_promisc_flow_enabled == 0) {
			/* NULL dest addr: the entry matches everything */
			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
			    0, NULL);
			sc->sc_promisc_flow_enabled = 1;
		}
	} else if (sc->sc_promisc_flow_enabled != 0) {
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
		sc->sc_promisc_flow_enabled = 0;
	}

	/* enable or disable the all-multicast flow */
	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		if (sc->sc_allmulti_flow_enabled == 0) {
			uint8_t mcast[ETHER_ADDR_LEN];

			/* match on the multicast bit of the first byte */
			memset(mcast, 0, sizeof(mcast));
			mcast[0] = 0x01;
			mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
			    0, mcast);
			sc->sc_allmulti_flow_enabled = 1;
		}
	} else if (sc->sc_allmulti_flow_enabled != 0) {
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
		sc->sc_allmulti_flow_enabled = 0;
	}

	/*
	 * The vport context sits at offset 240 in the command input --
	 * presumably the PRM's MODIFY_NIC_VPORT_CONTEXT layout; confirm
	 * against the vendor documentation.
	 */
	insize = sizeof(struct mcx_nic_vport_ctx) + 240;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
	in->cmd_op_mod = htobe16(0);
	/* only touch the promisc and MTU fields of the vport context */
	in->cmd_field_select = htobe32(
	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate modify nic vport context mailboxen\n");
		return (-1);
	}
	ctx = (struct mcx_nic_vport_ctx *)
	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
	/*
	 * always leave promisc-all enabled on the vport since we can't give it
	 * a vlan list, and we're already doing multicast filtering in the flow
	 * table.
	 */
	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", modify nic vport context timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", modify nic vport context command corrupt\n");
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", modify nic vport context failed (%x, %x)\n",
		    out->cmd_status, out->cmd_syndrome);
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3616
3617 static int
3618 mcx_alloc_uar(struct mcx_softc *sc)
3619 {
3620 struct mcx_cmdq_entry *cqe;
3621 struct mcx_cmd_alloc_uar_in *in;
3622 struct mcx_cmd_alloc_uar_out *out;
3623 int error;
3624
3625 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3626 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3627
3628 in = mcx_cmdq_in(cqe);
3629 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3630 in->cmd_op_mod = htobe16(0);
3631
3632 mcx_cmdq_post(sc, cqe, 0);
3633
3634 error = mcx_cmdq_poll(sc, cqe, 1000);
3635 if (error != 0) {
3636 printf(", alloc uar timeout\n");
3637 return (-1);
3638 }
3639 if (mcx_cmdq_verify(cqe) != 0) {
3640 printf(", alloc uar command corrupt\n");
3641 return (-1);
3642 }
3643
3644 out = mcx_cmdq_out(cqe);
3645 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3646 printf(", alloc uar failed (%x)\n", out->cmd_status);
3647 return (-1);
3648 }
3649
3650 sc->sc_uar = be32toh(out->cmd_uar);
3651
3652 return (0);
3653 }
3654
/*
 * Create the driver's event queue: allocate page-aligned DMA memory
 * for the EQ ring, initialize every entry's ownership bit, and issue
 * CREATE_EQ with the ring's physical pages and the event types we
 * care about. On success the EQ number is saved in sc->sc_eqn and the
 * queue is armed. Returns 0 on success, -1 or the command-queue error
 * on failure.
 *
 * NOTE(review): if mailbox allocation fails, sc->sc_eq_mem allocated
 * above is not freed before returning -- presumably torn down by the
 * attach failure path; confirm.
 */
static int
mcx_create_eq(struct mcx_softc *sc)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_eq_in *in;
	struct mcx_cmd_create_eq_mb_in *mbin;
	struct mcx_cmd_create_eq_out *out;
	struct mcx_eq_entry *eqe;
	int error;
	uint64_t *pas;
	int insize, npages, paslen, i, token;

	sc->sc_eq_cons = 0;

	/* enough whole pages to hold 2^MCX_LOG_EQ_SIZE entries */
	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
	    MCX_PAGE_SIZE);
	paslen = npages * sizeof(*pas);
	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;

	if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE) != 0) {
		printf(", unable to allocate event queue memory\n");
		return (-1);
	}

	/* hand initial ownership of every entry to the hardware */
	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
	}

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
	    &cqe->cq_input_ptr, token) != 0) {
		printf(", unable to allocate create eq mailboxen\n");
		return (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	/* log2 queue size and the UAR share one 32-bit field */
	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
	/* subscribe only to the events the interrupt handler understands */
	mbin->cmd_event_bitmask = htobe64(
	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));

	/* physical addresses follow the mailbox in data */
	pas = (uint64_t *)(mbin + 1);
	for (i = 0; i < npages; i++) {
		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
		    (i * MCX_PAGE_SIZE));
	}
	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", create eq timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", create eq command corrupt\n");
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", create eq failed (%x, %x)\n", out->cmd_status,
		    be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	sc->sc_eqn = be32toh(out->cmd_eqn);
	/* arm the queue so the first event raises an interrupt */
	mcx_arm_eq(sc);
free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}
3741
3742 static int
3743 mcx_alloc_pd(struct mcx_softc *sc)
3744 {
3745 struct mcx_cmdq_entry *cqe;
3746 struct mcx_cmd_alloc_pd_in *in;
3747 struct mcx_cmd_alloc_pd_out *out;
3748 int error;
3749
3750 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3751 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3752
3753 in = mcx_cmdq_in(cqe);
3754 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3755 in->cmd_op_mod = htobe16(0);
3756
3757 mcx_cmdq_post(sc, cqe, 0);
3758
3759 error = mcx_cmdq_poll(sc, cqe, 1000);
3760 if (error != 0) {
3761 printf(", alloc pd timeout\n");
3762 return (-1);
3763 }
3764 if (mcx_cmdq_verify(cqe) != 0) {
3765 printf(", alloc pd command corrupt\n");
3766 return (-1);
3767 }
3768
3769 out = mcx_cmdq_out(cqe);
3770 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3771 printf(", alloc pd failed (%x)\n", out->cmd_status);
3772 return (-1);
3773 }
3774
3775 sc->sc_pd = be32toh(out->cmd_pd);
3776 return (0);
3777 }
3778
3779 static int
3780 mcx_alloc_tdomain(struct mcx_softc *sc)
3781 {
3782 struct mcx_cmdq_entry *cqe;
3783 struct mcx_cmd_alloc_td_in *in;
3784 struct mcx_cmd_alloc_td_out *out;
3785 int error;
3786
3787 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3788 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3789
3790 in = mcx_cmdq_in(cqe);
3791 in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3792 in->cmd_op_mod = htobe16(0);
3793
3794 mcx_cmdq_post(sc, cqe, 0);
3795
3796 error = mcx_cmdq_poll(sc, cqe, 1000);
3797 if (error != 0) {
3798 printf(", alloc transport domain timeout\n");
3799 return (-1);
3800 }
3801 if (mcx_cmdq_verify(cqe) != 0) {
3802 printf(", alloc transport domain command corrupt\n");
3803 return (-1);
3804 }
3805
3806 out = mcx_cmdq_out(cqe);
3807 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3808 printf(", alloc transport domain failed (%x)\n",
3809 out->cmd_status);
3810 return (-1);
3811 }
3812
3813 sc->sc_tdomain = be32toh(out->cmd_tdomain);
3814 return (0);
3815 }
3816
/*
 * Read the NIC vport context from the firmware and copy the permanent
 * MAC address out of it into 'enaddr' (ETHER_ADDR_LEN bytes).
 * Returns 0 on success, -1 or the command-queue error on failure.
 */
static int
mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_nic_vport_context_in *in;
	struct mcx_cmd_query_nic_vport_context_out *out;
	struct mcx_nic_vport_ctx *ctx;
	uint8_t *addr;
	int error, token, i;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
	in->cmd_op_mod = htobe16(0);
	in->cmd_allowed_list_type = 0;

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query nic vport context mailboxen\n");
		return (-1);
	}
	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf(", query nic vport context timeout\n");
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf(", query nic vport context command corrupt\n");
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf(", query nic vport context failed (%x, %x)\n",
		    out->cmd_status, out->cmd_syndrome);
		error = -1;
		goto free;
	}

	/*
	 * vp_perm_addr is an 8-byte field with the 6-byte MAC in its
	 * last six bytes, hence the +2 offset when copying it out.
	 */
	ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	addr = (uint8_t *)&ctx->vp_perm_addr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		enaddr[i] = addr[i + 2];
	}
free:
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
3872
3873 static int
3874 mcx_query_special_contexts(struct mcx_softc *sc)
3875 {
3876 struct mcx_cmdq_entry *cqe;
3877 struct mcx_cmd_query_special_ctx_in *in;
3878 struct mcx_cmd_query_special_ctx_out *out;
3879 int error;
3880
3881 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3882 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3883
3884 in = mcx_cmdq_in(cqe);
3885 in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3886 in->cmd_op_mod = htobe16(0);
3887
3888 mcx_cmdq_post(sc, cqe, 0);
3889
3890 error = mcx_cmdq_poll(sc, cqe, 1000);
3891 if (error != 0) {
3892 printf(", query special contexts timeout\n");
3893 return (-1);
3894 }
3895 if (mcx_cmdq_verify(cqe) != 0) {
3896 printf(", query special contexts command corrupt\n");
3897 return (-1);
3898 }
3899
3900 out = mcx_cmdq_out(cqe);
3901 if (out->cmd_status != MCX_CQ_STATUS_OK) {
3902 printf(", query special contexts failed (%x)\n",
3903 out->cmd_status);
3904 return (-1);
3905 }
3906
3907 sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3908 return (0);
3909 }
3910
3911 static int
3912 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3913 {
3914 struct mcx_reg_pmtu pmtu;
3915 int error;
3916
3917 /* read max mtu */
3918 memset(&pmtu, 0, sizeof(pmtu));
3919 pmtu.rp_local_port = 1;
3920 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3921 sizeof(pmtu));
3922 if (error != 0) {
3923 printf(", unable to get port MTU\n");
3924 return error;
3925 }
3926
3927 mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3928 pmtu.rp_admin_mtu = htobe16(mtu);
3929 error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3930 sizeof(pmtu));
3931 if (error != 0) {
3932 printf(", unable to set port MTU\n");
3933 return error;
3934 }
3935
3936 sc->sc_hardmtu = mtu;
3937 return 0;
3938 }
3939
/*
 * Create the next completion queue (sc->sc_cq[sc->sc_num_cq]):
 * allocate page-aligned DMA memory for the CQ ring, set every entry's
 * owner flag, and issue CREATE_CQ bound to event queue 'eqn'. On
 * success the CQ number and doorbell pointer are recorded and the
 * queue is armed. Returns 0 on success, -1 or the command-queue error
 * on failure.
 *
 * NOTE(review): on the error paths cq->cq_mem is not freed, and the
 * mailbox-allocation failure path jumps to 'free' where mxm is passed
 * to mcx_dmamem_free even though its allocation just failed --
 * presumably mcx_cmdq_mboxes_alloc leaves mxm in a freeable state;
 * confirm.
 */
static int
mcx_create_cq(struct mcx_softc *sc, int eqn)
{
	struct mcx_cmdq_entry *cmde;
	struct mcx_cq_entry *cqe;
	struct mcx_cq *cq;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_cq_in *in;
	struct mcx_cmd_create_cq_mb_in *mbin;
	struct mcx_cmd_create_cq_out *out;
	int error;
	uint64_t *pas;
	int insize, npages, paslen, i, token;

	if (sc->sc_num_cq >= MCX_MAX_CQS) {
		printf("%s: tried to create too many cqs\n", DEVNAME(sc));
		return (-1);
	}
	cq = &sc->sc_cq[sc->sc_num_cq];

	/* enough whole pages to hold 2^MCX_LOG_CQ_SIZE entries */
	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
	    MCX_PAGE_SIZE);
	paslen = npages * sizeof(*pas);
	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;

	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE) != 0) {
		printf("%s: unable to allocate completion queue memory\n",
		    DEVNAME(sc));
		return (-1);
	}
	/* hand initial ownership of every entry to the hardware */
	cqe = MCX_DMA_KVA(&cq->cq_mem);
	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
	}

	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);

	in = mcx_cmdq_in(cmde);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
	    &cmde->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
		error = -1;
		goto free;
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	/* log2 queue size and the UAR share one 32-bit field */
	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
	/* interrupt moderation: period and completion count thresholds */
	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
	    MCX_CQ_MOD_COUNTER);
	/* each cq gets its own doorbell slot in the doorbell page */
	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
	    MCX_DMA_DVA(&sc->sc_doorbell_mem) +
	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));

	/* physical addresses follow the mailbox in data */
	pas = (uint64_t *)(mbin + 1);
	for (i = 0; i < npages; i++) {
		pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
	}
	mcx_cmdq_post(sc, cmde, 0);

	error = mcx_cmdq_poll(sc, cmde, 1000);
	if (error != 0) {
		printf("%s: create cq timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cmde) != 0) {
		printf("%s: create cq command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cmde);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	cq->cq_n = be32toh(out->cmd_cqn);
	cq->cq_cons = 0;
	cq->cq_count = 0;
	/* kva of the same doorbell slot given to the device above */
	cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
	    MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
	mcx_arm_cq(sc, cq);
	sc->sc_num_cq++;

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}
4038
4039 static int
4040 mcx_destroy_cq(struct mcx_softc *sc, int index)
4041 {
4042 struct mcx_cmdq_entry *cqe;
4043 struct mcx_cmd_destroy_cq_in *in;
4044 struct mcx_cmd_destroy_cq_out *out;
4045 int error;
4046 int token;
4047
4048 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4049 token = mcx_cmdq_token(sc);
4050 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4051
4052 in = mcx_cmdq_in(cqe);
4053 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4054 in->cmd_op_mod = htobe16(0);
4055 in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4056
4057 mcx_cmdq_post(sc, cqe, 0);
4058 error = mcx_cmdq_poll(sc, cqe, 1000);
4059 if (error != 0) {
4060 printf("%s: destroy cq timeout\n", DEVNAME(sc));
4061 return error;
4062 }
4063 if (mcx_cmdq_verify(cqe) != 0) {
4064 printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4065 return error;
4066 }
4067
4068 out = mcx_cmdq_out(cqe);
4069 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4070 printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4071 out->cmd_status, be32toh(out->cmd_syndrome));
4072 return -1;
4073 }
4074
4075 sc->sc_cq[index].cq_n = 0;
4076 mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4077 sc->sc_cq[index].cq_cons = 0;
4078 sc->sc_cq[index].cq_count = 0;
4079 return 0;
4080 }
4081
/*
 * Create the receive queue: allocate page-aligned DMA memory for the
 * RQ ring and issue CREATE_RQ bound to completion queue 'cqn'. On
 * success the RQ number is saved in sc->sc_rqn and the doorbell
 * pointer in sc->sc_rx_doorbell. Returns 0 on success, -1 or the
 * command-queue error on failure.
 *
 * NOTE(review): sc->sc_rq_mem is not freed on the error paths, and
 * the mailbox-allocation failure path jumps to 'free' where mxm is
 * passed to mcx_dmamem_free even though its allocation just failed --
 * presumably mcx_cmdq_mboxes_alloc leaves mxm freeable; confirm.
 */
static int
mcx_create_rq(struct mcx_softc *sc, int cqn)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_rq_in *in;
	struct mcx_cmd_create_rq_out *out;
	struct mcx_rq_ctx *mbin;
	int error;
	uint64_t *pas;
	uint8_t *doorbell;
	int insize, npages, paslen, i, token;

	/* enough whole pages to hold 2^MCX_LOG_RQ_SIZE entries */
	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
	    MCX_PAGE_SIZE);
	paslen = npages * sizeof(*pas);
	/* the rq context sits 0x10 bytes into the mailbox input */
	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;

	if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE) != 0) {
		printf("%s: unable to allocate receive queue memory\n",
		    DEVNAME(sc));
		return (-1);
	}

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
	    &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate create rq mailboxen\n",
		    DEVNAME(sc));
		error = -1;
		goto free;
	}
	mbin = (struct mcx_rq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
	mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
	mbin->rq_cqn = htobe32(cqn);
	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
	    MCX_RQ_DOORBELL_OFFSET);
	/* log2 of the 16-byte rq entry stride */
	mbin->rq_wq.wq_log_stride = htobe16(4);
	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;

	/* physical addresses follow the mailbox in data */
	pas = (uint64_t *)(mbin + 1);
	for (i = 0; i < npages; i++) {
		pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
		    (i * MCX_PAGE_SIZE));
	}
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: create rq timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create rq command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	sc->sc_rqn = be32toh(out->cmd_rqn);

	/* kva of the doorbell slot given to the device above */
	doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
	sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}
4167
/*
 * Move the receive queue from reset (RST) to ready (RDY) state via
 * MODIFY_RQ so the hardware starts consuming posted receive entries.
 * Returns 0 on success, -1 or the command-queue error on failure.
 */
static int
mcx_ready_rq(struct mcx_softc *sc)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_modify_rq_in *in;
	struct mcx_cmd_modify_rq_mb_in *mbin;
	struct mcx_cmd_modify_rq_out *out;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
	in->cmd_op_mod = htobe16(0);
	/* current state (RST) in the top nibble, rq number in the rest */
	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
		return (-1);
	}
	/* new state (RDY) goes in the rq context in the mailbox */
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_rq_ctx.rq_flags = htobe32(
	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: modify rq timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}
4220
4221 static int
4222 mcx_destroy_rq(struct mcx_softc *sc)
4223 {
4224 struct mcx_cmdq_entry *cqe;
4225 struct mcx_cmd_destroy_rq_in *in;
4226 struct mcx_cmd_destroy_rq_out *out;
4227 int error;
4228 int token;
4229
4230 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4231 token = mcx_cmdq_token(sc);
4232 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4233
4234 in = mcx_cmdq_in(cqe);
4235 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4236 in->cmd_op_mod = htobe16(0);
4237 in->cmd_rqn = htobe32(sc->sc_rqn);
4238
4239 mcx_cmdq_post(sc, cqe, 0);
4240 error = mcx_cmdq_poll(sc, cqe, 1000);
4241 if (error != 0) {
4242 printf("%s: destroy rq timeout\n", DEVNAME(sc));
4243 return error;
4244 }
4245 if (mcx_cmdq_verify(cqe) != 0) {
4246 printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4247 return error;
4248 }
4249
4250 out = mcx_cmdq_out(cqe);
4251 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4252 printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4253 out->cmd_status, be32toh(out->cmd_syndrome));
4254 return -1;
4255 }
4256
4257 sc->sc_rqn = 0;
4258 return 0;
4259 }
4260
/*
 * Create a Transport Interface Receive (TIR) object that steers
 * received packets directly to the inline receive queue sc->sc_rqn
 * within transport domain sc->sc_tdomain. On success the TIR number
 * is saved in sc->sc_tirn. Returns 0 on success, -1 or the
 * command-queue error on failure.
 */
static int
mcx_create_tir(struct mcx_softc *sc)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_dmamem mxm;
	struct mcx_cmd_create_tir_in *in;
	struct mcx_cmd_create_tir_mb_in *mbin;
	struct mcx_cmd_create_tir_out *out;
	int error;
	int token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
		printf("%s: unable to allocate create tir mailbox\n",
		    DEVNAME(sc));
		return (-1);
	}
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	/* leave disp_type = 0, so packets get sent to the inline rqn */
	mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: create tir timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: create tir command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	sc->sc_tirn = be32toh(out->cmd_tirn);
free:
	mcx_dmamem_free(sc, &mxm);
	return (error);
}
4314
4315 static int
4316 mcx_destroy_tir(struct mcx_softc *sc)
4317 {
4318 struct mcx_cmdq_entry *cqe;
4319 struct mcx_cmd_destroy_tir_in *in;
4320 struct mcx_cmd_destroy_tir_out *out;
4321 int error;
4322 int token;
4323
4324 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4325 token = mcx_cmdq_token(sc);
4326 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4327
4328 in = mcx_cmdq_in(cqe);
4329 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4330 in->cmd_op_mod = htobe16(0);
4331 in->cmd_tirn = htobe32(sc->sc_tirn);
4332
4333 mcx_cmdq_post(sc, cqe, 0);
4334 error = mcx_cmdq_poll(sc, cqe, 1000);
4335 if (error != 0) {
4336 printf("%s: destroy tir timeout\n", DEVNAME(sc));
4337 return error;
4338 }
4339 if (mcx_cmdq_verify(cqe) != 0) {
4340 printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4341 return error;
4342 }
4343
4344 out = mcx_cmdq_out(cqe);
4345 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4346 printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4347 out->cmd_status, be32toh(out->cmd_syndrome));
4348 return -1;
4349 }
4350
4351 sc->sc_tirn = 0;
4352 return 0;
4353 }
4354
4355 static int
4356 mcx_create_sq(struct mcx_softc *sc, int cqn)
4357 {
4358 struct mcx_cmdq_entry *cqe;
4359 struct mcx_dmamem mxm;
4360 struct mcx_cmd_create_sq_in *in;
4361 struct mcx_sq_ctx *mbin;
4362 struct mcx_cmd_create_sq_out *out;
4363 int error;
4364 uint64_t *pas;
4365 uint8_t *doorbell;
4366 int insize, npages, paslen, i, token;
4367
4368 npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4369 MCX_PAGE_SIZE);
4370 paslen = npages * sizeof(*pas);
4371 insize = sizeof(struct mcx_sq_ctx) + paslen;
4372
4373 if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4374 MCX_PAGE_SIZE) != 0) {
4375 printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4376 return (-1);
4377 }
4378
4379 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4380 token = mcx_cmdq_token(sc);
4381 mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
4382 token);
4383
4384 in = mcx_cmdq_in(cqe);
4385 in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4386 in->cmd_op_mod = htobe16(0);
4387
4388 if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4389 &cqe->cq_input_ptr, token) != 0) {
4390 printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4391 error = -1;
4392 goto free;
4393 }
4394 mbin = (struct mcx_sq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4395 mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4396 (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4397 mbin->sq_cqn = htobe32(cqn);
4398 mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4399 mbin->sq_tis_num = htobe32(sc->sc_tisn);
4400 mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4401 mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4402 mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4403 mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4404 MCX_SQ_DOORBELL_OFFSET);
4405 mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4406 mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4407
4408 /* physical addresses follow the mailbox in data */
4409 pas = (uint64_t *)(mbin + 1);
4410 for (i = 0; i < npages; i++) {
4411 pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4412 (i * MCX_PAGE_SIZE));
4413 }
4414 mcx_cmdq_post(sc, cqe, 0);
4415
4416 error = mcx_cmdq_poll(sc, cqe, 1000);
4417 if (error != 0) {
4418 printf("%s: create sq timeout\n", DEVNAME(sc));
4419 goto free;
4420 }
4421 if (mcx_cmdq_verify(cqe) != 0) {
4422 printf("%s: create sq command corrupt\n", DEVNAME(sc));
4423 goto free;
4424 }
4425
4426 out = mcx_cmdq_out(cqe);
4427 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4428 printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4429 out->cmd_status, be32toh(out->cmd_syndrome));
4430 error = -1;
4431 goto free;
4432 }
4433
4434 sc->sc_sqn = be32toh(out->cmd_sqn);
4435
4436 doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4437 sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4438 free:
4439 mcx_dmamem_free(sc, &mxm);
4440 return (error);
4441 }
4442
4443 static int
4444 mcx_destroy_sq(struct mcx_softc *sc)
4445 {
4446 struct mcx_cmdq_entry *cqe;
4447 struct mcx_cmd_destroy_sq_in *in;
4448 struct mcx_cmd_destroy_sq_out *out;
4449 int error;
4450 int token;
4451
4452 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4453 token = mcx_cmdq_token(sc);
4454 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4455
4456 in = mcx_cmdq_in(cqe);
4457 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4458 in->cmd_op_mod = htobe16(0);
4459 in->cmd_sqn = htobe32(sc->sc_sqn);
4460
4461 mcx_cmdq_post(sc, cqe, 0);
4462 error = mcx_cmdq_poll(sc, cqe, 1000);
4463 if (error != 0) {
4464 printf("%s: destroy sq timeout\n", DEVNAME(sc));
4465 return error;
4466 }
4467 if (mcx_cmdq_verify(cqe) != 0) {
4468 printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4469 return error;
4470 }
4471
4472 out = mcx_cmdq_out(cqe);
4473 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4474 printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4475 out->cmd_status, be32toh(out->cmd_syndrome));
4476 return -1;
4477 }
4478
4479 sc->sc_sqn = 0;
4480 return 0;
4481 }
4482
4483 static int
4484 mcx_ready_sq(struct mcx_softc *sc)
4485 {
4486 struct mcx_cmdq_entry *cqe;
4487 struct mcx_dmamem mxm;
4488 struct mcx_cmd_modify_sq_in *in;
4489 struct mcx_cmd_modify_sq_mb_in *mbin;
4490 struct mcx_cmd_modify_sq_out *out;
4491 int error;
4492 int token;
4493
4494 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4495 token = mcx_cmdq_token(sc);
4496 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4497
4498 in = mcx_cmdq_in(cqe);
4499 in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4500 in->cmd_op_mod = htobe16(0);
4501 in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4502
4503 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4504 printf("%s: unable to allocate modify sq mailbox\n",
4505 DEVNAME(sc));
4506 return (-1);
4507 }
4508 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4509 mbin->cmd_sq_ctx.sq_flags = htobe32(
4510 MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4511
4512 mcx_cmdq_mboxes_sign(&mxm, 1);
4513 mcx_cmdq_post(sc, cqe, 0);
4514 error = mcx_cmdq_poll(sc, cqe, 1000);
4515 if (error != 0) {
4516 printf("%s: modify sq timeout\n", DEVNAME(sc));
4517 goto free;
4518 }
4519 if (mcx_cmdq_verify(cqe) != 0) {
4520 printf("%s: modify sq command corrupt\n", DEVNAME(sc));
4521 goto free;
4522 }
4523
4524 out = mcx_cmdq_out(cqe);
4525 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4526 printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4527 out->cmd_status, be32toh(out->cmd_syndrome));
4528 error = -1;
4529 goto free;
4530 }
4531
4532 free:
4533 mcx_dmamem_free(sc, &mxm);
4534 return (error);
4535 }
4536
4537 static int
4538 mcx_create_tis(struct mcx_softc *sc)
4539 {
4540 struct mcx_cmdq_entry *cqe;
4541 struct mcx_dmamem mxm;
4542 struct mcx_cmd_create_tis_in *in;
4543 struct mcx_cmd_create_tis_mb_in *mbin;
4544 struct mcx_cmd_create_tis_out *out;
4545 int error;
4546 int token;
4547
4548 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4549 token = mcx_cmdq_token(sc);
4550 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4551
4552 in = mcx_cmdq_in(cqe);
4553 in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4554 in->cmd_op_mod = htobe16(0);
4555
4556 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4557 printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4558 return (-1);
4559 }
4560 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4561 mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4562
4563 mcx_cmdq_mboxes_sign(&mxm, 1);
4564 mcx_cmdq_post(sc, cqe, 0);
4565 error = mcx_cmdq_poll(sc, cqe, 1000);
4566 if (error != 0) {
4567 printf("%s: create tis timeout\n", DEVNAME(sc));
4568 goto free;
4569 }
4570 if (mcx_cmdq_verify(cqe) != 0) {
4571 printf("%s: create tis command corrupt\n", DEVNAME(sc));
4572 goto free;
4573 }
4574
4575 out = mcx_cmdq_out(cqe);
4576 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4577 printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4578 out->cmd_status, be32toh(out->cmd_syndrome));
4579 error = -1;
4580 goto free;
4581 }
4582
4583 sc->sc_tisn = be32toh(out->cmd_tisn);
4584 free:
4585 mcx_dmamem_free(sc, &mxm);
4586 return (error);
4587 }
4588
4589 static int
4590 mcx_destroy_tis(struct mcx_softc *sc)
4591 {
4592 struct mcx_cmdq_entry *cqe;
4593 struct mcx_cmd_destroy_tis_in *in;
4594 struct mcx_cmd_destroy_tis_out *out;
4595 int error;
4596 int token;
4597
4598 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4599 token = mcx_cmdq_token(sc);
4600 mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4601
4602 in = mcx_cmdq_in(cqe);
4603 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4604 in->cmd_op_mod = htobe16(0);
4605 in->cmd_tisn = htobe32(sc->sc_tisn);
4606
4607 mcx_cmdq_post(sc, cqe, 0);
4608 error = mcx_cmdq_poll(sc, cqe, 1000);
4609 if (error != 0) {
4610 printf("%s: destroy tis timeout\n", DEVNAME(sc));
4611 return error;
4612 }
4613 if (mcx_cmdq_verify(cqe) != 0) {
4614 printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4615 return error;
4616 }
4617
4618 out = mcx_cmdq_out(cqe);
4619 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4620 printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4621 out->cmd_status, be32toh(out->cmd_syndrome));
4622 return -1;
4623 }
4624
4625 sc->sc_tirn = 0;
4626 return 0;
4627 }
4628
#if 0
/*
 * Allocate a hardware flow counter and record its id in
 * sc_flow_counter_id[i].  Currently compiled out; kept for debugging
 * flow-table setup.
 */
static int
mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
{
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_alloc_flow_counter_in *in;
	struct mcx_cmd_alloc_flow_counter_out *out;
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
		return (-1);
	}

	/* output fits in the cqe, no output mailbox needed */
	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
		    out->cmd_status);
		return (-1);
	}

	sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);

	return (0);
}
#endif
4670
4671 static int
4672 mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4673 {
4674 struct mcx_cmdq_entry *cqe;
4675 struct mcx_dmamem mxm;
4676 struct mcx_cmd_create_flow_table_in *in;
4677 struct mcx_cmd_create_flow_table_mb_in *mbin;
4678 struct mcx_cmd_create_flow_table_out *out;
4679 int error;
4680 int token;
4681
4682 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4683 token = mcx_cmdq_token(sc);
4684 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4685
4686 in = mcx_cmdq_in(cqe);
4687 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4688 in->cmd_op_mod = htobe16(0);
4689
4690 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4691 printf("%s: unable to allocate create flow table mailbox\n",
4692 DEVNAME(sc));
4693 return (-1);
4694 }
4695 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4696 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4697 mbin->cmd_ctx.ft_log_size = log_size;
4698
4699 mcx_cmdq_mboxes_sign(&mxm, 1);
4700 mcx_cmdq_post(sc, cqe, 0);
4701 error = mcx_cmdq_poll(sc, cqe, 1000);
4702 if (error != 0) {
4703 printf("%s: create flow table timeout\n", DEVNAME(sc));
4704 goto free;
4705 }
4706 if (mcx_cmdq_verify(cqe) != 0) {
4707 printf("%s: create flow table command corrupt\n", DEVNAME(sc));
4708 goto free;
4709 }
4710
4711 out = mcx_cmdq_out(cqe);
4712 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4713 printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4714 out->cmd_status, be32toh(out->cmd_syndrome));
4715 error = -1;
4716 goto free;
4717 }
4718
4719 sc->sc_flow_table_id = be32toh(out->cmd_table_id);
4720 free:
4721 mcx_dmamem_free(sc, &mxm);
4722 return (error);
4723 }
4724
4725 static int
4726 mcx_set_flow_table_root(struct mcx_softc *sc)
4727 {
4728 struct mcx_cmdq_entry *cqe;
4729 struct mcx_dmamem mxm;
4730 struct mcx_cmd_set_flow_table_root_in *in;
4731 struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4732 struct mcx_cmd_set_flow_table_root_out *out;
4733 int error;
4734 int token;
4735
4736 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4737 token = mcx_cmdq_token(sc);
4738 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4739
4740 in = mcx_cmdq_in(cqe);
4741 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4742 in->cmd_op_mod = htobe16(0);
4743
4744 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4745 printf("%s: unable to allocate set flow table root mailbox\n",
4746 DEVNAME(sc));
4747 return (-1);
4748 }
4749 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4750 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4751 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4752
4753 mcx_cmdq_mboxes_sign(&mxm, 1);
4754 mcx_cmdq_post(sc, cqe, 0);
4755 error = mcx_cmdq_poll(sc, cqe, 1000);
4756 if (error != 0) {
4757 printf("%s: set flow table root timeout\n", DEVNAME(sc));
4758 goto free;
4759 }
4760 if (mcx_cmdq_verify(cqe) != 0) {
4761 printf("%s: set flow table root command corrupt\n",
4762 DEVNAME(sc));
4763 goto free;
4764 }
4765
4766 out = mcx_cmdq_out(cqe);
4767 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4768 printf("%s: set flow table root failed (%x, %x)\n",
4769 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4770 error = -1;
4771 goto free;
4772 }
4773
4774 free:
4775 mcx_dmamem_free(sc, &mxm);
4776 return (error);
4777 }
4778
4779 static int
4780 mcx_destroy_flow_table(struct mcx_softc *sc)
4781 {
4782 struct mcx_cmdq_entry *cqe;
4783 struct mcx_dmamem mxm;
4784 struct mcx_cmd_destroy_flow_table_in *in;
4785 struct mcx_cmd_destroy_flow_table_mb_in *mb;
4786 struct mcx_cmd_destroy_flow_table_out *out;
4787 int error;
4788 int token;
4789
4790 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4791 token = mcx_cmdq_token(sc);
4792 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4793
4794 in = mcx_cmdq_in(cqe);
4795 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4796 in->cmd_op_mod = htobe16(0);
4797
4798 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4799 printf("%s: unable to allocate destroy flow table mailbox\n",
4800 DEVNAME(sc));
4801 return (-1);
4802 }
4803 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4804 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4805 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4806
4807 mcx_cmdq_mboxes_sign(&mxm, 1);
4808 mcx_cmdq_post(sc, cqe, 0);
4809 error = mcx_cmdq_poll(sc, cqe, 1000);
4810 if (error != 0) {
4811 printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4812 goto free;
4813 }
4814 if (mcx_cmdq_verify(cqe) != 0) {
4815 printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
4816 goto free;
4817 }
4818
4819 out = mcx_cmdq_out(cqe);
4820 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4821 printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4822 out->cmd_status, be32toh(out->cmd_syndrome));
4823 error = -1;
4824 goto free;
4825 }
4826
4827 sc->sc_flow_table_id = -1;
4828 free:
4829 mcx_dmamem_free(sc, &mxm);
4830 return (error);
4831 }
4832
4833
4834 static int
4835 mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4836 int match_enable, struct mcx_flow_match *match)
4837 {
4838 struct mcx_cmdq_entry *cqe;
4839 struct mcx_dmamem mxm;
4840 struct mcx_cmd_create_flow_group_in *in;
4841 struct mcx_cmd_create_flow_group_mb_in *mbin;
4842 struct mcx_cmd_create_flow_group_out *out;
4843 int error;
4844 int token;
4845
4846 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4847 token = mcx_cmdq_token(sc);
4848 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4849 token);
4850
4851 in = mcx_cmdq_in(cqe);
4852 in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4853 in->cmd_op_mod = htobe16(0);
4854
4855 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4856 != 0) {
4857 printf("%s: unable to allocate create flow group mailbox\n",
4858 DEVNAME(sc));
4859 return (-1);
4860 }
4861 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4862 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4863 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4864 mbin->cmd_start_flow_index = htobe32(start);
4865 mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4866
4867 mbin->cmd_match_criteria_enable = match_enable;
4868 memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4869
4870 mcx_cmdq_mboxes_sign(&mxm, 2);
4871 mcx_cmdq_post(sc, cqe, 0);
4872 error = mcx_cmdq_poll(sc, cqe, 1000);
4873 if (error != 0) {
4874 printf("%s: create flow group timeout\n", DEVNAME(sc));
4875 goto free;
4876 }
4877 if (mcx_cmdq_verify(cqe) != 0) {
4878 printf("%s: create flow group command corrupt\n", DEVNAME(sc));
4879 goto free;
4880 }
4881
4882 out = mcx_cmdq_out(cqe);
4883 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4884 printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4885 out->cmd_status, be32toh(out->cmd_syndrome));
4886 error = -1;
4887 goto free;
4888 }
4889
4890 sc->sc_flow_group_id[group] = be32toh(out->cmd_group_id);
4891 sc->sc_flow_group_size[group] = size;
4892 sc->sc_flow_group_start[group] = start;
4893
4894 free:
4895 mcx_dmamem_free(sc, &mxm);
4896 return (error);
4897 }
4898
4899 static int
4900 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4901 {
4902 struct mcx_cmdq_entry *cqe;
4903 struct mcx_dmamem mxm;
4904 struct mcx_cmd_destroy_flow_group_in *in;
4905 struct mcx_cmd_destroy_flow_group_mb_in *mb;
4906 struct mcx_cmd_destroy_flow_group_out *out;
4907 int error;
4908 int token;
4909
4910 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4911 token = mcx_cmdq_token(sc);
4912 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4913
4914 in = mcx_cmdq_in(cqe);
4915 in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4916 in->cmd_op_mod = htobe16(0);
4917
4918 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4919 printf("%s: unable to allocate destroy flow group mailbox\n",
4920 DEVNAME(sc));
4921 return (-1);
4922 }
4923 mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4924 mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4925 mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4926 mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4927
4928 mcx_cmdq_mboxes_sign(&mxm, 2);
4929 mcx_cmdq_post(sc, cqe, 0);
4930 error = mcx_cmdq_poll(sc, cqe, 1000);
4931 if (error != 0) {
4932 printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4933 goto free;
4934 }
4935 if (mcx_cmdq_verify(cqe) != 0) {
4936 printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
4937 goto free;
4938 }
4939
4940 out = mcx_cmdq_out(cqe);
4941 if (out->cmd_status != MCX_CQ_STATUS_OK) {
4942 printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4943 out->cmd_status, be32toh(out->cmd_syndrome));
4944 error = -1;
4945 goto free;
4946 }
4947
4948 sc->sc_flow_group_id[group] = -1;
4949 sc->sc_flow_group_size[group] = 0;
4950 free:
4951 mcx_dmamem_free(sc, &mxm);
4952 return (error);
4953 }
4954
4955 static int
4956 mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4957 const uint8_t *macaddr)
4958 {
4959 struct mcx_cmdq_entry *cqe;
4960 struct mcx_dmamem mxm;
4961 struct mcx_cmd_set_flow_table_entry_in *in;
4962 struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4963 struct mcx_cmd_set_flow_table_entry_out *out;
4964 uint32_t *dest;
4965 int error;
4966 int token;
4967
4968 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4969 token = mcx_cmdq_token(sc);
4970 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4971 sizeof(*out), token);
4972
4973 in = mcx_cmdq_in(cqe);
4974 in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4975 in->cmd_op_mod = htobe16(0);
4976
4977 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4978 != 0) {
4979 printf("%s: unable to allocate set flow table entry mailbox\n",
4980 DEVNAME(sc));
4981 return (-1);
4982 }
4983 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4984 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4985 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4986 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4987 mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4988
4989 /* flow context ends at offset 0x330, 0x130 into the second mbox */
4990 dest = (uint32_t *)
4991 (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4992 mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
4993 mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
4994 *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
4995
4996 /* the only thing we match on at the moment is the dest mac address */
4997 if (macaddr != NULL) {
4998 memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
4999 ETHER_ADDR_LEN);
5000 }
5001
5002 mcx_cmdq_mboxes_sign(&mxm, 2);
5003 mcx_cmdq_post(sc, cqe, 0);
5004 error = mcx_cmdq_poll(sc, cqe, 1000);
5005 if (error != 0) {
5006 printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5007 goto free;
5008 }
5009 if (mcx_cmdq_verify(cqe) != 0) {
5010 printf("%s: set flow table entry command corrupt\n",
5011 DEVNAME(sc));
5012 goto free;
5013 }
5014
5015 out = mcx_cmdq_out(cqe);
5016 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5017 printf("%s: set flow table entry failed (%x, %x)\n",
5018 DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5019 error = -1;
5020 goto free;
5021 }
5022
5023 free:
5024 mcx_dmamem_free(sc, &mxm);
5025 return (error);
5026 }
5027
5028 static int
5029 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5030 {
5031 struct mcx_cmdq_entry *cqe;
5032 struct mcx_dmamem mxm;
5033 struct mcx_cmd_delete_flow_table_entry_in *in;
5034 struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5035 struct mcx_cmd_delete_flow_table_entry_out *out;
5036 int error;
5037 int token;
5038
5039 cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5040 token = mcx_cmdq_token(sc);
5041 mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5042 token);
5043
5044 in = mcx_cmdq_in(cqe);
5045 in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5046 in->cmd_op_mod = htobe16(0);
5047
5048 if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5049 printf("%s: unable to allocate delete flow table entry mailbox\n",
5050 DEVNAME(sc));
5051 return (-1);
5052 }
5053 mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5054 mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5055 mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5056 mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5057
5058 mcx_cmdq_mboxes_sign(&mxm, 2);
5059 mcx_cmdq_post(sc, cqe, 0);
5060 error = mcx_cmdq_poll(sc, cqe, 1000);
5061 if (error != 0) {
5062 printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5063 goto free;
5064 }
5065 if (mcx_cmdq_verify(cqe) != 0) {
5066 printf("%s: delete flow table entry command corrupt\n",
5067 DEVNAME(sc));
5068 goto free;
5069 }
5070
5071 out = mcx_cmdq_out(cqe);
5072 if (out->cmd_status != MCX_CQ_STATUS_OK) {
5073 printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5074 DEVNAME(sc), group, index, out->cmd_status,
5075 be32toh(out->cmd_syndrome));
5076 error = -1;
5077 goto free;
5078 }
5079
5080 free:
5081 mcx_dmamem_free(sc, &mxm);
5082 return (error);
5083 }
5084
#if 0
/*
 * Debug helper: query the flow table and hex-dump its context.
 * Compiled out; kept for bring-up/debugging.
 */
int
mcx_dump_flow_table(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_in *in;
	struct mcx_cmd_query_flow_table_mb_in *mbin;
	struct mcx_cmd_query_flow_table_out *out;
	struct mcx_cmd_query_flow_table_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table mailboxes\n");
		return (-1);
	}
	/* the same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* hex-dump the flow table context, 16 bytes per line */
	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout + 8;
	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
/*
 * Debug helper: query one flow table entry and hex-dump the first
 * mailbox of the reply.  Part of the surrounding #if 0 debug block.
 */
int
mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_table_entry_in *in;
	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
	struct mcx_cmd_query_flow_table_entry_out *out;
	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow table entry mailboxes\n");
		return (-1);
	}
	/* the same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	mbin->cmd_flow_index = htobe32(index);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow table entry reply corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow table entry failed (%x/%x)\n",
		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* hex-dump the reply, 16 bytes per line */
	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5233
/*
 * Debug helper: query a flow group and hex-dump both reply mailboxes.
 * Part of the surrounding #if 0 debug block.
 *
 * NOTE(review): sc_flow_group_id is used below without an array index,
 * unlike the live code which uses sc_flow_group_id[group]; this would
 * not compile if the block were enabled.
 */
int
mcx_dump_flow_group(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_group_in *in;
	struct mcx_cmd_query_flow_group_mb_in *mbin;
	struct mcx_cmd_query_flow_group_out *out;
	struct mcx_cmd_query_flow_group_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*mbout) + 16, token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
	in->cmd_op_mod = htobe16(0);

	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}
	/* the same mailboxes serve as both input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_table_type = 0;
	mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow group timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	/* hex-dump both reply mailboxes, 16 bytes per line */
	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}
	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5314
/*
 * Debug helper: query the receive queue and print a summary of its
 * context (state, user index, cqn, work queue geometry and counters).
 * Part of the surrounding #if 0 debug block.
 */
int
mcx_dump_rq(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_rq_in *in;
	struct mcx_cmd_query_rq_out *out;
	struct mcx_cmd_query_rq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_rqn = htobe32(sc->sc_rqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow group mailboxes\n");
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query rq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_rq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
	    DEVNAME(sc),
	    (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
	    be32toh(mbout->cmd_ctx.rq_user_index),
	    be32toh(mbout->cmd_ctx.rq_cqn),
	    be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
	    mbout->cmd_ctx.rq_wq.wq_log_size,
	    be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
	    be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5384
/*
 * Debug helper: query the send queue and hex-dump the first mailbox of
 * the reply.  Part of the surrounding #if 0 debug block.
 */
int
mcx_dump_sq(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_sq_in *in;
	struct mcx_cmd_query_sq_out *out;
	struct mcx_cmd_query_sq_mb_out *mbout;
	uint8_t token = mcx_cmdq_token(sc);
	int error;
	int i;
	uint8_t *dump;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
	    token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
	in->cmd_op_mod = htobe16(0);
	in->cmd_sqn = htobe32(sc->sc_sqn);

	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
	    &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query sq mailboxes\n");
		return (-1);
	}

	mcx_cmdq_mboxes_sign(&mxm, 1);

	mcx_cmdq_post(sc, cqe, 0);
	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query sq timeout\n", DEVNAME(sc));
		goto free;
	}
	error = mcx_cmdq_verify(cqe);
	if (error != 0) {
		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	switch (out->cmd_status) {
	case MCX_CQ_STATUS_OK:
		break;
	default:
		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
		    out->cmd_status, be32toh(out->cmd_syndrome));
		error = -1;
		goto free;
	}

	mbout = (struct mcx_cmd_query_sq_mb_out *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/* stale copy of the mcx_dump_rq() summary printf, kept disabled */
	/*
	printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
	    DEVNAME(sc),
	    (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
	    be32toh(mbout->cmd_ctx.rq_user_index),
	    be32toh(mbout->cmd_ctx.rq_cqn),
	    be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
	    mbout->cmd_ctx.rq_wq.wq_log_page_sz,
	    mbout->cmd_ctx.rq_wq.wq_log_size,
	    be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
	    be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
	*/
	/* hex-dump the reply, 16 bytes per line */
	dump = (uint8_t *)mbout;
	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
		printf("%.2x ", dump[i]);
		if (i % 16 == 15)
			printf("\n");
	}

free:
	mcx_cq_mboxes_free(sc, &mxm);
	return (error);
}
5464
/*
 * Debug helper: issue a QUERY_VPORT_COUNTERS command and, if any of the
 * error/unicast/broadcast packet counters are non-zero, print them.
 * The counters are cleared by the query (cmd_clear).
 */
static int
mcx_dump_counters(struct mcx_softc *sc)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_vport_counters_in *in;
	struct mcx_cmd_query_vport_counters_mb_in *mbin;
	struct mcx_cmd_query_vport_counters_out *out;
	struct mcx_nic_vport_counters *counters;
	int error, token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
	    sizeof(*out) + sizeof(*counters), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query nic vport counters mailboxen\n");
		return (-1);
	}
	/* the single mailbox carries both command input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;

	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	/* presumably the "clear counters after read" bit — confirm with PRM */
	mbin->cmd_clear = 0x80;

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query nic vport counters command corrupt\n",
		    DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: query nic vport counters failed (%x, %x)\n",
		    DEVNAME(sc), out->cmd_status, out->cmd_syndrome);
		error = -1;
		goto free;
	}

	counters = (struct mcx_nic_vport_counters *)
	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	/*
	 * big-endian values summed without byteswap: only used as a
	 * "anything non-zero?" test before printing
	 */
	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
	    counters->rx_ucast.packets + counters->tx_ucast.packets +
	    counters->rx_err.packets + counters->tx_err.packets)
		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
		    DEVNAME(sc),
		    be64toh(counters->tx_err.packets),
		    be64toh(counters->rx_err.packets),
		    be64toh(counters->tx_ucast.packets),
		    be64toh(counters->rx_ucast.packets),
		    be64toh(counters->tx_bcast.packets),
		    be64toh(counters->rx_bcast.packets));
free:
	/* NOTE(review): other query helpers use mcx_cq_mboxes_free() — confirm */
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
5534
/*
 * Debug helper: issue a QUERY_FLOW_COUNTER command for the flow counter
 * at sc_flow_counter_id[index] and print its packet count (labelled with
 * "what") if non-zero.  The counter is cleared by the query.
 */
static int
mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
{
	struct mcx_dmamem mxm;
	struct mcx_cmdq_entry *cqe;
	struct mcx_cmd_query_flow_counter_in *in;
	struct mcx_cmd_query_flow_counter_mb_in *mbin;
	struct mcx_cmd_query_flow_counter_out *out;
	struct mcx_counter *counters;
	int error, token;

	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
	token = mcx_cmdq_token(sc);
	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
	    sizeof(*counters), token);

	in = mcx_cmdq_in(cqe);
	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
	in->cmd_op_mod = htobe16(0);

	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
		printf(", unable to allocate query flow counter mailboxen\n");
		return (-1);
	}
	/* the single mailbox carries both command input and output */
	cqe->cq_input_ptr = cqe->cq_output_ptr;
	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
	/* presumably the "clear counter after read" bit — confirm with PRM */
	mbin->cmd_clear = 0x80;

	mcx_cmdq_mboxes_sign(&mxm, 1);
	mcx_cmdq_post(sc, cqe, 0);

	error = mcx_cmdq_poll(sc, cqe, 1000);
	if (error != 0) {
		printf("%s: query flow counter timeout\n", DEVNAME(sc));
		goto free;
	}
	if (mcx_cmdq_verify(cqe) != 0) {
		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		goto free;
	}

	out = mcx_cmdq_out(cqe);
	if (out->cmd_status != MCX_CQ_STATUS_OK) {
		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
		    out->cmd_status, out->cmd_syndrome);
		error = -1;
		goto free;
	}

	counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
	if (counters->packets)
		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
		    be64toh(counters->packets));
free:
	/* NOTE(review): other query helpers use mcx_cq_mboxes_free() — confirm */
	mcx_dmamem_free(sc, &mxm);

	return (error);
}
5594
5595 #endif
5596
/*
 * Post up to "nslots" fresh receive buffers starting at ring index
 * *prod: allocate an mbuf cluster per slot, load it into the slot's DMA
 * map and fill in the corresponding RQ entry.  If anything was posted,
 * the rx doorbell is updated.  *prod is advanced past the filled slots.
 * Returns the number of slots that could NOT be filled.
 */
static int
mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
    uint *prod, int bufsize, uint nslots)
{
	struct mcx_rq_entry *rqe;
	struct mcx_slot *ms;
	struct mbuf *m;
	uint slot, p, fills;

	p = *prod;
	slot = (p % (1 << MCX_LOG_RQ_SIZE));
	rqe = ring;
	for (fills = 0; fills < nslots; fills++) {
		ms = &slots[slot];
#if 0
		m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
		if (m == NULL)
			break;
#else
		/* allocate a packet header mbuf and attach a cluster */
		m = NULL;
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			break;

		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			break;
		}
#endif

		/* keep the IP header aligned after the 14-byte ether header */
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		ms->ms_m = m;

		/* hand the buffer to the hardware */
		rqe[slot].rqe_byte_count = htobe32(m->m_len);
		rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);

		p++;
		slot++;
		if (slot == (1 << MCX_LOG_RQ_SIZE))
			slot = 0;
	}

	if (fills != 0) {
		/* tell the nic about the new producer index */
		*sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
		/* barrier? */
	}

	*prod = p;

	return (nslots - fills);
}
5657
5658 static int
5659 mcx_rx_fill(struct mcx_softc *sc)
5660 {
5661 u_int slots;
5662
5663 slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5664 if (slots == 0)
5665 return (1);
5666
5667 slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5668 sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5669 mcx_rxr_put(&sc->sc_rxr, slots);
5670 return (0);
5671 }
5672
5673 void
5674 mcx_refill(void *xsc)
5675 {
5676 struct mcx_softc *sc = xsc;
5677
5678 mcx_rx_fill(sc);
5679
5680 if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5681 callout_schedule(&sc->sc_rx_refill, 1);
5682 }
5683
/*
 * Handle a tx completion: sync and unload the finished slot's DMA map,
 * free its mbuf, and add the number of SQ slots the packet occupied to
 * *txfree so the caller can advance the consumer index.
 */
void
mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
{
	struct mcx_slot *ms;
	bus_dmamap_t map;
	int slot, slots;

	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);

	ms = &sc->sc_tx_slots[slot];
	map = ms->ms_map;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * account for extra slots used by multi-segment packets; this
	 * must mirror the slot packing done at transmit time
	 */
	slots = 1;
	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;

	(*txfree) += slots;
	bus_dmamap_unload(sc->sc_dmat, map);
	m_freem(ms->ms_m);
	ms->ms_m = NULL;
}
5707
5708 static uint64_t
5709 mcx_uptime(void)
5710 {
5711 struct timespec ts;
5712
5713 nanouptime(&ts);
5714
5715 return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5716 }
5717
/*
 * Take the initial uptime/device-timer sample pair for rx timestamp
 * calibration and schedule the first periodic calibration pass.
 * c_tdiff of 0 marks the calibration data as not yet usable.
 */
static void
mcx_calibrate_first(struct mcx_softc *sc)
{
	struct mcx_calibration *c = &sc->sc_calibration[0];

	sc->sc_calibration_gen = 0;

	/* sample both clocks as close together as possible */
	c->c_ubase = mcx_uptime();
	c->c_tbase = mcx_timer(sc);
	c->c_tdiff = 0;

	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
}
5731
5732 #define MCX_TIMESTAMP_SHIFT 10
5733
/*
 * Periodic calibration callout: maintain a two-entry double buffer of
 * (uptime, device timer) sample pairs so readers can convert device
 * timestamps to uptime.  The new entry is fully written before the
 * generation counter is published (membar_producer), so a reader using
 * the old generation always sees a consistent entry.
 */
static void
mcx_calibrate(void *arg)
{
	struct mcx_softc *sc = arg;
	struct mcx_calibration *nc, *pc;
	unsigned int gen;

	if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
		return;

	callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);

	gen = sc->sc_calibration_gen;
	pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
	gen++;
	nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	/* the previous bases become the interval start for the new entry */
	nc->c_uptime = pc->c_ubase;
	nc->c_timestamp = pc->c_tbase;

	nc->c_ubase = mcx_uptime();
	nc->c_tbase = mcx_timer(sc);

	/* deltas over the interval, scaled down to avoid overflow */
	nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
	nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;

	/* publish the new entry only after it is completely written */
	membar_producer();
	sc->sc_calibration_gen = gen;
}
5763
/*
 * Handle a receive completion: detach the mbuf from the slot the
 * completion entry refers to, set its length from the CQE byte count
 * and append it to the caller's mbuf queue.  Returns 1 (one rx slot
 * consumed).  The calibration argument is currently unused (the
 * timestamp conversion code is compiled out below).
 */
static int
mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
    struct mcx_mbufq *mq, const struct mcx_calibration *c)
{
	struct mcx_slot *ms;
	struct mbuf *m;
	int slot;

	slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);

	ms = &sc->sc_rx_slots[slot];
	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

	m = ms->ms_m;
	ms->ms_m = NULL;

	m_set_rcvif(m, &sc->sc_ec.ec_if);
	m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);

#if 0
	if (cqe->cq_rx_hash_type) {
		m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
		    be32toh(cqe->cq_rx_hash);
	}
#endif

#if 0
	if (c->c_tdiff) {
		uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
		t *= c->c_udiff;
		t /= c->c_tdiff;

		m->m_pkthdr.ph_timestamp = c->c_uptime + t;
		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
	}
#endif

	MBUFQ_ENQUEUE(mq, m);

	return (1);
}
5807
5808 static struct mcx_cq_entry *
5809 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5810 {
5811 struct mcx_cq_entry *cqe;
5812 int next;
5813
5814 cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5815 next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
5816
5817 if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5818 ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5819 return (&cqe[next]);
5820 }
5821
5822 return (NULL);
5823 }
5824
/*
 * Re-arm a completion queue so the next completion raises an event.
 * Writes the doorbell record in memory first, then pushes the same
 * arm command (plus the CQ number) through the UAR doorbell register.
 */
static void
mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	bus_size_t offset;
	uint32_t val;
	uint64_t uval;

	/* different uar per cq? */
	offset = (MCX_PAGE_SIZE * sc->sc_uar);
	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);

	/* update the in-memory doorbell record first */
	cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
	cq->cq_doorbell[1] = htobe32(val);

	/* then ring the UAR doorbell: arm command in the high word, cqn low */
	uval = val;
	uval <<= 32;
	uval |= cq->cq_n;
	bus_space_write_8(sc->sc_memt, sc->sc_memh,
	    offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
	mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
	    BUS_SPACE_BARRIER_WRITE);
}
5848
/*
 * Drain a completion queue: dispatch each entry to the tx or rx
 * completion handler, then re-arm the queue.  Received packets are
 * collected on a local queue and handed to the network stack in one
 * batch; freed tx slots wake the transmit path.
 */
void
mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	const struct mcx_calibration *c;
	unsigned int gen;
	struct mcx_cq_entry *cqe;
	struct mcx_mbufq mq;
	struct mbuf *m;
	int rxfree, txfree;

	MBUFQ_INIT(&mq);

	/* snapshot the current calibration entry (published by mcx_calibrate) */
	gen = sc->sc_calibration_gen;
	membar_consumer();
	c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];

	rxfree = 0;
	txfree = 0;
	while ((cqe = mcx_next_cq_entry(sc, cq))) {
		uint8_t opcode;
		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
		switch (opcode) {
		case MCX_CQ_ENTRY_OPCODE_REQ:
			/* send completion */
			mcx_process_txeof(sc, cqe, &txfree);
			break;
		case MCX_CQ_ENTRY_OPCODE_SEND:
			/* receive completion */
			rxfree += mcx_process_rx(sc, cqe, &mq, c);
			break;
		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
			/* uint8_t *cqp = (uint8_t *)cqe; */
			/* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
			break;

		default:
			/* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
			break;
		}

		cq->cq_cons++;
	}

	cq->cq_count++;
	mcx_arm_cq(sc, cq);

	if (rxfree > 0) {
		/* return consumed slots, push packets up, then refill */
		mcx_rxr_put(&sc->sc_rxr, rxfree);
		while (MBUFQ_FIRST(&mq) != NULL) {
			MBUFQ_DEQUEUE(&mq, m);
			if_percpuq_enqueue(ifp->if_percpuq, m);
		}

		mcx_rx_fill(sc);

		/* still empty? retry the fill from the callout */
		if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
			callout_schedule(&sc->sc_rx_refill, 1);
	}
	if (txfree > 0) {
		sc->sc_tx_cons += txfree;
		if_schedule_deferred_start(ifp);
	}
}
5912
5913 static void
5914 mcx_arm_eq(struct mcx_softc *sc)
5915 {
5916 bus_size_t offset;
5917 uint32_t val;
5918
5919 offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5920 val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5921
5922 mcx_wr(sc, offset, val);
5923 /* barrier? */
5924 }
5925
5926 static struct mcx_eq_entry *
5927 mcx_next_eq_entry(struct mcx_softc *sc)
5928 {
5929 struct mcx_eq_entry *eqe;
5930 int next;
5931
5932 eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5933 next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5934 if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5935 sc->sc_eq_cons++;
5936 return (&eqe[next]);
5937 }
5938 return (NULL);
5939 }
5940
/*
 * Interrupt handler: drain the event queue, dispatching completion
 * events to the matching CQ and port-change events to the workqueue,
 * then re-arm the EQ.  Always claims the interrupt.
 */
int
mcx_intr(void *xsc)
{
	struct mcx_softc *sc = (struct mcx_softc *)xsc;
	struct mcx_eq_entry *eqe;
	int i, cq;

	while ((eqe = mcx_next_eq_entry(sc))) {
		switch (eqe->eq_event_type) {
		case MCX_EVENT_TYPE_COMPLETION:
			/* find the CQ named in the event and process it */
			cq = be32toh(eqe->eq_event_data[6]);
			for (i = 0; i < sc->sc_num_cq; i++) {
				if (sc->sc_cq[i].cq_n == cq) {
					mcx_process_cq(sc, &sc->sc_cq[i]);
					break;
				}
			}
			break;

		case MCX_EVENT_TYPE_LAST_WQE:
			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CQ_ERROR:
			/* printf("%s: cq error\n", DEVNAME(sc)); */
			break;

		case MCX_EVENT_TYPE_CMD_COMPLETION:
			/* wakeup probably */
			break;

		case MCX_EVENT_TYPE_PORT_CHANGE:
			/* link state is read from process context */
			workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
			break;

		default:
			/* printf("%s: something happened\n", DEVNAME(sc)); */
			break;
		}
	}
	mcx_arm_eq(sc);
	return (1);
}
5984
5985 static void
5986 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5987 int total)
5988 {
5989 struct mcx_slot *ms;
5990
5991 int i = allocated;
5992 while (i-- > 0) {
5993 ms = &slots[i];
5994 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
5995 if (ms->ms_m != NULL)
5996 m_freem(ms->ms_m);
5997 }
5998 kmem_free(slots, total * sizeof(*ms));
5999 }
6000
6001 static int
6002 mcx_init(struct ifnet *ifp)
6003 {
6004 struct mcx_softc *sc = ifp->if_softc;
6005 struct mcx_slot *ms;
6006 int i, start;
6007 struct mcx_flow_match match_crit;
6008
6009 if (ISSET(ifp->if_flags, IFF_RUNNING))
6010 mcx_stop(ifp, 0);
6011
6012 sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6013 KM_SLEEP);
6014
6015 for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6016 ms = &sc->sc_rx_slots[i];
6017 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6018 sc->sc_hardmtu, 0,
6019 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6020 &ms->ms_map) != 0) {
6021 printf("%s: failed to allocate rx dma maps\n",
6022 DEVNAME(sc));
6023 goto destroy_rx_slots;
6024 }
6025 }
6026
6027 sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6028 KM_SLEEP);
6029
6030 for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6031 ms = &sc->sc_tx_slots[i];
6032 if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6033 MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6034 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6035 &ms->ms_map) != 0) {
6036 printf("%s: failed to allocate tx dma maps\n",
6037 DEVNAME(sc));
6038 goto destroy_tx_slots;
6039 }
6040 }
6041
6042 if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6043 goto down;
6044
6045 /* send queue */
6046 if (mcx_create_tis(sc) != 0)
6047 goto down;
6048
6049 if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6050 goto down;
6051
6052 /* receive queue */
6053 if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6054 goto down;
6055
6056 if (mcx_create_tir(sc) != 0)
6057 goto down;
6058
6059 if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6060 goto down;
6061
6062 /* promisc flow group */
6063 start = 0;
6064 memset(&match_crit, 0, sizeof(match_crit));
6065 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6066 0, &match_crit) != 0)
6067 goto down;
6068 sc->sc_promisc_flow_enabled = 0;
6069 start++;
6070
6071 /* all multicast flow group */
6072 match_crit.mc_dest_mac[0] = 0x01;
6073 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6074 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6075 goto down;
6076 sc->sc_allmulti_flow_enabled = 0;
6077 start++;
6078
6079 /* mac address matching flow group */
6080 memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6081 if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6082 (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6083 MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6084 goto down;
6085
6086 /* flow table entries for unicast and broadcast */
6087 start = 0;
6088 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6089 LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6090 goto down;
6091 start++;
6092
6093 if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6094 etherbroadcastaddr) != 0)
6095 goto down;
6096 start++;
6097
6098 /* multicast entries go after that */
6099 sc->sc_mcast_flow_base = start;
6100
6101 /* re-add any existing multicast flows */
6102 for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6103 if (sc->sc_mcast_flows[i][0] != 0) {
6104 mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6105 sc->sc_mcast_flow_base + i,
6106 sc->sc_mcast_flows[i]);
6107 }
6108 }
6109
6110 if (mcx_set_flow_table_root(sc) != 0)
6111 goto down;
6112
6113 /* start the queues */
6114 if (mcx_ready_sq(sc) != 0)
6115 goto down;
6116
6117 if (mcx_ready_rq(sc) != 0)
6118 goto down;
6119
6120 mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6121 sc->sc_rx_prod = 0;
6122 mcx_rx_fill(sc);
6123
6124 mcx_calibrate_first(sc);
6125
6126 SET(ifp->if_flags, IFF_RUNNING);
6127
6128 sc->sc_tx_cons = 0;
6129 sc->sc_tx_prod = 0;
6130 CLR(ifp->if_flags, IFF_OACTIVE);
6131 if_schedule_deferred_start(ifp);
6132
6133 return 0;
6134 destroy_tx_slots:
6135 mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6136 sc->sc_rx_slots = NULL;
6137
6138 i = (1 << MCX_LOG_RQ_SIZE);
6139 destroy_rx_slots:
6140 mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6141 sc->sc_rx_slots = NULL;
6142 down:
6143 mcx_stop(ifp, 0);
6144 return EIO;
6145 }
6146
/*
 * mcx_stop: take the interface down.
 *
 * Removes flow table entries first so no further packets can be
 * steered to the queues, then tears down groups, table, TIR/RQ,
 * SQ/TIS and CQs, and finally frees the slot arrays.  All the
 * destroy calls are guarded so a partially-initialized state (e.g.
 * from a failed mcx_init) is handled.
 */
static void
mcx_stop(struct ifnet *ifp, int disable)
{
	struct mcx_softc *sc = ifp->if_softc;
	int group, i;

	CLR(ifp->if_flags, IFF_RUNNING);

	/*
	 * delete flow table entries first, so no packets can arrive
	 * after the barriers
	 */
	if (sc->sc_promisc_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
	if (sc->sc_allmulti_flow_enabled)
		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
	/* entries 0 and 1 are the unicast and broadcast addresses */
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
		if (sc->sc_mcast_flows[i][0] != 0) {
			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
			    sc->sc_mcast_flow_base + i);
		}
	}

	callout_halt(&sc->sc_calibrate, NULL);

	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
		if (sc->sc_flow_group_id[group] != -1)
			mcx_destroy_flow_group(sc,
			    sc->sc_flow_group_id[group]);
	}

	if (sc->sc_flow_table_id != -1)
		mcx_destroy_flow_table(sc);

	if (sc->sc_tirn != 0)
		mcx_destroy_tir(sc);
	if (sc->sc_rqn != 0)
		mcx_destroy_rq(sc);

	if (sc->sc_sqn != 0)
		mcx_destroy_sq(sc);
	if (sc->sc_tisn != 0)
		mcx_destroy_tis(sc);

	for (i = 0; i < sc->sc_num_cq; i++)
		mcx_destroy_cq(sc, i);
	sc->sc_num_cq = 0;

	if (sc->sc_tx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
		    (1 << MCX_LOG_SQ_SIZE));
		sc->sc_tx_slots = NULL;
	}
	if (sc->sc_rx_slots != NULL) {
		mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
		    (1 << MCX_LOG_RQ_SIZE));
		sc->sc_rx_slots = NULL;
	}
}
6208
/*
 * Interface ioctl handler.  SIOCADDMULTI/SIOCDELMULTI are handled here
 * so multicast addresses can be mapped to hardware flow table entries
 * (up to MCX_NUM_MCAST_FLOWS exact-match entries; beyond that, or for
 * address ranges, the interface falls back to IFF_ALLMULTI).  Anything
 * else goes to ether_ioctl().  ENETRESET from any path triggers a
 * filter reprogram via mcx_iff() when the interface is running.
 */
static int
mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ethercom *ec = &sc->sc_ec;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	struct ether_multi *enm;
	struct ether_multistep step;
	int s, i, flags, error = 0;

	s = splnet();
	switch (cmd) {

	case SIOCADDMULTI:
		if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			/* install the address in the first free flow slot */
			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (sc->sc_mcast_flows[i][0] == 0) {
					memcpy(sc->sc_mcast_flows[i], addrlo,
					    ETHER_ADDR_LEN);
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_set_flow_table_entry(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i,
						    sc->sc_mcast_flows[i]);
					}
					break;
				}
			}

			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
				/* table full: fall back to allmulti */
				if (i == MCX_NUM_MCAST_FLOWS) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					sc->sc_extra_mcast++;
					error = ENETRESET;
				}

				/* address ranges can't be exact-matched */
				if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
					SET(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0) {
				splx(s);
				return (error);
			}

			/* remove the matching flow table entry, if any */
			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
				if (memcmp(sc->sc_mcast_flows[i], addrlo,
				    ETHER_ADDR_LEN) == 0) {
					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
						mcx_delete_flow_table_entry(sc,
						    MCX_FLOW_GROUP_MAC,
						    sc->sc_mcast_flow_base + i);
					}
					sc->sc_mcast_flows[i][0] = 0;
					break;
				}
			}

			/* not in the table: it was counted as an overflow */
			if (i == MCX_NUM_MCAST_FLOWS)
				sc->sc_extra_mcast--;

			/*
			 * no more overflow entries: drop allmulti unless a
			 * multicast range still requires it
			 */
			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_extra_mcast == 0) {
				flags = 0;
				ETHER_LOCK(ec);
				ETHER_FIRST_MULTI(step, ec, enm);
				while (enm != NULL) {
					if (memcmp(enm->enm_addrlo,
					    enm->enm_addrhi, ETHER_ADDR_LEN)) {
						SET(flags, IFF_ALLMULTI);
						break;
					}
					ETHER_NEXT_MULTI(step, enm);
				}
				ETHER_UNLOCK(ec);
				if (!ISSET(flags, IFF_ALLMULTI)) {
					CLR(ifp->if_flags, IFF_ALLMULTI);
					error = ENETRESET;
				}
			}
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			mcx_iff(sc);
		error = 0;
	}
	splx(s);

	return (error);
}
6320
6321 #if 0
/*
 * (currently compiled out) Read a transceiver eeprom page for
 * SIOCGIFSFFPAGE: look up the module number via the PMLP register,
 * then read the page in MCX_MCIA_EEPROM_BYTES chunks through MCIA.
 */
static int
mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_mcia mcia;
	struct mcx_reg_pmlp pmlp;
	int offset, error;

	/* get module number */
	memset(&pmlp, 0, sizeof(pmlp));
	pmlp.rp_local_port = 1;
	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
	    sizeof(pmlp));
	if (error != 0) {
		printf("%s: unable to get eeprom module number\n",
		    DEVNAME(sc));
		return error;
	}

	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
		memset(&mcia, 0, sizeof(mcia));
		mcia.rm_l = 0;
		mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
		    MCX_PMLP_MODULE_NUM_MASK;
		mcia.rm_i2c_addr = sff->sff_addr / 2; /* apparently */
		mcia.rm_page_num = sff->sff_page;
		mcia.rm_dev_addr = htobe16(offset);
		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);

		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
		    &mcia, sizeof(mcia));
		if (error != 0) {
			printf("%s: unable to read eeprom at %x\n",
			    DEVNAME(sc), offset);
			return error;
		}

		memcpy(sff->sff_data + offset, mcia.rm_data,
		    MCX_MCIA_EEPROM_BYTES);
	}

	return 0;
}
6365 #endif
6366
6367 static int
6368 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6369 {
6370 switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6371 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6372 case 0:
6373 break;
6374
6375 case EFBIG:
6376 if (m_defrag(m, M_DONTWAIT) != NULL &&
6377 bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6378 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6379 break;
6380
6381 /* FALLTHROUGH */
6382 default:
6383 return (1);
6384 }
6385
6386 ms->ms_m = m;
6387 return (0);
6388 }
6389
/*
 * Transmit start routine: dequeue packets from if_snd and turn each one
 * into a send WQE.  The first MCX_SQ_INLINE_SIZE bytes of every packet
 * are copied inline into the WQE's eth segment; the rest is described
 * by DMA data segments, with one segment in the first slot and up to
 * MCX_SQ_SEGS_PER_SLOT in each additional slot.  After posting, the sq
 * doorbell is updated and the first 8 bytes of the last WQE are written
 * to the blue flame buffer to kick the hardware.
 */
static void
mcx_start(struct ifnet *ifp)
{
	struct mcx_softc *sc = ifp->if_softc;
	struct mcx_sq_entry *sq, *sqe;
	struct mcx_sq_entry_seg *sqs;
	struct mcx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used;
	uint64_t *bf;
	size_t bf_base;
	int i, seg, nseg;

	bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;

	idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
	free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;

	used = 0;
	bf = NULL;
	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);

	for (;;) {
		/* stop while the worst-case packet might not fit */
		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		sqe = sq + idx;
		ms = &sc->sc_tx_slots[idx];
		memset(sqe, 0, sizeof(*sqe));

		/* ctrl segment */
		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
		    ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
		/* always generate a completion event */
		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);

		/* eth segment */
		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
		m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
		m_adj(m, MCX_SQ_INLINE_SIZE);

		if (mcx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* remember the last posted WQE for the blue flame write */
		bf = (uint64_t *)sqe;

		if (ifp->if_bpf != NULL)
			bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
			    MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* ds count = ctrl + eth + inline (3) plus data segments */
		sqe->sqe_ds_sq_num =
		    htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
		    (map->dm_nsegs + 3));

		/* data segment - first wqe has one segment */
		sqs = sqe->sqe_segs;
		seg = 0;
		nseg = 1;
		for (i = 0; i < map->dm_nsegs; i++) {
			if (seg == nseg) {
				/* next slot */
				idx++;
				if (idx == (1 << MCX_LOG_SQ_SIZE))
					idx = 0;
				sc->sc_tx_prod++;
				used++;

				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
				seg = 0;
				nseg = MCX_SQ_SEGS_PER_SLOT;
			}
			sqs[seg].sqs_byte_count =
			    htobe32(map->dm_segs[i].ds_len);
			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
			seg++;
		}

		idx++;
		if (idx == (1 << MCX_LOG_SQ_SIZE))
			idx = 0;
		sc->sc_tx_prod++;
		used++;
	}

	if (used) {
		/* doorbell record first, then the blue flame kick */
		*sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);

		membar_sync();

		/*
		 * write the first 64 bits of the last sqe we produced
		 * to the blue flame buffer
		 */
		bus_space_write_8(sc->sc_memt, sc->sc_memh,
		    bf_base + sc->sc_bf_offset, *bf);
		/* next write goes to the other buffer */
		sc->sc_bf_offset ^= sc->sc_bf_size;

		membar_sync();
	}
}
6506
/* Interface watchdog: intentionally a no-op for this driver. */
static void
mcx_watchdog(struct ifnet *ifp)
{
}
6511
6512 static void
6513 mcx_media_add_types(struct mcx_softc *sc)
6514 {
6515 struct mcx_reg_ptys ptys;
6516 int i;
6517 uint32_t proto_cap;
6518
6519 memset(&ptys, 0, sizeof(ptys));
6520 ptys.rp_local_port = 1;
6521 ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6522 if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6523 sizeof(ptys)) != 0) {
6524 printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6525 return;
6526 }
6527
6528 proto_cap = be32toh(ptys.rp_eth_proto_cap);
6529 for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6530 if ((proto_cap & (1U << i)) && (mcx_eth_cap_map[i] != 0))
6531 ifmedia_add(&sc->sc_media, IFM_ETHER |
6532 mcx_eth_cap_map[i], 0, NULL);
6533 }
6534 }
6535
/*
 * ifmedia status callback: read the operational protocol mask from the
 * PTYS register and report the link as active with the corresponding
 * media type if any protocol is operational.
 */
static void
mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_ptys ptys;
	int i;
	uint32_t /* proto_cap, */ proto_oper;
	uint64_t media_oper;

	memset(&ptys, 0, sizeof(ptys));
	ptys.rp_local_port = 1;
	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;

	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
	    sizeof(ptys)) != 0) {
		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
		return;
	}

	/* proto_cap = be32toh(ptys.rp_eth_proto_cap); */
	proto_oper = be32toh(ptys.rp_eth_proto_oper);

	/* map the highest operational bit to a media type (last match wins) */
	media_oper = 0;
	for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
		if (proto_oper & (1U << i)) {
			media_oper = mcx_eth_cap_map[i];
		}
	}

	ifmr->ifm_status = IFM_AVALID;
	/* not sure if this is the right thing to check, maybe paos? */
	if (proto_oper != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
		/* txpause, rxpause, duplex? */
	}
}
6573
/*
 * ifmedia change callback: compute the admin protocol mask (all
 * capabilities for IFM_AUTO, or the single bit matching the selected
 * subtype), then cycle the port — set it administratively down, write
 * the new mask via PTYS, and bring it back up to restart negotiation.
 * Returns 0 on success or EIO/EINVAL.
 */
static int
mcx_media_change(struct ifnet *ifp)
{
	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
	struct mcx_reg_ptys ptys;
	struct mcx_reg_paos paos;
	uint32_t media;
	int i, error;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	error = 0;

	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
		/* read ptys to get supported media */
		memset(&ptys, 0, sizeof(ptys));
		ptys.rp_local_port = 1;
		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
		    &ptys, sizeof(ptys)) != 0) {
			printf("%s: unable to read port type/speed\n",
			    DEVNAME(sc));
			return EIO;
		}

		media = be32toh(ptys.rp_eth_proto_cap);
	} else {
		/* map media type */
		media = 0;
		for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
			if (mcx_eth_cap_map[i] ==
			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
				media = (1 << i);
				break;
			}
		}
	}

	/* disable the port */
	memset(&paos, 0, sizeof(paos));
	paos.rp_local_port = 1;
	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
	    sizeof(paos)) != 0) {
		printf("%s: unable to set port state to down\n", DEVNAME(sc));
		return EIO;
	}

	memset(&ptys, 0, sizeof(ptys));
	ptys.rp_local_port = 1;
	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
	ptys.rp_eth_proto_admin = htobe32(media);
	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
	    sizeof(ptys)) != 0) {
		printf("%s: unable to set port media type/speed\n",
		    DEVNAME(sc));
		error = EIO;
	}

	/* re-enable the port to start negotiation */
	memset(&paos, 0, sizeof(paos));
	paos.rp_local_port = 1;
	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
	    sizeof(paos)) != 0) {
		printf("%s: unable to set port state to up\n", DEVNAME(sc));
		error = EIO;
	}

	return error;
}
6648
6649 static void
6650 mcx_port_change(struct work *wk, void *xsc)
6651 {
6652 struct mcx_softc *sc = xsc;
6653 struct ifnet *ifp = &sc->sc_ec.ec_if;
6654 struct mcx_reg_paos paos;
6655 int link_state = LINK_STATE_DOWN;
6656 struct ifmediareq ifmr;
6657
6658 memset(&paos, 0, sizeof(paos));
6659 paos.rp_local_port = 1;
6660 if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6661 sizeof(paos)) == 0) {
6662 if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6663 link_state = LINK_STATE_UP;
6664 mcx_media_status(ifp, &ifmr);
6665 ifp->if_baudrate = ifmedia_baudrate(ifmr.ifm_active);
6666 }
6667
6668 if (link_state != ifp->if_link_state) {
6669 if_link_state_change(ifp, link_state);
6670 }
6671 }
6672
6673
6674 static inline uint32_t
6675 mcx_rd(struct mcx_softc *sc, bus_size_t r)
6676 {
6677 uint32_t word;
6678
6679 word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6680
6681 return (be32toh(word));
6682 }
6683
6684 static inline void
6685 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6686 {
6687 bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6688 }
6689
/*
 * Issue a bus_space barrier with flags 'f' covering 'l' bytes starting
 * at register offset 'r', ordering accesses to the device BAR.
 */
static inline void
mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
{
	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
}
6695
6696 static uint64_t
6697 mcx_timer(struct mcx_softc *sc)
6698 {
6699 uint32_t hi, lo, ni;
6700
6701 hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6702 for (;;) {
6703 lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6704 mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6705 ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6706
6707 if (ni == hi)
6708 break;
6709
6710 hi = ni;
6711 }
6712
6713 return (((uint64_t)hi << 32) | (uint64_t)lo);
6714 }
6715
/*
 * Allocate a physically contiguous DMA buffer of 'size' bytes aligned
 * to 'align', map it into kernel virtual memory, load it into a DMA
 * map, and zero it.  Returns 0 on success, 1 on failure (note: not an
 * errno).  Released with mcx_dmamem_free().
 */
static int
mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
    bus_size_t size, u_int align)
{
	mxm->mxm_size = size;

	/* single segment: the buffer must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
	    mxm->mxm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &mxm->mxm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	mcx_dmamem_zero(mxm);

	return (0);
/* unwind in reverse order of the steps above */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
	return (1);
}
6749
/*
 * Zero the entire mapped length of a DMA buffer set up by
 * mcx_dmamem_alloc().
 */
static void
mcx_dmamem_zero(struct mcx_dmamem *mxm)
{
	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
}
6755
/*
 * Release everything mcx_dmamem_alloc() set up, in reverse order:
 * unload the DMA map, unmap the KVA, free the memory, then destroy
 * the map.
 */
static void
mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
{
	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
}
6764
/*
 * Allocate 'pages' pages of DMA-able memory for the device's
 * firmware-owned page pool and load them into a DMA map.  The memory
 * need not be contiguous (up to one segment per page).  Returns 0 on
 * success, -1 on failure.  Released with mcx_hwmem_free().
 */
static int
mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
{
	bus_dma_segment_t *segs;
	bus_size_t len = pages * MCX_PAGE_SIZE;
	size_t seglen;

	/* worst case: one segment per page */
	segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
	seglen = sizeof(*segs) * pages;

	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
		goto free_segs;

	if (mhm->mhm_seg_count < pages) {
		size_t nseglen;

		/*
		 * Fewer segments than pages were needed; shrink the
		 * segment array to the actual count and drop the
		 * oversized one.
		 */
		mhm->mhm_segs = kmem_alloc(
		    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);

		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;

		memcpy(mhm->mhm_segs, segs, nseglen);

		kmem_free(segs, seglen);

		/* from here on, segs/seglen track the kept array */
		segs = mhm->mhm_segs;
		seglen = nseglen;
	} else
		mhm->mhm_segs = segs;

	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
	    &mhm->mhm_map) != 0)
		goto free_dmamem;

	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);

	mhm->mhm_npages = pages;

	return (0);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
free_dmamem:
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
free_segs:
	/* segs/seglen always name the currently-live array here */
	kmem_free(segs, seglen);
	mhm->mhm_segs = NULL;

	return (-1);
}
6822
/*
 * Release the page pool set up by mcx_hwmem_alloc().  Safe to call on
 * a zeroed/never-allocated mhm (mhm_npages == 0 is a no-op).
 */
static void
mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
{
	if (mhm->mhm_npages == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);

	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
	kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);

	/* mark as freed so a second call is harmless */
	mhm->mhm_npages = 0;
}
6839